1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 74b6c6d..eac0e77 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index bfcb1a6..2dae09b 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
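
The pax_sanitize_slab= entry above accepts five spellings that collapse into three policies. A minimal sketch of that mapping as a plain C parser (illustrative only; the kernel's actual handler lives in the slab code added later in this patch):

    #include <string.h>

    enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };

    static int parse_sanitize_slab(const char *arg, enum sanitize_mode *mode)
    {
            if (!strcmp(arg, "0") || !strcmp(arg, "off"))
                    *mode = SANITIZE_OFF;
            else if (!strcmp(arg, "1") || !strcmp(arg, "fast"))
                    *mode = SANITIZE_FAST;  /* default: skip whitelisted slabs */
            else if (!strcmp(arg, "full"))
                    *mode = SANITIZE_FULL;  /* sanitize even whitelisted slabs */
            else
                    return -1;
            return 0;
    }
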
375diff --git a/Makefile b/Makefile
376index f499cd2..37a187f 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,72 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
452+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
453+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
454+ifeq ($(KBUILD_EXTMOD),)
455+gcc-plugins:
456+ $(Q)$(MAKE) $(build)=tools/gcc
457+else
458+gcc-plugins: ;
459+endif
460+else
461+gcc-plugins:
462+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
463+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
464+else
465+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
466+endif
467+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
468+endif
469+endif
470+
471 ifdef CONFIG_READABLE_ASM
472 # Disable optimizations that make assembler listings hard to read.
473 # reorder blocks reorders the control in the function
474@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
475 else
476 KBUILD_CFLAGS += -g
477 endif
478-KBUILD_AFLAGS += -Wa,-gdwarf-2
479+KBUILD_AFLAGS += -Wa,--gdwarf-2
480 endif
481 ifdef CONFIG_DEBUG_INFO_DWARF4
482 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
483@@ -884,7 +952,7 @@ export mod_sign_cmd
484
485
486 ifeq ($(KBUILD_EXTMOD),)
487-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
488+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
489
490 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
491 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
492@@ -934,6 +1002,8 @@ endif
493
494 # The actual objects are generated when descending,
495 # make sure no implicit rule kicks in
496+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
497+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
498 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499
500 # Handle descending into subdirectories listed in $(vmlinux-dirs)
501@@ -943,7 +1013,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
502 # Error messages still appears in the original language
503
504 PHONY += $(vmlinux-dirs)
505-$(vmlinux-dirs): prepare scripts
506+$(vmlinux-dirs): gcc-plugins prepare scripts
507 $(Q)$(MAKE) $(build)=$@
508
509 define filechk_kernel.release
510@@ -986,10 +1056,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
511
512 archprepare: archheaders archscripts prepare1 scripts_basic
513
514+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
515+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
516 prepare0: archprepare FORCE
517 $(Q)$(MAKE) $(build)=.
518
519 # All the preparing..
520+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
521 prepare: prepare0
522
523 # Generate some files
524@@ -1103,6 +1176,8 @@ all: modules
525 # using awk while concatenating to the final file.
526
527 PHONY += modules
528+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
529+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
530 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
531 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
532 @$(kecho) ' Building modules, stage 2.';
533@@ -1118,7 +1193,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
534
535 # Target to prepare building external modules
536 PHONY += modules_prepare
537-modules_prepare: prepare scripts
538+modules_prepare: gcc-plugins prepare scripts
539
540 # Target to install modules
541 PHONY += modules_install
542@@ -1184,7 +1259,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
543 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
544 signing_key.priv signing_key.x509 x509.genkey \
545 extra_certificates signing_key.x509.keyid \
546- signing_key.x509.signer vmlinux-gdb.py
547+ signing_key.x509.signer vmlinux-gdb.py \
548+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
549+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
550+ tools/gcc/randomize_layout_seed.h
551
552 # clean - Delete most, but leave enough to build external modules
553 #
554@@ -1223,7 +1301,7 @@ distclean: mrproper
555 @find $(srctree) $(RCS_FIND_IGNORE) \
556 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
557 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
558- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
559+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
560 -type f -print | xargs rm -f
561
562
563@@ -1389,6 +1467,8 @@ PHONY += $(module-dirs) modules
564 $(module-dirs): crmodverdir $(objtree)/Module.symvers
565 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
566
567+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
568+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
569 modules: $(module-dirs)
570 @$(kecho) ' Building modules, stage 2.';
571 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
572@@ -1529,17 +1609,21 @@ else
573 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
574 endif
575
576-%.s: %.c prepare scripts FORCE
577+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
578+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
579+%.s: %.c gcc-plugins prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581 %.i: %.c prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583-%.o: %.c prepare scripts FORCE
584+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
585+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
586+%.o: %.c gcc-plugins prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588 %.lst: %.c prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.s: %.S prepare scripts FORCE
591+%.s: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593-%.o: %.S prepare scripts FORCE
594+%.o: %.S gcc-plugins prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596 %.symtypes: %.c prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598@@ -1551,11 +1635,15 @@ endif
599 $(build)=$(build-dir)
600 # Make sure the latest headers are built for Documentation
601 Documentation/: headers_install
602-%/: prepare scripts FORCE
603+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
604+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
605+%/: gcc-plugins prepare scripts FORCE
606 $(cmd_crmodverdir)
607 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
608 $(build)=$(build-dir)
609-%.ko: prepare scripts FORCE
610+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
611+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
612+%.ko: gcc-plugins prepare scripts FORCE
613 $(cmd_crmodverdir)
614 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
615 $(build)=$(build-dir) $(@:.ko=.o)
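
The gcc-plugins machinery added to the Makefile above builds PaX's instrumentation passes under tools/gcc and loads them into the compiler via -fplugin=. Note how PLUGINCC switches from HOSTCC to HOSTCXX for gcc >= 4.8: from that version on GCC itself is a C++ program, so plugins must be compiled as C++. For orientation, the smallest loadable GCC plugin looks roughly like this (a sketch against the documented plugin API, not one of the patch's plugins):

    /* build (gcc >= 4.8):
     *   g++ -shared -fPIC -I"$(gcc -print-file-name=plugin)/include" \
     *       min_plugin.c -o min_plugin.so
     * load: gcc -fplugin=./min_plugin.so ... */
    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;   /* mandatory, or GCC refuses to load */

    int plugin_init(struct plugin_name_args *info,
                    struct plugin_gcc_version *version)
    {
            /* refuse to run inside a GCC we were not built against */
            if (!plugin_default_version_check(version, &gcc_version))
                    return 1;
            /* a real plugin (constify, stackleak, ...) registers passes here */
            return 0;
    }
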
616diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
617index 8f8eafb..3405f46 100644
618--- a/arch/alpha/include/asm/atomic.h
619+++ b/arch/alpha/include/asm/atomic.h
620@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
621 #define atomic_dec(v) atomic_sub(1,(v))
622 #define atomic64_dec(v) atomic64_sub(1,(v))
623
624+#define atomic64_read_unchecked(v) atomic64_read(v)
625+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
626+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
627+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
628+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
629+#define atomic64_inc_unchecked(v) atomic64_inc(v)
630+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
631+#define atomic64_dec_unchecked(v) atomic64_dec(v)
632+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
633+
634 #endif /* _ALPHA_ATOMIC_H */
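
On alpha the *_unchecked operations above are plain aliases because no PAX_REFCOUNT instrumentation exists for this architecture; the two-tier API only changes behaviour on architectures such as x86 and ARM (see the ARM section further down), where the checked variants trap on overflow. A portable sketch of the intended semantics (illustrative, not the kernel implementation, and not atomic):

    /* checked: for reference counts, where overflow is an exploitable bug */
    static unsigned long add_return_checked(unsigned long *counter, unsigned long i)
    {
            unsigned long r;
            if (__builtin_add_overflow(*counter, i, &r))
                    __builtin_trap();   /* PAX_REFCOUNT: kill the offender */
            return *counter = r;
    }

    /* unchecked: for statistics and sequence numbers that may wrap freely */
    static unsigned long add_return_unchecked(unsigned long *counter, unsigned long i)
    {
            return *counter += i;
    }
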
635diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
636index ad368a9..fbe0f25 100644
637--- a/arch/alpha/include/asm/cache.h
638+++ b/arch/alpha/include/asm/cache.h
639@@ -4,19 +4,19 @@
640 #ifndef __ARCH_ALPHA_CACHE_H
641 #define __ARCH_ALPHA_CACHE_H
642
643+#include <linux/const.h>
644
645 /* Bytes per L1 (data) cache line. */
646 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
647-# define L1_CACHE_BYTES 64
648 # define L1_CACHE_SHIFT 6
649 #else
650 /* Both EV4 and EV5 are write-through, read-allocate,
651 direct-mapped, physical.
652 */
653-# define L1_CACHE_BYTES 32
654 # define L1_CACHE_SHIFT 5
655 #endif
656
657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
658 #define SMP_CACHE_BYTES L1_CACHE_BYTES
659
660 #endif
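
The rewrite above derives L1_CACHE_BYTES from L1_CACHE_SHIFT through _AC(), so the constant carries a UL suffix in C yet stays a bare number in assembly. _AC() comes from the linux/const.h machinery, which is approximately:

    /* paraphrased from include/uapi/linux/const.h */
    #ifdef __ASSEMBLY__
    #define _AC(X,Y)        X               /* .S files: a suffix would not parse */
    #else
    #define __AC(X,Y)       (X##Y)          /* paste the type suffix on in C */
    #define _AC(X,Y)        __AC(X,Y)
    #endif

    /* so in C:         L1_CACHE_BYTES == (1UL << 6) == 64UL
     * and in assembly: L1_CACHE_BYTES == (1 << 6)   == 64   */
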
661diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
662index 968d999..d36b2df 100644
663--- a/arch/alpha/include/asm/elf.h
664+++ b/arch/alpha/include/asm/elf.h
665@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
666
667 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
668
669+#ifdef CONFIG_PAX_ASLR
670+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
671+
672+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
673+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
674+#endif
675+
676 /* $0 is set by ld.so to a pointer to a function which might be
677 registered using atexit. This provides a mean for the dynamic
678 linker to call DT_FINI functions for shared libraries that have
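
The PAX_DELTA_* values above are bit counts: how many random page-granular bits ASLR mixes into the mmap and stack bases. A worked illustration of the arithmetic (alpha uses 8 KB pages, so PAGE_SHIFT is 13; the helper name here is invented for the example):

    #define ALPHA_PAGE_SHIFT 13     /* 8 KB pages */

    /* base + (delta_len random bits, in page units) */
    static unsigned long randomize_base(unsigned long base,
                                        unsigned int delta_len,
                                        unsigned long rnd)
    {
            return base + ((rnd & ((1UL << delta_len) - 1)) << ALPHA_PAGE_SHIFT);
    }

    /* 64-bit task:  28 bits of mmap entropy -> bases spread over 2^41 B (2 TB)
     * 32-bit task:  14 bits                 -> spread over 2^27 B (128 MB)   */
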
679diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
680index aab14a0..b4fa3e7 100644
681--- a/arch/alpha/include/asm/pgalloc.h
682+++ b/arch/alpha/include/asm/pgalloc.h
683@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
684 pgd_set(pgd, pmd);
685 }
686
687+static inline void
688+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
689+{
690+ pgd_populate(mm, pgd, pmd);
691+}
692+
693 extern pgd_t *pgd_alloc(struct mm_struct *mm);
694
695 static inline void
696diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
697index a9a1195..e9b8417 100644
698--- a/arch/alpha/include/asm/pgtable.h
699+++ b/arch/alpha/include/asm/pgtable.h
700@@ -101,6 +101,17 @@ struct vm_area_struct;
701 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
702 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
703 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
704+
705+#ifdef CONFIG_PAX_PAGEEXEC
706+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
707+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
708+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
709+#else
710+# define PAGE_SHARED_NOEXEC PAGE_SHARED
711+# define PAGE_COPY_NOEXEC PAGE_COPY
712+# define PAGE_READONLY_NOEXEC PAGE_READONLY
713+#endif
714+
715 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
716
717 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
718diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
719index 2fd00b7..cfd5069 100644
720--- a/arch/alpha/kernel/module.c
721+++ b/arch/alpha/kernel/module.c
722@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
723
724 /* The small sections were sorted to the end of the segment.
725 The following should definitely cover them. */
726- gp = (u64)me->module_core + me->core_size - 0x8000;
727+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
728 got = sechdrs[me->arch.gotsecindex].sh_addr;
729
730 for (i = 0; i < n; i++) {
731diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
732index e51f578..16c64a3 100644
733--- a/arch/alpha/kernel/osf_sys.c
734+++ b/arch/alpha/kernel/osf_sys.c
735@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
736 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
737
738 static unsigned long
739-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
740- unsigned long limit)
741+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
742+ unsigned long limit, unsigned long flags)
743 {
744 struct vm_unmapped_area_info info;
745+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
746
747 info.flags = 0;
748 info.length = len;
749@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
750 info.high_limit = limit;
751 info.align_mask = 0;
752 info.align_offset = 0;
753+ info.threadstack_offset = offset;
754 return vm_unmapped_area(&info);
755 }
756
757@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
758 merely specific addresses, but regions of memory -- perhaps
759 this feature should be incorporated into all ports? */
760
761+#ifdef CONFIG_PAX_RANDMMAP
762+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
763+#endif
764+
765 if (addr) {
766- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
767+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
768 if (addr != (unsigned long) -ENOMEM)
769 return addr;
770 }
771
772 /* Next, try allocating at TASK_UNMAPPED_BASE. */
773- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
774- len, limit);
775+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
776+
777 if (addr != (unsigned long) -ENOMEM)
778 return addr;
779
780 /* Finally, try allocating in low memory. */
781- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
782+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
783
784 return addr;
785 }
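
Each call site above now threads filp and flags down so a per-mapping random gap can be fed into vm_unmapped_area() via the new info.threadstack_offset field. gr_rand_threadstack_offset() itself is defined by the grsecurity core added later in the patch; conceptually it behaves something like the following sketch (a hypothetical simplification, not the real function):

    /* hypothetical: return a random, page-aligned gap for anonymous
     * thread-stack style mappings, 0 for everything else */
    static unsigned long rand_threadstack_offset_sketch(const void *filp,
                                                        unsigned long flags,
                                                        unsigned long rnd)
    {
            if (filp != NULL)           /* file mappings: no extra gap */
                    return 0;
            if (!(flags & 0x20000))     /* assumed MAP_STACK-style hint */
                    return 0;
            return (rnd & 0xFFUL) << 13;        /* up to 255 pages of slack */
    }
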
786diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
787index 9d0ac09..479a962 100644
788--- a/arch/alpha/mm/fault.c
789+++ b/arch/alpha/mm/fault.c
790@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
791 __reload_thread(pcb);
792 }
793
794+#ifdef CONFIG_PAX_PAGEEXEC
795+/*
796+ * PaX: decide what to do with offenders (regs->pc = fault address)
797+ *
798+ * returns 1 when task should be killed
799+ * 2 when patched PLT trampoline was detected
800+ * 3 when unpatched PLT trampoline was detected
801+ */
802+static int pax_handle_fetch_fault(struct pt_regs *regs)
803+{
804+
805+#ifdef CONFIG_PAX_EMUPLT
806+ int err;
807+
808+ do { /* PaX: patched PLT emulation #1 */
809+ unsigned int ldah, ldq, jmp;
810+
811+ err = get_user(ldah, (unsigned int *)regs->pc);
812+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
813+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
814+
815+ if (err)
816+ break;
817+
818+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
819+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
820+ jmp == 0x6BFB0000U)
821+ {
822+ unsigned long r27, addr;
823+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
824+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
825+
826+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
827+ err = get_user(r27, (unsigned long *)addr);
828+ if (err)
829+ break;
830+
831+ regs->r27 = r27;
832+ regs->pc = r27;
833+ return 2;
834+ }
835+ } while (0);
836+
837+ do { /* PaX: patched PLT emulation #2 */
838+ unsigned int ldah, lda, br;
839+
840+ err = get_user(ldah, (unsigned int *)regs->pc);
841+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
842+ err |= get_user(br, (unsigned int *)(regs->pc+8));
843+
844+ if (err)
845+ break;
846+
847+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
848+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
849+ (br & 0xFFE00000U) == 0xC3E00000U)
850+ {
851+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
852+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
853+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
854+
855+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
856+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
857+ return 2;
858+ }
859+ } while (0);
860+
861+ do { /* PaX: unpatched PLT emulation */
862+ unsigned int br;
863+
864+ err = get_user(br, (unsigned int *)regs->pc);
865+
866+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
867+ unsigned int br2, ldq, nop, jmp;
868+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
869+
870+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
871+ err = get_user(br2, (unsigned int *)addr);
872+ err |= get_user(ldq, (unsigned int *)(addr+4));
873+ err |= get_user(nop, (unsigned int *)(addr+8));
874+ err |= get_user(jmp, (unsigned int *)(addr+12));
875+ err |= get_user(resolver, (unsigned long *)(addr+16));
876+
877+ if (err)
878+ break;
879+
880+ if (br2 == 0xC3600000U &&
881+ ldq == 0xA77B000CU &&
882+ nop == 0x47FF041FU &&
883+ jmp == 0x6B7B0000U)
884+ {
885+ regs->r28 = regs->pc+4;
886+ regs->r27 = addr+16;
887+ regs->pc = resolver;
888+ return 3;
889+ }
890+ }
891+ } while (0);
892+#endif
893+
894+ return 1;
895+}
896+
897+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
898+{
899+ unsigned long i;
900+
901+ printk(KERN_ERR "PAX: bytes at PC: ");
902+ for (i = 0; i < 5; i++) {
903+ unsigned int c;
904+ if (get_user(c, (unsigned int *)pc+i))
905+ printk(KERN_CONT "???????? ");
906+ else
907+ printk(KERN_CONT "%08x ", c);
908+ }
909+ printk("\n");
910+}
911+#endif
912
913 /*
914 * This routine handles page faults. It determines the address,
915@@ -133,8 +251,29 @@ retry:
916 good_area:
917 si_code = SEGV_ACCERR;
918 if (cause < 0) {
919- if (!(vma->vm_flags & VM_EXEC))
920+ if (!(vma->vm_flags & VM_EXEC)) {
921+
922+#ifdef CONFIG_PAX_PAGEEXEC
923+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
924+ goto bad_area;
925+
926+ up_read(&mm->mmap_sem);
927+ switch (pax_handle_fetch_fault(regs)) {
928+
929+#ifdef CONFIG_PAX_EMUPLT
930+ case 2:
931+ case 3:
932+ return;
933+#endif
934+
935+ }
936+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
937+ do_group_exit(SIGKILL);
938+#else
939 goto bad_area;
940+#endif
941+
942+ }
943 } else if (!cause) {
944 /* Allow reads even for write-only mappings */
945 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
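
The PLT emulation above repeatedly applies ((x ^ 0x8000UL) + 0x8000UL) to values whose upper bits were pre-filled with ones (addrl = ldq | 0xFFFFFFFFFFFF0000UL). That pair of operations is a branchless sign extension of the 16-bit displacement field of an lda/ldq instruction, as this standalone check demonstrates:

    #include <stdio.h>

    /* pre-fill the upper bits with ones, exactly as the fault handler does */
    static unsigned long sext16(unsigned int insn)
    {
            unsigned long x = insn | 0xFFFFFFFFFFFF0000UL;
            return (x ^ 0x8000UL) + 0x8000UL;
    }

    int main(void)
    {
            printf("%lx\n", sext16(0xA77B7FFF));    /* ->             7fff */
            printf("%lx\n", sext16(0xA77B8000));    /* -> ffffffffffff8000 */
            return 0;
    }
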
946diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
947index cf4c0c9..a87ecf5 100644
948--- a/arch/arm/Kconfig
949+++ b/arch/arm/Kconfig
950@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP
951
952 config UACCESS_WITH_MEMCPY
953 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
954- depends on MMU
955+ depends on MMU && !PAX_MEMORY_UDEREF
956 default y if CPU_FEROCEON
957 help
958 Implement faster copy_to_user and clear_user methods for CPU
959@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
960 config KEXEC
961 bool "Kexec system call (EXPERIMENTAL)"
962 depends on (!SMP || PM_SLEEP_SMP)
963+ depends on !GRKERNSEC_KMEM
964 help
965 kexec is a system call that implements the ability to shutdown your
966 current kernel, and to start another kernel. It is like a reboot
967diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
968index e22c119..abe7041 100644
969--- a/arch/arm/include/asm/atomic.h
970+++ b/arch/arm/include/asm/atomic.h
971@@ -18,17 +18,41 @@
972 #include <asm/barrier.h>
973 #include <asm/cmpxchg.h>
974
975+#ifdef CONFIG_GENERIC_ATOMIC64
976+#include <asm-generic/atomic64.h>
977+#endif
978+
979 #define ATOMIC_INIT(i) { (i) }
980
981 #ifdef __KERNEL__
982
983+#ifdef CONFIG_THUMB2_KERNEL
984+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
985+#else
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
987+#endif
988+
989+#define _ASM_EXTABLE(from, to) \
990+" .pushsection __ex_table,\"a\"\n"\
991+" .align 3\n" \
992+" .long " #from ", " #to"\n" \
993+" .popsection"
994+
995 /*
996 * On ARM, ordinary assignment (str instruction) doesn't clear the local
997 * strex/ldrex monitor on some implementations. The reason we can use it for
998 * atomic_set() is the clrex or dummy strex done on every exception return.
999 */
1000 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1001+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1002+{
1003+ return ACCESS_ONCE(v->counter);
1004+}
1005 #define atomic_set(v,i) (((v)->counter) = (i))
1006+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1007+{
1008+ v->counter = i;
1009+}
1010
1011 #if __LINUX_ARM_ARCH__ >= 6
1012
1013@@ -38,26 +62,50 @@
1014 * to ensure that the update happens.
1015 */
1016
1017-#define ATOMIC_OP(op, c_op, asm_op) \
1018-static inline void atomic_##op(int i, atomic_t *v) \
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+#define __OVERFLOW_POST \
1021+ " bvc 3f\n" \
1022+ "2: " REFCOUNT_TRAP_INSN "\n"\
1023+ "3:\n"
1024+#define __OVERFLOW_POST_RETURN \
1025+ " bvc 3f\n" \
1026+" mov %0, %1\n" \
1027+ "2: " REFCOUNT_TRAP_INSN "\n"\
1028+ "3:\n"
1029+#define __OVERFLOW_EXTABLE \
1030+ "4:\n" \
1031+ _ASM_EXTABLE(2b, 4b)
1032+#else
1033+#define __OVERFLOW_POST
1034+#define __OVERFLOW_POST_RETURN
1035+#define __OVERFLOW_EXTABLE
1036+#endif
1037+
1038+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1039+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1040 { \
1041 unsigned long tmp; \
1042 int result; \
1043 \
1044 prefetchw(&v->counter); \
1045- __asm__ __volatile__("@ atomic_" #op "\n" \
1046+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1047 "1: ldrex %0, [%3]\n" \
1048 " " #asm_op " %0, %0, %4\n" \
1049+ post_op \
1050 " strex %1, %0, [%3]\n" \
1051 " teq %1, #0\n" \
1052-" bne 1b" \
1053+" bne 1b\n" \
1054+ extable \
1055 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1056 : "r" (&v->counter), "Ir" (i) \
1057 : "cc"); \
1058 } \
1059
1060-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1061-static inline int atomic_##op##_return(int i, atomic_t *v) \
1062+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1063+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1064+
1065+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1066+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1067 { \
1068 unsigned long tmp; \
1069 int result; \
1070@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1071 smp_mb(); \
1072 prefetchw(&v->counter); \
1073 \
1074- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1075+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1076 "1: ldrex %0, [%3]\n" \
1077 " " #asm_op " %0, %0, %4\n" \
1078+ post_op \
1079 " strex %1, %0, [%3]\n" \
1080 " teq %1, #0\n" \
1081-" bne 1b" \
1082+" bne 1b\n" \
1083+ extable \
1084 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1085 : "r" (&v->counter), "Ir" (i) \
1086 : "cc"); \
1087@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1088 return result; \
1089 }
1090
1091+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1092+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1093+
1094 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1095 {
1096 int oldval;
1097@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 __asm__ __volatile__ ("@ atomic_add_unless\n"
1099 "1: ldrex %0, [%4]\n"
1100 " teq %0, %5\n"
1101-" beq 2f\n"
1102-" add %1, %0, %6\n"
1103+" beq 4f\n"
1104+" adds %1, %0, %6\n"
1105+
1106+#ifdef CONFIG_PAX_REFCOUNT
1107+" bvc 3f\n"
1108+"2: " REFCOUNT_TRAP_INSN "\n"
1109+"3:\n"
1110+#endif
1111+
1112 " strex %2, %1, [%4]\n"
1113 " teq %2, #0\n"
1114 " bne 1b\n"
1115-"2:"
1116+"4:"
1117+
1118+#ifdef CONFIG_PAX_REFCOUNT
1119+ _ASM_EXTABLE(2b, 4b)
1120+#endif
1121+
1122 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1123 : "r" (&v->counter), "r" (u), "r" (a)
1124 : "cc");
1125@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1126 return oldval;
1127 }
1128
1129+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1130+{
1131+ unsigned long oldval, res;
1132+
1133+ smp_mb();
1134+
1135+ do {
1136+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1137+ "ldrex %1, [%3]\n"
1138+ "mov %0, #0\n"
1139+ "teq %1, %4\n"
1140+ "strexeq %0, %5, [%3]\n"
1141+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1142+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1143+ : "cc");
1144+ } while (res);
1145+
1146+ smp_mb();
1147+
1148+ return oldval;
1149+}
1150+
1151 #else /* ARM_ARCH_6 */
1152
1153 #ifdef CONFIG_SMP
1154 #error SMP not supported on pre-ARMv6 CPUs
1155 #endif
1156
1157-#define ATOMIC_OP(op, c_op, asm_op) \
1158-static inline void atomic_##op(int i, atomic_t *v) \
1159+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1160+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1161 { \
1162 unsigned long flags; \
1163 \
1164@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1165 raw_local_irq_restore(flags); \
1166 } \
1167
1168-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1169-static inline int atomic_##op##_return(int i, atomic_t *v) \
1170+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1171+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1172+
1173+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1174+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1175 { \
1176 unsigned long flags; \
1177 int val; \
1178@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1179 return val; \
1180 }
1181
1182+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1183+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1184+
1185 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 {
1187 int ret;
1188@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1189 return ret;
1190 }
1191
1192+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1193+{
1194+ return atomic_cmpxchg((atomic_t *)v, old, new);
1195+}
1196+
1197 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1198 {
1199 int c, old;
1200@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1201
1202 #undef ATOMIC_OPS
1203 #undef ATOMIC_OP_RETURN
1204+#undef __ATOMIC_OP_RETURN
1205 #undef ATOMIC_OP
1206+#undef __ATOMIC_OP
1207
1208 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1209+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1210+{
1211+ return xchg(&v->counter, new);
1212+}
1213
1214 #define atomic_inc(v) atomic_add(1, v)
1215+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1216+{
1217+ atomic_add_unchecked(1, v);
1218+}
1219 #define atomic_dec(v) atomic_sub(1, v)
1220+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1221+{
1222+ atomic_sub_unchecked(1, v);
1223+}
1224
1225 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1226+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1227+{
1228+ return atomic_add_return_unchecked(1, v) == 0;
1229+}
1230 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1231 #define atomic_inc_return(v) (atomic_add_return(1, v))
1232+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1233+{
1234+ return atomic_add_return_unchecked(1, v);
1235+}
1236 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1237 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1238
1239@@ -216,6 +336,14 @@ typedef struct {
1240 long long counter;
1241 } atomic64_t;
1242
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+typedef struct {
1245+ long long counter;
1246+} atomic64_unchecked_t;
1247+#else
1248+typedef atomic64_t atomic64_unchecked_t;
1249+#endif
1250+
1251 #define ATOMIC64_INIT(i) { (i) }
1252
1253 #ifdef CONFIG_ARM_LPAE
1254@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1255 return result;
1256 }
1257
1258+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1259+{
1260+ long long result;
1261+
1262+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1263+" ldrd %0, %H0, [%1]"
1264+ : "=&r" (result)
1265+ : "r" (&v->counter), "Qo" (v->counter)
1266+ );
1267+
1268+ return result;
1269+}
1270+
1271 static inline void atomic64_set(atomic64_t *v, long long i)
1272 {
1273 __asm__ __volatile__("@ atomic64_set\n"
1274@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1275 : "r" (&v->counter), "r" (i)
1276 );
1277 }
1278+
1279+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1280+{
1281+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1282+" strd %2, %H2, [%1]"
1283+ : "=Qo" (v->counter)
1284+ : "r" (&v->counter), "r" (i)
1285+ );
1286+}
1287 #else
1288 static inline long long atomic64_read(const atomic64_t *v)
1289 {
1290@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1291 return result;
1292 }
1293
1294+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1295+{
1296+ long long result;
1297+
1298+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1299+" ldrexd %0, %H0, [%1]"
1300+ : "=&r" (result)
1301+ : "r" (&v->counter), "Qo" (v->counter)
1302+ );
1303+
1304+ return result;
1305+}
1306+
1307 static inline void atomic64_set(atomic64_t *v, long long i)
1308 {
1309 long long tmp;
1310@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1311 : "r" (&v->counter), "r" (i)
1312 : "cc");
1313 }
1314+
1315+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1316+{
1317+ long long tmp;
1318+
1319+ prefetchw(&v->counter);
1320+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1321+"1: ldrexd %0, %H0, [%2]\n"
1322+" strexd %0, %3, %H3, [%2]\n"
1323+" teq %0, #0\n"
1324+" bne 1b"
1325+ : "=&r" (tmp), "=Qo" (v->counter)
1326+ : "r" (&v->counter), "r" (i)
1327+ : "cc");
1328+}
1329 #endif
1330
1331-#define ATOMIC64_OP(op, op1, op2) \
1332-static inline void atomic64_##op(long long i, atomic64_t *v) \
1333+#undef __OVERFLOW_POST_RETURN
1334+#define __OVERFLOW_POST_RETURN \
1335+ " bvc 3f\n" \
1336+" mov %0, %1\n" \
1337+" mov %H0, %H1\n" \
1338+ "2: " REFCOUNT_TRAP_INSN "\n"\
1339+ "3:\n"
1340+
1341+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1342+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1343 { \
1344 long long result; \
1345 unsigned long tmp; \
1346 \
1347 prefetchw(&v->counter); \
1348- __asm__ __volatile__("@ atomic64_" #op "\n" \
1349+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1350 "1: ldrexd %0, %H0, [%3]\n" \
1351 " " #op1 " %Q0, %Q0, %Q4\n" \
1352 " " #op2 " %R0, %R0, %R4\n" \
1353+ post_op \
1354 " strexd %1, %0, %H0, [%3]\n" \
1355 " teq %1, #0\n" \
1356-" bne 1b" \
1357+" bne 1b\n" \
1358+ extable \
1359 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1360 : "r" (&v->counter), "r" (i) \
1361 : "cc"); \
1362 } \
1363
1364-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1365-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1366+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1367+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1368+
1369+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1370+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1371 { \
1372 long long result; \
1373 unsigned long tmp; \
1374@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1375 smp_mb(); \
1376 prefetchw(&v->counter); \
1377 \
1378- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1379+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1380 "1: ldrexd %0, %H0, [%3]\n" \
1381 " " #op1 " %Q0, %Q0, %Q4\n" \
1382 " " #op2 " %R0, %R0, %R4\n" \
1383+ post_op \
1384 " strexd %1, %0, %H0, [%3]\n" \
1385 " teq %1, #0\n" \
1386-" bne 1b" \
1387+" bne 1b\n" \
1388+ extable \
1389 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1390 : "r" (&v->counter), "r" (i) \
1391 : "cc"); \
1392@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1393 return result; \
1394 }
1395
1396+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1397+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1398+
1399 #define ATOMIC64_OPS(op, op1, op2) \
1400 ATOMIC64_OP(op, op1, op2) \
1401 ATOMIC64_OP_RETURN(op, op1, op2)
1402@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1403
1404 #undef ATOMIC64_OPS
1405 #undef ATOMIC64_OP_RETURN
1406+#undef __ATOMIC64_OP_RETURN
1407 #undef ATOMIC64_OP
1408+#undef __ATOMIC64_OP
1409+#undef __OVERFLOW_EXTABLE
1410+#undef __OVERFLOW_POST_RETURN
1411+#undef __OVERFLOW_POST
1412
1413 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1414 long long new)
1415@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 return oldval;
1417 }
1418
1419+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1420+ long long new)
1421+{
1422+ long long oldval;
1423+ unsigned long res;
1424+
1425+ smp_mb();
1426+
1427+ do {
1428+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1429+ "ldrexd %1, %H1, [%3]\n"
1430+ "mov %0, #0\n"
1431+ "teq %1, %4\n"
1432+ "teqeq %H1, %H4\n"
1433+ "strexdeq %0, %5, %H5, [%3]"
1434+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1435+ : "r" (&ptr->counter), "r" (old), "r" (new)
1436+ : "cc");
1437+ } while (res);
1438+
1439+ smp_mb();
1440+
1441+ return oldval;
1442+}
1443+
1444 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 {
1446 long long result;
1447@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1448 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1449 {
1450 long long result;
1451- unsigned long tmp;
1452+ u64 tmp;
1453
1454 smp_mb();
1455 prefetchw(&v->counter);
1456
1457 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1458-"1: ldrexd %0, %H0, [%3]\n"
1459-" subs %Q0, %Q0, #1\n"
1460-" sbc %R0, %R0, #0\n"
1461+"1: ldrexd %1, %H1, [%3]\n"
1462+" subs %Q0, %Q1, #1\n"
1463+" sbcs %R0, %R1, #0\n"
1464+
1465+#ifdef CONFIG_PAX_REFCOUNT
1466+" bvc 3f\n"
1467+" mov %Q0, %Q1\n"
1468+" mov %R0, %R1\n"
1469+"2: " REFCOUNT_TRAP_INSN "\n"
1470+"3:\n"
1471+#endif
1472+
1473 " teq %R0, #0\n"
1474-" bmi 2f\n"
1475+" bmi 4f\n"
1476 " strexd %1, %0, %H0, [%3]\n"
1477 " teq %1, #0\n"
1478 " bne 1b\n"
1479-"2:"
1480+"4:\n"
1481+
1482+#ifdef CONFIG_PAX_REFCOUNT
1483+ _ASM_EXTABLE(2b, 4b)
1484+#endif
1485+
1486 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1487 : "r" (&v->counter)
1488 : "cc");
1489@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1490 " teq %0, %5\n"
1491 " teqeq %H0, %H5\n"
1492 " moveq %1, #0\n"
1493-" beq 2f\n"
1494+" beq 4f\n"
1495 " adds %Q0, %Q0, %Q6\n"
1496-" adc %R0, %R0, %R6\n"
1497+" adcs %R0, %R0, %R6\n"
1498+
1499+#ifdef CONFIG_PAX_REFCOUNT
1500+" bvc 3f\n"
1501+"2: " REFCOUNT_TRAP_INSN "\n"
1502+"3:\n"
1503+#endif
1504+
1505 " strexd %2, %0, %H0, [%4]\n"
1506 " teq %2, #0\n"
1507 " bne 1b\n"
1508-"2:"
1509+"4:\n"
1510+
1511+#ifdef CONFIG_PAX_REFCOUNT
1512+ _ASM_EXTABLE(2b, 4b)
1513+#endif
1514+
1515 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1516 : "r" (&v->counter), "r" (u), "r" (a)
1517 : "cc");
1518@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1519
1520 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1521 #define atomic64_inc(v) atomic64_add(1LL, (v))
1522+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1523 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1524+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1525 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1526 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1527 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1528+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1529 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1530 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1531 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
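
In the checked variants above, an overflow makes the adds/adcs sequence set the V flag, bvc falls through to the bkpt at label 2, and the _ASM_EXTABLE(2b, 4b) entry lets the trap handler report the event and resume at label 4 with the strex skipped, so the overflowed value is never stored. The rows emitted into the __ex_table section have the same shape as ARM's ordinary uaccess fixups:

    /* shape of the records that _ASM_EXTABLE() emits (mirrors arch/arm's
     * struct exception_table_entry) */
    struct exception_table_entry {
            unsigned long insn;     /* address of the trapping insn (label 2:) */
            unsigned long fixup;    /* where the handler resumes     (label 4:) */
    };
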
1532diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1533index d2f81e6..3c4dba5 100644
1534--- a/arch/arm/include/asm/barrier.h
1535+++ b/arch/arm/include/asm/barrier.h
1536@@ -67,7 +67,7 @@
1537 do { \
1538 compiletime_assert_atomic_type(*p); \
1539 smp_mb(); \
1540- ACCESS_ONCE(*p) = (v); \
1541+ ACCESS_ONCE_RW(*p) = (v); \
1542 } while (0)
1543
1544 #define smp_load_acquire(p) \
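
ACCESS_ONCE_RW exists because, under the constify plugin, plain ACCESS_ONCE() is given a const-qualified definition suitable for reads only; stores have to go through the non-const variant. The two definitions live in the compiler.h change elsewhere in the full patch and are approximately (quoted from memory, treat as illustrative):

    #define ACCESS_ONCE(x)          (*(const volatile typeof(x) *)&(x))  /* reads  */
    #define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))        /* writes */
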
1545diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1546index 75fe66b..ba3dee4 100644
1547--- a/arch/arm/include/asm/cache.h
1548+++ b/arch/arm/include/asm/cache.h
1549@@ -4,8 +4,10 @@
1550 #ifndef __ASMARM_CACHE_H
1551 #define __ASMARM_CACHE_H
1552
1553+#include <linux/const.h>
1554+
1555 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1558
1559 /*
1560 * Memory returned by kmalloc() may be used for DMA, so we must make
1561@@ -24,5 +26,6 @@
1562 #endif
1563
1564 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1565+#define __read_only __attribute__ ((__section__(".data..read_only")))
1566
1567 #endif
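
The new __read_only annotation places a variable in a section that KERNEXEC maps without write permission after init, complementing __read_mostly (which only affects cache-line grouping). Typical usage under this patch looks like the sketch below; the variable and function names are invented for illustration:

    static int sysctl_example __read_only = 1;  /* hypothetical variable */

    static void update_example(int val)
    {
            pax_open_kernel();      /* briefly lift the write protection */
            sysctl_example = val;
            pax_close_kernel();
    }
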
1568diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1569index 2d46862..a35415b 100644
1570--- a/arch/arm/include/asm/cacheflush.h
1571+++ b/arch/arm/include/asm/cacheflush.h
1572@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1573 void (*dma_unmap_area)(const void *, size_t, int);
1574
1575 void (*dma_flush_range)(const void *, const void *);
1576-};
1577+} __no_const;
1578
1579 /*
1580 * Select the calling method
1581diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1582index 5233151..87a71fa 100644
1583--- a/arch/arm/include/asm/checksum.h
1584+++ b/arch/arm/include/asm/checksum.h
1585@@ -37,7 +37,19 @@ __wsum
1586 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1587
1588 __wsum
1589-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1590+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1591+
1592+static inline __wsum
1593+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1594+{
1595+ __wsum ret;
1596+ pax_open_userland();
1597+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1598+ pax_close_userland();
1599+ return ret;
1600+}
1601+
1602+
1603
1604 /*
1605 * Fold a partial checksum without adding pseudo headers
1606diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1607index abb2c37..96db950 100644
1608--- a/arch/arm/include/asm/cmpxchg.h
1609+++ b/arch/arm/include/asm/cmpxchg.h
1610@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1611
1612 #define xchg(ptr,x) \
1613 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1614+#define xchg_unchecked(ptr,x) \
1615+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616
1617 #include <asm-generic/cmpxchg-local.h>
1618
1619diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1620index 6ddbe44..b5e38b1a 100644
1621--- a/arch/arm/include/asm/domain.h
1622+++ b/arch/arm/include/asm/domain.h
1623@@ -48,18 +48,37 @@
1624 * Domain types
1625 */
1626 #define DOMAIN_NOACCESS 0
1627-#define DOMAIN_CLIENT 1
1628 #ifdef CONFIG_CPU_USE_DOMAINS
1629+#define DOMAIN_USERCLIENT 1
1630+#define DOMAIN_KERNELCLIENT 1
1631 #define DOMAIN_MANAGER 3
1632+#define DOMAIN_VECTORS DOMAIN_USER
1633 #else
1634+
1635+#ifdef CONFIG_PAX_KERNEXEC
1636 #define DOMAIN_MANAGER 1
1637+#define DOMAIN_KERNEXEC 3
1638+#else
1639+#define DOMAIN_MANAGER 1
1640+#endif
1641+
1642+#ifdef CONFIG_PAX_MEMORY_UDEREF
1643+#define DOMAIN_USERCLIENT 0
1644+#define DOMAIN_UDEREF 1
1645+#define DOMAIN_VECTORS DOMAIN_KERNEL
1646+#else
1647+#define DOMAIN_USERCLIENT 1
1648+#define DOMAIN_VECTORS DOMAIN_USER
1649+#endif
1650+#define DOMAIN_KERNELCLIENT 1
1651+
1652 #endif
1653
1654 #define domain_val(dom,type) ((type) << (2*(dom)))
1655
1656 #ifndef __ASSEMBLY__
1657
1658-#ifdef CONFIG_CPU_USE_DOMAINS
1659+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1660 static inline void set_domain(unsigned val)
1661 {
1662 asm volatile(
1663@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1664 isb();
1665 }
1666
1667-#define modify_domain(dom,type) \
1668- do { \
1669- struct thread_info *thread = current_thread_info(); \
1670- unsigned int domain = thread->cpu_domain; \
1671- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1672- thread->cpu_domain = domain | domain_val(dom, type); \
1673- set_domain(thread->cpu_domain); \
1674- } while (0)
1675-
1676+extern void modify_domain(unsigned int dom, unsigned int type);
1677 #else
1678 static inline void set_domain(unsigned val) { }
1679 static inline void modify_domain(unsigned dom, unsigned type) { }
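
domain_val() packs one 2-bit access type per domain into the DACR (Domain Access Control Register); UDEREF's trick is to keep the userspace domain at no-access while kernel code runs and flip it to client only inside pax_open_userland() windows. A standalone illustration of the packing (domain numbers shown are ARM's conventional DOMAIN_KERNEL=0, DOMAIN_USER=1, DOMAIN_IO=2):

    #define domain_val(dom, type)   ((type) << (2*(dom)))

    /* 0 = no access, 1 = client (permissions checked), 3 = manager */
    unsigned int dacr_kernel_mode =
            domain_val(0, 1) |      /* DOMAIN_KERNEL: client             */
            domain_val(1, 0) |      /* DOMAIN_USER:   no access (UDEREF) */
            domain_val(2, 1);       /* DOMAIN_IO:     client             */
    /* == 0x11: any kernel-mode dereference of a user pointer faults */
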
1680diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1681index afb9caf..9a0bac0 100644
1682--- a/arch/arm/include/asm/elf.h
1683+++ b/arch/arm/include/asm/elf.h
1684@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1685 the loader. We need to make sure that it is out of the way of the program
1686 that it will "exec", and that there is sufficient room for the brk. */
1687
1688-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1689+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1690+
1691+#ifdef CONFIG_PAX_ASLR
1692+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1693+
1694+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1695+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1696+#endif
1697
1698 /* When the program starts, a1 contains a pointer to a function to be
1699 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1700@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1701 extern void elf_set_personality(const struct elf32_hdr *);
1702 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1703
1704-struct mm_struct;
1705-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1706-#define arch_randomize_brk arch_randomize_brk
1707-
1708 #ifdef CONFIG_MMU
1709 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1710 struct linux_binprm;
1711diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1712index de53547..52b9a28 100644
1713--- a/arch/arm/include/asm/fncpy.h
1714+++ b/arch/arm/include/asm/fncpy.h
1715@@ -81,7 +81,9 @@
1716 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1717 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1718 \
1719+ pax_open_kernel(); \
1720 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1721+ pax_close_kernel(); \
1722 flush_icache_range((unsigned long)(dest_buf), \
1723 (unsigned long)(dest_buf) + (size)); \
1724 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
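All four futex hunks apply the same UDEREF discipline: every direct userland dereference is bracketed by pax_open_userland()/pax_close_userland(), so DOMAIN_USER is accessible only for the instructions that actually need it. A userspace simulation of that state machine, assuming the patch's domain values (no-access = 0, UDEREF/client = 1); the plain pointer load stands in for the TUSER()/ldrt accessors:

#include <assert.h>
#include <stdio.h>

#define DOMAIN_NOACCESS 0u
#define DOMAIN_UDEREF   1u   /* "client" access for the user domain */

static unsigned user_domain = DOMAIN_NOACCESS;

static void pax_open_userland(void)
{
        assert(user_domain == DOMAIN_NOACCESS);  /* mirrors the BUG_ON() */
        user_domain = DOMAIN_UDEREF;
}

static void pax_close_userland(void)
{
        assert(user_domain == DOMAIN_UDEREF);
        user_domain = DOMAIN_NOACCESS;
}

static unsigned load_user_word(const unsigned *uaddr)
{
        unsigned val;

        pax_open_userland();   /* grant access for the single load... */
        val = *uaddr;          /* ...stands in for the ldrt/TUSER access */
        pax_close_userland();  /* ...and revoke it immediately after */
        return val;
}

int main(void)
{
        unsigned word = 42;

        printf("read %u, user domain now %u\n", load_user_word(&word), user_domain);
        return 0;
}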
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 563b92f..689d58e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -39,7 +39,7 @@ struct outer_cache_fns {
1842 /* This is an ARM L2C thing */
1843 void (*write_sec)(unsigned long, unsigned);
1844 void (*configure)(const struct l2x0_regs *);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index bfd662e..f6cbb02 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -127,6 +127,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a745a2a..481350a 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -80,6 +80,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -91,10 +92,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index f403541..b10df68 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be an 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
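pax_open_kernel()/pax_close_kernel() above are the write-enable gate that every kernel-text patch site in this diff (fncpy, set_fiq_handler, __patch_text_real, the TCM copy, ...) now goes through. A userspace simulation of the pairing, using the patch's remapped domain values (client = 1, KERNEXEC/manager = 3) and a counter standing in for preempt_disable():

#include <assert.h>
#include <stdio.h>

#define DOMAIN_KERNELCLIENT 1u  /* kernel text read-only, permissions checked */
#define DOMAIN_KERNEXEC     3u  /* manager: permission checks bypassed */

static unsigned kernel_domain = DOMAIN_KERNELCLIENT;
static int preempt_count;

static unsigned long pax_open_kernel(void)
{
        preempt_count++;                          /* preempt_disable() */
        assert(kernel_domain != DOMAIN_KERNEXEC); /* BUG_ON(already open) */
        kernel_domain = DOMAIN_KERNEXEC;          /* kernel text now writable */
        return 0;
}

static unsigned long pax_close_kernel(void)
{
        assert(kernel_domain == DOMAIN_KERNEXEC);
        kernel_domain = DOMAIN_KERNELCLIENT;      /* back to read-only */
        preempt_count--;                          /* preempt_enable_no_resched() */
        return 0;
}

int main(void)
{
        pax_open_kernel();
        /* ...the bracketed memcpy()/store into kernel text happens here... */
        pax_close_kernel();
        printf("domain=%u preempt=%d\n", kernel_domain, preempt_count);
        return 0;
}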
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index 72812a1..335f4f3 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -77,9 +77,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 }
2125
2126 #define init_thread_info (init_thread_union.thread_info)
2127@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* must stay within 8 bits of TIF_SYSCALL_TRACE so the combined
2133+ * mask still satisfies the ARM flexible second operand encoding
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
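The "within 8 bits" constraint on TIF_GRSEC_SETXID refers to ARM's flexible second operand: a data-processing immediate is an 8-bit value rotated right by an even amount, so the combined _TIF_SYSCALL_WORK mask is only usable as a single tst/bic immediate if all its bits fall inside one such window. A standalone checker for that encodability rule (bit positions below assume TIF_SYSCALL_TRACE is bit 8, as in this kernel):

#include <stdint.h>
#include <stdio.h>

/* true if v is encodable as an ARM immediate: imm8 rotated right by an
 * even amount, i.e. some even left-rotation of v fits in 8 bits */
static int is_arm_immediate(uint32_t v)
{
        for (int rot = 0; rot < 32; rot += 2) {
                uint32_t rol = (v << rot) | (v >> ((32 - rot) & 31));

                if (rol <= 0xff)
                        return 1;
        }
        return 0;
}

int main(void)
{
        /* _TIF_SYSCALL_WORK with TIF_GRSEC_SETXID at bit 12: bits 8..12 */
        uint32_t work = 0x1f00;
        /* a hypothetical flag parked at bit 17 instead */
        uint32_t too_far = 0x0f00 | (1u << 17);

        printf("bits 8..12 encodable:  %d\n", is_arm_immediate(work));    /* 1 */
        printf("bits 8..11 + bit 17:   %d\n", is_arm_immediate(too_far)); /* 0 */
        return 0;
}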
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index ce0786e..a80c264 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a, b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x, p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x, p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check((x), (p)); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x, p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x, p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check((x), (p)); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2260 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x, ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x), (ptr), __gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x, ptr, err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x), (ptr), err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x, ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x), (ptr), __pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x, ptr, err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x), (ptr), err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
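The added (long)n < 0 guard in copy_from_user()/copy_to_user() is a cheap sanity check against length values whose sign bit is set: a negative int that was miscast to size_t becomes a multi-gigabyte copy, and rejecting it up front means neither access_ok() nor the copy loop ever sees it. A standalone model of the check:

#include <stdio.h>
#include <string.h>

/* model of the guarded copy: refuse any length with the sign bit set,
 * reporting all n bytes as "not copied", exactly like the kernel path */
static unsigned long checked_copy(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)
                return n;
        memcpy(to, from, n);
        return 0;
}

int main(void)
{
        char src[8] = "grsec", dst[8];
        long bad = -1;  /* e.g. an unchecked error code reused as a length */

        printf("good copy, %lu left\n", checked_copy(dst, src, sizeof(src)));
        printf("bad  copy, %lu left\n", checked_copy(dst, src, (unsigned long)bad));
        return 0;
}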
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 672b219..4aa120a 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -48,6 +48,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -90,11 +171,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -479,7 +576,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -513,11 +612,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -767,7 +871,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -776,7 +880,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
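Each of the assembly macros above (pax_enter_kernel, pax_open_userland, pax_close_userland) recovers current_thread_info() the way the "assume 8K pages" comments describe: thread_info sits at the base of the 8 KiB kernel stack, so masking the low 13 bits off sp finds it, and because 0x1fff does not fit a single ARM rotated immediate the mask is split across two bic instructions. The same computation in C:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 0x2000UL  /* 8 KiB kernel stacks */

static uintptr_t thread_info_from_sp(uintptr_t sp)
{
        sp &= ~0x1fc0UL;  /* bic rN, rN, #0x1fc0 (encodable immediate) */
        sp &= ~0x3fUL;    /* bic rN, rN, #0x3f   (encodable immediate) */
        return sp;        /* == sp & ~(THREAD_SIZE - 1) */
}

int main(void)
{
        uintptr_t sp = 0xc158bd98;  /* arbitrary in-stack value for the demo */

        printf("thread_info at %#lx\n", (unsigned long)thread_info_from_sp(sp));
        return 0;
}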
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here to avoid the performance hit of wrapping the code above,
2663+ * which directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 0196327..50ac8895 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -444,7 +444,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index 2e11961..07f0704 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
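Under KERNEXEC the rewritten module_alloc() hands out non-executable PAGE_KERNEL memory, and executable mappings come only from the separate module_alloc_exec() path, so module data can never be writable and executable at once. A hedged sketch of how a loader would consume the split (kernel context; the struct and helper are hypothetical, not part of the patch):

/* hypothetical loader-side view of the split allocators */
struct modmem {
        void *core;  /* data, GOT, etc.: PAGE_KERNEL, never executable */
        void *text;  /* code: PAGE_KERNEL_EXEC from the exec pool */
};

static int modmem_alloc(struct modmem *m, unsigned long core_size,
                        unsigned long text_size)
{
        m->core = module_alloc(core_size);       /* NX under KERNEXEC */
        m->text = module_alloc_exec(text_size);  /* RX pool */
        if (!m->core || !m->text) {
                module_memfree(m->core);         /* vfree-style, NULL-safe */
                module_memfree_exec(m->text);
                return -ENOMEM;
        }
        return 0;
}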
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 69bda1a..755113a 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index fdfa3a7..5d208b8 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -207,6 +207,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -220,7 +221,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f73891b..cf3004e 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -28,7 +28,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index 1d60beb..4aa25d5 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3058 * Register 0 and check for VMSAv7 or PMSAv7 */
3059 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
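The setup.c hunk gates PXN support on the VMSA field (low nibble) of ID_MMFR0: values 4 and 5 indicate VMSAv7 implementations that support Privileged Execute-Never (5 additionally implying the long-descriptor format), and only then are L_PTE_PXN/PMD_PXNTABLE folded into the supported masks. A standalone decoder for that check (register values below are illustrative, not taken from real CPUs):

#include <stdio.h>

/* VMSA support field of ID_MMFR0, as tested in the setup.c hunk */
static int vmsa_has_pxn(unsigned mmfr0)
{
        unsigned vmsa = mmfr0 & 0xf;

        return vmsa == 4 || vmsa == 5;
}

int main(void)
{
        printf("vmsa=3 (plain VMSAv7):  pxn=%d\n", vmsa_has_pxn(0x00000103));
        printf("vmsa=5 (VMSAv7 + LPAE): pxn=%d\n", vmsa_has_pxn(0x00000105));
        return 0;
}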
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 023ac90..0a69950 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index 5560f74..1cc00ea 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1088,7 +1088,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
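The atomic64 -> atomic64_unchecked conversions for kvm_vmid_gen follow PAX_REFCOUNT's rule: plain atomic increments trap on overflow to stop refcount-overflow exploits, so counters that are designed to wrap, like the VMID generation number, must opt out via the _unchecked variants. A userspace model of the distinction (wraparound shown via unsigned arithmetic; two's-complement conversion assumed):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* PAX_REFCOUNT-style increment: overflow is a bug, so trap on it */
static long long checked_inc(long long v)
{
        assert(v != LLONG_MAX);
        return v + 1;
}

/* _unchecked increment: wraparound is expected and harmless here */
static long long unchecked_inc(long long v)
{
        return (long long)((unsigned long long)v + 1);
}

int main(void)
{
        printf("unchecked: %lld -> %lld\n", LLONG_MAX, unchecked_inc(LLONG_MAX));
        printf("checked:   %lld -> %lld\n", 1LL, checked_inc(1));
        return 0;
}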
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 	/* See rationale for this in __copy_to_user() above. */
3436 if (n < 64)
3437diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3438index 318d127..9aab0d1 100644
3439--- a/arch/arm/mach-exynos/suspend.c
3440+++ b/arch/arm/mach-exynos/suspend.c
3441@@ -18,6 +18,7 @@
3442 #include <linux/syscore_ops.h>
3443 #include <linux/cpu_pm.h>
3444 #include <linux/io.h>
3445+#include <linux/irq.h>
3446 #include <linux/irqchip/arm-gic.h>
3447 #include <linux/err.h>
3448 #include <linux/regulator/machine.h>
3449@@ -632,8 +633,10 @@ void __init exynos_pm_init(void)
3450 tmp |= pm_data->wake_disable_mask;
3451 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3452
3453- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3454- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3455+ pax_open_kernel();
3456+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3457+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3458+ pax_close_kernel();
3459
3460 register_syscore_ops(&exynos_pm_syscore_ops);
3461 suspend_set_ops(&exynos_suspend_ops);
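
The exynos hunk above shows the core grsecurity idiom used throughout this patch: ops structures are made read-only after init, so a late assignment of their function pointers must be bracketed by pax_open_kernel()/pax_close_kernel(), with the store done through a *(void **)& cast to bypass the const qualifier. Below is a minimal userspace sketch of that open-write-close pattern; mprotect(2) stands in for the real primitives (which toggle CR0.WP on x86 or, on ARM with KERNEXEC, section/domain permissions), and all names here are illustrative.

#include <stdio.h>
#include <sys/mman.h>

struct pm_ops {
	void (*suspend)(void);
	void (*resume)(void);
};

/* page-aligned so protection can be flipped on exactly this object */
static struct pm_ops syscore_ops __attribute__((aligned(4096)));

static void my_suspend(void) { puts("suspend"); }

static void open_kernel(void)  { mprotect(&syscore_ops, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(&syscore_ops, 4096, PROT_READ); }

int main(void)
{
	close_kernel();                               /* ops now live in an RO page */

	open_kernel();                                /* briefly writable... */
	*(void **)&syscore_ops.suspend = my_suspend;
	close_kernel();                               /* ...and read-only again */

	syscore_ops.suspend();                        /* reads still work */
	return 0;
}

The narrow writable window is the point: outside it, a stray or attacker-controlled write to the ops table faults instead of silently redirecting kernel control flow.
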
3462diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3463index 0662087..004d163 100644
3464--- a/arch/arm/mach-keystone/keystone.c
3465+++ b/arch/arm/mach-keystone/keystone.c
3466@@ -27,7 +27,7 @@
3467
3468 #include "keystone.h"
3469
3470-static struct notifier_block platform_nb;
3471+static notifier_block_no_const platform_nb;
3472 static unsigned long keystone_dma_pfn_offset __read_mostly;
3473
3474 static int keystone_platform_notifier(struct notifier_block *nb,
3475diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3476index e46e9ea..9141c83 100644
3477--- a/arch/arm/mach-mvebu/coherency.c
3478+++ b/arch/arm/mach-mvebu/coherency.c
3479@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3480
3481 /*
3482 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3483- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3484+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3485 * is needed as a workaround for a deadlock issue between the PCIe
3486 * interface and the cache controller.
3487 */
3488@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3489 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3490
3491 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3492- mtype = MT_UNCACHED;
3493+ mtype = MT_UNCACHED_RW;
3494
3495 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3496 }
3497diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3498index b6443a4..20a0b74 100644
3499--- a/arch/arm/mach-omap2/board-n8x0.c
3500+++ b/arch/arm/mach-omap2/board-n8x0.c
3501@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3502 }
3503 #endif
3504
3505-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3506+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3507 .late_init = n8x0_menelaus_late_init,
3508 };
3509
3510diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3511index 79f49d9..70bf184 100644
3512--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3513+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3514@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3515 void (*resume)(void);
3516 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3517 void (*hotplug_restart)(void);
3518-};
3519+} __no_const;
3520
3521 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3522 static struct powerdomain *mpuss_pd;
3523@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3524 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3525 {}
3526
3527-struct cpu_pm_ops omap_pm_ops = {
3528+static struct cpu_pm_ops omap_pm_ops __read_only = {
3529 .finish_suspend = default_finish_suspend,
3530 .resume = dummy_cpu_resume,
3531 .scu_prepare = dummy_scu_prepare,
3532diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3533index 5305ec7..6d74045 100644
3534--- a/arch/arm/mach-omap2/omap-smp.c
3535+++ b/arch/arm/mach-omap2/omap-smp.c
3536@@ -19,6 +19,7 @@
3537 #include <linux/device.h>
3538 #include <linux/smp.h>
3539 #include <linux/io.h>
3540+#include <linux/irq.h>
3541 #include <linux/irqchip/arm-gic.h>
3542
3543 #include <asm/smp_scu.h>
3544diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3545index f961c46..4a453dc 100644
3546--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3547+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3548@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3549 return NOTIFY_OK;
3550 }
3551
3552-static struct notifier_block __refdata irq_hotplug_notifier = {
3553+static struct notifier_block irq_hotplug_notifier = {
3554 .notifier_call = irq_cpu_hotplug_notify,
3555 };
3556
3557diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3558index be9541e..821805f 100644
3559--- a/arch/arm/mach-omap2/omap_device.c
3560+++ b/arch/arm/mach-omap2/omap_device.c
3561@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3562 struct platform_device __init *omap_device_build(const char *pdev_name,
3563 int pdev_id,
3564 struct omap_hwmod *oh,
3565- void *pdata, int pdata_len)
3566+ const void *pdata, int pdata_len)
3567 {
3568 struct omap_hwmod *ohs[] = { oh };
3569
3570@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3571 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3572 int pdev_id,
3573 struct omap_hwmod **ohs,
3574- int oh_cnt, void *pdata,
3575+ int oh_cnt, const void *pdata,
3576 int pdata_len)
3577 {
3578 int ret = -ENOMEM;
3579diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3580index 78c02b3..c94109a 100644
3581--- a/arch/arm/mach-omap2/omap_device.h
3582+++ b/arch/arm/mach-omap2/omap_device.h
3583@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3584 /* Core code interface */
3585
3586 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3587- struct omap_hwmod *oh, void *pdata,
3588+ struct omap_hwmod *oh, const void *pdata,
3589 int pdata_len);
3590
3591 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3592 struct omap_hwmod **oh, int oh_cnt,
3593- void *pdata, int pdata_len);
3594+ const void *pdata, int pdata_len);
3595
3596 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3597 struct omap_hwmod **ohs, int oh_cnt);
3598diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3599index 355b089..2c9d7c3 100644
3600--- a/arch/arm/mach-omap2/omap_hwmod.c
3601+++ b/arch/arm/mach-omap2/omap_hwmod.c
3602@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3603 int (*init_clkdm)(struct omap_hwmod *oh);
3604 void (*update_context_lost)(struct omap_hwmod *oh);
3605 int (*get_context_lost)(struct omap_hwmod *oh);
3606-};
3607+} __no_const;
3608
3609 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3610-static struct omap_hwmod_soc_ops soc_ops;
3611+static struct omap_hwmod_soc_ops soc_ops __read_only;
3612
3613 /* omap_hwmod_list contains all registered struct omap_hwmods */
3614 static LIST_HEAD(omap_hwmod_list);
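
Several hunks in this stretch lean on the constify GCC plugin: struct types consisting of function pointers are implicitly treated as const in every instance, __no_const opts a type out so it can still be filled in at runtime (notifier_block_no_const is the analogous opt-out for struct notifier_block), and __read_only/__do_const place or force objects into write-protected memory. A plain-C approximation of the resulting discipline, with the attributes stubbed out; the stubs only model the plugin's effect and are not the real definitions.

#include <stdio.h>

#define __no_const   /* real version: a type attribute the plugin honours */
#define __read_only  /* real version: places the object in an after-init RO section */

static int real_probe(void) { return 42; }

/* Constified: every instance behaves as const, so it must be fully
 * initialised at definition time and can never be reassigned. */
struct fixed_ops { int (*probe)(void); };
static const struct fixed_ops fops = { .probe = real_probe };

/* Types that must be filled in at runtime opt out with __no_const. */
struct late_ops { int (*probe)(void); } __no_const;
static struct late_ops lops __read_only;

int main(void)
{
	/* in the kernel this store would sit between pax_open_kernel() and
	 * pax_close_kernel(), as in the exynos hunk above */
	lops.probe = real_probe;
	printf("%d %d\n", fops.probe(), lops.probe());
	return 0;
}
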
3615diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3616index 95fee54..cfa9cf1 100644
3617--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3618+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3619@@ -10,6 +10,7 @@
3620
3621 #include <linux/kernel.h>
3622 #include <linux/init.h>
3623+#include <asm/pgtable.h>
3624
3625 #include "powerdomain.h"
3626
3627@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3628
3629 void __init am43xx_powerdomains_init(void)
3630 {
3631- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3632+ pax_open_kernel();
3633+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3634+ pax_close_kernel();
3635 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3636 pwrdm_register_pwrdms(powerdomains_am43xx);
3637 pwrdm_complete_init();
3638diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3639index ff0a68c..b312aa0 100644
3640--- a/arch/arm/mach-omap2/wd_timer.c
3641+++ b/arch/arm/mach-omap2/wd_timer.c
3642@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3643 struct omap_hwmod *oh;
3644 char *oh_name = "wd_timer2";
3645 char *dev_name = "omap_wdt";
3646- struct omap_wd_timer_platform_data pdata;
3647+ static struct omap_wd_timer_platform_data pdata = {
3648+ .read_reset_sources = prm_read_reset_sources
3649+ };
3650
3651 if (!cpu_class_is_omap2() || of_have_populated_dt())
3652 return 0;
3653@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3654 return -EINVAL;
3655 }
3656
3657- pdata.read_reset_sources = prm_read_reset_sources;
3658-
3659 pdev = omap_device_build(dev_name, id, oh, &pdata,
3660 sizeof(struct omap_wd_timer_platform_data));
3661 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3662diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3663index 4f25a7c..a81be85 100644
3664--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3665+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3666@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3667 bool entered_lp2 = false;
3668
3669 if (tegra_pending_sgi())
3670- ACCESS_ONCE(abort_flag) = true;
3671+ ACCESS_ONCE_RW(abort_flag) = true;
3672
3673 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3674
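
grsecurity redefines ACCESS_ONCE() with a const volatile cast so it can only be used for reads; write sites such as the abort_flag store above are converted to ACCESS_ONCE_RW(), which keeps the classic read-write definition. Both still force exactly one untorn access that the compiler cannot cache in a register. A sketch of the pair — the exact bodies are my reconstruction of the 3.x-era macros:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x)) /* reads only */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))       /* reads and writes */

static int abort_flag; /* set by one CPU, polled by another */

int main(void)
{
	ACCESS_ONCE_RW(abort_flag) = 1;    /* compiles: non-const lvalue */
	/* ACCESS_ONCE(abort_flag) = 1; */ /* would not compile: assignment to const */
	printf("%d\n", ACCESS_ONCE(abort_flag));
	return 0;
}
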
3675diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3676index ab95f53..4b977a7 100644
3677--- a/arch/arm/mach-tegra/irq.c
3678+++ b/arch/arm/mach-tegra/irq.c
3679@@ -20,6 +20,7 @@
3680 #include <linux/cpu_pm.h>
3681 #include <linux/interrupt.h>
3682 #include <linux/io.h>
3683+#include <linux/irq.h>
3684 #include <linux/irqchip/arm-gic.h>
3685 #include <linux/irq.h>
3686 #include <linux/kernel.h>
3687diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3688index 2cb587b..6ddfebf 100644
3689--- a/arch/arm/mach-ux500/pm.c
3690+++ b/arch/arm/mach-ux500/pm.c
3691@@ -10,6 +10,7 @@
3692 */
3693
3694 #include <linux/kernel.h>
3695+#include <linux/irq.h>
3696 #include <linux/irqchip/arm-gic.h>
3697 #include <linux/delay.h>
3698 #include <linux/io.h>
3699diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3700index 2dea8b5..6499da2 100644
3701--- a/arch/arm/mach-ux500/setup.h
3702+++ b/arch/arm/mach-ux500/setup.h
3703@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3704 .type = MT_DEVICE, \
3705 }
3706
3707-#define __MEM_DEV_DESC(x, sz) { \
3708- .virtual = IO_ADDRESS(x), \
3709- .pfn = __phys_to_pfn(x), \
3710- .length = sz, \
3711- .type = MT_MEMORY_RWX, \
3712-}
3713-
3714 extern struct smp_operations ux500_smp_ops;
3715 extern void ux500_cpu_die(unsigned int cpu);
3716
3717diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3718index 52d768f..5f93180 100644
3719--- a/arch/arm/mach-zynq/platsmp.c
3720+++ b/arch/arm/mach-zynq/platsmp.c
3721@@ -24,6 +24,7 @@
3722 #include <linux/io.h>
3723 #include <asm/cacheflush.h>
3724 #include <asm/smp_scu.h>
3725+#include <linux/irq.h>
3726 #include <linux/irqchip/arm-gic.h>
3727 #include "common.h"
3728
3729diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3730index 9b4f29e..bbf3bfa 100644
3731--- a/arch/arm/mm/Kconfig
3732+++ b/arch/arm/mm/Kconfig
3733@@ -446,6 +446,7 @@ config CPU_32v5
3734
3735 config CPU_32v6
3736 bool
3737+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3738 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3739
3740 config CPU_32v6K
3741@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3742
3743 config CPU_USE_DOMAINS
3744 bool
3745+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3746 help
3747 This option enables or disables the use of domain switching
3748 via the set_fs() function.
3749@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3750
3751 config KUSER_HELPERS
3752 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3753- depends on MMU
3754+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3755 default y
3756 help
3757 Warning: disabling this option may break user programs.
3758@@ -812,7 +814,7 @@ config KUSER_HELPERS
3759 See Documentation/arm/kernel_user_helpers.txt for details.
3760
3761 However, the fixed address nature of these helpers can be used
3762- by ROP (return orientated programming) authors when creating
3763+ by ROP (Return Oriented Programming) authors when creating
3764 exploits.
3765
3766 If all of the binaries and libraries which run on your platform
3767diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3768index 2c0c541..4585df9 100644
3769--- a/arch/arm/mm/alignment.c
3770+++ b/arch/arm/mm/alignment.c
3771@@ -216,10 +216,12 @@ union offset_union {
3772 #define __get16_unaligned_check(ins,val,addr) \
3773 do { \
3774 unsigned int err = 0, v, a = addr; \
3775+ pax_open_userland(); \
3776 __get8_unaligned_check(ins,v,a,err); \
3777 val = v << ((BE) ? 8 : 0); \
3778 __get8_unaligned_check(ins,v,a,err); \
3779 val |= v << ((BE) ? 0 : 8); \
3780+ pax_close_userland(); \
3781 if (err) \
3782 goto fault; \
3783 } while (0)
3784@@ -233,6 +235,7 @@ union offset_union {
3785 #define __get32_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 24 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792@@ -241,6 +244,7 @@ union offset_union {
3793 val |= v << ((BE) ? 8 : 16); \
3794 __get8_unaligned_check(ins,v,a,err); \
3795 val |= v << ((BE) ? 0 : 24); \
3796+ pax_close_userland(); \
3797 if (err) \
3798 goto fault; \
3799 } while (0)
3800@@ -254,6 +258,7 @@ union offset_union {
3801 #define __put16_unaligned_check(ins,val,addr) \
3802 do { \
3803 unsigned int err = 0, v = val, a = addr; \
3804+ pax_open_userland(); \
3805 __asm__( FIRST_BYTE_16 \
3806 ARM( "1: "ins" %1, [%2], #1\n" ) \
3807 THUMB( "1: "ins" %1, [%2]\n" ) \
3808@@ -273,6 +278,7 @@ union offset_union {
3809 " .popsection\n" \
3810 : "=r" (err), "=&r" (v), "=&r" (a) \
3811 : "0" (err), "1" (v), "2" (a)); \
3812+ pax_close_userland(); \
3813 if (err) \
3814 goto fault; \
3815 } while (0)
3816@@ -286,6 +292,7 @@ union offset_union {
3817 #define __put32_unaligned_check(ins,val,addr) \
3818 do { \
3819 unsigned int err = 0, v = val, a = addr; \
3820+ pax_open_userland(); \
3821 __asm__( FIRST_BYTE_32 \
3822 ARM( "1: "ins" %1, [%2], #1\n" ) \
3823 THUMB( "1: "ins" %1, [%2]\n" ) \
3824@@ -315,6 +322,7 @@ union offset_union {
3825 " .popsection\n" \
3826 : "=r" (err), "=&r" (v), "=&r" (a) \
3827 : "0" (err), "1" (v), "2" (a)); \
3828+ pax_close_userland(); \
3829 if (err) \
3830 goto fault; \
3831 } while (0)
3832diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3833index 8f15f70..d599a2b 100644
3834--- a/arch/arm/mm/cache-l2x0.c
3835+++ b/arch/arm/mm/cache-l2x0.c
3836@@ -43,7 +43,7 @@ struct l2c_init_data {
3837 void (*save)(void __iomem *);
3838 void (*configure)(void __iomem *);
3839 struct outer_cache_fns outer_cache;
3840-};
3841+} __do_const;
3842
3843 #define CACHE_LINE_SIZE 32
3844
3845diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3846index 845769e..4278fd7 100644
3847--- a/arch/arm/mm/context.c
3848+++ b/arch/arm/mm/context.c
3849@@ -43,7 +43,7 @@
3850 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3851
3852 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3853-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3854+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3855 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3856
3857 static DEFINE_PER_CPU(atomic64_t, active_asids);
3858@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3859 {
3860 static u32 cur_idx = 1;
3861 u64 asid = atomic64_read(&mm->context.id);
3862- u64 generation = atomic64_read(&asid_generation);
3863+ u64 generation = atomic64_read_unchecked(&asid_generation);
3864
3865 if (asid != 0) {
3866 /*
3867@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3868 */
3869 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3870 if (asid == NUM_USER_ASIDS) {
3871- generation = atomic64_add_return(ASID_FIRST_VERSION,
3872+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3873 &asid_generation);
3874 flush_context(cpu);
3875 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3876@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3877 cpu_set_reserved_ttbr0();
3878
3879 asid = atomic64_read(&mm->context.id);
3880- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3881+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3882 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3883 goto switch_mm_fastpath;
3884
3885 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3886 /* Check that our ASID belongs to the current generation. */
3887 asid = atomic64_read(&mm->context.id);
3888- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3889+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3890 asid = new_context(mm, cpu);
3891 atomic64_set(&mm->context.id, asid);
3892 }
3893diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3894index 6333d9c..fd09b46 100644
3895--- a/arch/arm/mm/fault.c
3896+++ b/arch/arm/mm/fault.c
3897@@ -25,6 +25,7 @@
3898 #include <asm/system_misc.h>
3899 #include <asm/system_info.h>
3900 #include <asm/tlbflush.h>
3901+#include <asm/sections.h>
3902
3903 #include "fault.h"
3904
3905@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3906 if (fixup_exception(regs))
3907 return;
3908
3909+#ifdef CONFIG_PAX_MEMORY_UDEREF
3910+ if (addr < TASK_SIZE) {
3911+ if (current->signal->curr_ip)
3912+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3913+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3914+ else
3915+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3916+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3917+ }
3918+#endif
3919+
3920+#ifdef CONFIG_PAX_KERNEXEC
3921+ if ((fsr & FSR_WRITE) &&
3922+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3923+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3924+ {
3925+ if (current->signal->curr_ip)
3926+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3927+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3928+ else
3929+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3930+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3931+ }
3932+#endif
3933+
3934 /*
3935 * No handler, we'll have to terminate things with extreme prejudice.
3936 */
3937@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3938 }
3939 #endif
3940
3941+#ifdef CONFIG_PAX_PAGEEXEC
3942+ if (fsr & FSR_LNX_PF) {
3943+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3944+ do_group_exit(SIGKILL);
3945+ }
3946+#endif
3947+
3948 tsk->thread.address = addr;
3949 tsk->thread.error_code = fsr;
3950 tsk->thread.trap_no = 14;
3951@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3952 }
3953 #endif /* CONFIG_MMU */
3954
3955+#ifdef CONFIG_PAX_PAGEEXEC
3956+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3957+{
3958+ long i;
3959+
3960+ printk(KERN_ERR "PAX: bytes at PC: ");
3961+ for (i = 0; i < 20; i++) {
3962+ unsigned char c;
3963+ if (get_user(c, (__force unsigned char __user *)pc+i))
3964+ printk(KERN_CONT "?? ");
3965+ else
3966+ printk(KERN_CONT "%02x ", c);
3967+ }
3968+ printk("\n");
3969+
3970+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3971+ for (i = -1; i < 20; i++) {
3972+ unsigned long c;
3973+ if (get_user(c, (__force unsigned long __user *)sp+i))
3974+ printk(KERN_CONT "???????? ");
3975+ else
3976+ printk(KERN_CONT "%08lx ", c);
3977+ }
3978+ printk("\n");
3979+}
3980+#endif
3981+
3982 /*
3983 * First Level Translation Fault Handler
3984 *
3985@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3986 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3987 struct siginfo info;
3988
3989+#ifdef CONFIG_PAX_MEMORY_UDEREF
3990+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3991+ if (current->signal->curr_ip)
3992+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3993+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3994+ else
3995+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3996+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3997+ goto die;
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, fsr, addr);
4007 show_pte(current->mm, addr);
4008@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4009 ifsr_info[nr].name = name;
4010 }
4011
4012+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4013+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4014+
4015 asmlinkage void __exception
4016 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4017 {
4018 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4019 struct siginfo info;
4020+ unsigned long pc = instruction_pointer(regs);
4021+
4022+ if (user_mode(regs)) {
4023+ unsigned long sigpage = current->mm->context.sigpage;
4024+
4025+ if (sigpage <= pc && pc < sigpage + 7*4) {
4026+ if (pc < sigpage + 3*4)
4027+ sys_sigreturn(regs);
4028+ else
4029+ sys_rt_sigreturn(regs);
4030+ return;
4031+ }
4032+ if (pc == 0xffff0f60UL) {
4033+ /*
4034+ * PaX: __kuser_cmpxchg64 emulation
4035+ */
4036+ // TODO
4037+ //regs->ARM_pc = regs->ARM_lr;
4038+ //return;
4039+ }
4040+ if (pc == 0xffff0fa0UL) {
4041+ /*
4042+ * PaX: __kuser_memory_barrier emulation
4043+ */
4044+ // dmb(); implied by the exception
4045+ regs->ARM_pc = regs->ARM_lr;
4046+ return;
4047+ }
4048+ if (pc == 0xffff0fc0UL) {
4049+ /*
4050+ * PaX: __kuser_cmpxchg emulation
4051+ */
4052+ // TODO
4053+ //long new;
4054+ //int op;
4055+
4056+ //op = FUTEX_OP_SET << 28;
4057+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4058+ //regs->ARM_r0 = old != new;
4059+ //regs->ARM_pc = regs->ARM_lr;
4060+ //return;
4061+ }
4062+ if (pc == 0xffff0fe0UL) {
4063+ /*
4064+ * PaX: __kuser_get_tls emulation
4065+ */
4066+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4067+ regs->ARM_pc = regs->ARM_lr;
4068+ return;
4069+ }
4070+ }
4071+
4072+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4073+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4074+ if (current->signal->curr_ip)
4075+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4077+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4078+ else
4079+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4080+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4081+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4082+ goto die;
4083+ }
4084+#endif
4085+
4086+#ifdef CONFIG_PAX_REFCOUNT
4087+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4088+#ifdef CONFIG_THUMB2_KERNEL
4089+ unsigned short bkpt;
4090+
4091+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4092+#else
4093+ unsigned int bkpt;
4094+
4095+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4096+#endif
4097+ current->thread.error_code = ifsr;
4098+ current->thread.trap_no = 0;
4099+ pax_report_refcount_overflow(regs);
4100+ fixup_exception(regs);
4101+ return;
4102+ }
4103+ }
4104+#endif
4105
4106 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4107 return;
4108
4109+die:
4110 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4111 inf->name, ifsr, addr);
4112
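
The do_DataAbort/do_PrefetchAbort additions above log the faulting context, and pax_report_insns() dumps 20 bytes at the PC plus 21 words starting at SP-4 so an exploit attempt can be reconstructed from dmesg. A userspace analogue of the byte dump; memcpy stands in for get_user(), since our own .text is always readable here, and the kernel version prints "??" where the read would fault.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char *pc = (const unsigned char *)main;
	unsigned char c;
	long i;

	printf("bytes at PC: ");
	for (i = 0; i < 20; i++) {
		memcpy(&c, pc + i, 1);
		printf("%02x ", c);
	}
	printf("\n");
	return 0;
}
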
4113diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4114index cf08bdf..772656c 100644
4115--- a/arch/arm/mm/fault.h
4116+++ b/arch/arm/mm/fault.h
4117@@ -3,6 +3,7 @@
4118
4119 /*
4120 * Fault status register encodings. We steal bit 31 for our own purposes.
4121+ * Set when the FSR value is from an instruction fault.
4122 */
4123 #define FSR_LNX_PF (1 << 31)
4124 #define FSR_WRITE (1 << 11)
4125@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4126 }
4127 #endif
4128
4129+/* valid for LPAE and !LPAE */
4130+static inline int is_xn_fault(unsigned int fsr)
4131+{
4132+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4133+}
4134+
4135+static inline int is_domain_fault(unsigned int fsr)
4136+{
4137+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4138+}
4139+
4140 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4141 unsigned long search_exception_table(unsigned long addr);
4142
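
The helpers added to fault.h classify short-descriptor fault status values: fsr_fs() (its non-LPAE body is elided in the context above) reassembles FS[4:0] from FSR bits [3:0] and [10], is_domain_fault() matches the domain-fault encodings 0b01001/0b01011, and is_xn_fault() matches the permission-fault group used to detect execution of non-executable memory. A small harness over those predicates; fsr_fs() here is reconstructed from the ARM ARM and should be treated as an assumption.

#include <stdio.h>

static int fsr_fs(unsigned int fsr)
{
	return (fsr & 0xf) | ((fsr >> 6) & 0x10);
}

static int is_xn_fault(unsigned int fsr)     { return (fsr_fs(fsr) & 0x3c) == 0xc; }
static int is_domain_fault(unsigned int fsr) { return (fsr_fs(fsr) & 0xD) == 0x9; }

int main(void)
{
	/* 0x9 = section domain fault, 0xb = page domain fault,
	 * 0xd = section permission fault, 0xf = page permission fault */
	unsigned int samples[] = { 0x9, 0xb, 0xd, 0xf };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("fs=0x%02x domain=%d xn=%d\n", samples[i],
		       is_domain_fault(samples[i]), is_xn_fault(samples[i]));
	return 0;
}
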
4143diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4144index 1609b02..def0785 100644
4145--- a/arch/arm/mm/init.c
4146+++ b/arch/arm/mm/init.c
4147@@ -755,7 +755,46 @@ void free_tcmmem(void)
4148 {
4149 #ifdef CONFIG_HAVE_TCM
4150 extern char __tcm_start, __tcm_end;
4151+#endif
4152
4153+#ifdef CONFIG_PAX_KERNEXEC
4154+ unsigned long addr;
4155+ pgd_t *pgd;
4156+ pud_t *pud;
4157+ pmd_t *pmd;
4158+ int cpu_arch = cpu_architecture();
4159+ unsigned int cr = get_cr();
4160+
4161+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4162+	/* make page tables, etc. before .text NX */
4163+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4164+ pgd = pgd_offset_k(addr);
4165+ pud = pud_offset(pgd, addr);
4166+ pmd = pmd_offset(pud, addr);
4167+ __section_update(pmd, addr, PMD_SECT_XN);
4168+ }
4169+ /* make init NX */
4170+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4171+ pgd = pgd_offset_k(addr);
4172+ pud = pud_offset(pgd, addr);
4173+ pmd = pmd_offset(pud, addr);
4174+ __section_update(pmd, addr, PMD_SECT_XN);
4175+ }
4176+ /* make kernel code/rodata RX */
4177+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4178+ pgd = pgd_offset_k(addr);
4179+ pud = pud_offset(pgd, addr);
4180+ pmd = pmd_offset(pud, addr);
4181+#ifdef CONFIG_ARM_LPAE
4182+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4183+#else
4184+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4185+#endif
4186+ }
4187+ }
4188+#endif
4189+
4190+#ifdef CONFIG_HAVE_TCM
4191 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4192 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4193 #endif
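
The free_tcmmem() addition performs the late KERNEXEC permission pass: just before init memory is freed, everything below _stext (page tables and early data) and the init sections are marked execute-never, while kernel text and rodata become read-only (PMD_SECT_RDONLY on LPAE, PMD_SECT_APX|PMD_SECT_AP_WRITE on classic page tables), all at 1 MiB section granularity. A toy model of those three loops over an array of fake section descriptors — bit positions and the layout are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define SECT_XN (1u << 4)  /* illustrative, not the real PMD bit */
#define SECT_RO (1u << 9)

static uint32_t pmd[16];   /* one descriptor per 1 MiB "section" */

static void section_update(uint32_t *e, uint32_t prot) { *e |= prot; }

int main(void)
{
	int i;

	/* layout assumption: [0,4) early data, [4,8) init, [8,16) text+rodata */
	for (i = 0; i < 4;  i++) section_update(&pmd[i], SECT_XN); /* data: NX */
	for (i = 4; i < 8;  i++) section_update(&pmd[i], SECT_XN); /* init: NX */
	for (i = 8; i < 16; i++) section_update(&pmd[i], SECT_RO); /* text: RX */

	for (i = 0; i < 16; i++)
		printf("section %2d: %s %s\n", i,
		       pmd[i] & SECT_XN ? "NX" : "X ",
		       pmd[i] & SECT_RO ? "RO" : "RW");
	return 0;
}
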
4194diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4195index d1e5ad7..84dcbf2 100644
4196--- a/arch/arm/mm/ioremap.c
4197+++ b/arch/arm/mm/ioremap.c
4198@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4199 unsigned int mtype;
4200
4201 if (cached)
4202- mtype = MT_MEMORY_RWX;
4203+ mtype = MT_MEMORY_RX;
4204 else
4205- mtype = MT_MEMORY_RWX_NONCACHED;
4206+ mtype = MT_MEMORY_RX_NONCACHED;
4207
4208 return __arm_ioremap_caller(phys_addr, size, mtype,
4209 __builtin_return_address(0));
4210diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4211index 5e85ed3..b10a7ed 100644
4212--- a/arch/arm/mm/mmap.c
4213+++ b/arch/arm/mm/mmap.c
4214@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4215 struct vm_area_struct *vma;
4216 int do_align = 0;
4217 int aliasing = cache_is_vipt_aliasing();
4218+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4219 struct vm_unmapped_area_info info;
4220
4221 /*
4222@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4223 if (len > TASK_SIZE)
4224 return -ENOMEM;
4225
4226+#ifdef CONFIG_PAX_RANDMMAP
4227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4228+#endif
4229+
4230 if (addr) {
4231 if (do_align)
4232 addr = COLOUR_ALIGN(addr, pgoff);
4233@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4234 addr = PAGE_ALIGN(addr);
4235
4236 vma = find_vma(mm, addr);
4237- if (TASK_SIZE - len >= addr &&
4238- (!vma || addr + len <= vma->vm_start))
4239+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4240 return addr;
4241 }
4242
4243@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 info.high_limit = TASK_SIZE;
4245 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4246 info.align_offset = pgoff << PAGE_SHIFT;
4247+ info.threadstack_offset = offset;
4248 return vm_unmapped_area(&info);
4249 }
4250
4251@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4252 unsigned long addr = addr0;
4253 int do_align = 0;
4254 int aliasing = cache_is_vipt_aliasing();
4255+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4256 struct vm_unmapped_area_info info;
4257
4258 /*
4259@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4260 return addr;
4261 }
4262
4263+#ifdef CONFIG_PAX_RANDMMAP
4264+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4265+#endif
4266+
4267 /* requesting a specific address */
4268 if (addr) {
4269 if (do_align)
4270@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 else
4272 addr = PAGE_ALIGN(addr);
4273 vma = find_vma(mm, addr);
4274- if (TASK_SIZE - len >= addr &&
4275- (!vma || addr + len <= vma->vm_start))
4276+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4277 return addr;
4278 }
4279
4280@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 info.high_limit = mm->mmap_base;
4282 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4283 info.align_offset = pgoff << PAGE_SHIFT;
4284+ info.threadstack_offset = offset;
4285 addr = vm_unmapped_area(&info);
4286
4287 /*
4288@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4289 {
4290 unsigned long random_factor = 0UL;
4291
4292+#ifdef CONFIG_PAX_RANDMMAP
4293+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4294+#endif
4295+
4296 /* 8 bits of randomness in 20 address space bits */
4297 if ((current->flags & PF_RANDOMIZE) &&
4298 !(current->personality & ADDR_NO_RANDOMIZE))
4299@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4300
4301 if (mmap_is_legacy()) {
4302 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4303+
4304+#ifdef CONFIG_PAX_RANDMMAP
4305+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4306+ mm->mmap_base += mm->delta_mmap;
4307+#endif
4308+
4309 mm->get_unmapped_area = arch_get_unmapped_area;
4310 } else {
4311 mm->mmap_base = mmap_base(random_factor);
4312+
4313+#ifdef CONFIG_PAX_RANDMMAP
4314+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4315+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4316+#endif
4317+
4318 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4319 }
4320 }
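
Besides routing placement decisions through check_heap_stack_gap() and a per-thread stack offset, the mmap.c hunk layers the PAX_RANDMMAP deltas on top of the stock randomisation, which the comment describes as "8 bits of randomness in 20 address space bits": an 8-bit random value shifted left by PAGE_SHIFT spreads mmap_base across a 1 MiB window. A quick userspace illustration of that arithmetic, with rand() standing in for get_random_int() and the base address purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12
#define TASK_UNMAPPED_BASE 0x40000000UL /* illustrative value */

int main(void)
{
	unsigned long random_factor;

	srand((unsigned)time(NULL));
	random_factor = ((unsigned long)rand() % (1 << 8)) << PAGE_SHIFT;
	printf("mmap_base = %#lx (offset %#lx, window %#x)\n",
	       TASK_UNMAPPED_BASE + random_factor, random_factor,
	       (1 << 8) << PAGE_SHIFT); /* 256 pages = 1 MiB */
	return 0;
}
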
4321diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4322index 4e6ef89..21c27f2 100644
4323--- a/arch/arm/mm/mmu.c
4324+++ b/arch/arm/mm/mmu.c
4325@@ -41,6 +41,22 @@
4326 #include "mm.h"
4327 #include "tcm.h"
4328
4329+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4330+void modify_domain(unsigned int dom, unsigned int type)
4331+{
4332+ struct thread_info *thread = current_thread_info();
4333+ unsigned int domain = thread->cpu_domain;
4334+ /*
4335+ * DOMAIN_MANAGER might be defined to some other value,
4336+ * use the arch-defined constant
4337+ */
4338+ domain &= ~domain_val(dom, 3);
4339+ thread->cpu_domain = domain | domain_val(dom, type);
4340+ set_domain(thread->cpu_domain);
4341+}
4342+EXPORT_SYMBOL(modify_domain);
4343+#endif
4344+
4345 /*
4346 * empty_zero_page is a special page that is used for
4347 * zero-initialized data and COW.
4348@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4349 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4350 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4351
4352-static struct mem_type mem_types[] = {
4353+#ifdef CONFIG_PAX_KERNEXEC
4354+#define L_PTE_KERNEXEC L_PTE_RDONLY
4355+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4356+#else
4357+#define L_PTE_KERNEXEC L_PTE_DIRTY
4358+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4359+#endif
4360+
4361+static struct mem_type mem_types[] __read_only = {
4362 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4363 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4364 L_PTE_SHARED,
4365@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4366 .prot_sect = PROT_SECT_DEVICE,
4367 .domain = DOMAIN_IO,
4368 },
4369- [MT_UNCACHED] = {
4370+ [MT_UNCACHED_RW] = {
4371 .prot_pte = PROT_PTE_DEVICE,
4372 .prot_l1 = PMD_TYPE_TABLE,
4373 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4374 .domain = DOMAIN_IO,
4375 },
4376- [MT_CACHECLEAN] = {
4377- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4378+ [MT_CACHECLEAN_RO] = {
4379+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4380 .domain = DOMAIN_KERNEL,
4381 },
4382 #ifndef CONFIG_ARM_LPAE
4383- [MT_MINICLEAN] = {
4384- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4385+ [MT_MINICLEAN_RO] = {
4386+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4387 .domain = DOMAIN_KERNEL,
4388 },
4389 #endif
4390@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4391 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4392 L_PTE_RDONLY,
4393 .prot_l1 = PMD_TYPE_TABLE,
4394- .domain = DOMAIN_USER,
4395+ .domain = DOMAIN_VECTORS,
4396 },
4397 [MT_HIGH_VECTORS] = {
4398 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4399 L_PTE_USER | L_PTE_RDONLY,
4400 .prot_l1 = PMD_TYPE_TABLE,
4401- .domain = DOMAIN_USER,
4402+ .domain = DOMAIN_VECTORS,
4403 },
4404- [MT_MEMORY_RWX] = {
4405+ [__MT_MEMORY_RWX] = {
4406 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4407 .prot_l1 = PMD_TYPE_TABLE,
4408 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4409@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4410 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4411 .domain = DOMAIN_KERNEL,
4412 },
4413- [MT_ROM] = {
4414- .prot_sect = PMD_TYPE_SECT,
4415+ [MT_MEMORY_RX] = {
4416+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4417+ .prot_l1 = PMD_TYPE_TABLE,
4418+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4419+ .domain = DOMAIN_KERNEL,
4420+ },
4421+ [MT_ROM_RX] = {
4422+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4423 .domain = DOMAIN_KERNEL,
4424 },
4425- [MT_MEMORY_RWX_NONCACHED] = {
4426+ [MT_MEMORY_RW_NONCACHED] = {
4427 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4428 L_PTE_MT_BUFFERABLE,
4429 .prot_l1 = PMD_TYPE_TABLE,
4430 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4431 .domain = DOMAIN_KERNEL,
4432 },
4433+ [MT_MEMORY_RX_NONCACHED] = {
4434+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4435+ L_PTE_MT_BUFFERABLE,
4436+ .prot_l1 = PMD_TYPE_TABLE,
4437+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4438+ .domain = DOMAIN_KERNEL,
4439+ },
4440 [MT_MEMORY_RW_DTCM] = {
4441 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4442 L_PTE_XN,
4443@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4444 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4445 .domain = DOMAIN_KERNEL,
4446 },
4447- [MT_MEMORY_RWX_ITCM] = {
4448- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4449+ [MT_MEMORY_RX_ITCM] = {
4450+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4451 .prot_l1 = PMD_TYPE_TABLE,
4452+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4453 .domain = DOMAIN_KERNEL,
4454 },
4455 [MT_MEMORY_RW_SO] = {
4456@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4457 * Mark cache clean areas and XIP ROM read only
4458 * from SVC mode and no access from userspace.
4459 */
4460- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4461- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4462- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4463+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4464+#ifdef CONFIG_PAX_KERNEXEC
4465+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4466+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4467+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4468+#endif
4469+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4470+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471 #endif
4472
4473 /*
4474@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4475 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4476 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4477 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4478- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4479- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4480+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4481+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4482 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4483 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4484+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4485+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4487- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4488- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4489+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4490+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4493 }
4494 }
4495
4496@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4497 if (cpu_arch >= CPU_ARCH_ARMv6) {
4498 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4499 /* Non-cacheable Normal is XCB = 001 */
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4501+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4502+ PMD_SECT_BUFFERED;
4503+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4504 PMD_SECT_BUFFERED;
4505 } else {
4506 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4507- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4508+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4509+ PMD_SECT_TEX(1);
4510+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4511 PMD_SECT_TEX(1);
4512 }
4513 } else {
4514- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4515+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4517 }
4518
4519 #ifdef CONFIG_ARM_LPAE
4520@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4521 user_pgprot |= PTE_EXT_PXN;
4522 #endif
4523
4524+ user_pgprot |= __supported_pte_mask;
4525+
4526 for (i = 0; i < 16; i++) {
4527 pteval_t v = pgprot_val(protection_map[i]);
4528 protection_map[i] = __pgprot(v | user_pgprot);
4529@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4530
4531 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4532 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4533- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4534- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4535+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4536+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4537 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4538 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4539+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4540+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4541 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4542- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4543- mem_types[MT_ROM].prot_sect |= cp->pmd;
4544+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4545+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4546+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4547
4548 switch (cp->pmd) {
4549 case PMD_SECT_WT:
4550- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4551+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4552 break;
4553 case PMD_SECT_WB:
4554 case PMD_SECT_WBWA:
4555- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4556+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4557 break;
4558 }
4559 pr_info("Memory policy: %sData cache %s\n",
4560@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4561 return;
4562 }
4563
4564- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4565+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4566 md->virtual >= PAGE_OFFSET &&
4567 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4568 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4569@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4570 * called function. This means you can't use any function or debugging
4571 * method which may touch any device, otherwise the kernel _will_ crash.
4572 */
4573+
4574+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4575+
4576 static void __init devicemaps_init(const struct machine_desc *mdesc)
4577 {
4578 struct map_desc map;
4579 unsigned long addr;
4580- void *vectors;
4581
4582- /*
4583- * Allocate the vector page early.
4584- */
4585- vectors = early_alloc(PAGE_SIZE * 2);
4586-
4587- early_trap_init(vectors);
4588+ early_trap_init(&vectors);
4589
4590 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4591 pmd_clear(pmd_off_k(addr));
4592@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4593 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4594 map.virtual = MODULES_VADDR;
4595 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4596- map.type = MT_ROM;
4597+ map.type = MT_ROM_RX;
4598 create_mapping(&map);
4599 #endif
4600
4601@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4602 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4603 map.virtual = FLUSH_BASE;
4604 map.length = SZ_1M;
4605- map.type = MT_CACHECLEAN;
4606+ map.type = MT_CACHECLEAN_RO;
4607 create_mapping(&map);
4608 #endif
4609 #ifdef FLUSH_BASE_MINICACHE
4610 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4611 map.virtual = FLUSH_BASE_MINICACHE;
4612 map.length = SZ_1M;
4613- map.type = MT_MINICLEAN;
4614+ map.type = MT_MINICLEAN_RO;
4615 create_mapping(&map);
4616 #endif
4617
4618@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4619 * location (0xffff0000). If we aren't using high-vectors, also
4620 * create a mapping at the low-vectors virtual address.
4621 */
4622- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4623+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4624 map.virtual = 0xffff0000;
4625 map.length = PAGE_SIZE;
4626 #ifdef CONFIG_KUSER_HELPERS
4627@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4628 static void __init map_lowmem(void)
4629 {
4630 struct memblock_region *reg;
4631+#ifndef CONFIG_PAX_KERNEXEC
4632 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4633 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4634+#endif
4635
4636 /* Map all the lowmem memory banks. */
4637 for_each_memblock(memory, reg) {
4638@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4639 if (start >= end)
4640 break;
4641
4642+#ifdef CONFIG_PAX_KERNEXEC
4643+ map.pfn = __phys_to_pfn(start);
4644+ map.virtual = __phys_to_virt(start);
4645+ map.length = end - start;
4646+
4647+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4648+ struct map_desc kernel;
4649+ struct map_desc initmap;
4650+
4651+ /* when freeing initmem we will make this RW */
4652+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4653+ initmap.virtual = (unsigned long)__init_begin;
4654+ initmap.length = _sdata - __init_begin;
4655+ initmap.type = __MT_MEMORY_RWX;
4656+ create_mapping(&initmap);
4657+
4658+ /* when freeing initmem we will make this RX */
4659+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4660+ kernel.virtual = (unsigned long)_stext;
4661+ kernel.length = __init_begin - _stext;
4662+ kernel.type = __MT_MEMORY_RWX;
4663+ create_mapping(&kernel);
4664+
4665+ if (map.virtual < (unsigned long)_stext) {
4666+ map.length = (unsigned long)_stext - map.virtual;
4667+ map.type = __MT_MEMORY_RWX;
4668+ create_mapping(&map);
4669+ }
4670+
4671+ map.pfn = __phys_to_pfn(__pa(_sdata));
4672+ map.virtual = (unsigned long)_sdata;
4673+ map.length = end - __pa(_sdata);
4674+ }
4675+
4676+ map.type = MT_MEMORY_RW;
4677+ create_mapping(&map);
4678+#else
4679 if (end < kernel_x_start) {
4680 map.pfn = __phys_to_pfn(start);
4681 map.virtual = __phys_to_virt(start);
4682 map.length = end - start;
4683- map.type = MT_MEMORY_RWX;
4684+ map.type = __MT_MEMORY_RWX;
4685
4686 create_mapping(&map);
4687 } else if (start >= kernel_x_end) {
4688@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4689 map.pfn = __phys_to_pfn(kernel_x_start);
4690 map.virtual = __phys_to_virt(kernel_x_start);
4691 map.length = kernel_x_end - kernel_x_start;
4692- map.type = MT_MEMORY_RWX;
4693+ map.type = __MT_MEMORY_RWX;
4694
4695 create_mapping(&map);
4696
4697@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4698 create_mapping(&map);
4699 }
4700 }
4701+#endif
4702 }
4703 }
4704
4705diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4706index e1268f9..a9755a7 100644
4707--- a/arch/arm/net/bpf_jit_32.c
4708+++ b/arch/arm/net/bpf_jit_32.c
4709@@ -20,6 +20,7 @@
4710 #include <asm/cacheflush.h>
4711 #include <asm/hwcap.h>
4712 #include <asm/opcodes.h>
4713+#include <asm/pgtable.h>
4714
4715 #include "bpf_jit_32.h"
4716
4717@@ -71,7 +72,11 @@ struct jit_ctx {
4718 #endif
4719 };
4720
4721+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4722+int bpf_jit_enable __read_only;
4723+#else
4724 int bpf_jit_enable __read_mostly;
4725+#endif
4726
4727 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4728 {
4729@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4730 {
4731 u32 *ptr;
4732 /* We are guaranteed to have aligned memory. */
4733+ pax_open_kernel();
4734 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4735 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4736+ pax_close_kernel();
4737 }
4738
4739 static void build_prologue(struct jit_ctx *ctx)
4740diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4741index 5b217f4..c23f40e 100644
4742--- a/arch/arm/plat-iop/setup.c
4743+++ b/arch/arm/plat-iop/setup.c
4744@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4745 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4746 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4747 .length = IOP3XX_PERIPHERAL_SIZE,
4748- .type = MT_UNCACHED,
4749+ .type = MT_UNCACHED_RW,
4750 },
4751 };
4752
4753diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4754index a5bc92d..0bb4730 100644
4755--- a/arch/arm/plat-omap/sram.c
4756+++ b/arch/arm/plat-omap/sram.c
4757@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4758 * Looks like we need to preserve some bootloader code at the
4759 * beginning of SRAM for jumping to flash for reboot to work...
4760 */
4761+ pax_open_kernel();
4762 memset_io(omap_sram_base + omap_sram_skip, 0,
4763 omap_sram_size - omap_sram_skip);
4764+ pax_close_kernel();
4765 }
4766diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4767index 7047051..44e8675 100644
4768--- a/arch/arm64/include/asm/atomic.h
4769+++ b/arch/arm64/include/asm/atomic.h
4770@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4771 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4772 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 #endif
4785 #endif
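
The *_unchecked defines exist because, under PAX_REFCOUNT, the regular atomic ops on instrumented architectures trap on signed overflow to stop reference-count overflow exploits; counters that are allowed to wrap, such as the asid_generation counter converted in the context.c hunk earlier, use the unchecked variants instead. arm64 is not instrumented by this patch, so here the unchecked names are plain aliases. A simplified, non-atomic model of the checked/unchecked split:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int64_t counter; } atomic64_t;

static void atomic64_add(int64_t a, atomic64_t *v)
{
	int64_t res;

	if (__builtin_add_overflow(v->counter, a, &res)) {
		/* kernel: pax_report_refcount_overflow() + saturation/trap */
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	v->counter = res;
}

static void atomic64_add_unchecked(int64_t a, atomic64_t *v)
{
	/* wraparound is intended here (e.g. generation counters); done in
	 * unsigned arithmetic to keep this model well-defined C */
	v->counter = (int64_t)((uint64_t)v->counter + (uint64_t)a);
}

int main(void)
{
	atomic64_t gen = { INT64_MAX };
	atomic64_t ref = { INT64_MAX };

	atomic64_add_unchecked(1, &gen);  /* wraps, by design */
	printf("gen = %lld\n", (long long)gen.counter);

	atomic64_add(1, &ref);            /* aborts */
	return 0;
}
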
4786diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4787index a5abb00..9cbca9a 100644
4788--- a/arch/arm64/include/asm/barrier.h
4789+++ b/arch/arm64/include/asm/barrier.h
4790@@ -44,7 +44,7 @@
4791 do { \
4792 compiletime_assert_atomic_type(*p); \
4793 barrier(); \
4794- ACCESS_ONCE(*p) = (v); \
4795+ ACCESS_ONCE_RW(*p) = (v); \
4796 } while (0)
4797
4798 #define smp_load_acquire(p) \
4799diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4800index 4fde8c1..441f84f 100644
4801--- a/arch/arm64/include/asm/percpu.h
4802+++ b/arch/arm64/include/asm/percpu.h
4803@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4804 {
4805 switch (size) {
4806 case 1:
4807- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4808+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4809 break;
4810 case 2:
4811- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4812+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4813 break;
4814 case 4:
4815- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4816+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4817 break;
4818 case 8:
4819- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4820+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4821 break;
4822 default:
4823 BUILD_BUG();
4824diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4825index e20df38..027ede3 100644
4826--- a/arch/arm64/include/asm/pgalloc.h
4827+++ b/arch/arm64/include/asm/pgalloc.h
4828@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4829 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4830 }
4831
4832+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4833+{
4834+ pud_populate(mm, pud, pmd);
4835+}
4836+
4837 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4838
4839 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4840diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4841index 07e1ba44..ec8cbbb 100644
4842--- a/arch/arm64/include/asm/uaccess.h
4843+++ b/arch/arm64/include/asm/uaccess.h
4844@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4845 flag; \
4846 })
4847
4848+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4849 #define access_ok(type, addr, size) __range_ok(addr, size)
4850 #define user_addr_max get_fs
4851
4852diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4853index ef7d112..08cd35f 100644
4854--- a/arch/arm64/mm/dma-mapping.c
4855+++ b/arch/arm64/mm/dma-mapping.c
4856@@ -137,7 +137,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4857 phys_to_page(paddr),
4858 size >> PAGE_SHIFT);
4859 if (!freed)
4860- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4861+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4862 }
4863
4864 static void *__dma_alloc(struct device *dev, size_t size,
4865diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4866index c3a58a1..78fbf54 100644
4867--- a/arch/avr32/include/asm/cache.h
4868+++ b/arch/avr32/include/asm/cache.h
4869@@ -1,8 +1,10 @@
4870 #ifndef __ASM_AVR32_CACHE_H
4871 #define __ASM_AVR32_CACHE_H
4872
4873+#include <linux/const.h>
4874+
4875 #define L1_CACHE_SHIFT 5
4876-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4877+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4878
4879 /*
4880 * Memory returned by kmalloc() may be used for DMA, so we must make
4881diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4882index d232888..87c8df1 100644
4883--- a/arch/avr32/include/asm/elf.h
4884+++ b/arch/avr32/include/asm/elf.h
4885@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4886 the loader. We need to make sure that it is out of the way of the program
4887 that it will "exec", and that there is sufficient room for the brk. */
4888
4889-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4890+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4891
4892+#ifdef CONFIG_PAX_ASLR
4893+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4894+
4895+#define PAX_DELTA_MMAP_LEN 15
4896+#define PAX_DELTA_STACK_LEN 15
4897+#endif
4898
4899 /* This yields a mask that user programs can use to figure out what
4900 instruction set this CPU supports. This could be done in user space,
4901diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4902index 479330b..53717a8 100644
4903--- a/arch/avr32/include/asm/kmap_types.h
4904+++ b/arch/avr32/include/asm/kmap_types.h
4905@@ -2,9 +2,9 @@
4906 #define __ASM_AVR32_KMAP_TYPES_H
4907
4908 #ifdef CONFIG_DEBUG_HIGHMEM
4909-# define KM_TYPE_NR 29
4910+# define KM_TYPE_NR 30
4911 #else
4912-# define KM_TYPE_NR 14
4913+# define KM_TYPE_NR 15
4914 #endif
4915
4916 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4917diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4918index d223a8b..69c5210 100644
4919--- a/arch/avr32/mm/fault.c
4920+++ b/arch/avr32/mm/fault.c
4921@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4922
4923 int exception_trace = 1;
4924
4925+#ifdef CONFIG_PAX_PAGEEXEC
4926+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4927+{
4928+ unsigned long i;
4929+
4930+ printk(KERN_ERR "PAX: bytes at PC: ");
4931+ for (i = 0; i < 20; i++) {
4932+ unsigned char c;
4933+ if (get_user(c, (unsigned char *)pc+i))
4934+ printk(KERN_CONT "???????? ");
4935+ else
4936+ printk(KERN_CONT "%02x ", c);
4937+ }
4938+ printk("\n");
4939+}
4940+#endif
4941+
4942 /*
4943 * This routine handles page faults. It determines the address and the
4944 * problem, and then passes it off to one of the appropriate routines.
4945@@ -178,6 +195,16 @@ bad_area:
4946 up_read(&mm->mmap_sem);
4947
4948 if (user_mode(regs)) {
4949+
4950+#ifdef CONFIG_PAX_PAGEEXEC
4951+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4952+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4953+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4954+ do_group_exit(SIGKILL);
4955+ }
4956+ }
4957+#endif
4958+
4959 if (exception_trace && printk_ratelimit())
4960 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4961 "sp %08lx ecr %lu\n",
4962diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4963index 568885a..f8008df 100644
4964--- a/arch/blackfin/include/asm/cache.h
4965+++ b/arch/blackfin/include/asm/cache.h
4966@@ -7,6 +7,7 @@
4967 #ifndef __ARCH_BLACKFIN_CACHE_H
4968 #define __ARCH_BLACKFIN_CACHE_H
4969
4970+#include <linux/const.h>
4971 #include <linux/linkage.h> /* for asmlinkage */
4972
4973 /*
4974@@ -14,7 +15,7 @@
4975 * Blackfin loads 32 bytes for cache
4976 */
4977 #define L1_CACHE_SHIFT 5
4978-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4979+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4980 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4981
4982 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4983diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4984index aea2718..3639a60 100644
4985--- a/arch/cris/include/arch-v10/arch/cache.h
4986+++ b/arch/cris/include/arch-v10/arch/cache.h
4987@@ -1,8 +1,9 @@
4988 #ifndef _ASM_ARCH_CACHE_H
4989 #define _ASM_ARCH_CACHE_H
4990
4991+#include <linux/const.h>
4992 /* Etrax 100LX have 32-byte cache-lines. */
4993-#define L1_CACHE_BYTES 32
4994 #define L1_CACHE_SHIFT 5
4995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4996
4997 #endif /* _ASM_ARCH_CACHE_H */
4998diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4999index 7caf25d..ee65ac5 100644
5000--- a/arch/cris/include/arch-v32/arch/cache.h
5001+++ b/arch/cris/include/arch-v32/arch/cache.h
5002@@ -1,11 +1,12 @@
5003 #ifndef _ASM_CRIS_ARCH_CACHE_H
5004 #define _ASM_CRIS_ARCH_CACHE_H
5005
5006+#include <linux/const.h>
5007 #include <arch/hwregs/dma.h>
5008
5009 /* A cache-line is 32 bytes. */
5010-#define L1_CACHE_BYTES 32
5011 #define L1_CACHE_SHIFT 5
5012+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5013
5014 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5015
5016diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5017index 102190a..5334cea 100644
5018--- a/arch/frv/include/asm/atomic.h
5019+++ b/arch/frv/include/asm/atomic.h
5020@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5021 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5022 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5023
5024+#define atomic64_read_unchecked(v) atomic64_read(v)
5025+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5026+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5027+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5028+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5029+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5030+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5031+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5032+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5033+
5034 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5035 {
5036 int c, old;
5037diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5038index 2797163..c2a401df9 100644
5039--- a/arch/frv/include/asm/cache.h
5040+++ b/arch/frv/include/asm/cache.h
5041@@ -12,10 +12,11 @@
5042 #ifndef __ASM_CACHE_H
5043 #define __ASM_CACHE_H
5044
5045+#include <linux/const.h>
5046
5047 /* bytes per L1 cache line */
5048 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5049-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5050+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5051
5052 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5053 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5054diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5055index 43901f2..0d8b865 100644
5056--- a/arch/frv/include/asm/kmap_types.h
5057+++ b/arch/frv/include/asm/kmap_types.h
5058@@ -2,6 +2,6 @@
5059 #ifndef _ASM_KMAP_TYPES_H
5060 #define _ASM_KMAP_TYPES_H
5061
5062-#define KM_TYPE_NR 17
5063+#define KM_TYPE_NR 18
5064
5065 #endif
5066diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5067index 836f147..4cf23f5 100644
5068--- a/arch/frv/mm/elf-fdpic.c
5069+++ b/arch/frv/mm/elf-fdpic.c
5070@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5071 {
5072 struct vm_area_struct *vma;
5073 struct vm_unmapped_area_info info;
5074+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5075
5076 if (len > TASK_SIZE)
5077 return -ENOMEM;
5078@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5079 if (addr) {
5080 addr = PAGE_ALIGN(addr);
5081 vma = find_vma(current->mm, addr);
5082- if (TASK_SIZE - len >= addr &&
5083- (!vma || addr + len <= vma->vm_start))
5084+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5085 goto success;
5086 }
5087
5088@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5089 info.high_limit = (current->mm->start_stack - 0x00200000);
5090 info.align_mask = 0;
5091 info.align_offset = 0;
5092+ info.threadstack_offset = offset;
5093 addr = vm_unmapped_area(&info);
5094 if (!(addr & ~PAGE_MASK))
5095 goto success;
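
check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test wherever an architecture picks an unmapped area: besides plain non-overlap it enforces a guard gap under stack-like mappings and folds in the per-thread offset produced by gr_rand_threadstack_offset(). A self-contained sketch of those semantics (the field names and the guard size are illustrative assumptions, not the patch's exact definitions):

	#include <stdbool.h>
	#include <stdio.h>

	struct vma_sketch {
		unsigned long vm_start;
		bool grows_down;	/* stack-like mapping */
	};

	#define GUARD_GAP (64UL << 10)	/* assumed guard below a stack */

	static bool gap_ok(const struct vma_sketch *next, unsigned long addr,
			   unsigned long len, unsigned long offset)
	{
		unsigned long end = addr + len;

		if (!next)			/* nothing above: always fits */
			return true;
		if (next->grows_down)		/* keep guard + random offset */
			return end + GUARD_GAP + offset <= next->vm_start;
		return end <= next->vm_start;	/* plain mapping: no overlap  */
	}

	int main(void)
	{
		struct vma_sketch stack = { .vm_start = 0x7f0000000000UL,
					    .grows_down = true };
		printf("%d\n", gap_ok(&stack, 0x10000, 4096, 0x2000)); /* 1 */
		return 0;
	}
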
5096diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5097index 69952c1..4fa2908 100644
5098--- a/arch/hexagon/include/asm/cache.h
5099+++ b/arch/hexagon/include/asm/cache.h
5100@@ -21,9 +21,11 @@
5101 #ifndef __ASM_CACHE_H
5102 #define __ASM_CACHE_H
5103
5104+#include <linux/const.h>
5105+
5106 /* Bytes per L1 cache line */
5107-#define L1_CACHE_SHIFT (5)
5108-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5109+#define L1_CACHE_SHIFT 5
5110+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5111
5112 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5113
5114diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5115index 074e52b..76afdac 100644
5116--- a/arch/ia64/Kconfig
5117+++ b/arch/ia64/Kconfig
5118@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5119 config KEXEC
5120 bool "kexec system call"
5121 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5122+ depends on !GRKERNSEC_KMEM
5123 help
5124 kexec is a system call that implements the ability to shutdown your
5125 current kernel, and to start another kernel. It is like a reboot
5126diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5127index 970d0bd..e750b9b 100644
5128--- a/arch/ia64/Makefile
5129+++ b/arch/ia64/Makefile
5130@@ -98,5 +98,6 @@ endef
5131 archprepare: make_nr_irqs_h FORCE
5132 PHONY += make_nr_irqs_h FORCE
5133
5134+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5135 make_nr_irqs_h: FORCE
5136 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5137diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5138index 0bf0350..2ad1957 100644
5139--- a/arch/ia64/include/asm/atomic.h
5140+++ b/arch/ia64/include/asm/atomic.h
5141@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5142 #define atomic64_inc(v) atomic64_add(1, (v))
5143 #define atomic64_dec(v) atomic64_sub(1, (v))
5144
5145+#define atomic64_read_unchecked(v) atomic64_read(v)
5146+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5147+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5148+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5149+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5150+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5151+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5152+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5153+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5154+
5155 #endif /* _ASM_IA64_ATOMIC_H */
5156diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5157index f6769eb..1cdb590 100644
5158--- a/arch/ia64/include/asm/barrier.h
5159+++ b/arch/ia64/include/asm/barrier.h
5160@@ -66,7 +66,7 @@
5161 do { \
5162 compiletime_assert_atomic_type(*p); \
5163 barrier(); \
5164- ACCESS_ONCE(*p) = (v); \
5165+ ACCESS_ONCE_RW(*p) = (v); \
5166 } while (0)
5167
5168 #define smp_load_acquire(p) \
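
The ACCESS_ONCE() to ACCESS_ONCE_RW() conversions that recur through these barrier and spinlock headers come from PaX constification: the read macro casts through a const volatile pointer so a stray write through it no longer compiles, and genuine stores must name ACCESS_ONCE_RW() explicitly. Assuming the usual PaX definitions from the compiler.h side of this patch:

	/* Assumed shape of the PaX macros; the const on the read variant
	 * turns an accidental write through ACCESS_ONCE() into a compile
	 * error rather than a silent store. */
	#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

	static int flag;

	void publish(int v)
	{
		ACCESS_ONCE_RW(flag) = v;	/* compiles */
		/* ACCESS_ONCE(flag) = v; */	/* would not: lvalue is const */
	}

	int consume(void)
	{
		return ACCESS_ONCE(flag);	/* reads stay unchanged */
	}
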
5169diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5170index 988254a..e1ee885 100644
5171--- a/arch/ia64/include/asm/cache.h
5172+++ b/arch/ia64/include/asm/cache.h
5173@@ -1,6 +1,7 @@
5174 #ifndef _ASM_IA64_CACHE_H
5175 #define _ASM_IA64_CACHE_H
5176
5177+#include <linux/const.h>
5178
5179 /*
5180 * Copyright (C) 1998-2000 Hewlett-Packard Co
5181@@ -9,7 +10,7 @@
5182
5183 /* Bytes per L1 (data) cache line. */
5184 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5185-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5186+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5187
5188 #ifdef CONFIG_SMP
5189 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5190diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5191index 5a83c5c..4d7f553 100644
5192--- a/arch/ia64/include/asm/elf.h
5193+++ b/arch/ia64/include/asm/elf.h
5194@@ -42,6 +42,13 @@
5195 */
5196 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5197
5198+#ifdef CONFIG_PAX_ASLR
5199+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5200+
5201+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5202+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5203+#endif
5204+
5205 #define PT_IA_64_UNWIND 0x70000001
5206
5207 /* IA-64 relocations: */
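
Each PAX_ASLR block follows the same scheme: PAX_ELF_ET_DYN_BASE supplies the load base for ET_DYN binaries under PaX, and the two delta lengths give the number of random bits mixed into the mmap and stack bases at page granularity, so 64-bit ia64 gets 3*PAGE_SHIFT-13 bits while 32-bit personalities get 16. Roughly, as a hedged sketch (the helper name and the exact mixing are assumptions modeled on the pattern elsewhere in the patch):

	#include <stdio.h>

	static unsigned long randomize_base(unsigned long base,
					    unsigned int delta_len,
					    unsigned long random,
					    unsigned int page_shift)
	{
		unsigned long mask = (1UL << delta_len) - 1;

		/* keep delta_len random bits, shift to page granularity */
		return base + ((random & mask) << page_shift);
	}

	int main(void)
	{
		/* 16 random bits with 16K pages: the base moves inside
		 * a window of roughly 1 GiB */
		printf("%#lx\n",
		       randomize_base(0x08048000UL, 16, 0xdeadbeefUL, 14));
		return 0;
	}
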
5208diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5209index 5767cdf..7462574 100644
5210--- a/arch/ia64/include/asm/pgalloc.h
5211+++ b/arch/ia64/include/asm/pgalloc.h
5212@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5213 pgd_val(*pgd_entry) = __pa(pud);
5214 }
5215
5216+static inline void
5217+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5218+{
5219+ pgd_populate(mm, pgd_entry, pud);
5220+}
5221+
5222 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5223 {
5224 return quicklist_alloc(0, GFP_KERNEL, NULL);
5225@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5226 pud_val(*pud_entry) = __pa(pmd);
5227 }
5228
5229+static inline void
5230+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5231+{
5232+ pud_populate(mm, pud_entry, pmd);
5233+}
5234+
5235 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5236 {
5237 return quicklist_alloc(0, GFP_KERNEL, NULL);
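
The *_populate_kernel() wrappers exist so generic mm code can populate kernel page tables through one entry point: on architectures where PaX KERNEXEC keeps page tables read-only, the kernel variant must bracket the write, while ia64 above simply forwards. A hedged sketch of what a protected architecture would do (only the pax_open_kernel()/pax_close_kernel() names are taken from the PaX side of this patch; the rest is schematic, not compilable outside a kernel tree):

	static inline void
	pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
	{
		pax_open_kernel();		/* lift write protection      */
		pud_populate(mm, pud, pmd);	/* same work as the user path */
		pax_close_kernel();		/* page tables read-only again */
	}
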
5238diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5239index 7b6f880..ac8e008 100644
5240--- a/arch/ia64/include/asm/pgtable.h
5241+++ b/arch/ia64/include/asm/pgtable.h
5242@@ -12,7 +12,7 @@
5243 * David Mosberger-Tang <davidm@hpl.hp.com>
5244 */
5245
5246-
5247+#include <linux/const.h>
5248 #include <asm/mman.h>
5249 #include <asm/page.h>
5250 #include <asm/processor.h>
5251@@ -139,6 +139,17 @@
5252 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5253 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5254 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5255+
5256+#ifdef CONFIG_PAX_PAGEEXEC
5257+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5258+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5259+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5260+#else
5261+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5262+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5263+# define PAGE_COPY_NOEXEC PAGE_COPY
5264+#endif
5265+
5266 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5267 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5268 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5269diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5270index 45698cd..e8e2dbc 100644
5271--- a/arch/ia64/include/asm/spinlock.h
5272+++ b/arch/ia64/include/asm/spinlock.h
5273@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5274 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5275
5276 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5277- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5278+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5279 }
5280
5281 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5282diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5283index 4f3fb6cc..254055e 100644
5284--- a/arch/ia64/include/asm/uaccess.h
5285+++ b/arch/ia64/include/asm/uaccess.h
5286@@ -70,6 +70,7 @@
5287 && ((segment).seg == KERNEL_DS.seg \
5288 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5289 })
5290+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5291 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5292
5293 /*
5294@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5295 static inline unsigned long
5296 __copy_to_user (void __user *to, const void *from, unsigned long count)
5297 {
5298+ if (count > INT_MAX)
5299+ return count;
5300+
5301+ if (!__builtin_constant_p(count))
5302+ check_object_size(from, count, true);
5303+
5304 return __copy_user(to, (__force void __user *) from, count);
5305 }
5306
5307 static inline unsigned long
5308 __copy_from_user (void *to, const void __user *from, unsigned long count)
5309 {
5310+ if (count > INT_MAX)
5311+ return count;
5312+
5313+ if (!__builtin_constant_p(count))
5314+ check_object_size(to, count, false);
5315+
5316 return __copy_user((__force void __user *) to, from, count);
5317 }
5318
5319@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5320 ({ \
5321 void __user *__cu_to = (to); \
5322 const void *__cu_from = (from); \
5323- long __cu_len = (n); \
5324+ unsigned long __cu_len = (n); \
5325 \
5326- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5327+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5328+ if (!__builtin_constant_p(n)) \
5329+ check_object_size(__cu_from, __cu_len, true); \
5330 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5331+ } \
5332 __cu_len; \
5333 })
5334
5335@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5336 ({ \
5337 void *__cu_to = (to); \
5338 const void __user *__cu_from = (from); \
5339- long __cu_len = (n); \
5340+ unsigned long __cu_len = (n); \
5341 \
5342 __chk_user_ptr(__cu_from); \
5343- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5344+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5345+ if (!__builtin_constant_p(n)) \
5346+ check_object_size(__cu_to, __cu_len, false); \
5347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5348+ } \
5349 __cu_len; \
5350 })
5351
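
The uaccess changes are ia64's PAX_USERCOPY wiring: lengths above INT_MAX are refused outright, which catches negative values laundered through unsigned long, and non-constant lengths go through check_object_size(), which verifies the kernel buffer is a single heap or stack object big enough for the copy. Compile-time-constant sizes are exempt since they can be validated at build time. A user-space analogue of the cheap length checks (the real check_object_size() additionally walks slab and stack metadata):

	#include <limits.h>
	#include <string.h>
	#include <stdio.h>

	static unsigned long copy_checked(void *dst, const void *src,
					  unsigned long count,
					  unsigned long objsize)
	{
		if (count > INT_MAX)	/* (size_t)-1 and friends land here */
			return count;	/* report everything as uncopied    */
		if (count > objsize) {	/* stand-in for check_object_size() */
			fprintf(stderr, "usercopy overflow blocked\n");
			return count;
		}
		memcpy(dst, src, count);
		return 0;		/* 0 bytes left, i.e. success */
	}

	int main(void)
	{
		char obj[16], out[64];
		printf("%lu\n", copy_checked(out, obj, sizeof(obj), sizeof(obj)));
		printf("%lu\n", copy_checked(out, obj, 64, sizeof(obj)));
		return 0;
	}
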
5352diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5353index 29754aa..06d2838 100644
5354--- a/arch/ia64/kernel/module.c
5355+++ b/arch/ia64/kernel/module.c
5356@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5357 }
5358
5359 static inline int
5360+in_init_rx (const struct module *mod, uint64_t addr)
5361+{
5362+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5363+}
5364+
5365+static inline int
5366+in_init_rw (const struct module *mod, uint64_t addr)
5367+{
5368+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5369+}
5370+
5371+static inline int
5372 in_init (const struct module *mod, uint64_t addr)
5373 {
5374- return addr - (uint64_t) mod->module_init < mod->init_size;
5375+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5376+}
5377+
5378+static inline int
5379+in_core_rx (const struct module *mod, uint64_t addr)
5380+{
5381+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5382+}
5383+
5384+static inline int
5385+in_core_rw (const struct module *mod, uint64_t addr)
5386+{
5387+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5388 }
5389
5390 static inline int
5391 in_core (const struct module *mod, uint64_t addr)
5392 {
5393- return addr - (uint64_t) mod->module_core < mod->core_size;
5394+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5395 }
5396
5397 static inline int
5398@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5399 break;
5400
5401 case RV_BDREL:
5402- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5403+ if (in_init_rx(mod, val))
5404+ val -= (uint64_t) mod->module_init_rx;
5405+ else if (in_init_rw(mod, val))
5406+ val -= (uint64_t) mod->module_init_rw;
5407+ else if (in_core_rx(mod, val))
5408+ val -= (uint64_t) mod->module_core_rx;
5409+ else if (in_core_rw(mod, val))
5410+ val -= (uint64_t) mod->module_core_rw;
5411 break;
5412
5413 case RV_LTV:
5414@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5415 * addresses have been selected...
5416 */
5417 uint64_t gp;
5418- if (mod->core_size > MAX_LTOFF)
5419+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5420 /*
5421 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5422 * at the end of the module.
5423 */
5424- gp = mod->core_size - MAX_LTOFF / 2;
5425+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5426 else
5427- gp = mod->core_size / 2;
5428- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5429+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5430+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5431 mod->arch.gp = gp;
5432 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5433 }
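
Splitting module_init/module_core into _rx and _rw halves is what lets PaX map module code read-only-executable and module data non-executable; every address classification, relocation base, and the gp budget must then account for two regions per image. The "addr - base < size" form used by the new predicates is the usual single-branch range check: when addr sits below base the unsigned subtraction wraps to a huge value and the comparison fails. For example:

	/* The unsigned range-check idiom behind in_init_rx() and friends. */
	#include <stdint.h>
	#include <stdio.h>

	static int in_region(uint64_t addr, uint64_t base, uint64_t size)
	{
		return addr - base < size;	/* one branch; wraps if addr < base */
	}

	int main(void)
	{
		printf("%d\n", in_region(0x1008, 0x1000, 0x100)); /* 1 */
		printf("%d\n", in_region(0x0ff8, 0x1000, 0x100)); /* 0: wraps */
		return 0;
	}
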
5434diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5435index c39c3cd..3c77738 100644
5436--- a/arch/ia64/kernel/palinfo.c
5437+++ b/arch/ia64/kernel/palinfo.c
5438@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5439 return NOTIFY_OK;
5440 }
5441
5442-static struct notifier_block __refdata palinfo_cpu_notifier =
5443+static struct notifier_block palinfo_cpu_notifier =
5444 {
5445 .notifier_call = palinfo_cpu_callback,
5446 .priority = 0,
5447diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5448index 41e33f8..65180b2a 100644
5449--- a/arch/ia64/kernel/sys_ia64.c
5450+++ b/arch/ia64/kernel/sys_ia64.c
5451@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5452 unsigned long align_mask = 0;
5453 struct mm_struct *mm = current->mm;
5454 struct vm_unmapped_area_info info;
5455+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5456
5457 if (len > RGN_MAP_LIMIT)
5458 return -ENOMEM;
5459@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5460 if (REGION_NUMBER(addr) == RGN_HPAGE)
5461 addr = 0;
5462 #endif
5463+
5464+#ifdef CONFIG_PAX_RANDMMAP
5465+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5466+ addr = mm->free_area_cache;
5467+ else
5468+#endif
5469+
5470 if (!addr)
5471 addr = TASK_UNMAPPED_BASE;
5472
5473@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5474 info.high_limit = TASK_SIZE;
5475 info.align_mask = align_mask;
5476 info.align_offset = 0;
5477+ info.threadstack_offset = offset;
5478 return vm_unmapped_area(&info);
5479 }
5480
5481diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5482index 84f8a52..7c76178 100644
5483--- a/arch/ia64/kernel/vmlinux.lds.S
5484+++ b/arch/ia64/kernel/vmlinux.lds.S
5485@@ -192,7 +192,7 @@ SECTIONS {
5486 /* Per-cpu data: */
5487 . = ALIGN(PERCPU_PAGE_SIZE);
5488 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5489- __phys_per_cpu_start = __per_cpu_load;
5490+ __phys_per_cpu_start = per_cpu_load;
5491 /*
5492 * ensure percpu data fits
5493 * into percpu page size
5494diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5495index ba5ba7a..36e9d3a 100644
5496--- a/arch/ia64/mm/fault.c
5497+++ b/arch/ia64/mm/fault.c
5498@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5499 return pte_present(pte);
5500 }
5501
5502+#ifdef CONFIG_PAX_PAGEEXEC
5503+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5504+{
5505+ unsigned long i;
5506+
5507+ printk(KERN_ERR "PAX: bytes at PC: ");
5508+ for (i = 0; i < 8; i++) {
5509+ unsigned int c;
5510+ if (get_user(c, (unsigned int *)pc+i))
5511+ printk(KERN_CONT "???????? ");
5512+ else
5513+ printk(KERN_CONT "%08x ", c);
5514+ }
5515+ printk("\n");
5516+}
5517+#endif
5518+
5519 # define VM_READ_BIT 0
5520 # define VM_WRITE_BIT 1
5521 # define VM_EXEC_BIT 2
5522@@ -151,8 +168,21 @@ retry:
5523 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5524 goto bad_area;
5525
5526- if ((vma->vm_flags & mask) != mask)
5527+ if ((vma->vm_flags & mask) != mask) {
5528+
5529+#ifdef CONFIG_PAX_PAGEEXEC
5530+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5531+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5532+ goto bad_area;
5533+
5534+ up_read(&mm->mmap_sem);
5535+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5536+ do_group_exit(SIGKILL);
5537+ }
5538+#endif
5539+
5540 goto bad_area;
5541+ }
5542
5543 /*
5544 * If for any reason at all we couldn't handle the fault, make
5545diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5546index 52b7604b..455cb85 100644
5547--- a/arch/ia64/mm/hugetlbpage.c
5548+++ b/arch/ia64/mm/hugetlbpage.c
5549@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5550 unsigned long pgoff, unsigned long flags)
5551 {
5552 struct vm_unmapped_area_info info;
5553+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5554
5555 if (len > RGN_MAP_LIMIT)
5556 return -ENOMEM;
5557@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5558 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5559 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5560 info.align_offset = 0;
5561+ info.threadstack_offset = offset;
5562 return vm_unmapped_area(&info);
5563 }
5564
5565diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5566index 6b33457..88b5124 100644
5567--- a/arch/ia64/mm/init.c
5568+++ b/arch/ia64/mm/init.c
5569@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5570 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5571 vma->vm_end = vma->vm_start + PAGE_SIZE;
5572 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5573+
5574+#ifdef CONFIG_PAX_PAGEEXEC
5575+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5576+ vma->vm_flags &= ~VM_EXEC;
5577+
5578+#ifdef CONFIG_PAX_MPROTECT
5579+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5580+ vma->vm_flags &= ~VM_MAYEXEC;
5581+#endif
5582+
5583+ }
5584+#endif
5585+
5586 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5587 down_write(&current->mm->mmap_sem);
5588 if (insert_vm_struct(current->mm, vma)) {
5589@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5590 gate_vma.vm_start = FIXADDR_USER_START;
5591 gate_vma.vm_end = FIXADDR_USER_END;
5592 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5593- gate_vma.vm_page_prot = __P101;
5594+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5595
5596 return 0;
5597 }
5598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5599index 40b3ee98..8c2c112 100644
5600--- a/arch/m32r/include/asm/cache.h
5601+++ b/arch/m32r/include/asm/cache.h
5602@@ -1,8 +1,10 @@
5603 #ifndef _ASM_M32R_CACHE_H
5604 #define _ASM_M32R_CACHE_H
5605
5606+#include <linux/const.h>
5607+
5608 /* L1 cache line size */
5609 #define L1_CACHE_SHIFT 4
5610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5612
5613 #endif /* _ASM_M32R_CACHE_H */
5614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5615index 82abd15..d95ae5d 100644
5616--- a/arch/m32r/lib/usercopy.c
5617+++ b/arch/m32r/lib/usercopy.c
5618@@ -14,6 +14,9 @@
5619 unsigned long
5620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5621 {
5622+ if ((long)n < 0)
5623+ return n;
5624+
5625 prefetch(from);
5626 if (access_ok(VERIFY_WRITE, to, n))
5627 __copy_user(to,from,n);
5628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5629 unsigned long
5630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5631 {
5632+ if ((long)n < 0)
5633+ return n;
5634+
5635 prefetchw(to);
5636 if (access_ok(VERIFY_READ, from, n))
5637 __copy_user_zeroing(to,from,n);
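
m32r has none of the object-size machinery, so its usercopy hardening is the minimal form seen here: any length whose top bit is set is returned unconsumed before access_ok() and the copy ever run. That blocks the classic pattern where a signed length computation underflows and then arrives as an unsigned count of nearly 2^32 (or 2^64). A small demonstration of the failure mode the cast catches:

	/* Why the (long)n < 0 check helps: a negative signed length
	 * becomes an enormous unsigned count at the copy interface. */
	#include <stdio.h>

	int main(void)
	{
		int user_len = -1;			/* attacker-influenced */
		unsigned long n = (unsigned long)user_len;

		if ((long)n < 0)			/* the added guard */
			printf("rejected n=%lu\n", n);	/* ~2^64 on LP64   */
		return 0;
	}
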
5638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5639index 0395c51..5f26031 100644
5640--- a/arch/m68k/include/asm/cache.h
5641+++ b/arch/m68k/include/asm/cache.h
5642@@ -4,9 +4,11 @@
5643 #ifndef __ARCH_M68K_CACHE_H
5644 #define __ARCH_M68K_CACHE_H
5645
5646+#include <linux/const.h>
5647+
5648 /* bytes per L1 cache line */
5649 #define L1_CACHE_SHIFT 4
5650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5652
5653 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5654
5655diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5656index d703d8e..a8e2d70 100644
5657--- a/arch/metag/include/asm/barrier.h
5658+++ b/arch/metag/include/asm/barrier.h
5659@@ -90,7 +90,7 @@ static inline void fence(void)
5660 do { \
5661 compiletime_assert_atomic_type(*p); \
5662 smp_mb(); \
5663- ACCESS_ONCE(*p) = (v); \
5664+ ACCESS_ONCE_RW(*p) = (v); \
5665 } while (0)
5666
5667 #define smp_load_acquire(p) \
5668diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5669index 7ca80ac..794ba72 100644
5670--- a/arch/metag/mm/hugetlbpage.c
5671+++ b/arch/metag/mm/hugetlbpage.c
5672@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5673 info.high_limit = TASK_SIZE;
5674 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5675 info.align_offset = 0;
5676+ info.threadstack_offset = 0;
5677 return vm_unmapped_area(&info);
5678 }
5679
5680diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5681index 4efe96a..60e8699 100644
5682--- a/arch/microblaze/include/asm/cache.h
5683+++ b/arch/microblaze/include/asm/cache.h
5684@@ -13,11 +13,12 @@
5685 #ifndef _ASM_MICROBLAZE_CACHE_H
5686 #define _ASM_MICROBLAZE_CACHE_H
5687
5688+#include <linux/const.h>
5689 #include <asm/registers.h>
5690
5691 #define L1_CACHE_SHIFT 5
5692 /* word-granular cache in microblaze */
5693-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5694+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5695
5696 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5697
5698diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5699index c7a1690..28c24b6 100644
5700--- a/arch/mips/Kconfig
5701+++ b/arch/mips/Kconfig
5702@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
5703
5704 config KEXEC
5705 bool "Kexec system call"
5706+ depends on !GRKERNSEC_KMEM
5707 help
5708 kexec is a system call that implements the ability to shutdown your
5709 current kernel, and to start another kernel. It is like a reboot
5710diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5711index 7d89878..57c55b7 100644
5712--- a/arch/mips/cavium-octeon/dma-octeon.c
5713+++ b/arch/mips/cavium-octeon/dma-octeon.c
5714@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5715 if (dma_release_from_coherent(dev, order, vaddr))
5716 return;
5717
5718- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5719+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5720 }
5721
5722 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5723diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5724index 26d4363..3c9a82e 100644
5725--- a/arch/mips/include/asm/atomic.h
5726+++ b/arch/mips/include/asm/atomic.h
5727@@ -22,15 +22,39 @@
5728 #include <asm/cmpxchg.h>
5729 #include <asm/war.h>
5730
5731+#ifdef CONFIG_GENERIC_ATOMIC64
5732+#include <asm-generic/atomic64.h>
5733+#endif
5734+
5735 #define ATOMIC_INIT(i) { (i) }
5736
5737+#ifdef CONFIG_64BIT
5738+#define _ASM_EXTABLE(from, to) \
5739+" .section __ex_table,\"a\"\n" \
5740+" .dword " #from ", " #to"\n" \
5741+" .previous\n"
5742+#else
5743+#define _ASM_EXTABLE(from, to) \
5744+" .section __ex_table,\"a\"\n" \
5745+" .word " #from ", " #to"\n" \
5746+" .previous\n"
5747+#endif
5748+
5749 /*
5750 * atomic_read - read atomic variable
5751 * @v: pointer of type atomic_t
5752 *
5753 * Atomically reads the value of @v.
5754 */
5755-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5756+static inline int atomic_read(const atomic_t *v)
5757+{
5758+ return ACCESS_ONCE(v->counter);
5759+}
5760+
5761+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5762+{
5763+ return ACCESS_ONCE(v->counter);
5764+}
5765
5766 /*
5767 * atomic_set - set atomic variable
5768@@ -39,47 +63,77 @@
5769 *
5770 * Atomically sets the value of @v to @i.
5771 */
5772-#define atomic_set(v, i) ((v)->counter = (i))
5773+static inline void atomic_set(atomic_t *v, int i)
5774+{
5775+ v->counter = i;
5776+}
5777
5778-#define ATOMIC_OP(op, c_op, asm_op) \
5779-static __inline__ void atomic_##op(int i, atomic_t * v) \
5780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5781+{
5782+ v->counter = i;
5783+}
5784+
5785+#ifdef CONFIG_PAX_REFCOUNT
5786+#define __OVERFLOW_POST \
5787+ " b 4f \n" \
5788+ " .set noreorder \n" \
5789+ "3: b 5f \n" \
5790+ " move %0, %1 \n" \
5791+ " .set reorder \n"
5792+#define __OVERFLOW_EXTABLE \
5793+ "3:\n" \
5794+ _ASM_EXTABLE(2b, 3b)
5795+#else
5796+#define __OVERFLOW_POST
5797+#define __OVERFLOW_EXTABLE
5798+#endif
5799+
5800+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5801+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5802 { \
5803 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5804 int temp; \
5805 \
5806 __asm__ __volatile__( \
5807- " .set arch=r4000 \n" \
5808- "1: ll %0, %1 # atomic_" #op " \n" \
5809- " " #asm_op " %0, %2 \n" \
5810+ " .set mips3 \n" \
5811+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5812+ "2: " #asm_op " %0, %2 \n" \
5813 " sc %0, %1 \n" \
5814 " beqzl %0, 1b \n" \
5815+ extable \
5816 " .set mips0 \n" \
5817 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5818 : "Ir" (i)); \
5819 } else if (kernel_uses_llsc) { \
5820 int temp; \
5821 \
5822- do { \
5823- __asm__ __volatile__( \
5824- " .set "MIPS_ISA_LEVEL" \n" \
5825- " ll %0, %1 # atomic_" #op "\n" \
5826- " " #asm_op " %0, %2 \n" \
5827- " sc %0, %1 \n" \
5828- " .set mips0 \n" \
5829- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5830- : "Ir" (i)); \
5831- } while (unlikely(!temp)); \
5832+ __asm__ __volatile__( \
5833+ " .set "MIPS_ISA_LEVEL" \n" \
5834+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5835+ "2: " #asm_op " %0, %2 \n" \
5836+ " sc %0, %1 \n" \
5837+ " beqz %0, 1b \n" \
5838+ extable \
5839+ " .set mips0 \n" \
5840+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5841+ : "Ir" (i)); \
5842 } else { \
5843 unsigned long flags; \
5844 \
5845 raw_local_irq_save(flags); \
5846- v->counter c_op i; \
5847+ __asm__ __volatile__( \
5848+ "2: " #asm_op " %0, %1 \n" \
5849+ extable \
5850+ : "+r" (v->counter) : "Ir" (i)); \
5851 raw_local_irq_restore(flags); \
5852 } \
5853 }
5854
5855-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5856-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5857+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5858+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5859+
5860+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5861+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5862 { \
5863 int result; \
5864 \
5865@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5866 int temp; \
5867 \
5868 __asm__ __volatile__( \
5869- " .set arch=r4000 \n" \
5870- "1: ll %1, %2 # atomic_" #op "_return \n" \
5871- " " #asm_op " %0, %1, %3 \n" \
5872+ " .set mips3 \n" \
5873+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5874+ "2: " #asm_op " %0, %1, %3 \n" \
5875 " sc %0, %2 \n" \
5876 " beqzl %0, 1b \n" \
5877- " " #asm_op " %0, %1, %3 \n" \
5878+ post_op \
5879+ extable \
5880+ "4: " #asm_op " %0, %1, %3 \n" \
5881+ "5: \n" \
5882 " .set mips0 \n" \
5883 : "=&r" (result), "=&r" (temp), \
5884 "+" GCC_OFF_SMALL_ASM() (v->counter) \
5885@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5886 } else if (kernel_uses_llsc) { \
5887 int temp; \
5888 \
5889- do { \
5890- __asm__ __volatile__( \
5891- " .set "MIPS_ISA_LEVEL" \n" \
5892- " ll %1, %2 # atomic_" #op "_return \n" \
5893- " " #asm_op " %0, %1, %3 \n" \
5894- " sc %0, %2 \n" \
5895- " .set mips0 \n" \
5896- : "=&r" (result), "=&r" (temp), \
5897- "+" GCC_OFF_SMALL_ASM() (v->counter) \
5898- : "Ir" (i)); \
5899- } while (unlikely(!result)); \
5900+ __asm__ __volatile__( \
5901+ " .set "MIPS_ISA_LEVEL" \n" \
5902+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5903+ "2: " #asm_op " %0, %1, %3 \n" \
5904+ " sc %0, %2 \n" \
5905+ post_op \
5906+ extable \
5907+ "4: " #asm_op " %0, %1, %3 \n" \
5908+ "5: \n" \
5909+ " .set mips0 \n" \
5910+ : "=&r" (result), "=&r" (temp), \
5911+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
5912+ : "Ir" (i)); \
5913 \
5914 result = temp; result c_op i; \
5915 } else { \
5916 unsigned long flags; \
5917 \
5918 raw_local_irq_save(flags); \
5919- result = v->counter; \
5920- result c_op i; \
5921- v->counter = result; \
5922+ __asm__ __volatile__( \
5923+ " lw %0, %1 \n" \
5924+ "2: " #asm_op " %0, %1, %2 \n" \
5925+ " sw %0, %1 \n" \
5926+ "3: \n" \
5927+ extable \
5928+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5929+ : "Ir" (i)); \
5930 raw_local_irq_restore(flags); \
5931 } \
5932 \
5933@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5934 return result; \
5935 }
5936
5937-#define ATOMIC_OPS(op, c_op, asm_op) \
5938- ATOMIC_OP(op, c_op, asm_op) \
5939- ATOMIC_OP_RETURN(op, c_op, asm_op)
5940+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5941+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5942
5943-ATOMIC_OPS(add, +=, addu)
5944-ATOMIC_OPS(sub, -=, subu)
5945+#define ATOMIC_OPS(op, asm_op) \
5946+ ATOMIC_OP(op, asm_op) \
5947+ ATOMIC_OP_RETURN(op, asm_op)
5948+
5949+ATOMIC_OPS(add, add)
5950+ATOMIC_OPS(sub, sub)
5951
5952 #undef ATOMIC_OPS
5953 #undef ATOMIC_OP_RETURN
5954+#undef __ATOMIC_OP_RETURN
5955 #undef ATOMIC_OP
5956+#undef __ATOMIC_OP
5957
5958 /*
5959 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5960@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5961 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5962 * The function returns the old value of @v minus @i.
5963 */
5964-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5965+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5966 {
5967 int result;
5968
5969@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5970 int temp;
5971
5972 __asm__ __volatile__(
5973- " .set arch=r4000 \n"
5974+ " .set "MIPS_ISA_LEVEL" \n"
5975 "1: ll %1, %2 # atomic_sub_if_positive\n"
5976 " subu %0, %1, %3 \n"
5977 " bltz %0, 1f \n"
5978@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5979 return result;
5980 }
5981
5982-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5983-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5984+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5985+{
5986+ return cmpxchg(&v->counter, old, new);
5987+}
5988+
5989+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5990+ int new)
5991+{
5992+ return cmpxchg(&(v->counter), old, new);
5993+}
5994+
5995+static inline int atomic_xchg(atomic_t *v, int new)
5996+{
5997+ return xchg(&v->counter, new);
5998+}
5999+
6000+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6001+{
6002+ return xchg(&(v->counter), new);
6003+}
6004
6005 /**
6006 * __atomic_add_unless - add unless the number is a given value
6007@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6008
6009 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6010 #define atomic_inc_return(v) atomic_add_return(1, (v))
6011+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6012+{
6013+ return atomic_add_return_unchecked(1, v);
6014+}
6015
6016 /*
6017 * atomic_sub_and_test - subtract value from variable and test result
6018@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6019 * other cases.
6020 */
6021 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6022+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6023+{
6024+ return atomic_add_return_unchecked(1, v) == 0;
6025+}
6026
6027 /*
6028 * atomic_dec_and_test - decrement by 1 and test
6029@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6030 * Atomically increments @v by 1.
6031 */
6032 #define atomic_inc(v) atomic_add(1, (v))
6033+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6034+{
6035+ atomic_add_unchecked(1, v);
6036+}
6037
6038 /*
6039 * atomic_dec - decrement and test
6040@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6041 * Atomically decrements @v by 1.
6042 */
6043 #define atomic_dec(v) atomic_sub(1, (v))
6044+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6045+{
6046+ atomic_sub_unchecked(1, v);
6047+}
6048
6049 /*
6050 * atomic_add_negative - add and test if negative
6051@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6052 * @v: pointer of type atomic64_t
6053 *
6054 */
6055-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6056+static inline long atomic64_read(const atomic64_t *v)
6057+{
6058+ return ACCESS_ONCE(v->counter);
6059+}
6060+
6061+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6062+{
6063+ return ACCESS_ONCE(v->counter);
6064+}
6065
6066 /*
6067 * atomic64_set - set atomic variable
6068 * @v: pointer of type atomic64_t
6069 * @i: required value
6070 */
6071-#define atomic64_set(v, i) ((v)->counter = (i))
6072+static inline void atomic64_set(atomic64_t *v, long i)
6073+{
6074+ v->counter = i;
6075+}
6076
6077-#define ATOMIC64_OP(op, c_op, asm_op) \
6078-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6079+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6080+{
6081+ v->counter = i;
6082+}
6083+
6084+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6085+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6086 { \
6087 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6088 long temp; \
6089 \
6090 __asm__ __volatile__( \
6091- " .set arch=r4000 \n" \
6092- "1: lld %0, %1 # atomic64_" #op " \n" \
6093- " " #asm_op " %0, %2 \n" \
6094+ " .set "MIPS_ISA_LEVEL" \n" \
6095+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6096+ "2: " #asm_op " %0, %2 \n" \
6097 " scd %0, %1 \n" \
6098 " beqzl %0, 1b \n" \
6099+ extable \
6100 " .set mips0 \n" \
6101 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6102 : "Ir" (i)); \
6103 } else if (kernel_uses_llsc) { \
6104 long temp; \
6105 \
6106- do { \
6107- __asm__ __volatile__( \
6108- " .set "MIPS_ISA_LEVEL" \n" \
6109- " lld %0, %1 # atomic64_" #op "\n" \
6110- " " #asm_op " %0, %2 \n" \
6111- " scd %0, %1 \n" \
6112- " .set mips0 \n" \
6113- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6114- : "Ir" (i)); \
6115- } while (unlikely(!temp)); \
6116+ __asm__ __volatile__( \
6117+ " .set "MIPS_ISA_LEVEL" \n" \
6118+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6119+ "2: " #asm_op " %0, %2 \n" \
6120+ " scd %0, %1 \n" \
6121+ " beqz %0, 1b \n" \
6122+ extable \
6123+ " .set mips0 \n" \
6124+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6125+ : "Ir" (i)); \
6126 } else { \
6127 unsigned long flags; \
6128 \
6129 raw_local_irq_save(flags); \
6130- v->counter c_op i; \
6131+ __asm__ __volatile__( \
6132+ "2: " #asm_op " %0, %1 \n" \
6133+ extable \
6134+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6135 raw_local_irq_restore(flags); \
6136 } \
6137 }
6138
6139-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6140-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6141+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6142+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6143+
6144+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6145+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6146 { \
6147 long result; \
6148 \
6149@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6150 long temp; \
6151 \
6152 __asm__ __volatile__( \
6153- " .set arch=r4000 \n" \
6154+ " .set mips3 \n" \
6155 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6156- " " #asm_op " %0, %1, %3 \n" \
6157+ "2: " #asm_op " %0, %1, %3 \n" \
6158 " scd %0, %2 \n" \
6159 " beqzl %0, 1b \n" \
6160- " " #asm_op " %0, %1, %3 \n" \
6161+ post_op \
6162+ extable \
6163+ "4: " #asm_op " %0, %1, %3 \n" \
6164+ "5: \n" \
6165 " .set mips0 \n" \
6166 : "=&r" (result), "=&r" (temp), \
6167 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6168@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6169 } else if (kernel_uses_llsc) { \
6170 long temp; \
6171 \
6172- do { \
6173- __asm__ __volatile__( \
6174- " .set "MIPS_ISA_LEVEL" \n" \
6175- " lld %1, %2 # atomic64_" #op "_return\n" \
6176- " " #asm_op " %0, %1, %3 \n" \
6177- " scd %0, %2 \n" \
6178- " .set mips0 \n" \
6179- : "=&r" (result), "=&r" (temp), \
6180- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6181- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6182- : "memory"); \
6183- } while (unlikely(!result)); \
6184+ __asm__ __volatile__( \
6185+ " .set "MIPS_ISA_LEVEL" \n" \
6186+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6187+ "2: " #asm_op " %0, %1, %3 \n" \
6188+ " scd %0, %2 \n" \
6189+ " beqz %0, 1b \n" \
6190+ post_op \
6191+ extable \
6192+ "4: " #asm_op " %0, %1, %3 \n" \
6193+ "5: \n" \
6194+ " .set mips0 \n" \
6195+ : "=&r" (result), "=&r" (temp), \
6196+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6197+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6198+ : "memory"); \
6199 \
6200 result = temp; result c_op i; \
6201 } else { \
6202 unsigned long flags; \
6203 \
6204 raw_local_irq_save(flags); \
6205- result = v->counter; \
6206- result c_op i; \
6207- v->counter = result; \
6208+ __asm__ __volatile__( \
6209+ " ld %0, %1 \n" \
6210+ "2: " #asm_op " %0, %1, %2 \n" \
6211+ " sd %0, %1 \n" \
6212+ "3: \n" \
6213+ extable \
6214+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6215+ : "Ir" (i)); \
6216 raw_local_irq_restore(flags); \
6217 } \
6218 \
6219@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6220 return result; \
6221 }
6222
6223-#define ATOMIC64_OPS(op, c_op, asm_op) \
6224- ATOMIC64_OP(op, c_op, asm_op) \
6225- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6226+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6227+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6228
6229-ATOMIC64_OPS(add, +=, daddu)
6230-ATOMIC64_OPS(sub, -=, dsubu)
6231+#define ATOMIC64_OPS(op, asm_op) \
6232+ ATOMIC64_OP(op, asm_op) \
6233+ ATOMIC64_OP_RETURN(op, asm_op)
6234+
6235+ATOMIC64_OPS(add, dadd)
6236+ATOMIC64_OPS(sub, dsub)
6237
6238 #undef ATOMIC64_OPS
6239 #undef ATOMIC64_OP_RETURN
6240+#undef __ATOMIC64_OP_RETURN
6241 #undef ATOMIC64_OP
6242+#undef __ATOMIC64_OP
6243+#undef __OVERFLOW_EXTABLE
6244+#undef __OVERFLOW_POST
6245
6246 /*
6247 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6248@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6249 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6250 * The function returns the old value of @v minus @i.
6251 */
6252-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6253+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6254 {
6255 long result;
6256
6257@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6258 long temp;
6259
6260 __asm__ __volatile__(
6261- " .set arch=r4000 \n"
6262+ " .set "MIPS_ISA_LEVEL" \n"
6263 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6264 " dsubu %0, %1, %3 \n"
6265 " bltz %0, 1f \n"
6266@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6267 return result;
6268 }
6269
6270-#define atomic64_cmpxchg(v, o, n) \
6271- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6272-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6273+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6274+{
6275+ return cmpxchg(&v->counter, old, new);
6276+}
6277+
6278+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6279+ long new)
6280+{
6281+ return cmpxchg(&(v->counter), old, new);
6282+}
6283+
6284+static inline long atomic64_xchg(atomic64_t *v, long new)
6285+{
6286+ return xchg(&v->counter, new);
6287+}
6288+
6289+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6290+{
6291+ return xchg(&(v->counter), new);
6292+}
6293
6294 /**
6295 * atomic64_add_unless - add unless the number is a given value
6296@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6297
6298 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6299 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6300+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6301
6302 /*
6303 * atomic64_sub_and_test - subtract value from variable and test result
6304@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305 * other cases.
6306 */
6307 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6308+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6309
6310 /*
6311 * atomic64_dec_and_test - decrement by 1 and test
6312@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * Atomically increments @v by 1.
6314 */
6315 #define atomic64_inc(v) atomic64_add(1, (v))
6316+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6317
6318 /*
6319 * atomic64_dec - decrement and test
6320@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically decrements @v by 1.
6322 */
6323 #define atomic64_dec(v) atomic64_sub(1, (v))
6324+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_add_negative - add and test if negative
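
Most of this atomic.h rewrite is PAX_REFCOUNT: the ordinary atomic_t operations switch from the wrapping addu/daddu forms to the trapping add/dadd forms (note the asm_op##u in the _unchecked expansions), with the arithmetic instruction at label 2 registered in __ex_table via _ASM_EXTABLE so a trapping overflow lands in the PaX handler, and __OVERFLOW_POST restoring the pre-overflow value. A parallel atomic_unchecked_t API keeps the old wrapping semantics for values that are statistics rather than reference counts. How call sites are expected to split, as a sketch using the patch's own type names (the scenario is illustrative):

	atomic_t refcnt = ATOMIC_INIT(1);		/* object lifetime     */
	atomic_unchecked_t rx_errs = ATOMIC_INIT(0);	/* statistic, may wrap */

	void get_object(void)
	{
		atomic_inc(&refcnt);		/* traps on overflow instead
						 * of wrapping to 0 and
						 * enabling a use-after-free */
	}

	void count_error(void)
	{
		atomic_inc_unchecked(&rx_errs);	/* wraparound is harmless */
	}
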
6328diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6329index 2b8bbbc..4556df6 100644
6330--- a/arch/mips/include/asm/barrier.h
6331+++ b/arch/mips/include/asm/barrier.h
6332@@ -133,7 +133,7 @@
6333 do { \
6334 compiletime_assert_atomic_type(*p); \
6335 smp_mb(); \
6336- ACCESS_ONCE(*p) = (v); \
6337+ ACCESS_ONCE_RW(*p) = (v); \
6338 } while (0)
6339
6340 #define smp_load_acquire(p) \
6341diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6342index b4db69f..8f3b093 100644
6343--- a/arch/mips/include/asm/cache.h
6344+++ b/arch/mips/include/asm/cache.h
6345@@ -9,10 +9,11 @@
6346 #ifndef _ASM_CACHE_H
6347 #define _ASM_CACHE_H
6348
6349+#include <linux/const.h>
6350 #include <kmalloc.h>
6351
6352 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6353-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6354+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6355
6356 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6357 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6358diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6359index 535f196..2ab029e 100644
6360--- a/arch/mips/include/asm/elf.h
6361+++ b/arch/mips/include/asm/elf.h
6362@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6363 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6364 #endif
6365
6366+#ifdef CONFIG_PAX_ASLR
6367+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6368+
6369+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6370+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6371+#endif
6372+
6373 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6374 struct linux_binprm;
6375 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6376 int uses_interp);
6377
6378-struct mm_struct;
6379-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6380-#define arch_randomize_brk arch_randomize_brk
6381-
6382 struct arch_elf_state {
6383 int fp_abi;
6384 int interp_fp_abi;
6385diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6386index c1f6afa..38cc6e9 100644
6387--- a/arch/mips/include/asm/exec.h
6388+++ b/arch/mips/include/asm/exec.h
6389@@ -12,6 +12,6 @@
6390 #ifndef _ASM_EXEC_H
6391 #define _ASM_EXEC_H
6392
6393-extern unsigned long arch_align_stack(unsigned long sp);
6394+#define arch_align_stack(x) ((x) & ~0xfUL)
6395
6396 #endif /* _ASM_EXEC_H */
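
Replacing the arch_align_stack() function with a pure mask removes the stock kernel's small stack-pointer randomization; under this patch PaX is expected to randomize the stack itself (see PAX_DELTA_STACK_LEN above), so all that remains is deterministic 16-byte alignment:

	/* Effect of the new arch_align_stack(): align down to 16 bytes,
	 * no jitter added. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long sp = 0x7fffffffe12fUL;

		printf("%#lx\n", sp & ~0xfUL);	/* 0x7fffffffe120 */
		return 0;
	}
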
6397diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6398index 9e8ef59..1139d6b 100644
6399--- a/arch/mips/include/asm/hw_irq.h
6400+++ b/arch/mips/include/asm/hw_irq.h
6401@@ -10,7 +10,7 @@
6402
6403 #include <linux/atomic.h>
6404
6405-extern atomic_t irq_err_count;
6406+extern atomic_unchecked_t irq_err_count;
6407
6408 /*
6409 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6410diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6411index 8feaed6..1bd8a64 100644
6412--- a/arch/mips/include/asm/local.h
6413+++ b/arch/mips/include/asm/local.h
6414@@ -13,15 +13,25 @@ typedef struct
6415 atomic_long_t a;
6416 } local_t;
6417
6418+typedef struct {
6419+ atomic_long_unchecked_t a;
6420+} local_unchecked_t;
6421+
6422 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6423
6424 #define local_read(l) atomic_long_read(&(l)->a)
6425+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6426 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6427+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6428
6429 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6430+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6431 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6432+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6433 #define local_inc(l) atomic_long_inc(&(l)->a)
6434+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6435 #define local_dec(l) atomic_long_dec(&(l)->a)
6436+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6437
6438 /*
6439 * Same as above, but return the result value
6440@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6441 return result;
6442 }
6443
6444+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6445+{
6446+ unsigned long result;
6447+
6448+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6449+ unsigned long temp;
6450+
6451+ __asm__ __volatile__(
6452+ " .set mips3 \n"
6453+ "1:" __LL "%1, %2 # local_add_return \n"
6454+ " addu %0, %1, %3 \n"
6455+ __SC "%0, %2 \n"
6456+ " beqzl %0, 1b \n"
6457+ " addu %0, %1, %3 \n"
6458+ " .set mips0 \n"
6459+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6460+ : "Ir" (i), "m" (l->a.counter)
6461+ : "memory");
6462+ } else if (kernel_uses_llsc) {
6463+ unsigned long temp;
6464+
6465+ __asm__ __volatile__(
6466+ " .set mips3 \n"
6467+ "1:" __LL "%1, %2 # local_add_return \n"
6468+ " addu %0, %1, %3 \n"
6469+ __SC "%0, %2 \n"
6470+ " beqz %0, 1b \n"
6471+ " addu %0, %1, %3 \n"
6472+ " .set mips0 \n"
6473+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6474+ : "Ir" (i), "m" (l->a.counter)
6475+ : "memory");
6476+ } else {
6477+ unsigned long flags;
6478+
6479+ local_irq_save(flags);
6480+ result = l->a.counter;
6481+ result += i;
6482+ l->a.counter = result;
6483+ local_irq_restore(flags);
6484+ }
6485+
6486+ return result;
6487+}
6488+
6489 static __inline__ long local_sub_return(long i, local_t * l)
6490 {
6491 unsigned long result;
6492@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6493
6494 #define local_cmpxchg(l, o, n) \
6495 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6496+#define local_cmpxchg_unchecked(l, o, n) \
6497+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6498 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6499
6500 /**
6501diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6502index 154b70a..426ae3d 100644
6503--- a/arch/mips/include/asm/page.h
6504+++ b/arch/mips/include/asm/page.h
6505@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6506 #ifdef CONFIG_CPU_MIPS32
6507 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6508 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6509- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6510+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6511 #else
6512 typedef struct { unsigned long long pte; } pte_t;
6513 #define pte_val(x) ((x).pte)
6514diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6515index b336037..5b874cc 100644
6516--- a/arch/mips/include/asm/pgalloc.h
6517+++ b/arch/mips/include/asm/pgalloc.h
6518@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6519 {
6520 set_pud(pud, __pud((unsigned long)pmd));
6521 }
6522+
6523+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6524+{
6525+ pud_populate(mm, pud, pmd);
6526+}
6527 #endif
6528
6529 /*
6530diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6531index bef782c..d99df93 100644
6532--- a/arch/mips/include/asm/pgtable.h
6533+++ b/arch/mips/include/asm/pgtable.h
6534@@ -20,6 +20,9 @@
6535 #include <asm/io.h>
6536 #include <asm/pgtable-bits.h>
6537
6538+#define ktla_ktva(addr) (addr)
6539+#define ktva_ktla(addr) (addr)
6540+
6541 struct mm_struct;
6542 struct vm_area_struct;
6543
6544diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6545index 55ed660..3dc9422 100644
6546--- a/arch/mips/include/asm/thread_info.h
6547+++ b/arch/mips/include/asm/thread_info.h
6548@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
6549 #define TIF_SECCOMP 4 /* secure computing */
6550 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6551 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6552+/* li takes a 32bit immediate */
6553+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6554+
6555 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6556 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6557 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6558@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
6559 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6560 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6561 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6562+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6563
6564 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6565 _TIF_SYSCALL_AUDIT | \
6566- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6567+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6568+ _TIF_GRSEC_SETXID)
6569
6570 /* work to do in syscall_trace_leave() */
6571 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6572- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6573+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6574
6575 /* work to do on interrupt/exception return */
6576 #define _TIF_WORK_MASK \
6577@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
6578 /* work to do on any return to u-space */
6579 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6580 _TIF_WORK_SYSCALL_EXIT | \
6581- _TIF_SYSCALL_TRACEPOINT)
6582+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6583
6584 /*
6585 * We stash processor id into a COP0 register to retrieve it fast
6586diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
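
TIF_GRSEC_SETXID implements grsecurity's deferred credential propagation: when one thread of a process changes uids/gids, its siblings get this flag, and because the bit is OR-ed into the syscall-entry and syscall-exit work masks, each sibling is forced through the slow path where the staged credentials are adopted; the in-line comment notes the bit placement still yields masks the MIPS li pseudo-op can materialize. A hedged sketch of the consuming side (the pattern grsecurity wires into ptrace.c across architectures; details schematic):

	asmlinkage void syscall_trace_enter_sketch(void)
	{
		if (unlikely(test_thread_flag(TIF_GRSEC_SETXID)))
			gr_delayed_cred_worker();	/* adopt the creds the
							 * set*id thread staged */
		/* ... normal audit/trace work follows ... */
	}
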
6587index bf8b324..cec5705 100644
6588--- a/arch/mips/include/asm/uaccess.h
6589+++ b/arch/mips/include/asm/uaccess.h
6590@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6591 __ok == 0; \
6592 })
6593
6594+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6595 #define access_ok(type, addr, size) \
6596 likely(__access_ok((addr), (size), __access_mask))
6597
6598diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6599index 1188e00..41cf144 100644
6600--- a/arch/mips/kernel/binfmt_elfn32.c
6601+++ b/arch/mips/kernel/binfmt_elfn32.c
6602@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6603 #undef ELF_ET_DYN_BASE
6604 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6605
6606+#ifdef CONFIG_PAX_ASLR
6607+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6608+
6609+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6610+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6611+#endif
6612+
6613 #include <asm/processor.h>
6614 #include <linux/module.h>
6615 #include <linux/elfcore.h>
6616diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6617index 9287678..f870e47 100644
6618--- a/arch/mips/kernel/binfmt_elfo32.c
6619+++ b/arch/mips/kernel/binfmt_elfo32.c
6620@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6621 #undef ELF_ET_DYN_BASE
6622 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6623
6624+#ifdef CONFIG_PAX_ASLR
6625+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6626+
6627+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6628+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6629+#endif
6630+
6631 #include <asm/processor.h>
6632
6633 #include <linux/module.h>
6634diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6635index a74ec3a..4f06f18 100644
6636--- a/arch/mips/kernel/i8259.c
6637+++ b/arch/mips/kernel/i8259.c
6638@@ -202,7 +202,7 @@ spurious_8259A_irq:
6639 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6640 spurious_irq_mask |= irqmask;
6641 }
6642- atomic_inc(&irq_err_count);
6643+ atomic_inc_unchecked(&irq_err_count);
6644 /*
6645 * Theoretically we do not have to handle this IRQ,
6646 * but in Linux this does not cause problems and is
6647diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6648index 44a1f79..2bd6aa3 100644
6649--- a/arch/mips/kernel/irq-gt641xx.c
6650+++ b/arch/mips/kernel/irq-gt641xx.c
6651@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6652 }
6653 }
6654
6655- atomic_inc(&irq_err_count);
6656+ atomic_inc_unchecked(&irq_err_count);
6657 }
6658
6659 void __init gt641xx_irq_init(void)
6660diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6661index d2bfbc2..a8eacd2 100644
6662--- a/arch/mips/kernel/irq.c
6663+++ b/arch/mips/kernel/irq.c
6664@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6665 printk("unexpected IRQ # %d\n", irq);
6666 }
6667
6668-atomic_t irq_err_count;
6669+atomic_unchecked_t irq_err_count;
6670
6671 int arch_show_interrupts(struct seq_file *p, int prec)
6672 {
6673- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6674+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6675 return 0;
6676 }
6677
6678 asmlinkage void spurious_interrupt(void)
6679 {
6680- atomic_inc(&irq_err_count);
6681+ atomic_inc_unchecked(&irq_err_count);
6682 }
6683
6684 void __init init_IRQ(void)
6685@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6686 #endif
6687 }
6688
6689+
6690 #ifdef DEBUG_STACKOVERFLOW
6691+extern void gr_handle_kernel_exploit(void);
6692+
6693 static inline void check_stack_overflow(void)
6694 {
6695 unsigned long sp;
6696@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6697 printk("do_IRQ: stack overflow: %ld\n",
6698 sp - sizeof(struct thread_info));
6699 dump_stack();
6700+ gr_handle_kernel_exploit();
6701 }
6702 }
6703 #else
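
These irq_err_count conversions illustrate the general atomic_unchecked_t pattern used throughout the patch: counters that may legitimately wrap are moved to a second atomic type that is exempt from PAX_REFCOUNT's overflow trap. On an architecture without the instrumentation the unchecked type can simply wrap the plain counter, as in this hypothetical sketch (demo names, with GCC atomic builtins standing in for the real ops):

typedef struct { int counter; } atomic_unchecked_demo_t;

static inline void atomic_inc_unchecked_demo(atomic_unchecked_demo_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_demo(const atomic_unchecked_demo_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

The second type mostly carries intent: the unchecked calls compile to ordinary atomic operations either way, but the type system now records which counters are allowed to wrap.
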
6704diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6705index 0614717..002fa43 100644
6706--- a/arch/mips/kernel/pm-cps.c
6707+++ b/arch/mips/kernel/pm-cps.c
6708@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6709 nc_core_ready_count = nc_addr;
6710
6711 /* Ensure ready_count is zero-initialised before the assembly runs */
6712- ACCESS_ONCE(*nc_core_ready_count) = 0;
6713+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6714 coupled_barrier(&per_cpu(pm_barrier, core), online);
6715
6716 /* Run the generated entry code */
6717diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6718index bf85cc1..b365c61 100644
6719--- a/arch/mips/kernel/process.c
6720+++ b/arch/mips/kernel/process.c
6721@@ -535,18 +535,6 @@ out:
6722 return pc;
6723 }
6724
6725-/*
6726- * Don't forget that the stack pointer must be aligned on a 8 bytes
6727- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6728- */
6729-unsigned long arch_align_stack(unsigned long sp)
6730-{
6731- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6732- sp -= get_random_int() & ~PAGE_MASK;
6733-
6734- return sp & ALMASK;
6735-}
6736-
6737 static void arch_dump_stack(void *info)
6738 {
6739 struct pt_regs *regs;
6740diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6741index 5104528..950bbdc 100644
6742--- a/arch/mips/kernel/ptrace.c
6743+++ b/arch/mips/kernel/ptrace.c
6744@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6745 return ret;
6746 }
6747
6748+#ifdef CONFIG_GRKERNSEC_SETXID
6749+extern void gr_delayed_cred_worker(void);
6750+#endif
6751+
6752 /*
6753 * Notification of system call entry/exit
6754 * - triggered by current->work.syscall_trace
6755@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6756 tracehook_report_syscall_entry(regs))
6757 ret = -1;
6758
6759+#ifdef CONFIG_GRKERNSEC_SETXID
6760+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6761+ gr_delayed_cred_worker();
6762+#endif
6763+
6764 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6765 trace_sys_enter(regs, regs->regs[2]);
6766
6767diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6768index 07fc524..b9d7f28 100644
6769--- a/arch/mips/kernel/reset.c
6770+++ b/arch/mips/kernel/reset.c
6771@@ -13,6 +13,7 @@
6772 #include <linux/reboot.h>
6773
6774 #include <asm/reboot.h>
6775+#include <asm/bug.h>
6776
6777 /*
6778 * Urgs ... Too many MIPS machines to handle this in a generic way.
6779@@ -29,16 +30,19 @@ void machine_restart(char *command)
6780 {
6781 if (_machine_restart)
6782 _machine_restart(command);
6783+ BUG();
6784 }
6785
6786 void machine_halt(void)
6787 {
6788 if (_machine_halt)
6789 _machine_halt();
6790+ BUG();
6791 }
6792
6793 void machine_power_off(void)
6794 {
6795 if (pm_power_off)
6796 pm_power_off();
6797+ BUG();
6798 }
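
The BUG() calls added above guard against a reset hook that is absent or simply returns: falling through would resume a kernel that believes it is rebooting or powered off. A user-space sketch of the same never-return discipline (names invented):

#include <stdio.h>
#include <stdlib.h>

static void (*machine_hook)(void);      /* board-specific; may stay NULL */

static void machine_power_off_demo(void)
{
        if (machine_hook)
                machine_hook();
        /* a reset hook that returns is a bug: never fall back to the caller */
        fprintf(stderr, "power-off hook failed\n");
        abort();
}

static void fake_poweroff(void) { exit(0); }

int main(void)
{
        machine_hook = fake_poweroff;
        machine_power_off_demo();       /* never returns */
}
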
6799diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6800index 2242bdd..b284048 100644
6801--- a/arch/mips/kernel/sync-r4k.c
6802+++ b/arch/mips/kernel/sync-r4k.c
6803@@ -18,8 +18,8 @@
6804 #include <asm/mipsregs.h>
6805
6806 static atomic_t count_start_flag = ATOMIC_INIT(0);
6807-static atomic_t count_count_start = ATOMIC_INIT(0);
6808-static atomic_t count_count_stop = ATOMIC_INIT(0);
6809+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6810+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6811 static atomic_t count_reference = ATOMIC_INIT(0);
6812
6813 #define COUNTON 100
6814@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6815
6816 for (i = 0; i < NR_LOOPS; i++) {
6817 /* slaves loop on '!= 2' */
6818- while (atomic_read(&count_count_start) != 1)
6819+ while (atomic_read_unchecked(&count_count_start) != 1)
6820 mb();
6821- atomic_set(&count_count_stop, 0);
6822+ atomic_set_unchecked(&count_count_stop, 0);
6823 smp_wmb();
6824
6825 /* this lets the slaves write their count register */
6826- atomic_inc(&count_count_start);
6827+ atomic_inc_unchecked(&count_count_start);
6828
6829 /*
6830 * Everyone initialises count in the last loop:
6831@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6832 /*
6833 * Wait for all slaves to leave the synchronization point:
6834 */
6835- while (atomic_read(&count_count_stop) != 1)
6836+ while (atomic_read_unchecked(&count_count_stop) != 1)
6837 mb();
6838- atomic_set(&count_count_start, 0);
6839+ atomic_set_unchecked(&count_count_start, 0);
6840 smp_wmb();
6841- atomic_inc(&count_count_stop);
6842+ atomic_inc_unchecked(&count_count_stop);
6843 }
6844 /* Arrange for an interrupt in a short while */
6845 write_c0_compare(read_c0_count() + COUNTON);
6846@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6847 initcount = atomic_read(&count_reference);
6848
6849 for (i = 0; i < NR_LOOPS; i++) {
6850- atomic_inc(&count_count_start);
6851- while (atomic_read(&count_count_start) != 2)
6852+ atomic_inc_unchecked(&count_count_start);
6853+ while (atomic_read_unchecked(&count_count_start) != 2)
6854 mb();
6855
6856 /*
6857@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6858 if (i == NR_LOOPS-1)
6859 write_c0_count(initcount);
6860
6861- atomic_inc(&count_count_stop);
6862- while (atomic_read(&count_count_stop) != 2)
6863+ atomic_inc_unchecked(&count_count_stop);
6864+ while (atomic_read_unchecked(&count_count_stop) != 2)
6865 mb();
6866 }
6867 /* Arrange for an interrupt in a short while */
6868diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6869index 33984c0..666a96d 100644
6870--- a/arch/mips/kernel/traps.c
6871+++ b/arch/mips/kernel/traps.c
6872@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6873 siginfo_t info;
6874
6875 prev_state = exception_enter();
6876- die_if_kernel("Integer overflow", regs);
6877+ if (unlikely(!user_mode(regs))) {
6878+
6879+#ifdef CONFIG_PAX_REFCOUNT
6880+ if (fixup_exception(regs)) {
6881+ pax_report_refcount_overflow(regs);
6882+ exception_exit(prev_state);
6883+ return;
6884+ }
6885+#endif
6886+
6887+ die("Integer overflow", regs);
6888+ }
6889
6890 info.si_code = FPE_INTOVF;
6891 info.si_signo = SIGFPE;
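
This is the MIPS half of PAX_REFCOUNT: the instrumented atomics trap into do_ov() on signed overflow, the event is logged via pax_report_refcount_overflow(), and fixup_exception() resumes at the recovery stub. What the instrumentation detects can be sketched portably with a compiler builtin (invented helper name; the kernel saturates and logs rather than returning an error):

#include <limits.h>
#include <stdio.h>

static int refcount_inc_checked(int *count)
{
        int newval;

        if (__builtin_add_overflow(*count, 1, &newval)) {
                /* the kernel would log and refuse to wrap here */
                fprintf(stderr, "refcount overflow detected\n");
                return -1;
        }
        *count = newval;
        return 0;
}

int main(void)
{
        int c = INT_MAX;

        return refcount_inc_checked(&c) ? 0 : 1;
}
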
6892diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6893index c9eccf5..3903621 100644
6894--- a/arch/mips/kvm/mips.c
6895+++ b/arch/mips/kvm/mips.c
6896@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6897 return r;
6898 }
6899
6900-int kvm_arch_init(void *opaque)
6901+int kvm_arch_init(const void *opaque)
6902 {
6903 if (kvm_mips_callbacks) {
6904 kvm_err("kvm: module already exists\n");
6905diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6906index 7ff8637..6004edb 100644
6907--- a/arch/mips/mm/fault.c
6908+++ b/arch/mips/mm/fault.c
6909@@ -31,6 +31,23 @@
6910
6911 int show_unhandled_signals = 1;
6912
6913+#ifdef CONFIG_PAX_PAGEEXEC
6914+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6915+{
6916+ unsigned long i;
6917+
6918+ printk(KERN_ERR "PAX: bytes at PC: ");
6919+ for (i = 0; i < 5; i++) {
6920+ unsigned int c;
6921+ if (get_user(c, (unsigned int *)pc+i))
6922+ printk(KERN_CONT "???????? ");
6923+ else
6924+ printk(KERN_CONT "%08x ", c);
6925+ }
6926+ printk("\n");
6927+}
6928+#endif
6929+
6930 /*
6931 * This routine handles page faults. It determines the address,
6932 * and the problem, and then passes it off to one of the appropriate
6933@@ -206,6 +223,14 @@ bad_area:
6934 bad_area_nosemaphore:
6935 /* User mode accesses just cause a SIGSEGV */
6936 if (user_mode(regs)) {
6937+
6938+#ifdef CONFIG_PAX_PAGEEXEC
6939+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6940+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6941+ do_group_exit(SIGKILL);
6942+ }
6943+#endif
6944+
6945 tsk->thread.cp0_badvaddr = address;
6946 tsk->thread.error_code = write;
6947 if (show_unhandled_signals &&
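
pax_report_insns() above dumps the five instruction words at the faulting PC, substituting "????????" for any word get_user() cannot read. A user-space analogue of the dump format (memcpy stands in for get_user(), so the unreadable-word case is not reproduced here):

#include <stdio.h>
#include <string.h>

static void report_insns(const unsigned int *pc, size_t n)
{
        printf("PAX: bytes at PC: ");
        for (size_t i = 0; i < n; i++) {
                unsigned int c;

                memcpy(&c, pc + i, sizeof(c));  /* kernel: get_user() */
                printf("%08x ", c);
        }
        printf("\n");
}

int main(void)
{
        static const unsigned int code[5] = { 0x3c02dead, 0x3442beef, 0, 0, 0 };

        report_insns(code, 5);
        return 0;
}
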
6948diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6949index f1baadd..5472dca 100644
6950--- a/arch/mips/mm/mmap.c
6951+++ b/arch/mips/mm/mmap.c
6952@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6953 struct vm_area_struct *vma;
6954 unsigned long addr = addr0;
6955 int do_color_align;
6956+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6957 struct vm_unmapped_area_info info;
6958
6959 if (unlikely(len > TASK_SIZE))
6960@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 do_color_align = 1;
6962
6963 /* requesting a specific address */
6964+
6965+#ifdef CONFIG_PAX_RANDMMAP
6966+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6967+#endif
6968+
6969 if (addr) {
6970 if (do_color_align)
6971 addr = COLOUR_ALIGN(addr, pgoff);
6972@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6973 addr = PAGE_ALIGN(addr);
6974
6975 vma = find_vma(mm, addr);
6976- if (TASK_SIZE - len >= addr &&
6977- (!vma || addr + len <= vma->vm_start))
6978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6979 return addr;
6980 }
6981
6982 info.length = len;
6983 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6984 info.align_offset = pgoff << PAGE_SHIFT;
6985+ info.threadstack_offset = offset;
6986
6987 if (dir == DOWN) {
6988 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6989@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6990 {
6991 unsigned long random_factor = 0UL;
6992
6993+#ifdef CONFIG_PAX_RANDMMAP
6994+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6995+#endif
6996+
6997 if (current->flags & PF_RANDOMIZE) {
6998 random_factor = get_random_int();
6999 random_factor = random_factor << PAGE_SHIFT;
7000@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7001
7002 if (mmap_is_legacy()) {
7003 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7004+
7005+#ifdef CONFIG_PAX_RANDMMAP
7006+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7007+ mm->mmap_base += mm->delta_mmap;
7008+#endif
7009+
7010 mm->get_unmapped_area = arch_get_unmapped_area;
7011 } else {
7012 mm->mmap_base = mmap_base(random_factor);
7013+
7014+#ifdef CONFIG_PAX_RANDMMAP
7015+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7016+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7017+#endif
7018+
7019 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7020 }
7021 }
7022
7023-static inline unsigned long brk_rnd(void)
7024-{
7025- unsigned long rnd = get_random_int();
7026-
7027- rnd = rnd << PAGE_SHIFT;
7028- /* 8MB for 32bit, 256MB for 64bit */
7029- if (TASK_IS_32BIT_ADDR)
7030- rnd = rnd & 0x7ffffful;
7031- else
7032- rnd = rnd & 0xffffffful;
7033-
7034- return rnd;
7035-}
7036-
7037-unsigned long arch_randomize_brk(struct mm_struct *mm)
7038-{
7039- unsigned long base = mm->brk;
7040- unsigned long ret;
7041-
7042- ret = PAGE_ALIGN(base + brk_rnd());
7043-
7044- if (ret < mm->brk)
7045- return mm->brk;
7046-
7047- return ret;
7048-}
7049-
7050 int __virt_addr_valid(const volatile void *kaddr)
7051 {
7052 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
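
Taken together, the mmap.c hunks above apply the PAX_RANDMMAP deltas: a legacy (bottom-up) layout shifts the base up by delta_mmap, while a top-down layout shifts the upper base down by delta_mmap plus delta_stack. A rough arithmetic model of that shift (illustrative addresses and deltas, not real values):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long task_unmapped_base = 0x2aaa8000UL;
        unsigned long upper_base         = 0x7f000000UL;
        unsigned long delta_mmap  = 0x123UL << PAGE_SHIFT;  /* random per exec */
        unsigned long delta_stack = 0x045UL << PAGE_SHIFT;

        printf("legacy   base: %#lx\n", task_unmapped_base + delta_mmap);
        printf("top-down base: %#lx\n", upper_base - (delta_mmap + delta_stack));
        return 0;
}
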
7053diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7054index a2358b4..7cead4f 100644
7055--- a/arch/mips/sgi-ip27/ip27-nmi.c
7056+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7057@@ -187,9 +187,9 @@ void
7058 cont_nmi_dump(void)
7059 {
7060 #ifndef REAL_NMI_SIGNAL
7061- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7062+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7063
7064- atomic_inc(&nmied_cpus);
7065+ atomic_inc_unchecked(&nmied_cpus);
7066 #endif
7067 /*
7068 * Only allow 1 cpu to proceed
7069@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7070 udelay(10000);
7071 }
7072 #else
7073- while (atomic_read(&nmied_cpus) != num_online_cpus());
7074+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7075 #endif
7076
7077 /*
7078diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7079index a046b30..6799527 100644
7080--- a/arch/mips/sni/rm200.c
7081+++ b/arch/mips/sni/rm200.c
7082@@ -270,7 +270,7 @@ spurious_8259A_irq:
7083 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7084 spurious_irq_mask |= irqmask;
7085 }
7086- atomic_inc(&irq_err_count);
7087+ atomic_inc_unchecked(&irq_err_count);
7088 /*
7089 * Theoretically we do not have to handle this IRQ,
7090 * but in Linux this does not cause problems and is
7091diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7092index 41e873b..34d33a7 100644
7093--- a/arch/mips/vr41xx/common/icu.c
7094+++ b/arch/mips/vr41xx/common/icu.c
7095@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7096
7097 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7098
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101
7102 return -1;
7103 }
7104diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7105index ae0e4ee..e8f0692 100644
7106--- a/arch/mips/vr41xx/common/irq.c
7107+++ b/arch/mips/vr41xx/common/irq.c
7108@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7109 irq_cascade_t *cascade;
7110
7111 if (irq >= NR_IRQS) {
7112- atomic_inc(&irq_err_count);
7113+ atomic_inc_unchecked(&irq_err_count);
7114 return;
7115 }
7116
7117@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7118 ret = cascade->get_irq(irq);
7119 irq = ret;
7120 if (ret < 0)
7121- atomic_inc(&irq_err_count);
7122+ atomic_inc_unchecked(&irq_err_count);
7123 else
7124 irq_dispatch(irq);
7125 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7126diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7127index 967d144..db12197 100644
7128--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7129+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7130@@ -11,12 +11,14 @@
7131 #ifndef _ASM_PROC_CACHE_H
7132 #define _ASM_PROC_CACHE_H
7133
7134+#include <linux/const.h>
7135+
7136 /* L1 cache */
7137
7138 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7139 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7140-#define L1_CACHE_BYTES 16 /* bytes per entry */
7141 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7143 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7144
7145 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7146diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7147index bcb5df2..84fabd2 100644
7148--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7149+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7150@@ -16,13 +16,15 @@
7151 #ifndef _ASM_PROC_CACHE_H
7152 #define _ASM_PROC_CACHE_H
7153
7154+#include <linux/const.h>
7155+
7156 /*
7157 * L1 cache
7158 */
7159 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7160 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7161-#define L1_CACHE_BYTES 32 /* bytes per entry */
7162 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7164 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7165
7166 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7167diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7168index 4ce7a01..449202a 100644
7169--- a/arch/openrisc/include/asm/cache.h
7170+++ b/arch/openrisc/include/asm/cache.h
7171@@ -19,11 +19,13 @@
7172 #ifndef __ASM_OPENRISC_CACHE_H
7173 #define __ASM_OPENRISC_CACHE_H
7174
7175+#include <linux/const.h>
7176+
7177 /* FIXME: How can we replace these with values from the CPU...
7178 * they shouldn't be hard-coded!
7179 */
7180
7181-#define L1_CACHE_BYTES 16
7182 #define L1_CACHE_SHIFT 4
7183+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7184
7185 #endif /* __ASM_OPENRISC_CACHE_H */
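
The mn10300, openrisc, and parisc cache.h hunks all make the same change: derive L1_CACHE_BYTES from L1_CACHE_SHIFT through _AC(), so the two constants cannot drift apart and the header remains usable from assembly, where a UL suffix would be a syntax error. A condensed sketch of the _AC() idea from include/linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)       X
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)
#endif

#define L1_CACHE_SHIFT  5
#define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)  /* 32UL in C, 32 in asm */
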
7186diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7187index 226f8ca..9d9b87d 100644
7188--- a/arch/parisc/include/asm/atomic.h
7189+++ b/arch/parisc/include/asm/atomic.h
7190@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7191 return dec;
7192 }
7193
7194+#define atomic64_read_unchecked(v) atomic64_read(v)
7195+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7196+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7197+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7198+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7199+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7200+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7201+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7202+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7203+
7204 #endif /* !CONFIG_64BIT */
7205
7206
7207diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7208index 47f11c7..3420df2 100644
7209--- a/arch/parisc/include/asm/cache.h
7210+++ b/arch/parisc/include/asm/cache.h
7211@@ -5,6 +5,7 @@
7212 #ifndef __ARCH_PARISC_CACHE_H
7213 #define __ARCH_PARISC_CACHE_H
7214
7215+#include <linux/const.h>
7216
7217 /*
7218 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7219@@ -15,13 +16,13 @@
7220 * just ruin performance.
7221 */
7222 #ifdef CONFIG_PA20
7223-#define L1_CACHE_BYTES 64
7224 #define L1_CACHE_SHIFT 6
7225 #else
7226-#define L1_CACHE_BYTES 32
7227 #define L1_CACHE_SHIFT 5
7228 #endif
7229
7230+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7231+
7232 #ifndef __ASSEMBLY__
7233
7234 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7235diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7236index 3391d06..c23a2cc 100644
7237--- a/arch/parisc/include/asm/elf.h
7238+++ b/arch/parisc/include/asm/elf.h
7239@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7240
7241 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7242
7243+#ifdef CONFIG_PAX_ASLR
7244+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7245+
7246+#define PAX_DELTA_MMAP_LEN 16
7247+#define PAX_DELTA_STACK_LEN 16
7248+#endif
7249+
7250 /* This yields a mask that user programs can use to figure out what
7251 instruction set this CPU supports. This could be done in user space,
7252 but it's not easy, and we've already done it here. */
7253diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7254index d174372..f27fe5c 100644
7255--- a/arch/parisc/include/asm/pgalloc.h
7256+++ b/arch/parisc/include/asm/pgalloc.h
7257@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7258 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7259 }
7260
7261+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7262+{
7263+ pgd_populate(mm, pgd, pmd);
7264+}
7265+
7266 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7267 {
7268 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7269@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7270 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7271 #define pmd_free(mm, x) do { } while (0)
7272 #define pgd_populate(mm, pmd, pte) BUG()
7273+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7274
7275 #endif
7276
7277diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7278index 15207b9..3209e65 100644
7279--- a/arch/parisc/include/asm/pgtable.h
7280+++ b/arch/parisc/include/asm/pgtable.h
7281@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7282 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7283 #define PAGE_COPY PAGE_EXECREAD
7284 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7288+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7289+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7290+#else
7291+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7292+# define PAGE_COPY_NOEXEC PAGE_COPY
7293+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7294+#endif
7295+
7296 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7297 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7298 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7299diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7300index 0abdd4c..1af92f0 100644
7301--- a/arch/parisc/include/asm/uaccess.h
7302+++ b/arch/parisc/include/asm/uaccess.h
7303@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7304 const void __user *from,
7305 unsigned long n)
7306 {
7307- int sz = __compiletime_object_size(to);
7308+ size_t sz = __compiletime_object_size(to);
7309 int ret = -EFAULT;
7310
7311- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7312+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7313 ret = __copy_from_user(to, from, n);
7314 else
7315 copy_from_user_overflow();
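
The type change above matters because __compiletime_object_size() is built on __builtin_object_size(), which reports an unknown size as (size_t)-1; keeping sz as size_t makes that sentinel explicit and avoids a signed/unsigned comparison against n. A stand-alone sketch of the sentinel (GCC/Clang builtin; less trivial cases may need optimization enabled to resolve):

#include <stdio.h>

int main(void)
{
        char buf[16];
        size_t known   = __builtin_object_size(buf, 0);    /* 16 */
        size_t unknown = __builtin_object_size(stdin, 0);  /* (size_t)-1 */

        printf("known=%zu, unknown is sentinel: %d\n",
               known, unknown == (size_t)-1);
        return 0;
}
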
7316diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7317index 3c63a82..b1d6ee9 100644
7318--- a/arch/parisc/kernel/module.c
7319+++ b/arch/parisc/kernel/module.c
7320@@ -98,16 +98,38 @@
7321
7322 /* three functions to determine where in the module core
7323 * or init pieces the location is */
7324+static inline int in_init_rx(struct module *me, void *loc)
7325+{
7326+ return (loc >= me->module_init_rx &&
7327+ loc < (me->module_init_rx + me->init_size_rx));
7328+}
7329+
7330+static inline int in_init_rw(struct module *me, void *loc)
7331+{
7332+ return (loc >= me->module_init_rw &&
7333+ loc < (me->module_init_rw + me->init_size_rw));
7334+}
7335+
7336 static inline int in_init(struct module *me, void *loc)
7337 {
7338- return (loc >= me->module_init &&
7339- loc <= (me->module_init + me->init_size));
7340+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7341+}
7342+
7343+static inline int in_core_rx(struct module *me, void *loc)
7344+{
7345+ return (loc >= me->module_core_rx &&
7346+ loc < (me->module_core_rx + me->core_size_rx));
7347+}
7348+
7349+static inline int in_core_rw(struct module *me, void *loc)
7350+{
7351+ return (loc >= me->module_core_rw &&
7352+ loc < (me->module_core_rw + me->core_size_rw));
7353 }
7354
7355 static inline int in_core(struct module *me, void *loc)
7356 {
7357- return (loc >= me->module_core &&
7358- loc <= (me->module_core + me->core_size));
7359+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7360 }
7361
7362 static inline int in_local(struct module *me, void *loc)
7363@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7364 }
7365
7366 /* align things a bit */
7367- me->core_size = ALIGN(me->core_size, 16);
7368- me->arch.got_offset = me->core_size;
7369- me->core_size += gots * sizeof(struct got_entry);
7370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7371+ me->arch.got_offset = me->core_size_rw;
7372+ me->core_size_rw += gots * sizeof(struct got_entry);
7373
7374- me->core_size = ALIGN(me->core_size, 16);
7375- me->arch.fdesc_offset = me->core_size;
7376- me->core_size += fdescs * sizeof(Elf_Fdesc);
7377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7378+ me->arch.fdesc_offset = me->core_size_rw;
7379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7380
7381 me->arch.got_max = gots;
7382 me->arch.fdesc_max = fdescs;
7383@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7384
7385 BUG_ON(value == 0);
7386
7387- got = me->module_core + me->arch.got_offset;
7388+ got = me->module_core_rw + me->arch.got_offset;
7389 for (i = 0; got[i].addr; i++)
7390 if (got[i].addr == value)
7391 goto out;
7392@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7393 #ifdef CONFIG_64BIT
7394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7395 {
7396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7398
7399 if (!value) {
7400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7401@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7402
7403 /* Create new one */
7404 fdesc->addr = value;
7405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7407 return (Elf_Addr)fdesc;
7408 }
7409 #endif /* CONFIG_64BIT */
7410@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7411
7412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7413 end = table + sechdrs[me->arch.unwind_section].sh_size;
7414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7416
7417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7418 me->arch.unwind_section, table, end, gp);
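
The RX/RW helpers above are four instances of one half-open range test (note the rewrite also replaces the old <= upper bound with <, so the first byte past a region no longer counts as inside it). A generic sketch of the test (invented name):

#include <stdbool.h>

/* half-open membership test: base <= loc < base + size */
static inline bool in_range(const void *loc, const void *base,
                            unsigned long size)
{
        const char *p = loc, *b = base;

        return p >= b && p < b + size;
}
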
7419diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7420index e1ffea2..46ed66e 100644
7421--- a/arch/parisc/kernel/sys_parisc.c
7422+++ b/arch/parisc/kernel/sys_parisc.c
7423@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7424 unsigned long task_size = TASK_SIZE;
7425 int do_color_align, last_mmap;
7426 struct vm_unmapped_area_info info;
7427+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7428
7429 if (len > task_size)
7430 return -ENOMEM;
7431@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 goto found_addr;
7433 }
7434
7435+#ifdef CONFIG_PAX_RANDMMAP
7436+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7437+#endif
7438+
7439 if (addr) {
7440 if (do_color_align && last_mmap)
7441 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7442@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7443 info.high_limit = mmap_upper_limit();
7444 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7445 info.align_offset = shared_align_offset(last_mmap, pgoff);
7446+ info.threadstack_offset = offset;
7447 addr = vm_unmapped_area(&info);
7448
7449 found_addr:
7450@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7451 unsigned long addr = addr0;
7452 int do_color_align, last_mmap;
7453 struct vm_unmapped_area_info info;
7454+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7455
7456 #ifdef CONFIG_64BIT
7457 /* This should only ever run for 32-bit processes. */
7458@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 }
7460
7461 /* requesting a specific address */
7462+#ifdef CONFIG_PAX_RANDMMAP
7463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7464+#endif
7465+
7466 if (addr) {
7467 if (do_color_align && last_mmap)
7468 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7469@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7470 info.high_limit = mm->mmap_base;
7471 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7472 info.align_offset = shared_align_offset(last_mmap, pgoff);
7473+ info.threadstack_offset = offset;
7474 addr = vm_unmapped_area(&info);
7475 if (!(addr & ~PAGE_MASK))
7476 goto found_addr;
7477@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7478 mm->mmap_legacy_base = mmap_legacy_base();
7479 mm->mmap_base = mmap_upper_limit();
7480
7481+#ifdef CONFIG_PAX_RANDMMAP
7482+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7483+ mm->mmap_legacy_base += mm->delta_mmap;
7484+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7485+ }
7486+#endif
7487+
7488 if (mmap_is_legacy()) {
7489 mm->mmap_base = mm->mmap_legacy_base;
7490 mm->get_unmapped_area = arch_get_unmapped_area;
7491diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7492index 47ee620..1107387 100644
7493--- a/arch/parisc/kernel/traps.c
7494+++ b/arch/parisc/kernel/traps.c
7495@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7496
7497 down_read(&current->mm->mmap_sem);
7498 vma = find_vma(current->mm,regs->iaoq[0]);
7499- if (vma && (regs->iaoq[0] >= vma->vm_start)
7500- && (vma->vm_flags & VM_EXEC)) {
7501-
7502+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7503 fault_address = regs->iaoq[0];
7504 fault_space = regs->iasq[0];
7505
7506diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7507index e5120e6..8ddb5cc 100644
7508--- a/arch/parisc/mm/fault.c
7509+++ b/arch/parisc/mm/fault.c
7510@@ -15,6 +15,7 @@
7511 #include <linux/sched.h>
7512 #include <linux/interrupt.h>
7513 #include <linux/module.h>
7514+#include <linux/unistd.h>
7515
7516 #include <asm/uaccess.h>
7517 #include <asm/traps.h>
7518@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7519 static unsigned long
7520 parisc_acctyp(unsigned long code, unsigned int inst)
7521 {
7522- if (code == 6 || code == 16)
7523+ if (code == 6 || code == 7 || code == 16)
7524 return VM_EXEC;
7525
7526 switch (inst & 0xf0000000) {
7527@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7528 }
7529 #endif
7530
7531+#ifdef CONFIG_PAX_PAGEEXEC
7532+/*
7533+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7534+ *
7535+ * returns 1 when task should be killed
7536+ * 2 when rt_sigreturn trampoline was detected
7537+ * 3 when unpatched PLT trampoline was detected
7538+ */
7539+static int pax_handle_fetch_fault(struct pt_regs *regs)
7540+{
7541+
7542+#ifdef CONFIG_PAX_EMUPLT
7543+ int err;
7544+
7545+ do { /* PaX: unpatched PLT emulation */
7546+ unsigned int bl, depwi;
7547+
7548+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7549+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7550+
7551+ if (err)
7552+ break;
7553+
7554+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7555+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7556+
7557+ err = get_user(ldw, (unsigned int *)addr);
7558+ err |= get_user(bv, (unsigned int *)(addr+4));
7559+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7560+
7561+ if (err)
7562+ break;
7563+
7564+ if (ldw == 0x0E801096U &&
7565+ bv == 0xEAC0C000U &&
7566+ ldw2 == 0x0E881095U)
7567+ {
7568+ unsigned int resolver, map;
7569+
7570+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7571+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7572+ if (err)
7573+ break;
7574+
7575+ regs->gr[20] = instruction_pointer(regs)+8;
7576+ regs->gr[21] = map;
7577+ regs->gr[22] = resolver;
7578+ regs->iaoq[0] = resolver | 3UL;
7579+ regs->iaoq[1] = regs->iaoq[0] + 4;
7580+ return 3;
7581+ }
7582+ }
7583+ } while (0);
7584+#endif
7585+
7586+#ifdef CONFIG_PAX_EMUTRAMP
7587+
7588+#ifndef CONFIG_PAX_EMUSIGRT
7589+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7590+ return 1;
7591+#endif
7592+
7593+ do { /* PaX: rt_sigreturn emulation */
7594+ unsigned int ldi1, ldi2, bel, nop;
7595+
7596+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7597+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7598+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7599+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7600+
7601+ if (err)
7602+ break;
7603+
7604+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7605+ ldi2 == 0x3414015AU &&
7606+ bel == 0xE4008200U &&
7607+ nop == 0x08000240U)
7608+ {
7609+ regs->gr[25] = (ldi1 & 2) >> 1;
7610+ regs->gr[20] = __NR_rt_sigreturn;
7611+ regs->gr[31] = regs->iaoq[1] + 16;
7612+ regs->sr[0] = regs->iasq[1];
7613+ regs->iaoq[0] = 0x100UL;
7614+ regs->iaoq[1] = regs->iaoq[0] + 4;
7615+ regs->iasq[0] = regs->sr[2];
7616+ regs->iasq[1] = regs->sr[2];
7617+ return 2;
7618+ }
7619+ } while (0);
7620+#endif
7621+
7622+ return 1;
7623+}
7624+
7625+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7626+{
7627+ unsigned long i;
7628+
7629+ printk(KERN_ERR "PAX: bytes at PC: ");
7630+ for (i = 0; i < 5; i++) {
7631+ unsigned int c;
7632+ if (get_user(c, (unsigned int *)pc+i))
7633+ printk(KERN_CONT "???????? ");
7634+ else
7635+ printk(KERN_CONT "%08x ", c);
7636+ }
7637+ printk("\n");
7638+}
7639+#endif
7640+
7641 int fixup_exception(struct pt_regs *regs)
7642 {
7643 const struct exception_table_entry *fix;
7644@@ -234,8 +345,33 @@ retry:
7645
7646 good_area:
7647
7648- if ((vma->vm_flags & acc_type) != acc_type)
7649+ if ((vma->vm_flags & acc_type) != acc_type) {
7650+
7651+#ifdef CONFIG_PAX_PAGEEXEC
7652+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7653+ (address & ~3UL) == instruction_pointer(regs))
7654+ {
7655+ up_read(&mm->mmap_sem);
7656+ switch (pax_handle_fetch_fault(regs)) {
7657+
7658+#ifdef CONFIG_PAX_EMUPLT
7659+ case 3:
7660+ return;
7661+#endif
7662+
7663+#ifdef CONFIG_PAX_EMUTRAMP
7664+ case 2:
7665+ return;
7666+#endif
7667+
7668+ }
7669+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7670+ do_group_exit(SIGKILL);
7671+ }
7672+#endif
7673+
7674 goto bad_area;
7675+ }
7676
7677 /*
7678 * If for any reason at all we couldn't handle the fault, make
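
pax_handle_fetch_fault() above recognizes an unpatched PLT stub or an rt_sigreturn trampoline by fetching a few words at the faulting PC and comparing them against fixed instruction templates (the bl/depwi constants in the hunk). A user-space sketch of the matching step, reusing those two opcodes (memcpy stands in for get_user(), which is what keeps a bad PC from faulting in the kernel):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool matches_plt_stub(const uint32_t *pc)
{
        uint32_t insn[2];

        memcpy(insn, pc, sizeof(insn));         /* kernel: get_user() */
        return insn[0] == 0xEA9F1FDDU &&        /* bl    template */
               insn[1] == 0xD6801C1EU;          /* depwi template */
}

int main(void)
{
        static const uint32_t stub[2] = { 0xEA9F1FDDU, 0xD6801C1EU };

        return matches_plt_stub(stub) ? 0 : 1;
}
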
7679diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7680index 22b0940..309f790 100644
7681--- a/arch/powerpc/Kconfig
7682+++ b/arch/powerpc/Kconfig
7683@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7684 config KEXEC
7685 bool "kexec system call"
7686 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7687+ depends on !GRKERNSEC_KMEM
7688 help
7689 kexec is a system call that implements the ability to shutdown your
7690 current kernel, and to start another kernel. It is like a reboot
7691diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7692index 512d278..d31fadd 100644
7693--- a/arch/powerpc/include/asm/atomic.h
7694+++ b/arch/powerpc/include/asm/atomic.h
7695@@ -12,6 +12,11 @@
7696
7697 #define ATOMIC_INIT(i) { (i) }
7698
7699+#define _ASM_EXTABLE(from, to) \
7700+" .section __ex_table,\"a\"\n" \
7701+ PPC_LONG" " #from ", " #to"\n" \
7702+" .previous\n"
7703+
7704 static __inline__ int atomic_read(const atomic_t *v)
7705 {
7706 int t;
7707@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7708 return t;
7709 }
7710
7711+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7712+{
7713+ int t;
7714+
7715+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7716+
7717+ return t;
7718+}
7719+
7720 static __inline__ void atomic_set(atomic_t *v, int i)
7721 {
7722 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7723 }
7724
7725-#define ATOMIC_OP(op, asm_op) \
7726-static __inline__ void atomic_##op(int a, atomic_t *v) \
7727+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7728+{
7729+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7730+}
7731+
7732+#ifdef CONFIG_PAX_REFCOUNT
7733+#define __REFCOUNT_OP(op) op##o.
7734+#define __OVERFLOW_PRE \
7735+ " mcrxr cr0\n"
7736+#define __OVERFLOW_POST \
7737+ " bf 4*cr0+so, 3f\n" \
7738+ "2: .long 0x00c00b00\n" \
7739+ "3:\n"
7740+#define __OVERFLOW_EXTABLE \
7741+ "\n4:\n" \
7742+ _ASM_EXTABLE(2b, 4b)
7743+#else
7744+#define __REFCOUNT_OP(op) op
7745+#define __OVERFLOW_PRE
7746+#define __OVERFLOW_POST
7747+#define __OVERFLOW_EXTABLE
7748+#endif
7749+
7750+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7751+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7752 { \
7753 int t; \
7754 \
7755 __asm__ __volatile__( \
7756-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7757+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7758+ pre_op \
7759 #asm_op " %0,%2,%0\n" \
7760+ post_op \
7761 PPC405_ERR77(0,%3) \
7762 " stwcx. %0,0,%3 \n" \
7763 " bne- 1b\n" \
7764+ extable \
7765 : "=&r" (t), "+m" (v->counter) \
7766 : "r" (a), "r" (&v->counter) \
7767 : "cc"); \
7768 } \
7769
7770-#define ATOMIC_OP_RETURN(op, asm_op) \
7771-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7772+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7773+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7774+
7775+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7776+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7777 { \
7778 int t; \
7779 \
7780 __asm__ __volatile__( \
7781 PPC_ATOMIC_ENTRY_BARRIER \
7782-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7783+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7784+ pre_op \
7785 #asm_op " %0,%1,%0\n" \
7786+ post_op \
7787 PPC405_ERR77(0,%2) \
7788 " stwcx. %0,0,%2 \n" \
7789 " bne- 1b\n" \
7790+ extable \
7791 PPC_ATOMIC_EXIT_BARRIER \
7792 : "=&r" (t) \
7793 : "r" (a), "r" (&v->counter) \
7794@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7795 return t; \
7796 }
7797
7798+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7799+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7800+
7801 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7802
7803 ATOMIC_OPS(add, add)
7804@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7805
7806 #undef ATOMIC_OPS
7807 #undef ATOMIC_OP_RETURN
7808+#undef __ATOMIC_OP_RETURN
7809 #undef ATOMIC_OP
7810+#undef __ATOMIC_OP
7811
7812 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7813
7814-static __inline__ void atomic_inc(atomic_t *v)
7815-{
7816- int t;
7817+/*
7818+ * atomic_inc - increment atomic variable
7819+ * @v: pointer of type atomic_t
7820+ *
7821+ * Atomically increments @v by 1
7822+ */
7823+#define atomic_inc(v) atomic_add(1, (v))
7824+#define atomic_inc_return(v) atomic_add_return(1, (v))
7825
7826- __asm__ __volatile__(
7827-"1: lwarx %0,0,%2 # atomic_inc\n\
7828- addic %0,%0,1\n"
7829- PPC405_ERR77(0,%2)
7830-" stwcx. %0,0,%2 \n\
7831- bne- 1b"
7832- : "=&r" (t), "+m" (v->counter)
7833- : "r" (&v->counter)
7834- : "cc", "xer");
7835+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7836+{
7837+ atomic_add_unchecked(1, v);
7838 }
7839
7840-static __inline__ int atomic_inc_return(atomic_t *v)
7841+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7842 {
7843- int t;
7844-
7845- __asm__ __volatile__(
7846- PPC_ATOMIC_ENTRY_BARRIER
7847-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7848- addic %0,%0,1\n"
7849- PPC405_ERR77(0,%1)
7850-" stwcx. %0,0,%1 \n\
7851- bne- 1b"
7852- PPC_ATOMIC_EXIT_BARRIER
7853- : "=&r" (t)
7854- : "r" (&v->counter)
7855- : "cc", "xer", "memory");
7856-
7857- return t;
7858+ return atomic_add_return_unchecked(1, v);
7859 }
7860
7861 /*
7862@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7863 */
7864 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7865
7866-static __inline__ void atomic_dec(atomic_t *v)
7867+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7868 {
7869- int t;
7870-
7871- __asm__ __volatile__(
7872-"1: lwarx %0,0,%2 # atomic_dec\n\
7873- addic %0,%0,-1\n"
7874- PPC405_ERR77(0,%2)\
7875-" stwcx. %0,0,%2\n\
7876- bne- 1b"
7877- : "=&r" (t), "+m" (v->counter)
7878- : "r" (&v->counter)
7879- : "cc", "xer");
7880+ return atomic_add_return_unchecked(1, v) == 0;
7881 }
7882
7883-static __inline__ int atomic_dec_return(atomic_t *v)
7884+/*
7885+ * atomic_dec - decrement atomic variable
7886+ * @v: pointer of type atomic_t
7887+ *
7888+ * Atomically decrements @v by 1
7889+ */
7890+#define atomic_dec(v) atomic_sub(1, (v))
7891+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7892+
7893+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7894 {
7895- int t;
7896-
7897- __asm__ __volatile__(
7898- PPC_ATOMIC_ENTRY_BARRIER
7899-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7900- addic %0,%0,-1\n"
7901- PPC405_ERR77(0,%1)
7902-" stwcx. %0,0,%1\n\
7903- bne- 1b"
7904- PPC_ATOMIC_EXIT_BARRIER
7905- : "=&r" (t)
7906- : "r" (&v->counter)
7907- : "cc", "xer", "memory");
7908-
7909- return t;
7910+ atomic_sub_unchecked(1, v);
7911 }
7912
7913 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7915
7916+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7917+{
7918+ return cmpxchg(&(v->counter), old, new);
7919+}
7920+
7921+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7922+{
7923+ return xchg(&(v->counter), new);
7924+}
7925+
7926 /**
7927 * __atomic_add_unless - add unless the number is a given value
7928 * @v: pointer of type atomic_t
7929@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7930 PPC_ATOMIC_ENTRY_BARRIER
7931 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7932 cmpw 0,%0,%3 \n\
7933- beq- 2f \n\
7934- add %0,%2,%0 \n"
7935+ beq- 2f \n"
7936+
7937+#ifdef CONFIG_PAX_REFCOUNT
7938+" mcrxr cr0\n"
7939+" addo. %0,%2,%0\n"
7940+" bf 4*cr0+so, 4f\n"
7941+"3:.long " "0x00c00b00""\n"
7942+"4:\n"
7943+#else
7944+ "add %0,%2,%0 \n"
7945+#endif
7946+
7947 PPC405_ERR77(0,%2)
7948 " stwcx. %0,0,%1 \n\
7949 bne- 1b \n"
7950+"5:"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ _ASM_EXTABLE(3b, 5b)
7954+#endif
7955+
7956 PPC_ATOMIC_EXIT_BARRIER
7957 " subf %0,%2,%0 \n\
7958 2:"
7959@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7960 }
7961 #define atomic_dec_if_positive atomic_dec_if_positive
7962
7963+#define smp_mb__before_atomic_dec() smp_mb()
7964+#define smp_mb__after_atomic_dec() smp_mb()
7965+#define smp_mb__before_atomic_inc() smp_mb()
7966+#define smp_mb__after_atomic_inc() smp_mb()
7967+
7968 #ifdef __powerpc64__
7969
7970 #define ATOMIC64_INIT(i) { (i) }
7971@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7972 return t;
7973 }
7974
7975+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7976+{
7977+ long t;
7978+
7979+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7980+
7981+ return t;
7982+}
7983+
7984 static __inline__ void atomic64_set(atomic64_t *v, long i)
7985 {
7986 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7987 }
7988
7989-#define ATOMIC64_OP(op, asm_op) \
7990-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7991+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7992+{
7993+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7994+}
7995+
7996+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7997+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
7998 { \
7999 long t; \
8000 \
8001 __asm__ __volatile__( \
8002 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8003+ pre_op \
8004 #asm_op " %0,%2,%0\n" \
8005+ post_op \
8006 " stdcx. %0,0,%3 \n" \
8007 " bne- 1b\n" \
8008+ extable \
8009 : "=&r" (t), "+m" (v->counter) \
8010 : "r" (a), "r" (&v->counter) \
8011 : "cc"); \
8012 }
8013
8014-#define ATOMIC64_OP_RETURN(op, asm_op) \
8015-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8016+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8017+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8018+
8019+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8020+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8021 { \
8022 long t; \
8023 \
8024 __asm__ __volatile__( \
8025 PPC_ATOMIC_ENTRY_BARRIER \
8026 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8027+ pre_op \
8028 #asm_op " %0,%1,%0\n" \
8029+ post_op \
8030 " stdcx. %0,0,%2 \n" \
8031 " bne- 1b\n" \
8032+ extable \
8033 PPC_ATOMIC_EXIT_BARRIER \
8034 : "=&r" (t) \
8035 : "r" (a), "r" (&v->counter) \
8036@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8037 return t; \
8038 }
8039
8040+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8041+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8042+
8043 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8044
8045 ATOMIC64_OPS(add, add)
8046@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8047
8048 #undef ATOMIC64_OPS
8049 #undef ATOMIC64_OP_RETURN
8050+#undef __ATOMIC64_OP_RETURN
8051 #undef ATOMIC64_OP
8052+#undef __ATOMIC64_OP
8053+#undef __OVERFLOW_EXTABLE
8054+#undef __OVERFLOW_POST
8055+#undef __OVERFLOW_PRE
8056+#undef __REFCOUNT_OP
8057
8058 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8059
8060-static __inline__ void atomic64_inc(atomic64_t *v)
8061-{
8062- long t;
8063+/*
8064+ * atomic64_inc - increment atomic variable
8065+ * @v: pointer of type atomic64_t
8066+ *
8067+ * Atomically increments @v by 1
8068+ */
8069+#define atomic64_inc(v) atomic64_add(1, (v))
8070+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8071
8072- __asm__ __volatile__(
8073-"1: ldarx %0,0,%2 # atomic64_inc\n\
8074- addic %0,%0,1\n\
8075- stdcx. %0,0,%2 \n\
8076- bne- 1b"
8077- : "=&r" (t), "+m" (v->counter)
8078- : "r" (&v->counter)
8079- : "cc", "xer");
8080+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8081+{
8082+ atomic64_add_unchecked(1, v);
8083 }
8084
8085-static __inline__ long atomic64_inc_return(atomic64_t *v)
8086+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8087 {
8088- long t;
8089-
8090- __asm__ __volatile__(
8091- PPC_ATOMIC_ENTRY_BARRIER
8092-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8093- addic %0,%0,1\n\
8094- stdcx. %0,0,%1 \n\
8095- bne- 1b"
8096- PPC_ATOMIC_EXIT_BARRIER
8097- : "=&r" (t)
8098- : "r" (&v->counter)
8099- : "cc", "xer", "memory");
8100-
8101- return t;
8102+ return atomic64_add_return_unchecked(1, v);
8103 }
8104
8105 /*
8106@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8107 */
8108 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8109
8110-static __inline__ void atomic64_dec(atomic64_t *v)
8111+/*
8112+ * atomic64_dec - decrement atomic variable
8113+ * @v: pointer of type atomic64_t
8114+ *
8115+ * Atomically decrements @v by 1
8116+ */
8117+#define atomic64_dec(v) atomic64_sub(1, (v))
8118+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8119+
8120+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8121 {
8122- long t;
8123-
8124- __asm__ __volatile__(
8125-"1: ldarx %0,0,%2 # atomic64_dec\n\
8126- addic %0,%0,-1\n\
8127- stdcx. %0,0,%2\n\
8128- bne- 1b"
8129- : "=&r" (t), "+m" (v->counter)
8130- : "r" (&v->counter)
8131- : "cc", "xer");
8132-}
8133-
8134-static __inline__ long atomic64_dec_return(atomic64_t *v)
8135-{
8136- long t;
8137-
8138- __asm__ __volatile__(
8139- PPC_ATOMIC_ENTRY_BARRIER
8140-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8141- addic %0,%0,-1\n\
8142- stdcx. %0,0,%1\n\
8143- bne- 1b"
8144- PPC_ATOMIC_EXIT_BARRIER
8145- : "=&r" (t)
8146- : "r" (&v->counter)
8147- : "cc", "xer", "memory");
8148-
8149- return t;
8150+ atomic64_sub_unchecked(1, v);
8151 }
8152
8153 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8154@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8155 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8156 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8157
8158+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8159+{
8160+ return cmpxchg(&(v->counter), old, new);
8161+}
8162+
8163+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8164+{
8165+ return xchg(&(v->counter), new);
8166+}
8167+
8168 /**
8169 * atomic64_add_unless - add unless the number is a given value
8170 * @v: pointer of type atomic64_t
8171@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8172
8173 __asm__ __volatile__ (
8174 PPC_ATOMIC_ENTRY_BARRIER
8175-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8176+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8177 cmpd 0,%0,%3 \n\
8178- beq- 2f \n\
8179- add %0,%2,%0 \n"
8180+ beq- 2f \n"
8181+
8182+#ifdef CONFIG_PAX_REFCOUNT
8183+" mcrxr cr0\n"
8184+" addo. %0,%2,%0\n"
8185+" bf 4*cr0+so, 4f\n"
8186+"3:.long " "0x00c00b00""\n"
8187+"4:\n"
8188+#else
8189+ "add %0,%2,%0 \n"
8190+#endif
8191+
8192 " stdcx. %0,0,%1 \n\
8193 bne- 1b \n"
8194 PPC_ATOMIC_EXIT_BARRIER
8195+"5:"
8196+
8197+#ifdef CONFIG_PAX_REFCOUNT
8198+ _ASM_EXTABLE(3b, 5b)
8199+#endif
8200+
8201 " subf %0,%2,%0 \n\
8202 2:"
8203 : "=&r" (t)
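
The rewritten ATOMIC_OP/ATOMIC64_OP templates above generate two function families from one body: the checked variant, with the mcrxr/addo. overflow hooks spliced in, and an _unchecked variant with the hooks left empty. The shape of that template, minus the inline assembly and minus real atomicity, can be sketched with a compiler builtin standing in for the trap (demo names throughout; abort() stands in for the kernel's trap-and-log path):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_demo_t;
typedef struct { int counter; } atomic_unchecked_demo_t;

#define __ATOMIC_OP_DEMO(op, suffix, type, CHECKED)                 \
static inline void atomic_##op##suffix(int a, type *v)              \
{                                                                   \
        int t;                                                      \
        if (CHECKED && __builtin_add_overflow(v->counter, a, &t))   \
                abort();                                            \
        else                                                        \
                t = v->counter + a;                                 \
        v->counter = t;                                             \
}

__ATOMIC_OP_DEMO(add, ,           atomic_demo_t,           1)
__ATOMIC_OP_DEMO(add, _unchecked, atomic_unchecked_demo_t, 0)

int main(void)
{
        atomic_demo_t v = { 1 };

        atomic_add(5, &v);
        printf("%d\n", v.counter);
        return 0;
}

In the real macros the pre_op/post_op/extable arguments carry the overflow check and the exception-table entry that lets the kernel resume after the deliberate trap.
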
8204diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8205index a3bf5be..e03ba81 100644
8206--- a/arch/powerpc/include/asm/barrier.h
8207+++ b/arch/powerpc/include/asm/barrier.h
8208@@ -76,7 +76,7 @@
8209 do { \
8210 compiletime_assert_atomic_type(*p); \
8211 smp_lwsync(); \
8212- ACCESS_ONCE(*p) = (v); \
8213+ ACCESS_ONCE_RW(*p) = (v); \
8214 } while (0)
8215
8216 #define smp_load_acquire(p) \
8217diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8218index 34a05a1..a1f2c67 100644
8219--- a/arch/powerpc/include/asm/cache.h
8220+++ b/arch/powerpc/include/asm/cache.h
8221@@ -4,6 +4,7 @@
8222 #ifdef __KERNEL__
8223
8224 #include <asm/reg.h>
8225+#include <linux/const.h>
8226
8227 /* bytes per L1 cache line */
8228 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8229@@ -23,7 +24,7 @@
8230 #define L1_CACHE_SHIFT 7
8231 #endif
8232
8233-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8234+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8235
8236 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8237
8238diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8239index 57d289a..b36c98c 100644
8240--- a/arch/powerpc/include/asm/elf.h
8241+++ b/arch/powerpc/include/asm/elf.h
8242@@ -30,6 +30,18 @@
8243
8244 #define ELF_ET_DYN_BASE 0x20000000
8245
8246+#ifdef CONFIG_PAX_ASLR
8247+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8248+
8249+#ifdef __powerpc64__
8250+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8251+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8252+#else
8253+#define PAX_DELTA_MMAP_LEN 15
8254+#define PAX_DELTA_STACK_LEN 15
8255+#endif
8256+#endif
8257+
8258 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8259
8260 /*
8261@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8262 (0x7ff >> (PAGE_SHIFT - 12)) : \
8263 (0x3ffff >> (PAGE_SHIFT - 12)))
8264
8265-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8266-#define arch_randomize_brk arch_randomize_brk
8267-
8268-
8269 #ifdef CONFIG_SPU_BASE
8270 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8271 #define NT_SPU 1
8272diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8273index 8196e9c..d83a9f3 100644
8274--- a/arch/powerpc/include/asm/exec.h
8275+++ b/arch/powerpc/include/asm/exec.h
8276@@ -4,6 +4,6 @@
8277 #ifndef _ASM_POWERPC_EXEC_H
8278 #define _ASM_POWERPC_EXEC_H
8279
8280-extern unsigned long arch_align_stack(unsigned long sp);
8281+#define arch_align_stack(x) ((x) & ~0xfUL)
8282
8283 #endif /* _ASM_POWERPC_EXEC_H */
8284diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8285index 5acabbd..7ea14fa 100644
8286--- a/arch/powerpc/include/asm/kmap_types.h
8287+++ b/arch/powerpc/include/asm/kmap_types.h
8288@@ -10,7 +10,7 @@
8289 * 2 of the License, or (at your option) any later version.
8290 */
8291
8292-#define KM_TYPE_NR 16
8293+#define KM_TYPE_NR 17
8294
8295 #endif /* __KERNEL__ */
8296 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8297diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8298index b8da913..c02b593 100644
8299--- a/arch/powerpc/include/asm/local.h
8300+++ b/arch/powerpc/include/asm/local.h
8301@@ -9,21 +9,65 @@ typedef struct
8302 atomic_long_t a;
8303 } local_t;
8304
8305+typedef struct
8306+{
8307+ atomic_long_unchecked_t a;
8308+} local_unchecked_t;
8309+
8310 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8311
8312 #define local_read(l) atomic_long_read(&(l)->a)
8313+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8314 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8315+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8316
8317 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8318+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8319 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8320+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8321 #define local_inc(l) atomic_long_inc(&(l)->a)
8322+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8323 #define local_dec(l) atomic_long_dec(&(l)->a)
8324+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8325
8326 static __inline__ long local_add_return(long a, local_t *l)
8327 {
8328 long t;
8329
8330 __asm__ __volatile__(
8331+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8332+
8333+#ifdef CONFIG_PAX_REFCOUNT
8334+" mcrxr cr0\n"
8335+" addo. %0,%1,%0\n"
8336+" bf 4*cr0+so, 3f\n"
8337+"2:.long " "0x00c00b00""\n"
8338+#else
8339+" add %0,%1,%0\n"
8340+#endif
8341+
8342+"3:\n"
8343+ PPC405_ERR77(0,%2)
8344+ PPC_STLCX "%0,0,%2 \n\
8345+ bne- 1b"
8346+
8347+#ifdef CONFIG_PAX_REFCOUNT
8348+"\n4:\n"
8349+ _ASM_EXTABLE(2b, 4b)
8350+#endif
8351+
8352+ : "=&r" (t)
8353+ : "r" (a), "r" (&(l->a.counter))
8354+ : "cc", "memory");
8355+
8356+ return t;
8357+}
8358+
8359+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8360+{
8361+ long t;
8362+
8363+ __asm__ __volatile__(
8364 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8365 add %0,%1,%0\n"
8366 PPC405_ERR77(0,%2)
8367@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8368
8369 #define local_cmpxchg(l, o, n) \
8370 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8371+#define local_cmpxchg_unchecked(l, o, n) \
8372+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8373 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8374
8375 /**
8376diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8377index 8565c25..2865190 100644
8378--- a/arch/powerpc/include/asm/mman.h
8379+++ b/arch/powerpc/include/asm/mman.h
8380@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8381 }
8382 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8383
8384-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8385+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8386 {
8387 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8388 }
8389diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8390index 69c0598..2c56964 100644
8391--- a/arch/powerpc/include/asm/page.h
8392+++ b/arch/powerpc/include/asm/page.h
8393@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8394 * and needs to be executable. This means the whole heap ends
8395 * up being executable.
8396 */
8397-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8398- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8399+#define VM_DATA_DEFAULT_FLAGS32 \
8400+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8401+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8402
8403 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8404 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8405@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8406 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8407 #endif
8408
8409+#define ktla_ktva(addr) (addr)
8410+#define ktva_ktla(addr) (addr)
8411+
8412 #ifndef CONFIG_PPC_BOOK3S_64
8413 /*
8414 * Use the top bit of the higher-level page table entries to indicate whether
8415diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8416index d908a46..3753f71 100644
8417--- a/arch/powerpc/include/asm/page_64.h
8418+++ b/arch/powerpc/include/asm/page_64.h
8419@@ -172,15 +172,18 @@ do { \
8420 * stack by default, so in the absence of a PT_GNU_STACK program header
8421 * we turn execute permission off.
8422 */
8423-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8424- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8425+#define VM_STACK_DEFAULT_FLAGS32 \
8426+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8427+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8428
8429 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8430 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8431
8432+#ifndef CONFIG_PAX_PAGEEXEC
8433 #define VM_STACK_DEFAULT_FLAGS \
8434 (is_32bit_task() ? \
8435 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8436+#endif
8437
8438 #include <asm-generic/getorder.h>
8439
8440diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8441index 4b0be20..c15a27d 100644
8442--- a/arch/powerpc/include/asm/pgalloc-64.h
8443+++ b/arch/powerpc/include/asm/pgalloc-64.h
8444@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8445 #ifndef CONFIG_PPC_64K_PAGES
8446
8447 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8448+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8449
8450 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8451 {
8452@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8453 pud_set(pud, (unsigned long)pmd);
8454 }
8455
8456+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8457+{
8458+ pud_populate(mm, pud, pmd);
8459+}
8460+
8461 #define pmd_populate(mm, pmd, pte_page) \
8462 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8463 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8464@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8465 #endif
8466
8467 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8468+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8469
8470 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8471 pte_t *pte)
8472diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8473index 9835ac4..900430f 100644
8474--- a/arch/powerpc/include/asm/pgtable.h
8475+++ b/arch/powerpc/include/asm/pgtable.h
8476@@ -2,6 +2,7 @@
8477 #define _ASM_POWERPC_PGTABLE_H
8478 #ifdef __KERNEL__
8479
8480+#include <linux/const.h>
8481 #ifndef __ASSEMBLY__
8482 #include <linux/mmdebug.h>
8483 #include <linux/mmzone.h>
8484diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8485index 62cfb0c..50c6402 100644
8486--- a/arch/powerpc/include/asm/pte-hash32.h
8487+++ b/arch/powerpc/include/asm/pte-hash32.h
8488@@ -20,6 +20,7 @@
8489 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8490 #define _PAGE_USER 0x004 /* usermode access allowed */
8491 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8492+#define _PAGE_EXEC _PAGE_GUARDED
8493 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8494 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8495 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8496diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8497index af56b5c..f86f3f6 100644
8498--- a/arch/powerpc/include/asm/reg.h
8499+++ b/arch/powerpc/include/asm/reg.h
8500@@ -253,6 +253,7 @@
8501 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8502 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8503 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8504+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8505 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8506 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8507 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8508diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8509index d607df5..08dc9ae 100644
8510--- a/arch/powerpc/include/asm/smp.h
8511+++ b/arch/powerpc/include/asm/smp.h
8512@@ -51,7 +51,7 @@ struct smp_ops_t {
8513 int (*cpu_disable)(void);
8514 void (*cpu_die)(unsigned int nr);
8515 int (*cpu_bootable)(unsigned int nr);
8516-};
8517+} __no_const;
8518
8519 extern void smp_send_debugger_break(void);
8520 extern void start_secondary_resume(void);
8521diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8522index 4dbe072..b803275 100644
8523--- a/arch/powerpc/include/asm/spinlock.h
8524+++ b/arch/powerpc/include/asm/spinlock.h
8525@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8526 __asm__ __volatile__(
8527 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8528 __DO_SIGN_EXTEND
8529-" addic. %0,%0,1\n\
8530- ble- 2f\n"
8531+
8532+#ifdef CONFIG_PAX_REFCOUNT
8533+" mcrxr cr0\n"
8534+" addico. %0,%0,1\n"
8535+" bf 4*cr0+so, 3f\n"
8536+"2:.long " "0x00c00b00""\n"
8537+#else
8538+" addic. %0,%0,1\n"
8539+#endif
8540+
8541+"3:\n"
8542+ "ble- 4f\n"
8543 PPC405_ERR77(0,%1)
8544 " stwcx. %0,0,%1\n\
8545 bne- 1b\n"
8546 PPC_ACQUIRE_BARRIER
8547-"2:" : "=&r" (tmp)
8548+"4:"
8549+
8550+#ifdef CONFIG_PAX_REFCOUNT
8551+ _ASM_EXTABLE(2b,4b)
8552+#endif
8553+
8554+ : "=&r" (tmp)
8555 : "r" (&rw->lock)
8556 : "cr0", "xer", "memory");
8557
8558@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8559 __asm__ __volatile__(
8560 "# read_unlock\n\t"
8561 PPC_RELEASE_BARRIER
8562-"1: lwarx %0,0,%1\n\
8563- addic %0,%0,-1\n"
8564+"1: lwarx %0,0,%1\n"
8565+
8566+#ifdef CONFIG_PAX_REFCOUNT
8567+" mcrxr cr0\n"
8568+" addico. %0,%0,-1\n"
8569+" bf 4*cr0+so, 3f\n"
8570+"2:.long " "0x00c00b00""\n"
8571+#else
8572+" addic. %0,%0,-1\n"
8573+#endif
8574+
8575+"3:\n"
8576 PPC405_ERR77(0,%1)
8577 " stwcx. %0,0,%1\n\
8578 bne- 1b"
8579+
8580+#ifdef CONFIG_PAX_REFCOUNT
8581+"\n4:\n"
8582+ _ASM_EXTABLE(2b, 4b)
8583+#endif
8584+
8585 : "=&r"(tmp)
8586 : "r"(&rw->lock)
8587 : "cr0", "xer", "memory");
8588diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8589index 7248979..80b75de 100644
8590--- a/arch/powerpc/include/asm/thread_info.h
8591+++ b/arch/powerpc/include/asm/thread_info.h
8592@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
8593 #if defined(CONFIG_PPC64)
8594 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8595 #endif
8596+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8597+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8598
8599 /* as above, but as bit values */
8600 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8601@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
8602 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8603 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8604 #define _TIF_NOHZ (1<<TIF_NOHZ)
8605+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8606 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8607 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8608- _TIF_NOHZ)
8609+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8610
8611 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8612 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8613diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8614index a0c071d..49cdc7f 100644
8615--- a/arch/powerpc/include/asm/uaccess.h
8616+++ b/arch/powerpc/include/asm/uaccess.h
8617@@ -58,6 +58,7 @@
8618
8619 #endif
8620
8621+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8622 #define access_ok(type, addr, size) \
8623 (__chk_user_ptr(addr), \
8624 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8625@@ -318,52 +319,6 @@ do { \
8626 extern unsigned long __copy_tofrom_user(void __user *to,
8627 const void __user *from, unsigned long size);
8628
8629-#ifndef __powerpc64__
8630-
8631-static inline unsigned long copy_from_user(void *to,
8632- const void __user *from, unsigned long n)
8633-{
8634- unsigned long over;
8635-
8636- if (access_ok(VERIFY_READ, from, n))
8637- return __copy_tofrom_user((__force void __user *)to, from, n);
8638- if ((unsigned long)from < TASK_SIZE) {
8639- over = (unsigned long)from + n - TASK_SIZE;
8640- return __copy_tofrom_user((__force void __user *)to, from,
8641- n - over) + over;
8642- }
8643- return n;
8644-}
8645-
8646-static inline unsigned long copy_to_user(void __user *to,
8647- const void *from, unsigned long n)
8648-{
8649- unsigned long over;
8650-
8651- if (access_ok(VERIFY_WRITE, to, n))
8652- return __copy_tofrom_user(to, (__force void __user *)from, n);
8653- if ((unsigned long)to < TASK_SIZE) {
8654- over = (unsigned long)to + n - TASK_SIZE;
8655- return __copy_tofrom_user(to, (__force void __user *)from,
8656- n - over) + over;
8657- }
8658- return n;
8659-}
8660-
8661-#else /* __powerpc64__ */
8662-
8663-#define __copy_in_user(to, from, size) \
8664- __copy_tofrom_user((to), (from), (size))
8665-
8666-extern unsigned long copy_from_user(void *to, const void __user *from,
8667- unsigned long n);
8668-extern unsigned long copy_to_user(void __user *to, const void *from,
8669- unsigned long n);
8670-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8671- unsigned long n);
8672-
8673-#endif /* __powerpc64__ */
8674-
8675 static inline unsigned long __copy_from_user_inatomic(void *to,
8676 const void __user *from, unsigned long n)
8677 {
8678@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8679 if (ret == 0)
8680 return 0;
8681 }
8682+
8683+ if (!__builtin_constant_p(n))
8684+ check_object_size(to, n, false);
8685+
8686 return __copy_tofrom_user((__force void __user *)to, from, n);
8687 }
8688
8689@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8690 if (ret == 0)
8691 return 0;
8692 }
8693+
8694+ if (!__builtin_constant_p(n))
8695+ check_object_size(from, n, true);
8696+
8697 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8698 }
8699
8700@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8701 return __copy_to_user_inatomic(to, from, size);
8702 }
8703
8704+#ifndef __powerpc64__
8705+
8706+static inline unsigned long __must_check copy_from_user(void *to,
8707+ const void __user *from, unsigned long n)
8708+{
8709+ unsigned long over;
8710+
8711+ if ((long)n < 0)
8712+ return n;
8713+
8714+ if (access_ok(VERIFY_READ, from, n)) {
8715+ if (!__builtin_constant_p(n))
8716+ check_object_size(to, n, false);
8717+ return __copy_tofrom_user((__force void __user *)to, from, n);
8718+ }
8719+ if ((unsigned long)from < TASK_SIZE) {
8720+ over = (unsigned long)from + n - TASK_SIZE;
8721+ if (!__builtin_constant_p(n - over))
8722+ check_object_size(to, n - over, false);
8723+ return __copy_tofrom_user((__force void __user *)to, from,
8724+ n - over) + over;
8725+ }
8726+ return n;
8727+}
8728+
8729+static inline unsigned long __must_check copy_to_user(void __user *to,
8730+ const void *from, unsigned long n)
8731+{
8732+ unsigned long over;
8733+
8734+ if ((long)n < 0)
8735+ return n;
8736+
8737+ if (access_ok(VERIFY_WRITE, to, n)) {
8738+ if (!__builtin_constant_p(n))
8739+ check_object_size(from, n, true);
8740+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8741+ }
8742+ if ((unsigned long)to < TASK_SIZE) {
8743+ over = (unsigned long)to + n - TASK_SIZE;
8744+ if (!__builtin_constant_p(n))
8745+ check_object_size(from, n - over, true);
8746+ return __copy_tofrom_user(to, (__force void __user *)from,
8747+ n - over) + over;
8748+ }
8749+ return n;
8750+}
8751+
8752+#else /* __powerpc64__ */
8753+
8754+#define __copy_in_user(to, from, size) \
8755+ __copy_tofrom_user((to), (from), (size))
8756+
8757+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8758+{
8759+ if ((long)n < 0 || n > INT_MAX)
8760+ return n;
8761+
8762+ if (!__builtin_constant_p(n))
8763+ check_object_size(to, n, false);
8764+
8765+ if (likely(access_ok(VERIFY_READ, from, n)))
8766+ n = __copy_from_user(to, from, n);
8767+ else
8768+ memset(to, 0, n);
8769+ return n;
8770+}
8771+
8772+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8773+{
8774+ if ((long)n < 0 || n > INT_MAX)
8775+ return n;
8776+
8777+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8778+ if (!__builtin_constant_p(n))
8779+ check_object_size(from, n, true);
8780+ n = __copy_to_user(to, from, n);
8781+ }
8782+ return n;
8783+}
8784+
8785+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8786+ unsigned long n);
8787+
8788+#endif /* __powerpc64__ */
8789+
8790 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8791
8792 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8793diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8794index 502cf69..53936a1 100644
8795--- a/arch/powerpc/kernel/Makefile
8796+++ b/arch/powerpc/kernel/Makefile
8797@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8798 CFLAGS_btext.o += -fPIC
8799 endif
8800
8801+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8802+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8803+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8804+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8805+
8806 ifdef CONFIG_FUNCTION_TRACER
8807 # Do not trace early boot code
8808 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8809@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8810 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8811 endif
8812
8813+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8814+
8815 obj-y := cputable.o ptrace.o syscalls.o \
8816 irq.o align.o signal_32.o pmc.o vdso.o \
8817 process.o systbl.o idle.o \
8818diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8819index 3e68d1c..72a5ee6 100644
8820--- a/arch/powerpc/kernel/exceptions-64e.S
8821+++ b/arch/powerpc/kernel/exceptions-64e.S
8822@@ -1010,6 +1010,7 @@ storage_fault_common:
8823 std r14,_DAR(r1)
8824 std r15,_DSISR(r1)
8825 addi r3,r1,STACK_FRAME_OVERHEAD
8826+ bl save_nvgprs
8827 mr r4,r14
8828 mr r5,r15
8829 ld r14,PACA_EXGEN+EX_R14(r13)
8830@@ -1018,8 +1019,7 @@ storage_fault_common:
8831 cmpdi r3,0
8832 bne- 1f
8833 b ret_from_except_lite
8834-1: bl save_nvgprs
8835- mr r5,r3
8836+1: mr r5,r3
8837 addi r3,r1,STACK_FRAME_OVERHEAD
8838 ld r4,_DAR(r1)
8839 bl bad_page_fault
8840diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8841index 9519e6b..13f6c38 100644
8842--- a/arch/powerpc/kernel/exceptions-64s.S
8843+++ b/arch/powerpc/kernel/exceptions-64s.S
8844@@ -1599,10 +1599,10 @@ handle_page_fault:
8845 11: ld r4,_DAR(r1)
8846 ld r5,_DSISR(r1)
8847 addi r3,r1,STACK_FRAME_OVERHEAD
8848+ bl save_nvgprs
8849 bl do_page_fault
8850 cmpdi r3,0
8851 beq+ 12f
8852- bl save_nvgprs
8853 mr r5,r3
8854 addi r3,r1,STACK_FRAME_OVERHEAD
8855 lwz r4,_DAR(r1)
8856diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8857index 4509603..cdb491f 100644
8858--- a/arch/powerpc/kernel/irq.c
8859+++ b/arch/powerpc/kernel/irq.c
8860@@ -460,6 +460,8 @@ void migrate_irqs(void)
8861 }
8862 #endif
8863
8864+extern void gr_handle_kernel_exploit(void);
8865+
8866 static inline void check_stack_overflow(void)
8867 {
8868 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8869@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8870 pr_err("do_IRQ: stack overflow: %ld\n",
8871 sp - sizeof(struct thread_info));
8872 dump_stack();
8873+ gr_handle_kernel_exploit();
8874 }
8875 #endif
8876 }
8877diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8878index c94d2e0..992a9ce 100644
8879--- a/arch/powerpc/kernel/module_32.c
8880+++ b/arch/powerpc/kernel/module_32.c
8881@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8882 me->arch.core_plt_section = i;
8883 }
8884 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8885- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8886+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8887 return -ENOEXEC;
8888 }
8889
8890@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8891
8892 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8893 /* Init, or core PLT? */
8894- if (location >= mod->module_core
8895- && location < mod->module_core + mod->core_size)
8896+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8897+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8898 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8899- else
8900+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8901+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8902 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8903+ else {
8904+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8905+ return ~0UL;
8906+ }
8907
8908 /* Find this entry, or if that fails, the next avail. entry */
8909 while (entry->jump[0]) {
8910@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8911 }
8912 #ifdef CONFIG_DYNAMIC_FTRACE
8913 module->arch.tramp =
8914- do_plt_call(module->module_core,
8915+ do_plt_call(module->module_core_rx,
8916 (unsigned long)ftrace_caller,
8917 sechdrs, module);
8918 #endif
8919diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8920index b4cc7be..1fe8bb3 100644
8921--- a/arch/powerpc/kernel/process.c
8922+++ b/arch/powerpc/kernel/process.c
8923@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8924 * Lookup NIP late so we have the best change of getting the
8925 * above info out without failing
8926 */
8927- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8928- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8929+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8930+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8931 #endif
8932 show_stack(current, (unsigned long *) regs->gpr[1]);
8933 if (!user_mode(regs))
8934@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8935 newsp = stack[0];
8936 ip = stack[STACK_FRAME_LR_SAVE];
8937 if (!firstframe || ip != lr) {
8938- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8939+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8940 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8941 if ((ip == rth) && curr_frame >= 0) {
8942- printk(" (%pS)",
8943+ printk(" (%pA)",
8944 (void *)current->ret_stack[curr_frame].ret);
8945 curr_frame--;
8946 }
8947@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8948 struct pt_regs *regs = (struct pt_regs *)
8949 (sp + STACK_FRAME_OVERHEAD);
8950 lr = regs->link;
8951- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8952+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8953 regs->trap, (void *)regs->nip, (void *)lr);
8954 firstframe = 1;
8955 }
8956@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8957 mtspr(SPRN_CTRLT, ctrl);
8958 }
8959 #endif /* CONFIG_PPC64 */
8960-
8961-unsigned long arch_align_stack(unsigned long sp)
8962-{
8963- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8964- sp -= get_random_int() & ~PAGE_MASK;
8965- return sp & ~0xf;
8966-}
8967-
8968-static inline unsigned long brk_rnd(void)
8969-{
8970- unsigned long rnd = 0;
8971-
8972- /* 8MB for 32bit, 1GB for 64bit */
8973- if (is_32bit_task())
8974- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8975- else
8976- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8977-
8978- return rnd << PAGE_SHIFT;
8979-}
8980-
8981-unsigned long arch_randomize_brk(struct mm_struct *mm)
8982-{
8983- unsigned long base = mm->brk;
8984- unsigned long ret;
8985-
8986-#ifdef CONFIG_PPC_STD_MMU_64
8987- /*
8988- * If we are using 1TB segments and we are allowed to randomise
8989- * the heap, we can put it above 1TB so it is backed by a 1TB
8990- * segment. Otherwise the heap will be in the bottom 1TB
8991- * which always uses 256MB segments and this may result in a
8992- * performance penalty.
8993- */
8994- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8995- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8996-#endif
8997-
8998- ret = PAGE_ALIGN(base + brk_rnd());
8999-
9000- if (ret < mm->brk)
9001- return mm->brk;
9002-
9003- return ret;
9004-}
9005-
9006diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9007index f21897b..28c0428 100644
9008--- a/arch/powerpc/kernel/ptrace.c
9009+++ b/arch/powerpc/kernel/ptrace.c
9010@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9011 return ret;
9012 }
9013
9014+#ifdef CONFIG_GRKERNSEC_SETXID
9015+extern void gr_delayed_cred_worker(void);
9016+#endif
9017+
9018 /*
9019 * We must return the syscall number to actually look up in the table.
9020 * This can be -1L to skip running any syscall at all.
9021@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9022
9023 secure_computing_strict(regs->gpr[0]);
9024
9025+#ifdef CONFIG_GRKERNSEC_SETXID
9026+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9027+ gr_delayed_cred_worker();
9028+#endif
9029+
9030 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9031 tracehook_report_syscall_entry(regs))
9032 /*
9033@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9034 {
9035 int step;
9036
9037+#ifdef CONFIG_GRKERNSEC_SETXID
9038+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9039+ gr_delayed_cred_worker();
9040+#endif
9041+
9042 audit_syscall_exit(regs);
9043
9044 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9045diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9046index d3a831a..3a33123 100644
9047--- a/arch/powerpc/kernel/signal_32.c
9048+++ b/arch/powerpc/kernel/signal_32.c
9049@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9050 /* Save user registers on the stack */
9051 frame = &rt_sf->uc.uc_mcontext;
9052 addr = frame;
9053- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9054+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9055 sigret = 0;
9056 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9057 } else {
9058diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9059index c7c24d2..1bf7039 100644
9060--- a/arch/powerpc/kernel/signal_64.c
9061+++ b/arch/powerpc/kernel/signal_64.c
9062@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9063 current->thread.fp_state.fpscr = 0;
9064
9065 /* Set up to return from userspace. */
9066- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9067+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9068 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9069 } else {
9070 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9071diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9072index 19e4744..28a8d7b 100644
9073--- a/arch/powerpc/kernel/traps.c
9074+++ b/arch/powerpc/kernel/traps.c
9075@@ -36,6 +36,7 @@
9076 #include <linux/debugfs.h>
9077 #include <linux/ratelimit.h>
9078 #include <linux/context_tracking.h>
9079+#include <linux/uaccess.h>
9080
9081 #include <asm/emulated_ops.h>
9082 #include <asm/pgtable.h>
9083@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9084 return flags;
9085 }
9086
9087+extern void gr_handle_kernel_exploit(void);
9088+
9089 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9090 int signr)
9091 {
9092@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9093 panic("Fatal exception in interrupt");
9094 if (panic_on_oops)
9095 panic("Fatal exception");
9096+
9097+ gr_handle_kernel_exploit();
9098+
9099 do_exit(signr);
9100 }
9101
9102@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9103 enum ctx_state prev_state = exception_enter();
9104 unsigned int reason = get_reason(regs);
9105
9106+#ifdef CONFIG_PAX_REFCOUNT
9107+ unsigned int bkpt;
9108+ const struct exception_table_entry *entry;
9109+
9110+ if (reason & REASON_ILLEGAL) {
9111+ /* Check if PaX bad instruction */
9112+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9113+ current->thread.trap_nr = 0;
9114+ pax_report_refcount_overflow(regs);
9115+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9116+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9117+ regs->nip = entry->fixup;
9118+ return;
9119+ }
9120+ /* fixup_exception() could not handle */
9121+ goto bail;
9122+ }
9123+ }
9124+#endif
9125+
9126 /* We can now get here via a FP Unavailable exception if the core
9127 * has no FPU, in that case the reason flags will be 0 */
9128
9129diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9130index 305eb0d..accc5b40 100644
9131--- a/arch/powerpc/kernel/vdso.c
9132+++ b/arch/powerpc/kernel/vdso.c
9133@@ -34,6 +34,7 @@
9134 #include <asm/vdso.h>
9135 #include <asm/vdso_datapage.h>
9136 #include <asm/setup.h>
9137+#include <asm/mman.h>
9138
9139 #undef DEBUG
9140
9141@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9142 vdso_base = VDSO32_MBASE;
9143 #endif
9144
9145- current->mm->context.vdso_base = 0;
9146+ current->mm->context.vdso_base = ~0UL;
9147
9148 /* vDSO has a problem and was disabled, just don't "enable" it for the
9149 * process
9150@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9151 vdso_base = get_unmapped_area(NULL, vdso_base,
9152 (vdso_pages << PAGE_SHIFT) +
9153 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9154- 0, 0);
9155+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9156 if (IS_ERR_VALUE(vdso_base)) {
9157 rc = vdso_base;
9158 goto fail_mmapsem;
9159diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9160index 27c0fac..6ec4a32 100644
9161--- a/arch/powerpc/kvm/powerpc.c
9162+++ b/arch/powerpc/kvm/powerpc.c
9163@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9164 }
9165 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9166
9167-int kvm_arch_init(void *opaque)
9168+int kvm_arch_init(const void *opaque)
9169 {
9170 return 0;
9171 }
9172diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9173index 5eea6f3..5d10396 100644
9174--- a/arch/powerpc/lib/usercopy_64.c
9175+++ b/arch/powerpc/lib/usercopy_64.c
9176@@ -9,22 +9,6 @@
9177 #include <linux/module.h>
9178 #include <asm/uaccess.h>
9179
9180-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9181-{
9182- if (likely(access_ok(VERIFY_READ, from, n)))
9183- n = __copy_from_user(to, from, n);
9184- else
9185- memset(to, 0, n);
9186- return n;
9187-}
9188-
9189-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9190-{
9191- if (likely(access_ok(VERIFY_WRITE, to, n)))
9192- n = __copy_to_user(to, from, n);
9193- return n;
9194-}
9195-
9196 unsigned long copy_in_user(void __user *to, const void __user *from,
9197 unsigned long n)
9198 {
9199@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9200 return n;
9201 }
9202
9203-EXPORT_SYMBOL(copy_from_user);
9204-EXPORT_SYMBOL(copy_to_user);
9205 EXPORT_SYMBOL(copy_in_user);
9206
9207diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9208index b396868..3eb6b9f 100644
9209--- a/arch/powerpc/mm/fault.c
9210+++ b/arch/powerpc/mm/fault.c
9211@@ -33,6 +33,10 @@
9212 #include <linux/ratelimit.h>
9213 #include <linux/context_tracking.h>
9214 #include <linux/hugetlb.h>
9215+#include <linux/slab.h>
9216+#include <linux/pagemap.h>
9217+#include <linux/compiler.h>
9218+#include <linux/unistd.h>
9219
9220 #include <asm/firmware.h>
9221 #include <asm/page.h>
9222@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9223 }
9224 #endif
9225
9226+#ifdef CONFIG_PAX_PAGEEXEC
9227+/*
9228+ * PaX: decide what to do with offenders (regs->nip = fault address)
9229+ *
9230+ * returns 1 when task should be killed
9231+ */
9232+static int pax_handle_fetch_fault(struct pt_regs *regs)
9233+{
9234+ return 1;
9235+}
9236+
9237+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9238+{
9239+ unsigned long i;
9240+
9241+ printk(KERN_ERR "PAX: bytes at PC: ");
9242+ for (i = 0; i < 5; i++) {
9243+ unsigned int c;
9244+ if (get_user(c, (unsigned int __user *)pc+i))
9245+ printk(KERN_CONT "???????? ");
9246+ else
9247+ printk(KERN_CONT "%08x ", c);
9248+ }
9249+ printk("\n");
9250+}
9251+#endif
9252+
9253 /*
9254 * Check whether the instruction at regs->nip is a store using
9255 * an update addressing form which will update r1.
9256@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9257 * indicate errors in DSISR but can validly be set in SRR1.
9258 */
9259 if (trap == 0x400)
9260- error_code &= 0x48200000;
9261+ error_code &= 0x58200000;
9262 else
9263 is_write = error_code & DSISR_ISSTORE;
9264 #else
9265@@ -383,12 +414,16 @@ good_area:
9266 * "undefined". Of those that can be set, this is the only
9267 * one which seems bad.
9268 */
9269- if (error_code & 0x10000000)
9270+ if (error_code & DSISR_GUARDED)
9271 /* Guarded storage error. */
9272 goto bad_area;
9273 #endif /* CONFIG_8xx */
9274
9275 if (is_exec) {
9276+#ifdef CONFIG_PPC_STD_MMU
9277+ if (error_code & DSISR_GUARDED)
9278+ goto bad_area;
9279+#endif
9280 /*
9281 * Allow execution from readable areas if the MMU does not
9282 * provide separate controls over reading and executing.
9283@@ -483,6 +518,23 @@ bad_area:
9284 bad_area_nosemaphore:
9285 /* User mode accesses cause a SIGSEGV */
9286 if (user_mode(regs)) {
9287+
9288+#ifdef CONFIG_PAX_PAGEEXEC
9289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9290+#ifdef CONFIG_PPC_STD_MMU
9291+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9292+#else
9293+ if (is_exec && regs->nip == address) {
9294+#endif
9295+ switch (pax_handle_fetch_fault(regs)) {
9296+ }
9297+
9298+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9299+ do_group_exit(SIGKILL);
9300+ }
9301+ }
9302+#endif
9303+
9304 _exception(SIGSEGV, regs, code, address);
9305 goto bail;
9306 }
9307diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9308index cb8bdbe..cde4bc7 100644
9309--- a/arch/powerpc/mm/mmap.c
9310+++ b/arch/powerpc/mm/mmap.c
9311@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9312 return sysctl_legacy_va_layout;
9313 }
9314
9315-static unsigned long mmap_rnd(void)
9316+static unsigned long mmap_rnd(struct mm_struct *mm)
9317 {
9318 unsigned long rnd = 0;
9319
9320+#ifdef CONFIG_PAX_RANDMMAP
9321+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9322+#endif
9323+
9324 if (current->flags & PF_RANDOMIZE) {
9325 /* 8MB for 32bit, 1GB for 64bit */
9326 if (is_32bit_task())
9327@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9328 return rnd << PAGE_SHIFT;
9329 }
9330
9331-static inline unsigned long mmap_base(void)
9332+static inline unsigned long mmap_base(struct mm_struct *mm)
9333 {
9334 unsigned long gap = rlimit(RLIMIT_STACK);
9335
9336@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9337 else if (gap > MAX_GAP)
9338 gap = MAX_GAP;
9339
9340- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9341+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9342 }
9343
9344 /*
9345@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9346 */
9347 if (mmap_is_legacy()) {
9348 mm->mmap_base = TASK_UNMAPPED_BASE;
9349+
9350+#ifdef CONFIG_PAX_RANDMMAP
9351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9352+ mm->mmap_base += mm->delta_mmap;
9353+#endif
9354+
9355 mm->get_unmapped_area = arch_get_unmapped_area;
9356 } else {
9357- mm->mmap_base = mmap_base();
9358+ mm->mmap_base = mmap_base(mm);
9359+
9360+#ifdef CONFIG_PAX_RANDMMAP
9361+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9362+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9363+#endif
9364+
9365 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9366 }
9367 }
9368diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9369index 0f432a7..abfe841 100644
9370--- a/arch/powerpc/mm/slice.c
9371+++ b/arch/powerpc/mm/slice.c
9372@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9373 if ((mm->task_size - len) < addr)
9374 return 0;
9375 vma = find_vma(mm, addr);
9376- return (!vma || (addr + len) <= vma->vm_start);
9377+ return check_heap_stack_gap(vma, addr, len, 0);
9378 }
9379
9380 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9381@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9382 info.align_offset = 0;
9383
9384 addr = TASK_UNMAPPED_BASE;
9385+
9386+#ifdef CONFIG_PAX_RANDMMAP
9387+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9388+ addr += mm->delta_mmap;
9389+#endif
9390+
9391 while (addr < TASK_SIZE) {
9392 info.low_limit = addr;
9393 if (!slice_scan_available(addr, available, 1, &addr))
9394@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9395 if (fixed && addr > (mm->task_size - len))
9396 return -ENOMEM;
9397
9398+#ifdef CONFIG_PAX_RANDMMAP
9399+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9400+ addr = 0;
9401+#endif
9402+
9403 /* If hint, make sure it matches our alignment restrictions */
9404 if (!fixed && addr) {
9405 addr = _ALIGN_UP(addr, 1ul << pshift);
9406diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9407index d966bbe..372124a 100644
9408--- a/arch/powerpc/platforms/cell/spufs/file.c
9409+++ b/arch/powerpc/platforms/cell/spufs/file.c
9410@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9411 return VM_FAULT_NOPAGE;
9412 }
9413
9414-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9415+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9416 unsigned long address,
9417- void *buf, int len, int write)
9418+ void *buf, size_t len, int write)
9419 {
9420 struct spu_context *ctx = vma->vm_file->private_data;
9421 unsigned long offset = address - vma->vm_start;
9422diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9423index fa934fe..c296056 100644
9424--- a/arch/s390/include/asm/atomic.h
9425+++ b/arch/s390/include/asm/atomic.h
9426@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9427 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9428 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9429
9430+#define atomic64_read_unchecked(v) atomic64_read(v)
9431+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9432+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9433+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9434+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9435+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9436+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9437+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9438+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9439+
9440 #endif /* __ARCH_S390_ATOMIC__ */
9441diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9442index 8d72471..5322500 100644
9443--- a/arch/s390/include/asm/barrier.h
9444+++ b/arch/s390/include/asm/barrier.h
9445@@ -42,7 +42,7 @@
9446 do { \
9447 compiletime_assert_atomic_type(*p); \
9448 barrier(); \
9449- ACCESS_ONCE(*p) = (v); \
9450+ ACCESS_ONCE_RW(*p) = (v); \
9451 } while (0)
9452
9453 #define smp_load_acquire(p) \
9454diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9455index 4d7ccac..d03d0ad 100644
9456--- a/arch/s390/include/asm/cache.h
9457+++ b/arch/s390/include/asm/cache.h
9458@@ -9,8 +9,10 @@
9459 #ifndef __ARCH_S390_CACHE_H
9460 #define __ARCH_S390_CACHE_H
9461
9462-#define L1_CACHE_BYTES 256
9463+#include <linux/const.h>
9464+
9465 #define L1_CACHE_SHIFT 8
9466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9467 #define NET_SKB_PAD 32
9468
9469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9470diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9471index c9c875d..b4b0e4c 100644
9472--- a/arch/s390/include/asm/elf.h
9473+++ b/arch/s390/include/asm/elf.h
9474@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9475 the loader. We need to make sure that it is out of the way of the program
9476 that it will "exec", and that there is sufficient room for the brk. */
9477
9478-extern unsigned long randomize_et_dyn(void);
9479-#define ELF_ET_DYN_BASE randomize_et_dyn()
9480+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9481+
9482+#ifdef CONFIG_PAX_ASLR
9483+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9484+
9485+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9486+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9487+#endif
9488
9489 /* This yields a mask that user programs can use to figure out what
9490 instruction set this CPU supports. */
9491@@ -225,9 +231,6 @@ struct linux_binprm;
9492 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9493 int arch_setup_additional_pages(struct linux_binprm *, int);
9494
9495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9496-#define arch_randomize_brk arch_randomize_brk
9497-
9498 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9499
9500 #endif
9501diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9502index c4a93d6..4d2a9b4 100644
9503--- a/arch/s390/include/asm/exec.h
9504+++ b/arch/s390/include/asm/exec.h
9505@@ -7,6 +7,6 @@
9506 #ifndef __ASM_EXEC_H
9507 #define __ASM_EXEC_H
9508
9509-extern unsigned long arch_align_stack(unsigned long sp);
9510+#define arch_align_stack(x) ((x) & ~0xfUL)
9511
9512 #endif /* __ASM_EXEC_H */
9513diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9514index cd4c68e..6764641 100644
9515--- a/arch/s390/include/asm/uaccess.h
9516+++ b/arch/s390/include/asm/uaccess.h
9517@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9518 __range_ok((unsigned long)(addr), (size)); \
9519 })
9520
9521+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9522 #define access_ok(type, addr, size) __access_ok(addr, size)
9523
9524 /*
9525@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9526 copy_to_user(void __user *to, const void *from, unsigned long n)
9527 {
9528 might_fault();
9529+
9530+ if ((long)n < 0)
9531+ return n;
9532+
9533 return __copy_to_user(to, from, n);
9534 }
9535
9536@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9537 static inline unsigned long __must_check
9538 copy_from_user(void *to, const void __user *from, unsigned long n)
9539 {
9540- unsigned int sz = __compiletime_object_size(to);
9541+ size_t sz = __compiletime_object_size(to);
9542
9543 might_fault();
9544- if (unlikely(sz != -1 && sz < n)) {
9545+
9546+ if ((long)n < 0)
9547+ return n;
9548+
9549+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9550 copy_from_user_overflow();
9551 return n;
9552 }
9553diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9554index 2ca9586..55682a9 100644
9555--- a/arch/s390/kernel/module.c
9556+++ b/arch/s390/kernel/module.c
9557@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9558
9559 /* Increase core size by size of got & plt and set start
9560 offsets for got and plt. */
9561- me->core_size = ALIGN(me->core_size, 4);
9562- me->arch.got_offset = me->core_size;
9563- me->core_size += me->arch.got_size;
9564- me->arch.plt_offset = me->core_size;
9565- me->core_size += me->arch.plt_size;
9566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9567+ me->arch.got_offset = me->core_size_rw;
9568+ me->core_size_rw += me->arch.got_size;
9569+ me->arch.plt_offset = me->core_size_rx;
9570+ me->core_size_rx += me->arch.plt_size;
9571 return 0;
9572 }
9573
9574@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9575 if (info->got_initialized == 0) {
9576 Elf_Addr *gotent;
9577
9578- gotent = me->module_core + me->arch.got_offset +
9579+ gotent = me->module_core_rw + me->arch.got_offset +
9580 info->got_offset;
9581 *gotent = val;
9582 info->got_initialized = 1;
9583@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9584 rc = apply_rela_bits(loc, val, 0, 64, 0);
9585 else if (r_type == R_390_GOTENT ||
9586 r_type == R_390_GOTPLTENT) {
9587- val += (Elf_Addr) me->module_core - loc;
9588+ val += (Elf_Addr) me->module_core_rw - loc;
9589 rc = apply_rela_bits(loc, val, 1, 32, 1);
9590 }
9591 break;
9592@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9594 if (info->plt_initialized == 0) {
9595 unsigned int *ip;
9596- ip = me->module_core + me->arch.plt_offset +
9597+ ip = me->module_core_rx + me->arch.plt_offset +
9598 info->plt_offset;
9599 #ifndef CONFIG_64BIT
9600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9601@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9602 val - loc + 0xffffUL < 0x1ffffeUL) ||
9603 (r_type == R_390_PLT32DBL &&
9604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9605- val = (Elf_Addr) me->module_core +
9606+ val = (Elf_Addr) me->module_core_rx +
9607 me->arch.plt_offset +
9608 info->plt_offset;
9609 val += rela->r_addend - loc;
9610@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9613 val = val + rela->r_addend -
9614- ((Elf_Addr) me->module_core + me->arch.got_offset);
9615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9616 if (r_type == R_390_GOTOFF16)
9617 rc = apply_rela_bits(loc, val, 0, 16, 0);
9618 else if (r_type == R_390_GOTOFF32)
9619@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9620 break;
9621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9625 rela->r_addend - loc;
9626 if (r_type == R_390_GOTPC)
9627 rc = apply_rela_bits(loc, val, 1, 32, 0);
9628diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9629index 13fc097..84d375f 100644
9630--- a/arch/s390/kernel/process.c
9631+++ b/arch/s390/kernel/process.c
9632@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
9633 }
9634 return 0;
9635 }
9636-
9637-unsigned long arch_align_stack(unsigned long sp)
9638-{
9639- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9640- sp -= get_random_int() & ~PAGE_MASK;
9641- return sp & ~0xf;
9642-}
9643-
9644-static inline unsigned long brk_rnd(void)
9645-{
9646- /* 8MB for 32bit, 1GB for 64bit */
9647- if (is_32bit_task())
9648- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9649- else
9650- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9651-}
9652-
9653-unsigned long arch_randomize_brk(struct mm_struct *mm)
9654-{
9655- unsigned long ret;
9656-
9657- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9658- return (ret > mm->brk) ? ret : mm->brk;
9659-}
9660diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9661index 179a2c2..371e85c 100644
9662--- a/arch/s390/mm/mmap.c
9663+++ b/arch/s390/mm/mmap.c
9664@@ -204,9 +204,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9665 */
9666 if (mmap_is_legacy()) {
9667 mm->mmap_base = mmap_base_legacy();
9668+
9669+#ifdef CONFIG_PAX_RANDMMAP
9670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9671+ mm->mmap_base += mm->delta_mmap;
9672+#endif
9673+
9674 mm->get_unmapped_area = arch_get_unmapped_area;
9675 } else {
9676 mm->mmap_base = mmap_base();
9677+
9678+#ifdef CONFIG_PAX_RANDMMAP
9679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9681+#endif
9682+
9683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9684 }
9685 }
9686@@ -279,9 +291,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9687 */
9688 if (mmap_is_legacy()) {
9689 mm->mmap_base = mmap_base_legacy();
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base += mm->delta_mmap;
9694+#endif
9695+
9696 mm->get_unmapped_area = s390_get_unmapped_area;
9697 } else {
9698 mm->mmap_base = mmap_base();
9699+
9700+#ifdef CONFIG_PAX_RANDMMAP
9701+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9702+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9703+#endif
9704+
9705 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9706 }
9707 }
9708diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9709index ae3d59f..f65f075 100644
9710--- a/arch/score/include/asm/cache.h
9711+++ b/arch/score/include/asm/cache.h
9712@@ -1,7 +1,9 @@
9713 #ifndef _ASM_SCORE_CACHE_H
9714 #define _ASM_SCORE_CACHE_H
9715
9716+#include <linux/const.h>
9717+
9718 #define L1_CACHE_SHIFT 4
9719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9721
9722 #endif /* _ASM_SCORE_CACHE_H */
9723diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9724index f9f3cd5..58ff438 100644
9725--- a/arch/score/include/asm/exec.h
9726+++ b/arch/score/include/asm/exec.h
9727@@ -1,6 +1,6 @@
9728 #ifndef _ASM_SCORE_EXEC_H
9729 #define _ASM_SCORE_EXEC_H
9730
9731-extern unsigned long arch_align_stack(unsigned long sp);
9732+#define arch_align_stack(x) (x)
9733
9734 #endif /* _ASM_SCORE_EXEC_H */
9735diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9736index a1519ad3..e8ac1ff 100644
9737--- a/arch/score/kernel/process.c
9738+++ b/arch/score/kernel/process.c
9739@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9740
9741 return task_pt_regs(task)->cp0_epc;
9742 }
9743-
9744-unsigned long arch_align_stack(unsigned long sp)
9745-{
9746- return sp;
9747-}
9748diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9749index ef9e555..331bd29 100644
9750--- a/arch/sh/include/asm/cache.h
9751+++ b/arch/sh/include/asm/cache.h
9752@@ -9,10 +9,11 @@
9753 #define __ASM_SH_CACHE_H
9754 #ifdef __KERNEL__
9755
9756+#include <linux/const.h>
9757 #include <linux/init.h>
9758 #include <cpu/cache.h>
9759
9760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9761+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9762
9763 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9764
9765diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9766index 6777177..cb5e44f 100644
9767--- a/arch/sh/mm/mmap.c
9768+++ b/arch/sh/mm/mmap.c
9769@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9770 struct mm_struct *mm = current->mm;
9771 struct vm_area_struct *vma;
9772 int do_colour_align;
9773+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9774 struct vm_unmapped_area_info info;
9775
9776 if (flags & MAP_FIXED) {
9777@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9778 if (filp || (flags & MAP_SHARED))
9779 do_colour_align = 1;
9780
9781+#ifdef CONFIG_PAX_RANDMMAP
9782+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9783+#endif
9784+
9785 if (addr) {
9786 if (do_colour_align)
9787 addr = COLOUR_ALIGN(addr, pgoff);
9788@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9789 addr = PAGE_ALIGN(addr);
9790
9791 vma = find_vma(mm, addr);
9792- if (TASK_SIZE - len >= addr &&
9793- (!vma || addr + len <= vma->vm_start))
9794+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9795 return addr;
9796 }
9797
9798 info.flags = 0;
9799 info.length = len;
9800- info.low_limit = TASK_UNMAPPED_BASE;
9801+ info.low_limit = mm->mmap_base;
9802 info.high_limit = TASK_SIZE;
9803 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9804 info.align_offset = pgoff << PAGE_SHIFT;
9805@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9806 struct mm_struct *mm = current->mm;
9807 unsigned long addr = addr0;
9808 int do_colour_align;
9809+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9810 struct vm_unmapped_area_info info;
9811
9812 if (flags & MAP_FIXED) {
9813@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9814 if (filp || (flags & MAP_SHARED))
9815 do_colour_align = 1;
9816
9817+#ifdef CONFIG_PAX_RANDMMAP
9818+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9819+#endif
9820+
9821 /* requesting a specific address */
9822 if (addr) {
9823 if (do_colour_align)
9824@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9825 addr = PAGE_ALIGN(addr);
9826
9827 vma = find_vma(mm, addr);
9828- if (TASK_SIZE - len >= addr &&
9829- (!vma || addr + len <= vma->vm_start))
9830+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9831 return addr;
9832 }
9833
9834@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9835 VM_BUG_ON(addr != -ENOMEM);
9836 info.flags = 0;
9837 info.low_limit = TASK_UNMAPPED_BASE;
9838+
9839+#ifdef CONFIG_PAX_RANDMMAP
9840+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9841+ info.low_limit += mm->delta_mmap;
9842+#endif
9843+
9844 info.high_limit = TASK_SIZE;
9845 addr = vm_unmapped_area(&info);
9846 }
9847diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9848index 4082749..fd97781 100644
9849--- a/arch/sparc/include/asm/atomic_64.h
9850+++ b/arch/sparc/include/asm/atomic_64.h
9851@@ -15,18 +15,38 @@
9852 #define ATOMIC64_INIT(i) { (i) }
9853
9854 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9855+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9856+{
9857+ return ACCESS_ONCE(v->counter);
9858+}
9859 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9860+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9861+{
9862+ return ACCESS_ONCE(v->counter);
9863+}
9864
9865 #define atomic_set(v, i) (((v)->counter) = i)
9866+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9867+{
9868+ v->counter = i;
9869+}
9870 #define atomic64_set(v, i) (((v)->counter) = i)
9871+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9872+{
9873+ v->counter = i;
9874+}
9875
9876-#define ATOMIC_OP(op) \
9877-void atomic_##op(int, atomic_t *); \
9878-void atomic64_##op(long, atomic64_t *);
9879+#define __ATOMIC_OP(op, suffix) \
9880+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9881+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9882
9883-#define ATOMIC_OP_RETURN(op) \
9884-int atomic_##op##_return(int, atomic_t *); \
9885-long atomic64_##op##_return(long, atomic64_t *);
9886+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9887+
9888+#define __ATOMIC_OP_RETURN(op, suffix) \
9889+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9890+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9891+
9892+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9893
9894 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9895
9896@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9897
9898 #undef ATOMIC_OPS
9899 #undef ATOMIC_OP_RETURN
9900+#undef __ATOMIC_OP_RETURN
9901 #undef ATOMIC_OP
9902+#undef __ATOMIC_OP
9903
9904 #define atomic_dec_return(v) atomic_sub_return(1, v)
9905 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9906
9907 #define atomic_inc_return(v) atomic_add_return(1, v)
9908+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9909+{
9910+ return atomic_add_return_unchecked(1, v);
9911+}
9912 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9913+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9914+{
9915+ return atomic64_add_return_unchecked(1, v);
9916+}
9917
9918 /*
9919 * atomic_inc_and_test - increment and test
9920@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9921 * other cases.
9922 */
9923 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9924+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9925+{
9926+ return atomic_inc_return_unchecked(v) == 0;
9927+}
9928 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9929
9930 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9931@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9932 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9933
9934 #define atomic_inc(v) atomic_add(1, v)
9935+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9936+{
9937+ atomic_add_unchecked(1, v);
9938+}
9939 #define atomic64_inc(v) atomic64_add(1, v)
9940+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9941+{
9942+ atomic64_add_unchecked(1, v);
9943+}
9944
9945 #define atomic_dec(v) atomic_sub(1, v)
9946+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9947+{
9948+ atomic_sub_unchecked(1, v);
9949+}
9950 #define atomic64_dec(v) atomic64_sub(1, v)
9951+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9952+{
9953+ atomic64_sub_unchecked(1, v);
9954+}
9955
9956 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9957 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9958
9959 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9960+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9961+{
9962+ return cmpxchg(&v->counter, old, new);
9963+}
9964 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9966+{
9967+ return xchg(&v->counter, new);
9968+}
9969
9970 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9971 {
9972- int c, old;
9973+ int c, old, new;
9974 c = atomic_read(v);
9975 for (;;) {
9976- if (unlikely(c == (u)))
9977+ if (unlikely(c == u))
9978 break;
9979- old = atomic_cmpxchg((v), c, c + (a));
9980+
9981+ asm volatile("addcc %2, %0, %0\n"
9982+
9983+#ifdef CONFIG_PAX_REFCOUNT
9984+ "tvs %%icc, 6\n"
9985+#endif
9986+
9987+ : "=r" (new)
9988+ : "0" (c), "ir" (a)
9989+ : "cc");
9990+
9991+ old = atomic_cmpxchg(v, c, new);
9992 if (likely(old == c))
9993 break;
9994 c = old;
9995@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9996 #define atomic64_cmpxchg(v, o, n) \
9997 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9998 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9999+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10000+{
10001+ return xchg(&v->counter, new);
10002+}
10003
10004 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10005 {
10006- long c, old;
10007+ long c, old, new;
10008 c = atomic64_read(v);
10009 for (;;) {
10010- if (unlikely(c == (u)))
10011+ if (unlikely(c == u))
10012 break;
10013- old = atomic64_cmpxchg((v), c, c + (a));
10014+
10015+ asm volatile("addcc %2, %0, %0\n"
10016+
10017+#ifdef CONFIG_PAX_REFCOUNT
10018+ "tvs %%xcc, 6\n"
10019+#endif
10020+
10021+ : "=r" (new)
10022+ : "0" (c), "ir" (a)
10023+ : "cc");
10024+
10025+ old = atomic64_cmpxchg(v, c, new);
10026 if (likely(old == c))
10027 break;
10028 c = old;
10029 }
10030- return c != (u);
10031+ return c != u;
10032 }
10033
10034 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
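Every atomic_t primitive above gains an atomic_unchecked_t twin: under CONFIG_PAX_REFCOUNT the plain type is the overflow-trapped one, intended for reference counts, while the _unchecked variants stay wrap-tolerant for statistics such as the dcpage_flushes counters converted later in this patch. A minimal userspace illustration of the same split, with __builtin_add_overflow standing in for the addcc/tvs pair (illustrative only, not kernel code):

	#include <limits.h>
	#include <stdio.h>

	/* "checked" add: refuses to wrap, like addcc + tvs %icc, 6 */
	static int checked_add(int *counter, int a)
	{
		int sum;
		if (__builtin_add_overflow(*counter, a, &sum))
			return -1;	/* the kernel would trap to the refcount handler */
		*counter = sum;
		return 0;
	}

	int main(void)
	{
		int refcount = INT_MAX;
		if (checked_add(&refcount, 1) < 0)
			puts("checked counter: overflow caught, value preserved");

		unsigned int stat = INT_MAX;	/* "unchecked": wrapping is harmless */
		stat += 1;
		printf("unchecked counter wraps to %u\n", stat);
		return 0;
	}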
10035diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10036index 7664894..45a974b 100644
10037--- a/arch/sparc/include/asm/barrier_64.h
10038+++ b/arch/sparc/include/asm/barrier_64.h
10039@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10040 do { \
10041 compiletime_assert_atomic_type(*p); \
10042 barrier(); \
10043- ACCESS_ONCE(*p) = (v); \
10044+ ACCESS_ONCE_RW(*p) = (v); \
10045 } while (0)
10046
10047 #define smp_load_acquire(p) \
10048diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10049index 5bb6991..5c2132e 100644
10050--- a/arch/sparc/include/asm/cache.h
10051+++ b/arch/sparc/include/asm/cache.h
10052@@ -7,10 +7,12 @@
10053 #ifndef _SPARC_CACHE_H
10054 #define _SPARC_CACHE_H
10055
10056+#include <linux/const.h>
10057+
10058 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10059
10060 #define L1_CACHE_SHIFT 5
10061-#define L1_CACHE_BYTES 32
10062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10063
10064 #ifdef CONFIG_SPARC32
10065 #define SMP_CACHE_BYTES_SHIFT 5
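Deriving L1_CACHE_BYTES from L1_CACHE_SHIFT keeps the two constants in lock-step, and the _AC() wrapper makes the definition usable from both C and assembly: the UL suffix is pasted on only when the header is compiled as C. For reference, _AC() in include/uapi/linux/const.h reads:

	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X		/* asm: bare constant, no suffix */
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)	/* C: 1UL << L1_CACHE_SHIFT */
	#endif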
10066diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10067index a24e41f..47677ff 100644
10068--- a/arch/sparc/include/asm/elf_32.h
10069+++ b/arch/sparc/include/asm/elf_32.h
10070@@ -114,6 +114,13 @@ typedef struct {
10071
10072 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10073
10074+#ifdef CONFIG_PAX_ASLR
10075+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10076+
10077+#define PAX_DELTA_MMAP_LEN 16
10078+#define PAX_DELTA_STACK_LEN 16
10079+#endif
10080+
10081 /* This yields a mask that user programs can use to figure out what
10082 instruction set this cpu supports. This can NOT be done in userspace
10083 on Sparc. */
10084diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10085index 370ca1e..d4f4a98 100644
10086--- a/arch/sparc/include/asm/elf_64.h
10087+++ b/arch/sparc/include/asm/elf_64.h
10088@@ -189,6 +189,13 @@ typedef struct {
10089 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10090 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10091
10092+#ifdef CONFIG_PAX_ASLR
10093+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10094+
10095+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10096+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10097+#endif
10098+
10099 extern unsigned long sparc64_elf_hwcap;
10100 #define ELF_HWCAP sparc64_elf_hwcap
10101
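PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are entropy widths in bits: elsewhere in this patch the ELF loader derives the per-process deltas by masking a random value to that many bits and shifting the result up by PAGE_SHIFT, so a 64-bit task gets 28 bits of page-granular mmap randomization and a 32-bit task 14. A userspace sketch of the arithmetic (the in-kernel random source and mm fields differ):

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT		13	/* sparc64 uses 8 KiB pages */
	#define PAX_DELTA_MMAP_LEN	28	/* 64-bit task, per elf_64.h above */

	int main(void)
	{
		unsigned long delta_mmap = ((unsigned long)random() &
			((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

		printf("delta_mmap = %#lx within a %#lx (2 TiB) slide window\n",
		       delta_mmap, 1UL << (PAX_DELTA_MMAP_LEN + PAGE_SHIFT));
		return 0;
	}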
10102diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10103index a3890da..f6a408e 100644
10104--- a/arch/sparc/include/asm/pgalloc_32.h
10105+++ b/arch/sparc/include/asm/pgalloc_32.h
10106@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10107 }
10108
10109 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10110+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10111
10112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10113 unsigned long address)
10114diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10115index 5e31871..13469c6 100644
10116--- a/arch/sparc/include/asm/pgalloc_64.h
10117+++ b/arch/sparc/include/asm/pgalloc_64.h
10118@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10119 }
10120
10121 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10122+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10123
10124 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10125 {
10126@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10127 }
10128
10129 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10130+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10131
10132 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10133 {
10134diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10135index 59ba6f6..4518128 100644
10136--- a/arch/sparc/include/asm/pgtable.h
10137+++ b/arch/sparc/include/asm/pgtable.h
10138@@ -5,4 +5,8 @@
10139 #else
10140 #include <asm/pgtable_32.h>
10141 #endif
10142+
10143+#define ktla_ktva(addr) (addr)
10144+#define ktva_ktla(addr) (addr)
10145+
10146 #endif
10147diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10148index f06b36a..bca3189 100644
10149--- a/arch/sparc/include/asm/pgtable_32.h
10150+++ b/arch/sparc/include/asm/pgtable_32.h
10151@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10152 #define PAGE_SHARED SRMMU_PAGE_SHARED
10153 #define PAGE_COPY SRMMU_PAGE_COPY
10154 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10155+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10156+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10157+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10158 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10159
10160 /* Top-level page directory - dummy used by init-mm.
10161@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10162
10163 /* xwr */
10164 #define __P000 PAGE_NONE
10165-#define __P001 PAGE_READONLY
10166-#define __P010 PAGE_COPY
10167-#define __P011 PAGE_COPY
10168+#define __P001 PAGE_READONLY_NOEXEC
10169+#define __P010 PAGE_COPY_NOEXEC
10170+#define __P011 PAGE_COPY_NOEXEC
10171 #define __P100 PAGE_READONLY
10172 #define __P101 PAGE_READONLY
10173 #define __P110 PAGE_COPY
10174 #define __P111 PAGE_COPY
10175
10176 #define __S000 PAGE_NONE
10177-#define __S001 PAGE_READONLY
10178-#define __S010 PAGE_SHARED
10179-#define __S011 PAGE_SHARED
10180+#define __S001 PAGE_READONLY_NOEXEC
10181+#define __S010 PAGE_SHARED_NOEXEC
10182+#define __S011 PAGE_SHARED_NOEXEC
10183 #define __S100 PAGE_READONLY
10184 #define __S101 PAGE_READONLY
10185 #define __S110 PAGE_SHARED
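The __P (private) and __S (shared) tables are indexed by a mapping's read/write/exec bits, the "xwr" digits in the macro names: bit 0 is read, bit 1 write, bit 2 exec. Rerouting every index that lacks the exec bit to a *_NOEXEC protection means a plain PROT_READ|PROT_WRITE mapping is no longer executable on SRMMU. A quick decode of the index computation (mirroring calc_vm_prot_bits(); userspace, illustrative):

	#include <stdio.h>
	#include <sys/mman.h>

	static unsigned int prot_to_index(int prot)
	{
		unsigned int idx = 0;
		if (prot & PROT_READ)  idx |= 1;
		if (prot & PROT_WRITE) idx |= 2;
		if (prot & PROT_EXEC)  idx |= 4;
		return idx;
	}

	int main(void)
	{
		/* rw- private maps through __P011 -> PAGE_COPY_NOEXEC */
		unsigned int idx = prot_to_index(PROT_READ | PROT_WRITE);
		printf("__P%u%u%u\n", (idx >> 2) & 1, (idx >> 1) & 1, idx & 1);
		return 0;
	}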
10186diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10187index ae51a11..eadfd03 100644
10188--- a/arch/sparc/include/asm/pgtsrmmu.h
10189+++ b/arch/sparc/include/asm/pgtsrmmu.h
10190@@ -111,6 +111,11 @@
10191 SRMMU_EXEC | SRMMU_REF)
10192 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10193 SRMMU_EXEC | SRMMU_REF)
10194+
10195+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10196+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10197+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10198+
10199 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10200 SRMMU_DIRTY | SRMMU_REF)
10201
10202diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10203index 29d64b1..4272fe8 100644
10204--- a/arch/sparc/include/asm/setup.h
10205+++ b/arch/sparc/include/asm/setup.h
10206@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10207 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10208
10209 /* init_64.c */
10210-extern atomic_t dcpage_flushes;
10211-extern atomic_t dcpage_flushes_xcall;
10212+extern atomic_unchecked_t dcpage_flushes;
10213+extern atomic_unchecked_t dcpage_flushes_xcall;
10214
10215 extern int sysctl_tsb_ratio;
10216 #endif
10217diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10218index 9689176..63c18ea 100644
10219--- a/arch/sparc/include/asm/spinlock_64.h
10220+++ b/arch/sparc/include/asm/spinlock_64.h
10221@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10222
10223 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10224
10225-static void inline arch_read_lock(arch_rwlock_t *lock)
10226+static inline void arch_read_lock(arch_rwlock_t *lock)
10227 {
10228 unsigned long tmp1, tmp2;
10229
10230 __asm__ __volatile__ (
10231 "1: ldsw [%2], %0\n"
10232 " brlz,pn %0, 2f\n"
10233-"4: add %0, 1, %1\n"
10234+"4: addcc %0, 1, %1\n"
10235+
10236+#ifdef CONFIG_PAX_REFCOUNT
10237+" tvs %%icc, 6\n"
10238+#endif
10239+
10240 " cas [%2], %0, %1\n"
10241 " cmp %0, %1\n"
10242 " bne,pn %%icc, 1b\n"
10243@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10244 " .previous"
10245 : "=&r" (tmp1), "=&r" (tmp2)
10246 : "r" (lock)
10247- : "memory");
10248+ : "memory", "cc");
10249 }
10250
10251-static int inline arch_read_trylock(arch_rwlock_t *lock)
10252+static inline int arch_read_trylock(arch_rwlock_t *lock)
10253 {
10254 int tmp1, tmp2;
10255
10256@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10257 "1: ldsw [%2], %0\n"
10258 " brlz,a,pn %0, 2f\n"
10259 " mov 0, %0\n"
10260-" add %0, 1, %1\n"
10261+" addcc %0, 1, %1\n"
10262+
10263+#ifdef CONFIG_PAX_REFCOUNT
10264+" tvs %%icc, 6\n"
10265+#endif
10266+
10267 " cas [%2], %0, %1\n"
10268 " cmp %0, %1\n"
10269 " bne,pn %%icc, 1b\n"
10270@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10271 return tmp1;
10272 }
10273
10274-static void inline arch_read_unlock(arch_rwlock_t *lock)
10275+static inline void arch_read_unlock(arch_rwlock_t *lock)
10276 {
10277 unsigned long tmp1, tmp2;
10278
10279 __asm__ __volatile__(
10280 "1: lduw [%2], %0\n"
10281-" sub %0, 1, %1\n"
10282+" subcc %0, 1, %1\n"
10283+
10284+#ifdef CONFIG_PAX_REFCOUNT
10285+" tvs %%icc, 6\n"
10286+#endif
10287+
10288 " cas [%2], %0, %1\n"
10289 " cmp %0, %1\n"
10290 " bne,pn %%xcc, 1b\n"
10291@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10292 : "memory");
10293 }
10294
10295-static void inline arch_write_lock(arch_rwlock_t *lock)
10296+static inline void arch_write_lock(arch_rwlock_t *lock)
10297 {
10298 unsigned long mask, tmp1, tmp2;
10299
10300@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10301 : "memory");
10302 }
10303
10304-static void inline arch_write_unlock(arch_rwlock_t *lock)
10305+static inline void arch_write_unlock(arch_rwlock_t *lock)
10306 {
10307 __asm__ __volatile__(
10308 " stw %%g0, [%0]"
10309@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10310 : "memory");
10311 }
10312
10313-static int inline arch_write_trylock(arch_rwlock_t *lock)
10314+static inline int arch_write_trylock(arch_rwlock_t *lock)
10315 {
10316 unsigned long mask, tmp1, tmp2, result;
10317
10318diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10319index fd7bd0a..2e2fa7a 100644
10320--- a/arch/sparc/include/asm/thread_info_32.h
10321+++ b/arch/sparc/include/asm/thread_info_32.h
10322@@ -47,6 +47,7 @@ struct thread_info {
10323 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10324 unsigned long rwbuf_stkptrs[NSWINS];
10325 unsigned long w_saved;
10326+ unsigned long lowest_stack;
10327 };
10328
10329 /*
10330diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10331index ff45516..73001ab 100644
10332--- a/arch/sparc/include/asm/thread_info_64.h
10333+++ b/arch/sparc/include/asm/thread_info_64.h
10334@@ -61,6 +61,8 @@ struct thread_info {
10335 struct pt_regs *kern_una_regs;
10336 unsigned int kern_una_insn;
10337
10338+ unsigned long lowest_stack;
10339+
10340 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10341 __attribute__ ((aligned(64)));
10342 };
10343@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10344 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10345 /* flag bit 4 is available */
10346 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10347-/* flag bit 6 is available */
10348+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10349 #define TIF_32BIT 7 /* 32-bit binary */
10350 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10351 #define TIF_SECCOMP 9 /* secure computing */
10352 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10353 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10354+
10355 /* NOTE: Thread flags >= 12 should be ones we have no interest
10356 * in using in assembly, else we can't use the mask as
10357 * an immediate value in instructions such as andcc.
10358@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10359 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10360 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10361 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10362+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10363
10364 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10365 _TIF_DO_NOTIFY_RESUME_MASK | \
10366 _TIF_NEED_RESCHED)
10367 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10368
10369+#define _TIF_WORK_SYSCALL \
10370+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10371+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10372+
10373 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10374
10375 /*
10376diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10377index bd56c28..4b63d83 100644
10378--- a/arch/sparc/include/asm/uaccess.h
10379+++ b/arch/sparc/include/asm/uaccess.h
10380@@ -1,5 +1,6 @@
10381 #ifndef ___ASM_SPARC_UACCESS_H
10382 #define ___ASM_SPARC_UACCESS_H
10383+
10384 #if defined(__sparc__) && defined(__arch64__)
10385 #include <asm/uaccess_64.h>
10386 #else
10387diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10388index 64ee103..388aef0 100644
10389--- a/arch/sparc/include/asm/uaccess_32.h
10390+++ b/arch/sparc/include/asm/uaccess_32.h
10391@@ -47,6 +47,7 @@
10392 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10393 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10394 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10395+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10396 #define access_ok(type, addr, size) \
10397 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10398
10399@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10400
10401 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10402 {
10403- if (n && __access_ok((unsigned long) to, n))
10404+ if ((long)n < 0)
10405+ return n;
10406+
10407+ if (n && __access_ok((unsigned long) to, n)) {
10408+ if (!__builtin_constant_p(n))
10409+ check_object_size(from, n, true);
10410 return __copy_user(to, (__force void __user *) from, n);
10411- else
10412+ } else
10413 return n;
10414 }
10415
10416 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10417 {
10418+ if ((long)n < 0)
10419+ return n;
10420+
10421+ if (!__builtin_constant_p(n))
10422+ check_object_size(from, n, true);
10423+
10424 return __copy_user(to, (__force void __user *) from, n);
10425 }
10426
10427 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) from, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) from, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(to, n, false);
10436 return __copy_user((__force void __user *) to, from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447 return __copy_user((__force void __user *) to, from, n);
10448 }
10449
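The added "(long)n < 0" guard rejects any length with the top bit set before __copy_user() runs; such a value is almost always an unsigned subtraction that went negative, which would otherwise turn into a multi-gigabyte kernel copy. The failure mode, illustrated in userspace:

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t have = 16, want = 32;
		size_t n = have - want;		/* underflows to SIZE_MAX - 15 */

		if ((long)n < 0)
			puts("bogus length rejected before any copy");
		return 0;
	}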
10450diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10451index a35194b..47dabc0d 100644
10452--- a/arch/sparc/include/asm/uaccess_64.h
10453+++ b/arch/sparc/include/asm/uaccess_64.h
10454@@ -10,6 +10,7 @@
10455 #include <linux/compiler.h>
10456 #include <linux/string.h>
10457 #include <linux/thread_info.h>
10458+#include <linux/kernel.h>
10459 #include <asm/asi.h>
10460 #include <asm/spitfire.h>
10461 #include <asm-generic/uaccess-unaligned.h>
10462@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10463 return 1;
10464 }
10465
10466+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10467+{
10468+ return 1;
10469+}
10470+
10471 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10472 {
10473 return 1;
10474@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10475 static inline unsigned long __must_check
10476 copy_from_user(void *to, const void __user *from, unsigned long size)
10477 {
10478- unsigned long ret = ___copy_from_user(to, from, size);
10479+ unsigned long ret;
10480
10481+ if ((long)size < 0 || size > INT_MAX)
10482+ return size;
10483+
10484+ if (!__builtin_constant_p(size))
10485+ check_object_size(to, size, false);
10486+
10487+ ret = ___copy_from_user(to, from, size);
10488 if (unlikely(ret))
10489 ret = copy_from_user_fixup(to, from, size);
10490
10491@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10492 static inline unsigned long __must_check
10493 copy_to_user(void __user *to, const void *from, unsigned long size)
10494 {
10495- unsigned long ret = ___copy_to_user(to, from, size);
10496+ unsigned long ret;
10497
10498+ if ((long)size < 0 || size > INT_MAX)
10499+ return size;
10500+
10501+ if (!__builtin_constant_p(size))
10502+ check_object_size(from, size, true);
10503+
10504+ ret = ___copy_to_user(to, from, size);
10505 if (unlikely(ret))
10506 ret = copy_to_user_fixup(to, from, size);
10507 return ret;
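As on the 32-bit side, check_object_size() -- the PAX_USERCOPY hook introduced elsewhere in this patch -- runs only for runtime-sized copies: __builtin_constant_p() folds to 1 when the compiler can prove the length, and constant-sized copies are treated as statically auditable. The builtin in action:

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		(void)argv;
		printf("literal: %d, runtime: %d\n",
		       __builtin_constant_p(sizeof(long)),	/* 1 */
		       __builtin_constant_p(argc));		/* 0 */
		return 0;
	}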
10508diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10509index 7cf9c6e..6206648 100644
10510--- a/arch/sparc/kernel/Makefile
10511+++ b/arch/sparc/kernel/Makefile
10512@@ -4,7 +4,7 @@
10513 #
10514
10515 asflags-y := -ansi
10516-ccflags-y := -Werror
10517+#ccflags-y := -Werror
10518
10519 extra-y := head_$(BITS).o
10520
10521diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10522index 50e7b62..79fae35 100644
10523--- a/arch/sparc/kernel/process_32.c
10524+++ b/arch/sparc/kernel/process_32.c
10525@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10526
10527 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10528 r->psr, r->pc, r->npc, r->y, print_tainted());
10529- printk("PC: <%pS>\n", (void *) r->pc);
10530+ printk("PC: <%pA>\n", (void *) r->pc);
10531 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10532 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10533 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10534 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10535 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10536 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10537- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10538+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10539
10540 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10541 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10542@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10543 rw = (struct reg_window32 *) fp;
10544 pc = rw->ins[7];
10545 printk("[%08lx : ", pc);
10546- printk("%pS ] ", (void *) pc);
10547+ printk("%pA ] ", (void *) pc);
10548 fp = rw->ins[6];
10549 } while (++count < 16);
10550 printk("\n");
10551diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10552index 46a5964..a35c62c 100644
10553--- a/arch/sparc/kernel/process_64.c
10554+++ b/arch/sparc/kernel/process_64.c
10555@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10556 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10557 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10558 if (regs->tstate & TSTATE_PRIV)
10559- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10560+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10561 }
10562
10563 void show_regs(struct pt_regs *regs)
10564@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10565
10566 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10567 regs->tpc, regs->tnpc, regs->y, print_tainted());
10568- printk("TPC: <%pS>\n", (void *) regs->tpc);
10569+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10570 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10571 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10572 regs->u_regs[3]);
10573@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10574 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10575 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10576 regs->u_regs[15]);
10577- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10578+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10579 show_regwindow(regs);
10580 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10581 }
10582@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10583 ((tp && tp->task) ? tp->task->pid : -1));
10584
10585 if (gp->tstate & TSTATE_PRIV) {
10586- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10587+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10588 (void *) gp->tpc,
10589 (void *) gp->o7,
10590 (void *) gp->i7,
10591diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10592index 79cc0d1..ec62734 100644
10593--- a/arch/sparc/kernel/prom_common.c
10594+++ b/arch/sparc/kernel/prom_common.c
10595@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10596
10597 unsigned int prom_early_allocated __initdata;
10598
10599-static struct of_pdt_ops prom_sparc_ops __initdata = {
10600+static struct of_pdt_ops prom_sparc_ops __initconst = {
10601 .nextprop = prom_common_nextprop,
10602 .getproplen = prom_getproplen,
10603 .getproperty = prom_getproperty,
10604diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10605index 9ddc492..27a5619 100644
10606--- a/arch/sparc/kernel/ptrace_64.c
10607+++ b/arch/sparc/kernel/ptrace_64.c
10608@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10609 return ret;
10610 }
10611
10612+#ifdef CONFIG_GRKERNSEC_SETXID
10613+extern void gr_delayed_cred_worker(void);
10614+#endif
10615+
10616 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10617 {
10618 int ret = 0;
10619@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10620 if (test_thread_flag(TIF_NOHZ))
10621 user_exit();
10622
10623+#ifdef CONFIG_GRKERNSEC_SETXID
10624+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10625+ gr_delayed_cred_worker();
10626+#endif
10627+
10628 if (test_thread_flag(TIF_SYSCALL_TRACE))
10629 ret = tracehook_report_syscall_entry(regs);
10630
10631@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10632 if (test_thread_flag(TIF_NOHZ))
10633 user_exit();
10634
10635+#ifdef CONFIG_GRKERNSEC_SETXID
10636+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10637+ gr_delayed_cred_worker();
10638+#endif
10639+
10640 audit_syscall_exit(regs);
10641
10642 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10643diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10644index 61139d9..c1a5f28 100644
10645--- a/arch/sparc/kernel/smp_64.c
10646+++ b/arch/sparc/kernel/smp_64.c
10647@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10648 return;
10649
10650 #ifdef CONFIG_DEBUG_DCFLUSH
10651- atomic_inc(&dcpage_flushes);
10652+ atomic_inc_unchecked(&dcpage_flushes);
10653 #endif
10654
10655 this_cpu = get_cpu();
10656@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10657 xcall_deliver(data0, __pa(pg_addr),
10658 (u64) pg_addr, cpumask_of(cpu));
10659 #ifdef CONFIG_DEBUG_DCFLUSH
10660- atomic_inc(&dcpage_flushes_xcall);
10661+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10662 #endif
10663 }
10664 }
10665@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10666 preempt_disable();
10667
10668 #ifdef CONFIG_DEBUG_DCFLUSH
10669- atomic_inc(&dcpage_flushes);
10670+ atomic_inc_unchecked(&dcpage_flushes);
10671 #endif
10672 data0 = 0;
10673 pg_addr = page_address(page);
10674@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10675 xcall_deliver(data0, __pa(pg_addr),
10676 (u64) pg_addr, cpu_online_mask);
10677 #ifdef CONFIG_DEBUG_DCFLUSH
10678- atomic_inc(&dcpage_flushes_xcall);
10679+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10680 #endif
10681 }
10682 __local_flush_dcache_page(page);
10683diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10684index 646988d..b88905f 100644
10685--- a/arch/sparc/kernel/sys_sparc_32.c
10686+++ b/arch/sparc/kernel/sys_sparc_32.c
10687@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10688 if (len > TASK_SIZE - PAGE_SIZE)
10689 return -ENOMEM;
10690 if (!addr)
10691- addr = TASK_UNMAPPED_BASE;
10692+ addr = current->mm->mmap_base;
10693
10694 info.flags = 0;
10695 info.length = len;
10696diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10697index 30e7ddb..266a3b0 100644
10698--- a/arch/sparc/kernel/sys_sparc_64.c
10699+++ b/arch/sparc/kernel/sys_sparc_64.c
10700@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10701 struct vm_area_struct * vma;
10702 unsigned long task_size = TASK_SIZE;
10703 int do_color_align;
10704+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10705 struct vm_unmapped_area_info info;
10706
10707 if (flags & MAP_FIXED) {
10708 /* We do not accept a shared mapping if it would violate
10709 * cache aliasing constraints.
10710 */
10711- if ((flags & MAP_SHARED) &&
10712+ if ((filp || (flags & MAP_SHARED)) &&
10713 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10714 return -EINVAL;
10715 return addr;
10716@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10717 if (filp || (flags & MAP_SHARED))
10718 do_color_align = 1;
10719
10720+#ifdef CONFIG_PAX_RANDMMAP
10721+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10722+#endif
10723+
10724 if (addr) {
10725 if (do_color_align)
10726 addr = COLOR_ALIGN(addr, pgoff);
10727@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10728 addr = PAGE_ALIGN(addr);
10729
10730 vma = find_vma(mm, addr);
10731- if (task_size - len >= addr &&
10732- (!vma || addr + len <= vma->vm_start))
10733+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10734 return addr;
10735 }
10736
10737 info.flags = 0;
10738 info.length = len;
10739- info.low_limit = TASK_UNMAPPED_BASE;
10740+ info.low_limit = mm->mmap_base;
10741 info.high_limit = min(task_size, VA_EXCLUDE_START);
10742 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10743 info.align_offset = pgoff << PAGE_SHIFT;
10744+ info.threadstack_offset = offset;
10745 addr = vm_unmapped_area(&info);
10746
10747 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10748 VM_BUG_ON(addr != -ENOMEM);
10749 info.low_limit = VA_EXCLUDE_END;
10750+
10751+#ifdef CONFIG_PAX_RANDMMAP
10752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10753+ info.low_limit += mm->delta_mmap;
10754+#endif
10755+
10756 info.high_limit = task_size;
10757 addr = vm_unmapped_area(&info);
10758 }
10759@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10760 unsigned long task_size = STACK_TOP32;
10761 unsigned long addr = addr0;
10762 int do_color_align;
10763+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10764 struct vm_unmapped_area_info info;
10765
10766 /* This should only ever run for 32-bit processes. */
10767@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10768 /* We do not accept a shared mapping if it would violate
10769 * cache aliasing constraints.
10770 */
10771- if ((flags & MAP_SHARED) &&
10772+ if ((filp || (flags & MAP_SHARED)) &&
10773 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10774 return -EINVAL;
10775 return addr;
10776@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10777 if (filp || (flags & MAP_SHARED))
10778 do_color_align = 1;
10779
10780+#ifdef CONFIG_PAX_RANDMMAP
10781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10782+#endif
10783+
10784 /* requesting a specific address */
10785 if (addr) {
10786 if (do_color_align)
10787@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10788 addr = PAGE_ALIGN(addr);
10789
10790 vma = find_vma(mm, addr);
10791- if (task_size - len >= addr &&
10792- (!vma || addr + len <= vma->vm_start))
10793+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10794 return addr;
10795 }
10796
10797@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10798 info.high_limit = mm->mmap_base;
10799 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10800 info.align_offset = pgoff << PAGE_SHIFT;
10801+ info.threadstack_offset = offset;
10802 addr = vm_unmapped_area(&info);
10803
10804 /*
10805@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10806 VM_BUG_ON(addr != -ENOMEM);
10807 info.flags = 0;
10808 info.low_limit = TASK_UNMAPPED_BASE;
10809+
10810+#ifdef CONFIG_PAX_RANDMMAP
10811+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10812+ info.low_limit += mm->delta_mmap;
10813+#endif
10814+
10815 info.high_limit = STACK_TOP32;
10816 addr = vm_unmapped_area(&info);
10817 }
10818@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10819 EXPORT_SYMBOL(get_fb_unmapped_area);
10820
10821 /* Essentially the same as PowerPC. */
10822-static unsigned long mmap_rnd(void)
10823+static unsigned long mmap_rnd(struct mm_struct *mm)
10824 {
10825 unsigned long rnd = 0UL;
10826
10827+#ifdef CONFIG_PAX_RANDMMAP
10828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10829+#endif
10830+
10831 if (current->flags & PF_RANDOMIZE) {
10832 unsigned long val = get_random_int();
10833 if (test_thread_flag(TIF_32BIT))
10834@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10835
10836 void arch_pick_mmap_layout(struct mm_struct *mm)
10837 {
10838- unsigned long random_factor = mmap_rnd();
10839+ unsigned long random_factor = mmap_rnd(mm);
10840 unsigned long gap;
10841
10842 /*
10843@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10844 gap == RLIM_INFINITY ||
10845 sysctl_legacy_va_layout) {
10846 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10847+
10848+#ifdef CONFIG_PAX_RANDMMAP
10849+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10850+ mm->mmap_base += mm->delta_mmap;
10851+#endif
10852+
10853 mm->get_unmapped_area = arch_get_unmapped_area;
10854 } else {
10855 /* We know it's 32-bit */
10856@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10857 gap = (task_size / 6 * 5);
10858
10859 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10860+
10861+#ifdef CONFIG_PAX_RANDMMAP
10862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10863+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10864+#endif
10865+
10866 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10867 }
10868 }
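The MAP_FIXED rejection near the top of this file enforces D-cache colouring: a caller-supplied address is only legal when it is congruent to the file offset modulo SHMLBA, and the patch widens the rule from shared mappings to every file-backed one. The arithmetic, checked in userspace (SHMLBA taken as 16 KiB purely for the demo; the real value comes from asm/shmparam.h):

	#include <stdio.h>

	#define PAGE_SHIFT	13		/* sparc64: 8 KiB pages */
	#define SHMLBA		0x4000UL	/* demo assumption: 16 KiB */

	int main(void)
	{
		unsigned long addr = 0x100000UL, pgoff = 3;
		unsigned long off = (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1);

		printf("addr %#lx, pgoff %lu -> %s\n", addr, pgoff,
		       off ? "rejected (-EINVAL): wrong cache colour"
			   : "accepted: colour-aligned");
		return 0;
	}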
10869diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10870index bb00089..e0ea580 100644
10871--- a/arch/sparc/kernel/syscalls.S
10872+++ b/arch/sparc/kernel/syscalls.S
10873@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10874 #endif
10875 .align 32
10876 1: ldx [%g6 + TI_FLAGS], %l5
10877- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10878+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10879 be,pt %icc, rtrap
10880 nop
10881 call syscall_trace_leave
10882@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10883
10884 srl %i3, 0, %o3 ! IEU0
10885 srl %i2, 0, %o2 ! IEU0 Group
10886- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10887+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10888 bne,pn %icc, linux_syscall_trace32 ! CTI
10889 mov %i0, %l5 ! IEU1
10890 5: call %l7 ! CTI Group brk forced
10891@@ -218,7 +218,7 @@ linux_sparc_syscall:
10892
10893 mov %i3, %o3 ! IEU1
10894 mov %i4, %o4 ! IEU0 Group
10895- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10896+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10897 bne,pn %icc, linux_syscall_trace ! CTI Group
10898 mov %i0, %l5 ! IEU0
10899 2: call %l7 ! CTI Group brk forced
10900@@ -233,7 +233,7 @@ ret_sys_call:
10901
10902 cmp %o0, -ERESTART_RESTARTBLOCK
10903 bgeu,pn %xcc, 1f
10904- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10905+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10906 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10907
10908 2:
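Folding the five-flag disjunction into _TIF_WORK_SYSCALL keeps the assembly legible, and it only works because every participating flag sits below bit 12: the immediate operand of andcc is a sign-extended 13-bit field, exactly the constraint the comment in thread_info_64.h spells out. A compile-time sanity check (bit numbers from the header above; TIF_SYSCALL_TRACE = 0 is recalled from the full header, not shown in these hunks):

	#define TIF_SYSCALL_TRACE	0	/* assumption: defined above the shown hunk */
	#define TIF_GRSEC_SETXID	6
	#define TIF_NOHZ		8
	#define TIF_SECCOMP		9
	#define TIF_SYSCALL_AUDIT	10
	#define TIF_SYSCALL_TRACEPOINT	11

	#define _TIF_WORK_SYSCALL \
		((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SECCOMP) | \
		 (1 << TIF_SYSCALL_AUDIT) | (1 << TIF_SYSCALL_TRACEPOINT) | \
		 (1 << TIF_NOHZ) | (1 << TIF_GRSEC_SETXID))

	_Static_assert(_TIF_WORK_SYSCALL < (1 << 12),
		       "mask must fit andcc's 13-bit signed immediate");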
10909diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10910index 6fd386c5..6907d81 100644
10911--- a/arch/sparc/kernel/traps_32.c
10912+++ b/arch/sparc/kernel/traps_32.c
10913@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10914 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10915 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10916
10917+extern void gr_handle_kernel_exploit(void);
10918+
10919 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10920 {
10921 static int die_counter;
10922@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10923 count++ < 30 &&
10924 (((unsigned long) rw) >= PAGE_OFFSET) &&
10925 !(((unsigned long) rw) & 0x7)) {
10926- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10927+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10928 (void *) rw->ins[7]);
10929 rw = (struct reg_window32 *)rw->ins[6];
10930 }
10931 }
10932 printk("Instruction DUMP:");
10933 instruction_dump ((unsigned long *) regs->pc);
10934- if(regs->psr & PSR_PS)
10935+ if(regs->psr & PSR_PS) {
10936+ gr_handle_kernel_exploit();
10937 do_exit(SIGKILL);
10938+ }
10939 do_exit(SIGSEGV);
10940 }
10941
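gr_handle_kernel_exploit() comes from the grsecurity core added elsewhere in this patch; calling it from die_if_kernel() treats an oops raised in supervisor mode as a likely exploitation attempt instead of merely killing the current task. A hedged sketch of the policy (the helper below is hypothetical shorthand, not the real implementation in grsecurity/):

	/* sketch only; the real logic lives in the grsecurity/ directory */
	static void gr_handle_kernel_exploit_sketch(void)
	{
		if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
			panic("grsec sketch: kernel crash in a root task");

		/* hypothetical helper: kill the uid's tasks and ban it */
		ban_user_until_reboot(current_uid());
	}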
10942diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10943index 0e69974..0c15a6e 100644
10944--- a/arch/sparc/kernel/traps_64.c
10945+++ b/arch/sparc/kernel/traps_64.c
10946@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10947 i + 1,
10948 p->trapstack[i].tstate, p->trapstack[i].tpc,
10949 p->trapstack[i].tnpc, p->trapstack[i].tt);
10950- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10951+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10952 }
10953 }
10954
10955@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10956
10957 lvl -= 0x100;
10958 if (regs->tstate & TSTATE_PRIV) {
10959+
10960+#ifdef CONFIG_PAX_REFCOUNT
10961+ if (lvl == 6)
10962+ pax_report_refcount_overflow(regs);
10963+#endif
10964+
10965 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10966 die_if_kernel(buffer, regs);
10967 }
10968@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10969 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10970 {
10971 char buffer[32];
10972-
10973+
10974 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10975 0, lvl, SIGTRAP) == NOTIFY_STOP)
10976 return;
10977
10978+#ifdef CONFIG_PAX_REFCOUNT
10979+ if (lvl == 6)
10980+ pax_report_refcount_overflow(regs);
10981+#endif
10982+
10983 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10984
10985 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10986@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10987 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10988 printk("%s" "ERROR(%d): ",
10989 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10990- printk("TPC<%pS>\n", (void *) regs->tpc);
10991+ printk("TPC<%pA>\n", (void *) regs->tpc);
10992 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10993 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10994 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10995@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10996 smp_processor_id(),
10997 (type & 0x1) ? 'I' : 'D',
10998 regs->tpc);
10999- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11000+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11001 panic("Irrecoverable Cheetah+ parity error.");
11002 }
11003
11004@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11005 smp_processor_id(),
11006 (type & 0x1) ? 'I' : 'D',
11007 regs->tpc);
11008- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11009+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11010 }
11011
11012 struct sun4v_error_entry {
11013@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11014 /*0x38*/u64 reserved_5;
11015 };
11016
11017-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11018-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11019+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11020+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11021
11022 static const char *sun4v_err_type_to_str(u8 type)
11023 {
11024@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11025 }
11026
11027 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11028- int cpu, const char *pfx, atomic_t *ocnt)
11029+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11030 {
11031 u64 *raw_ptr = (u64 *) ent;
11032 u32 attrs;
11033@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11034
11035 show_regs(regs);
11036
11037- if ((cnt = atomic_read(ocnt)) != 0) {
11038- atomic_set(ocnt, 0);
11039+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11040+ atomic_set_unchecked(ocnt, 0);
11041 wmb();
11042 printk("%s: Queue overflowed %d times.\n",
11043 pfx, cnt);
11044@@ -2048,7 +2059,7 @@ out:
11045 */
11046 void sun4v_resum_overflow(struct pt_regs *regs)
11047 {
11048- atomic_inc(&sun4v_resum_oflow_cnt);
11049+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11050 }
11051
11052 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11053@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11054 /* XXX Actually even this can make not that much sense. Perhaps
11055 * XXX we should just pull the plug and panic directly from here?
11056 */
11057- atomic_inc(&sun4v_nonresum_oflow_cnt);
11058+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11059 }
11060
11061 static void sun4v_tlb_error(struct pt_regs *regs)
11062@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11063
11064 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11065 regs->tpc, tl);
11066- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11067+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11068 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11069- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11070+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11071 (void *) regs->u_regs[UREG_I7]);
11072 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11073 "pte[%lx] error[%lx]\n",
11074@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11075
11076 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11077 regs->tpc, tl);
11078- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11079+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11080 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11081- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11082+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11083 (void *) regs->u_regs[UREG_I7]);
11084 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11085 "pte[%lx] error[%lx]\n",
11086@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11087 fp = (unsigned long)sf->fp + STACK_BIAS;
11088 }
11089
11090- printk(" [%016lx] %pS\n", pc, (void *) pc);
11091+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11092 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11093 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11094 int index = tsk->curr_ret_stack;
11095 if (tsk->ret_stack && index >= graph) {
11096 pc = tsk->ret_stack[index - graph].ret;
11097- printk(" [%016lx] %pS\n", pc, (void *) pc);
11098+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11099 graph++;
11100 }
11101 }
11102@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11103 return (struct reg_window *) (fp + STACK_BIAS);
11104 }
11105
11106+extern void gr_handle_kernel_exploit(void);
11107+
11108 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11109 {
11110 static int die_counter;
11111@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11112 while (rw &&
11113 count++ < 30 &&
11114 kstack_valid(tp, (unsigned long) rw)) {
11115- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11116+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11117 (void *) rw->ins[7]);
11118
11119 rw = kernel_stack_up(rw);
11120@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11121 }
11122 if (panic_on_oops)
11123 panic("Fatal exception");
11124- if (regs->tstate & TSTATE_PRIV)
11125+ if (regs->tstate & TSTATE_PRIV) {
11126+ gr_handle_kernel_exploit();
11127 do_exit(SIGKILL);
11128+ }
11129 do_exit(SIGSEGV);
11130 }
11131 EXPORT_SYMBOL(die_if_kernel);
11132diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11133index 62098a8..547ab2c 100644
11134--- a/arch/sparc/kernel/unaligned_64.c
11135+++ b/arch/sparc/kernel/unaligned_64.c
11136@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11137 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11138
11139 if (__ratelimit(&ratelimit)) {
11140- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11141+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11142 regs->tpc, (void *) regs->tpc);
11143 }
11144 }
11145diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11146index 3269b02..64f5231 100644
11147--- a/arch/sparc/lib/Makefile
11148+++ b/arch/sparc/lib/Makefile
11149@@ -2,7 +2,7 @@
11150 #
11151
11152 asflags-y := -ansi -DST_DIV0=0x02
11153-ccflags-y := -Werror
11154+#ccflags-y := -Werror
11155
11156 lib-$(CONFIG_SPARC32) += ashrdi3.o
11157 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11158diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11159index 05dac43..76f8ed4 100644
11160--- a/arch/sparc/lib/atomic_64.S
11161+++ b/arch/sparc/lib/atomic_64.S
11162@@ -15,11 +15,22 @@
11163 * a value and does the barriers.
11164 */
11165
11166-#define ATOMIC_OP(op) \
11167-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11168+#ifdef CONFIG_PAX_REFCOUNT
11169+#define __REFCOUNT_OP(op) op##cc
11170+#define __OVERFLOW_IOP tvs %icc, 6;
11171+#define __OVERFLOW_XOP tvs %xcc, 6;
11172+#else
11173+#define __REFCOUNT_OP(op) op
11174+#define __OVERFLOW_IOP
11175+#define __OVERFLOW_XOP
11176+#endif
11177+
11178+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11179+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11180 BACKOFF_SETUP(%o2); \
11181 1: lduw [%o1], %g1; \
11182- op %g1, %o0, %g7; \
11183+ asm_op %g1, %o0, %g7; \
11184+ post_op \
11185 cas [%o1], %g1, %g7; \
11186 cmp %g1, %g7; \
11187 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11188@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11189 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11190 ENDPROC(atomic_##op); \
11191
11192-#define ATOMIC_OP_RETURN(op) \
11193-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11194+#define ATOMIC_OP(op) __ATOMIC_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
11195+		      __ATOMIC_OP(op, _unchecked, op, )
11196+
11197+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11198+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11199 BACKOFF_SETUP(%o2); \
11200 1: lduw [%o1], %g1; \
11201- op %g1, %o0, %g7; \
11202+ asm_op %g1, %o0, %g7; \
11203+ post_op \
11204 cas [%o1], %g1, %g7; \
11205 cmp %g1, %g7; \
11206 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11207@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11209 ENDPROC(atomic_##op##_return);
11210
11211+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
11212+		      __ATOMIC_OP_RETURN(op, _unchecked, op, )
11213+
11214 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11215
11216 ATOMIC_OPS(add)
11217@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11218
11219 #undef ATOMIC_OPS
11220 #undef ATOMIC_OP_RETURN
11221+#undef __ATOMIC_OP_RETURN
11222 #undef ATOMIC_OP
11223+#undef __ATOMIC_OP
11224
11225-#define ATOMIC64_OP(op) \
11226-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11227+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11228+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11229 BACKOFF_SETUP(%o2); \
11230 1: ldx [%o1], %g1; \
11231- op %g1, %o0, %g7; \
11232+ asm_op %g1, %o0, %g7; \
11233+ post_op \
11234 casx [%o1], %g1, %g7; \
11235 cmp %g1, %g7; \
11236 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11237@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11238 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11239 ENDPROC(atomic64_##op); \
11240
11241-#define ATOMIC64_OP_RETURN(op) \
11242-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11243+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
11244+		      __ATOMIC64_OP(op, _unchecked, op, )
11245+
11246+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11247+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11248 BACKOFF_SETUP(%o2); \
11249 1: ldx [%o1], %g1; \
11250- op %g1, %o0, %g7; \
11251+ asm_op %g1, %o0, %g7; \
11252+ post_op \
11253 casx [%o1], %g1, %g7; \
11254 cmp %g1, %g7; \
11255 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11256@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11258 ENDPROC(atomic64_##op##_return);
11259
11260+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
11261+		      __ATOMIC64_OP_RETURN(op, _unchecked, op, )
11262+
11263 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11264
11265 ATOMIC64_OPS(add)
11266@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11267
11268 #undef ATOMIC64_OPS
11269 #undef ATOMIC64_OP_RETURN
11270+#undef __ATOMIC64_OP_RETURN
11271 #undef ATOMIC64_OP
11272+#undef __ATOMIC64_OP
11273+#undef __OVERFLOW_XOP
11274+#undef __OVERFLOW_IOP
11275+#undef __REFCOUNT_OP
11276
11277 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11278 BACKOFF_SETUP(%o2)
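Hand-expanding ATOMIC_OP(add) shows the pairing these macros generate under CONFIG_PAX_REFCOUNT: the plain entry point carries the trapping opcode, its _unchecked twin keeps ordinary wrap-around behaviour, and with the option disabled both collapse to the same plain add. Abridged expansion (boilerplate trimmed):

	ENTRY(atomic_add)		/* checked: refcount-protected */
		BACKOFF_SETUP(%o2)
	1:	lduw	[%o1], %g1
		addcc	%g1, %o0, %g7	/* __REFCOUNT_OP(add) */
		tvs	%icc, 6		/* __OVERFLOW_IOP */
		cas	[%o1], %g1, %g7
		...

	ENTRY(atomic_add_unchecked)	/* unchecked: may wrap */
		BACKOFF_SETUP(%o2)
	1:	lduw	[%o1], %g1
		add	%g1, %o0, %g7	/* plain op, empty post_op */
		cas	[%o1], %g1, %g7
		...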
11279diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11280index 1d649a9..fbc5bfc 100644
11281--- a/arch/sparc/lib/ksyms.c
11282+++ b/arch/sparc/lib/ksyms.c
11283@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11284 /* Atomic counter implementation. */
11285 #define ATOMIC_OP(op) \
11286 EXPORT_SYMBOL(atomic_##op); \
11287-EXPORT_SYMBOL(atomic64_##op);
11288+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11289+EXPORT_SYMBOL(atomic64_##op); \
11290+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11291
11292 #define ATOMIC_OP_RETURN(op) \
11293 EXPORT_SYMBOL(atomic_##op##_return); \
11294@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11295 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11296
11297 ATOMIC_OPS(add)
11298+EXPORT_SYMBOL(atomic_add_return_unchecked);
11299+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11300 ATOMIC_OPS(sub)
11301
11302 #undef ATOMIC_OPS
11303diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11304index 30c3ecc..736f015 100644
11305--- a/arch/sparc/mm/Makefile
11306+++ b/arch/sparc/mm/Makefile
11307@@ -2,7 +2,7 @@
11308 #
11309
11310 asflags-y := -ansi
11311-ccflags-y := -Werror
11312+#ccflags-y := -Werror
11313
11314 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11315 obj-y += fault_$(BITS).o
11316diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11317index 70d8171..274c6c0 100644
11318--- a/arch/sparc/mm/fault_32.c
11319+++ b/arch/sparc/mm/fault_32.c
11320@@ -21,6 +21,9 @@
11321 #include <linux/perf_event.h>
11322 #include <linux/interrupt.h>
11323 #include <linux/kdebug.h>
11324+#include <linux/slab.h>
11325+#include <linux/pagemap.h>
11326+#include <linux/compiler.h>
11327
11328 #include <asm/page.h>
11329 #include <asm/pgtable.h>
11330@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11331 return safe_compute_effective_address(regs, insn);
11332 }
11333
11334+#ifdef CONFIG_PAX_PAGEEXEC
11335+#ifdef CONFIG_PAX_DLRESOLVE
11336+static void pax_emuplt_close(struct vm_area_struct *vma)
11337+{
11338+ vma->vm_mm->call_dl_resolve = 0UL;
11339+}
11340+
11341+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11342+{
11343+ unsigned int *kaddr;
11344+
11345+ vmf->page = alloc_page(GFP_HIGHUSER);
11346+ if (!vmf->page)
11347+ return VM_FAULT_OOM;
11348+
11349+ kaddr = kmap(vmf->page);
11350+ memset(kaddr, 0, PAGE_SIZE);
11351+ kaddr[0] = 0x9DE3BFA8U; /* save */
11352+ flush_dcache_page(vmf->page);
11353+ kunmap(vmf->page);
11354+ return VM_FAULT_MAJOR;
11355+}
11356+
11357+static const struct vm_operations_struct pax_vm_ops = {
11358+ .close = pax_emuplt_close,
11359+ .fault = pax_emuplt_fault
11360+};
11361+
11362+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11363+{
11364+ int ret;
11365+
11366+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11367+ vma->vm_mm = current->mm;
11368+ vma->vm_start = addr;
11369+ vma->vm_end = addr + PAGE_SIZE;
11370+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11371+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11372+ vma->vm_ops = &pax_vm_ops;
11373+
11374+ ret = insert_vm_struct(current->mm, vma);
11375+ if (ret)
11376+ return ret;
11377+
11378+ ++current->mm->total_vm;
11379+ return 0;
11380+}
11381+#endif
11382+
11383+/*
11384+ * PaX: decide what to do with offenders (regs->pc = fault address)
11385+ *
11386+ * returns 1 when task should be killed
11387+ * 2 when patched PLT trampoline was detected
11388+ * 3 when unpatched PLT trampoline was detected
11389+ */
11390+static int pax_handle_fetch_fault(struct pt_regs *regs)
11391+{
11392+
11393+#ifdef CONFIG_PAX_EMUPLT
11394+ int err;
11395+
11396+ do { /* PaX: patched PLT emulation #1 */
11397+ unsigned int sethi1, sethi2, jmpl;
11398+
11399+ err = get_user(sethi1, (unsigned int *)regs->pc);
11400+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11401+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11402+
11403+ if (err)
11404+ break;
11405+
11406+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11407+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11408+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11409+ {
11410+ unsigned int addr;
11411+
11412+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11413+ addr = regs->u_regs[UREG_G1];
11414+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11415+ regs->pc = addr;
11416+ regs->npc = addr+4;
11417+ return 2;
11418+ }
11419+ } while (0);
11420+
11421+ do { /* PaX: patched PLT emulation #2 */
11422+ unsigned int ba;
11423+
11424+ err = get_user(ba, (unsigned int *)regs->pc);
11425+
11426+ if (err)
11427+ break;
11428+
11429+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11430+ unsigned int addr;
11431+
11432+ if ((ba & 0xFFC00000U) == 0x30800000U)
11433+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11434+ else
11435+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11436+ regs->pc = addr;
11437+ regs->npc = addr+4;
11438+ return 2;
11439+ }
11440+ } while (0);
11441+
11442+ do { /* PaX: patched PLT emulation #3 */
11443+ unsigned int sethi, bajmpl, nop;
11444+
11445+ err = get_user(sethi, (unsigned int *)regs->pc);
11446+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11447+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11448+
11449+ if (err)
11450+ break;
11451+
11452+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11453+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11454+ nop == 0x01000000U)
11455+ {
11456+ unsigned int addr;
11457+
11458+ addr = (sethi & 0x003FFFFFU) << 10;
11459+ regs->u_regs[UREG_G1] = addr;
11460+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11461+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11462+ else
11463+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11464+ regs->pc = addr;
11465+ regs->npc = addr+4;
11466+ return 2;
11467+ }
11468+ } while (0);
11469+
11470+ do { /* PaX: unpatched PLT emulation step 1 */
11471+ unsigned int sethi, ba, nop;
11472+
11473+ err = get_user(sethi, (unsigned int *)regs->pc);
11474+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11475+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11476+
11477+ if (err)
11478+ break;
11479+
11480+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11481+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11482+ nop == 0x01000000U)
11483+ {
11484+ unsigned int addr, save, call;
11485+
11486+ if ((ba & 0xFFC00000U) == 0x30800000U)
11487+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11488+ else
11489+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11490+
11491+ err = get_user(save, (unsigned int *)addr);
11492+ err |= get_user(call, (unsigned int *)(addr+4));
11493+ err |= get_user(nop, (unsigned int *)(addr+8));
11494+ if (err)
11495+ break;
11496+
11497+#ifdef CONFIG_PAX_DLRESOLVE
11498+ if (save == 0x9DE3BFA8U &&
11499+ (call & 0xC0000000U) == 0x40000000U &&
11500+ nop == 0x01000000U)
11501+ {
11502+ struct vm_area_struct *vma;
11503+ unsigned long call_dl_resolve;
11504+
11505+ down_read(&current->mm->mmap_sem);
11506+ call_dl_resolve = current->mm->call_dl_resolve;
11507+ up_read(&current->mm->mmap_sem);
11508+ if (likely(call_dl_resolve))
11509+ goto emulate;
11510+
11511+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11512+
11513+ down_write(&current->mm->mmap_sem);
11514+ if (current->mm->call_dl_resolve) {
11515+ call_dl_resolve = current->mm->call_dl_resolve;
11516+ up_write(&current->mm->mmap_sem);
11517+ if (vma)
11518+ kmem_cache_free(vm_area_cachep, vma);
11519+ goto emulate;
11520+ }
11521+
11522+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11523+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11524+ up_write(&current->mm->mmap_sem);
11525+ if (vma)
11526+ kmem_cache_free(vm_area_cachep, vma);
11527+ return 1;
11528+ }
11529+
11530+ if (pax_insert_vma(vma, call_dl_resolve)) {
11531+ up_write(&current->mm->mmap_sem);
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ return 1;
11534+ }
11535+
11536+ current->mm->call_dl_resolve = call_dl_resolve;
11537+ up_write(&current->mm->mmap_sem);
11538+
11539+emulate:
11540+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11541+ regs->pc = call_dl_resolve;
11542+ regs->npc = addr+4;
11543+ return 3;
11544+ }
11545+#endif
11546+
11547+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11548+ if ((save & 0xFFC00000U) == 0x05000000U &&
11549+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11550+ nop == 0x01000000U)
11551+ {
11552+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11553+ regs->u_regs[UREG_G2] = addr + 4;
11554+ addr = (save & 0x003FFFFFU) << 10;
11555+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11556+ regs->pc = addr;
11557+ regs->npc = addr+4;
11558+ return 3;
11559+ }
11560+ }
11561+ } while (0);
11562+
11563+ do { /* PaX: unpatched PLT emulation step 2 */
11564+ unsigned int save, call, nop;
11565+
11566+ err = get_user(save, (unsigned int *)(regs->pc-4));
11567+ err |= get_user(call, (unsigned int *)regs->pc);
11568+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11569+ if (err)
11570+ break;
11571+
11572+ if (save == 0x9DE3BFA8U &&
11573+ (call & 0xC0000000U) == 0x40000000U &&
11574+ nop == 0x01000000U)
11575+ {
11576+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11577+
11578+ regs->u_regs[UREG_RETPC] = regs->pc;
11579+ regs->pc = dl_resolve;
11580+ regs->npc = dl_resolve+4;
11581+ return 3;
11582+ }
11583+ } while (0);
11584+#endif
11585+
11586+ return 1;
11587+}
11588+
11589+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11590+{
11591+ unsigned long i;
11592+
11593+ printk(KERN_ERR "PAX: bytes at PC: ");
11594+ for (i = 0; i < 8; i++) {
11595+ unsigned int c;
11596+ if (get_user(c, (unsigned int *)pc+i))
11597+ printk(KERN_CONT "???????? ");
11598+ else
11599+ printk(KERN_CONT "%08x ", c);
11600+ }
11601+ printk("\n");
11602+}
11603+#endif
11604+
11605 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11606 int text_fault)
11607 {
11608@@ -226,6 +500,24 @@ good_area:
11609 if (!(vma->vm_flags & VM_WRITE))
11610 goto bad_area;
11611 } else {
11612+
11613+#ifdef CONFIG_PAX_PAGEEXEC
11614+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11615+ up_read(&mm->mmap_sem);
11616+ switch (pax_handle_fetch_fault(regs)) {
11617+
11618+#ifdef CONFIG_PAX_EMUPLT
11619+ case 2:
11620+ case 3:
11621+ return;
11622+#endif
11623+
11624+ }
11625+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11626+ do_group_exit(SIGKILL);
11627+ }
11628+#endif
11629+
11630 /* Allow reads even for write-only mappings */
11631 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11632 goto bad_area;
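
A note on the decode arithmetic used throughout the PLT-emulation blocks above: expressions of the form (((insn | M) ^ S) + S) are a branch-free sign extension of an immediate field. The OR forces every bit above the field to one, the XOR clears the sign bit, and the add carries it back through those high bits, yielding the two's-complement value. A minimal standalone sketch for the 22-bit branch-displacement case (constants as in the hunks above; the emulation additionally shifts the word displacement left by 2 before adding it to the PC):

#include <assert.h>
#include <stdint.h>

/* Sign-extend the low 22 bits of x, the way the emulation above
 * decodes SPARC "ba" displacements. */
static uint32_t sext22(uint32_t x)
{
	return ((x | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U;
}

int main(void)
{
	assert(sext22(0x000001U) == 0x00000001U);	/* +1 */
	assert(sext22(0x3FFFFFU) == 0xFFFFFFFFU);	/* -1 */
	assert(sext22(0x200000U) == 0xFFE00000U);	/* most negative */
	return 0;
}
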
11633diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11634index 4798232..f76e3aa 100644
11635--- a/arch/sparc/mm/fault_64.c
11636+++ b/arch/sparc/mm/fault_64.c
11637@@ -22,6 +22,9 @@
11638 #include <linux/kdebug.h>
11639 #include <linux/percpu.h>
11640 #include <linux/context_tracking.h>
11641+#include <linux/slab.h>
11642+#include <linux/pagemap.h>
11643+#include <linux/compiler.h>
11644
11645 #include <asm/page.h>
11646 #include <asm/pgtable.h>
11647@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11648 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11649 regs->tpc);
11650 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11651- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11652+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11653 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11654 dump_stack();
11655 unhandled_fault(regs->tpc, current, regs);
11656@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11657 show_regs(regs);
11658 }
11659
11660+#ifdef CONFIG_PAX_PAGEEXEC
11661+#ifdef CONFIG_PAX_DLRESOLVE
11662+static void pax_emuplt_close(struct vm_area_struct *vma)
11663+{
11664+ vma->vm_mm->call_dl_resolve = 0UL;
11665+}
11666+
11667+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11668+{
11669+ unsigned int *kaddr;
11670+
11671+ vmf->page = alloc_page(GFP_HIGHUSER);
11672+ if (!vmf->page)
11673+ return VM_FAULT_OOM;
11674+
11675+ kaddr = kmap(vmf->page);
11676+ memset(kaddr, 0, PAGE_SIZE);
11677+ kaddr[0] = 0x9DE3BFA8U; /* save */
11678+ flush_dcache_page(vmf->page);
11679+ kunmap(vmf->page);
11680+ return VM_FAULT_MAJOR;
11681+}
11682+
11683+static const struct vm_operations_struct pax_vm_ops = {
11684+ .close = pax_emuplt_close,
11685+ .fault = pax_emuplt_fault
11686+};
11687+
11688+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11689+{
11690+ int ret;
11691+
11692+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11693+ vma->vm_mm = current->mm;
11694+ vma->vm_start = addr;
11695+ vma->vm_end = addr + PAGE_SIZE;
11696+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11697+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11698+ vma->vm_ops = &pax_vm_ops;
11699+
11700+ ret = insert_vm_struct(current->mm, vma);
11701+ if (ret)
11702+ return ret;
11703+
11704+ ++current->mm->total_vm;
11705+ return 0;
11706+}
11707+#endif
11708+
11709+/*
11710+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11711+ *
11712+ * returns 1 when task should be killed
11713+ * 2 when patched PLT trampoline was detected
11714+ * 3 when unpatched PLT trampoline was detected
11715+ */
11716+static int pax_handle_fetch_fault(struct pt_regs *regs)
11717+{
11718+
11719+#ifdef CONFIG_PAX_EMUPLT
11720+ int err;
11721+
11722+ do { /* PaX: patched PLT emulation #1 */
11723+ unsigned int sethi1, sethi2, jmpl;
11724+
11725+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11726+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11727+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11728+
11729+ if (err)
11730+ break;
11731+
11732+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11733+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11734+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11735+ {
11736+ unsigned long addr;
11737+
11738+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11739+ addr = regs->u_regs[UREG_G1];
11740+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11741+
11742+ if (test_thread_flag(TIF_32BIT))
11743+ addr &= 0xFFFFFFFFUL;
11744+
11745+ regs->tpc = addr;
11746+ regs->tnpc = addr+4;
11747+ return 2;
11748+ }
11749+ } while (0);
11750+
11751+ do { /* PaX: patched PLT emulation #2 */
11752+ unsigned int ba;
11753+
11754+ err = get_user(ba, (unsigned int *)regs->tpc);
11755+
11756+ if (err)
11757+ break;
11758+
11759+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11760+ unsigned long addr;
11761+
11762+ if ((ba & 0xFFC00000U) == 0x30800000U)
11763+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11764+ else
11765+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11766+
11767+ if (test_thread_flag(TIF_32BIT))
11768+ addr &= 0xFFFFFFFFUL;
11769+
11770+ regs->tpc = addr;
11771+ regs->tnpc = addr+4;
11772+ return 2;
11773+ }
11774+ } while (0);
11775+
11776+ do { /* PaX: patched PLT emulation #3 */
11777+ unsigned int sethi, bajmpl, nop;
11778+
11779+ err = get_user(sethi, (unsigned int *)regs->tpc);
11780+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11781+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11782+
11783+ if (err)
11784+ break;
11785+
11786+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11787+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11788+ nop == 0x01000000U)
11789+ {
11790+ unsigned long addr;
11791+
11792+ addr = (sethi & 0x003FFFFFU) << 10;
11793+ regs->u_regs[UREG_G1] = addr;
11794+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11795+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11796+ else
11797+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11798+
11799+ if (test_thread_flag(TIF_32BIT))
11800+ addr &= 0xFFFFFFFFUL;
11801+
11802+ regs->tpc = addr;
11803+ regs->tnpc = addr+4;
11804+ return 2;
11805+ }
11806+ } while (0);
11807+
11808+ do { /* PaX: patched PLT emulation #4 */
11809+ unsigned int sethi, mov1, call, mov2;
11810+
11811+ err = get_user(sethi, (unsigned int *)regs->tpc);
11812+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11814+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11815+
11816+ if (err)
11817+ break;
11818+
11819+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11820+ mov1 == 0x8210000FU &&
11821+ (call & 0xC0000000U) == 0x40000000U &&
11822+ mov2 == 0x9E100001U)
11823+ {
11824+ unsigned long addr;
11825+
11826+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11827+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11828+
11829+ if (test_thread_flag(TIF_32BIT))
11830+ addr &= 0xFFFFFFFFUL;
11831+
11832+ regs->tpc = addr;
11833+ regs->tnpc = addr+4;
11834+ return 2;
11835+ }
11836+ } while (0);
11837+
11838+ do { /* PaX: patched PLT emulation #5 */
11839+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11840+
11841+ err = get_user(sethi, (unsigned int *)regs->tpc);
11842+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11843+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11844+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11845+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11846+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11847+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11848+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11849+
11850+ if (err)
11851+ break;
11852+
11853+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11854+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11855+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11856+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11857+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11858+ sllx == 0x83287020U &&
11859+ jmpl == 0x81C04005U &&
11860+ nop == 0x01000000U)
11861+ {
11862+ unsigned long addr;
11863+
11864+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11865+ regs->u_regs[UREG_G1] <<= 32;
11866+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11867+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11868+ regs->tpc = addr;
11869+ regs->tnpc = addr+4;
11870+ return 2;
11871+ }
11872+ } while (0);
11873+
11874+ do { /* PaX: patched PLT emulation #6 */
11875+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11876+
11877+ err = get_user(sethi, (unsigned int *)regs->tpc);
11878+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11879+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11880+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11881+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11882+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11883+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11884+
11885+ if (err)
11886+ break;
11887+
11888+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11889+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11890+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11891+ sllx == 0x83287020U &&
11892+ (or & 0xFFFFE000U) == 0x8A116000U &&
11893+ jmpl == 0x81C04005U &&
11894+ nop == 0x01000000U)
11895+ {
11896+ unsigned long addr;
11897+
11898+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11899+ regs->u_regs[UREG_G1] <<= 32;
11900+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11901+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11902+ regs->tpc = addr;
11903+ regs->tnpc = addr+4;
11904+ return 2;
11905+ }
11906+ } while (0);
11907+
11908+ do { /* PaX: unpatched PLT emulation step 1 */
11909+ unsigned int sethi, ba, nop;
11910+
11911+ err = get_user(sethi, (unsigned int *)regs->tpc);
11912+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11913+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11914+
11915+ if (err)
11916+ break;
11917+
11918+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11919+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11920+ nop == 0x01000000U)
11921+ {
11922+ unsigned long addr;
11923+ unsigned int save, call;
11924+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11925+
11926+ if ((ba & 0xFFC00000U) == 0x30800000U)
11927+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11928+ else
11929+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11930+
11931+ if (test_thread_flag(TIF_32BIT))
11932+ addr &= 0xFFFFFFFFUL;
11933+
11934+ err = get_user(save, (unsigned int *)addr);
11935+ err |= get_user(call, (unsigned int *)(addr+4));
11936+ err |= get_user(nop, (unsigned int *)(addr+8));
11937+ if (err)
11938+ break;
11939+
11940+#ifdef CONFIG_PAX_DLRESOLVE
11941+ if (save == 0x9DE3BFA8U &&
11942+ (call & 0xC0000000U) == 0x40000000U &&
11943+ nop == 0x01000000U)
11944+ {
11945+ struct vm_area_struct *vma;
11946+ unsigned long call_dl_resolve;
11947+
11948+ down_read(&current->mm->mmap_sem);
11949+ call_dl_resolve = current->mm->call_dl_resolve;
11950+ up_read(&current->mm->mmap_sem);
11951+ if (likely(call_dl_resolve))
11952+ goto emulate;
11953+
11954+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11955+
11956+ down_write(&current->mm->mmap_sem);
11957+ if (current->mm->call_dl_resolve) {
11958+ call_dl_resolve = current->mm->call_dl_resolve;
11959+ up_write(&current->mm->mmap_sem);
11960+ if (vma)
11961+ kmem_cache_free(vm_area_cachep, vma);
11962+ goto emulate;
11963+ }
11964+
11965+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11966+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11967+ up_write(&current->mm->mmap_sem);
11968+ if (vma)
11969+ kmem_cache_free(vm_area_cachep, vma);
11970+ return 1;
11971+ }
11972+
11973+ if (pax_insert_vma(vma, call_dl_resolve)) {
11974+ up_write(&current->mm->mmap_sem);
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ return 1;
11977+ }
11978+
11979+ current->mm->call_dl_resolve = call_dl_resolve;
11980+ up_write(&current->mm->mmap_sem);
11981+
11982+emulate:
11983+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11984+ regs->tpc = call_dl_resolve;
11985+ regs->tnpc = addr+4;
11986+ return 3;
11987+ }
11988+#endif
11989+
11990+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11991+ if ((save & 0xFFC00000U) == 0x05000000U &&
11992+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11993+ nop == 0x01000000U)
11994+ {
11995+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11996+ regs->u_regs[UREG_G2] = addr + 4;
11997+ addr = (save & 0x003FFFFFU) << 10;
11998+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11999+
12000+ if (test_thread_flag(TIF_32BIT))
12001+ addr &= 0xFFFFFFFFUL;
12002+
12003+ regs->tpc = addr;
12004+ regs->tnpc = addr+4;
12005+ return 3;
12006+ }
12007+
12008+ /* PaX: 64-bit PLT stub */
12009+ err = get_user(sethi1, (unsigned int *)addr);
12010+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12011+ err |= get_user(or1, (unsigned int *)(addr+8));
12012+ err |= get_user(or2, (unsigned int *)(addr+12));
12013+ err |= get_user(sllx, (unsigned int *)(addr+16));
12014+ err |= get_user(add, (unsigned int *)(addr+20));
12015+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12016+ err |= get_user(nop, (unsigned int *)(addr+28));
12017+ if (err)
12018+ break;
12019+
12020+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12021+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12022+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12023+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12024+ sllx == 0x89293020U &&
12025+ add == 0x8A010005U &&
12026+ jmpl == 0x89C14000U &&
12027+ nop == 0x01000000U)
12028+ {
12029+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12030+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12031+ regs->u_regs[UREG_G4] <<= 32;
12032+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12033+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12034+ regs->u_regs[UREG_G4] = addr + 24;
12035+ addr = regs->u_regs[UREG_G5];
12036+ regs->tpc = addr;
12037+ regs->tnpc = addr+4;
12038+ return 3;
12039+ }
12040+ }
12041+ } while (0);
12042+
12043+#ifdef CONFIG_PAX_DLRESOLVE
12044+ do { /* PaX: unpatched PLT emulation step 2 */
12045+ unsigned int save, call, nop;
12046+
12047+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12048+ err |= get_user(call, (unsigned int *)regs->tpc);
12049+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12050+ if (err)
12051+ break;
12052+
12053+ if (save == 0x9DE3BFA8U &&
12054+ (call & 0xC0000000U) == 0x40000000U &&
12055+ nop == 0x01000000U)
12056+ {
12057+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12058+
12059+ if (test_thread_flag(TIF_32BIT))
12060+ dl_resolve &= 0xFFFFFFFFUL;
12061+
12062+ regs->u_regs[UREG_RETPC] = regs->tpc;
12063+ regs->tpc = dl_resolve;
12064+ regs->tnpc = dl_resolve+4;
12065+ return 3;
12066+ }
12067+ } while (0);
12068+#endif
12069+
12070+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12071+ unsigned int sethi, ba, nop;
12072+
12073+ err = get_user(sethi, (unsigned int *)regs->tpc);
12074+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12075+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12076+
12077+ if (err)
12078+ break;
12079+
12080+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12081+ (ba & 0xFFF00000U) == 0x30600000U &&
12082+ nop == 0x01000000U)
12083+ {
12084+ unsigned long addr;
12085+
12086+ addr = (sethi & 0x003FFFFFU) << 10;
12087+ regs->u_regs[UREG_G1] = addr;
12088+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12089+
12090+ if (test_thread_flag(TIF_32BIT))
12091+ addr &= 0xFFFFFFFFUL;
12092+
12093+ regs->tpc = addr;
12094+ regs->tnpc = addr+4;
12095+ return 2;
12096+ }
12097+ } while (0);
12098+
12099+#endif
12100+
12101+ return 1;
12102+}
12103+
12104+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12105+{
12106+ unsigned long i;
12107+
12108+ printk(KERN_ERR "PAX: bytes at PC: ");
12109+ for (i = 0; i < 8; i++) {
12110+ unsigned int c;
12111+ if (get_user(c, (unsigned int *)pc+i))
12112+ printk(KERN_CONT "???????? ");
12113+ else
12114+ printk(KERN_CONT "%08x ", c);
12115+ }
12116+ printk("\n");
12117+}
12118+#endif
12119+
12120 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12121 {
12122 enum ctx_state prev_state = exception_enter();
12123@@ -353,6 +816,29 @@ retry:
12124 if (!vma)
12125 goto bad_area;
12126
12127+#ifdef CONFIG_PAX_PAGEEXEC
12128+ /* PaX: detect ITLB misses on non-exec pages */
12129+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12130+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12131+ {
12132+ if (address != regs->tpc)
12133+ goto good_area;
12134+
12135+ up_read(&mm->mmap_sem);
12136+ switch (pax_handle_fetch_fault(regs)) {
12137+
12138+#ifdef CONFIG_PAX_EMUPLT
12139+ case 2:
12140+ case 3:
12141+ return;
12142+#endif
12143+
12144+ }
12145+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12146+ do_group_exit(SIGKILL);
12147+ }
12148+#endif
12149+
12150 /* Pure DTLB misses do not tell us whether the fault causing
12151 * load/store/atomic was a write or not, it only says that there
12152 * was no match. So in such a case we (carefully) read the
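
Patched PLT emulations #5 and #6 above match the sequence a 64-bit SPARC PLT uses to synthesize a full address: two sethi/or pairs build two 32-bit halves, sllx moves the high half into place, and the jmpl target is their sum. A sketch of the same field arithmetic, starting from hypothetical field values rather than decoded instruction words:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t target = 0xFFFFF80100ABCDE8ULL;	/* arbitrary example */
	uint32_t hi = target >> 32, lo = (uint32_t)target;

	/* imm22/imm10 fields as a compiler would emit them */
	uint32_t sethi1 = hi >> 10, or1 = hi & 0x3FFU;
	uint32_t sethi2 = lo >> 10, or2 = lo & 0x3FFU;

	/* reassembly as in emulation #5: %g1 = high half << 32, %g5 = low half */
	uint64_t g1 = (((uint64_t)(sethi1 & 0x003FFFFFU) << 10) | or1) << 32;
	uint64_t g5 = ((sethi2 & 0x003FFFFFU) << 10) | or2;

	assert(g1 + g5 == target);
	return 0;
}
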
12153diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12154index 4242eab..9ae6360 100644
12155--- a/arch/sparc/mm/hugetlbpage.c
12156+++ b/arch/sparc/mm/hugetlbpage.c
12157@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12158 unsigned long addr,
12159 unsigned long len,
12160 unsigned long pgoff,
12161- unsigned long flags)
12162+ unsigned long flags,
12163+ unsigned long offset)
12164 {
12165+ struct mm_struct *mm = current->mm;
12166 unsigned long task_size = TASK_SIZE;
12167 struct vm_unmapped_area_info info;
12168
12169@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12170
12171 info.flags = 0;
12172 info.length = len;
12173- info.low_limit = TASK_UNMAPPED_BASE;
12174+ info.low_limit = mm->mmap_base;
12175 info.high_limit = min(task_size, VA_EXCLUDE_START);
12176 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12177 info.align_offset = 0;
12178+ info.threadstack_offset = offset;
12179 addr = vm_unmapped_area(&info);
12180
12181 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12182 VM_BUG_ON(addr != -ENOMEM);
12183 info.low_limit = VA_EXCLUDE_END;
12184+
12185+#ifdef CONFIG_PAX_RANDMMAP
12186+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12187+ info.low_limit += mm->delta_mmap;
12188+#endif
12189+
12190 info.high_limit = task_size;
12191 addr = vm_unmapped_area(&info);
12192 }
12193@@ -55,7 +64,8 @@ static unsigned long
12194 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12195 const unsigned long len,
12196 const unsigned long pgoff,
12197- const unsigned long flags)
12198+ const unsigned long flags,
12199+ const unsigned long offset)
12200 {
12201 struct mm_struct *mm = current->mm;
12202 unsigned long addr = addr0;
12203@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12204 info.high_limit = mm->mmap_base;
12205 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12206 info.align_offset = 0;
12207+ info.threadstack_offset = offset;
12208 addr = vm_unmapped_area(&info);
12209
12210 /*
12211@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12212 VM_BUG_ON(addr != -ENOMEM);
12213 info.flags = 0;
12214 info.low_limit = TASK_UNMAPPED_BASE;
12215+
12216+#ifdef CONFIG_PAX_RANDMMAP
12217+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12218+ info.low_limit += mm->delta_mmap;
12219+#endif
12220+
12221 info.high_limit = STACK_TOP32;
12222 addr = vm_unmapped_area(&info);
12223 }
12224@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12225 struct mm_struct *mm = current->mm;
12226 struct vm_area_struct *vma;
12227 unsigned long task_size = TASK_SIZE;
12228+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12229
12230 if (test_thread_flag(TIF_32BIT))
12231 task_size = STACK_TOP32;
12232@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12233 return addr;
12234 }
12235
12236+#ifdef CONFIG_PAX_RANDMMAP
12237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12238+#endif
12239+
12240 if (addr) {
12241 addr = ALIGN(addr, HPAGE_SIZE);
12242 vma = find_vma(mm, addr);
12243- if (task_size - len >= addr &&
12244- (!vma || addr + len <= vma->vm_start))
12245+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12246 return addr;
12247 }
12248 if (mm->get_unmapped_area == arch_get_unmapped_area)
12249 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12250- pgoff, flags);
12251+ pgoff, flags, offset);
12252 else
12253 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12254- pgoff, flags);
12255+ pgoff, flags, offset);
12256 }
12257
12258 pte_t *huge_pte_alloc(struct mm_struct *mm,
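
Two related changes in the hugetlb hunk above: the bottom-up allocator now starts at the mm's (optionally RANDMMAP-shifted) mmap_base instead of the fixed TASK_UNMAPPED_BASE, and the open-coded hint test `!vma || addr + len <= vma->vm_start` becomes check_heap_stack_gap(), which also accounts for a per-thread stack offset. A simplified userspace sketch of that kind of test; the struct and the gap policy are hypothetical stand-ins, and the real helper enforces a configurable heap/stack gap as well:

struct vma_stub { unsigned long vm_start, vm_end; };

/* Accept a hinted address only if [addr, addr + len + offset) stays
 * below the next mapping; NULL means nothing is mapped above. */
static int gap_ok(const struct vma_stub *next, unsigned long addr,
		  unsigned long len, unsigned long offset)
{
	if (!next)
		return 1;
	return addr + len + offset <= next->vm_start;
}
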
12259diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12260index 4ca0d6b..e89bca1 100644
12261--- a/arch/sparc/mm/init_64.c
12262+++ b/arch/sparc/mm/init_64.c
12263@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12264 int num_kernel_image_mappings;
12265
12266 #ifdef CONFIG_DEBUG_DCFLUSH
12267-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12268+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12269 #ifdef CONFIG_SMP
12270-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12271+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12272 #endif
12273 #endif
12274
12275@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12276 {
12277 BUG_ON(tlb_type == hypervisor);
12278 #ifdef CONFIG_DEBUG_DCFLUSH
12279- atomic_inc(&dcpage_flushes);
12280+ atomic_inc_unchecked(&dcpage_flushes);
12281 #endif
12282
12283 #ifdef DCACHE_ALIASING_POSSIBLE
12284@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12285
12286 #ifdef CONFIG_DEBUG_DCFLUSH
12287 seq_printf(m, "DCPageFlushes\t: %d\n",
12288- atomic_read(&dcpage_flushes));
12289+ atomic_read_unchecked(&dcpage_flushes));
12290 #ifdef CONFIG_SMP
12291 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12292- atomic_read(&dcpage_flushes_xcall));
12293+ atomic_read_unchecked(&dcpage_flushes_xcall));
12294 #endif /* CONFIG_SMP */
12295 #endif /* CONFIG_DEBUG_DCFLUSH */
12296 }
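
dcpage_flushes and dcpage_flushes_xcall are pure statistics, so the hunk above moves them to atomic_unchecked_t: under PaX's REFCOUNT hardening the ordinary atomic ops detect signed overflow (to catch reference-count wraps), and the _unchecked variants opt a counter out of that check. A rough userspace model of the split; the real kernel performs the check inside the atomic instruction itself, not as a separate test:

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	/* checked: refuse to wrap what might be a reference count
	 * (non-atomic test here is for illustration only) */
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		return;
	}
	__sync_fetch_and_add(&v->counter, 1);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* unchecked: a statistic is allowed to wrap */
	__sync_fetch_and_add(&v->counter, 1);
}
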
12297diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12298index 7cca418..53fc030 100644
12299--- a/arch/tile/Kconfig
12300+++ b/arch/tile/Kconfig
12301@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12302
12303 config KEXEC
12304 bool "kexec system call"
12305+ depends on !GRKERNSEC_KMEM
12306 ---help---
12307 kexec is a system call that implements the ability to shut down your
12308 current kernel, and to start another kernel. It is like a reboot
12309diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12310index 7b11c5f..755a026 100644
12311--- a/arch/tile/include/asm/atomic_64.h
12312+++ b/arch/tile/include/asm/atomic_64.h
12313@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12314
12315 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12316
12317+#define atomic64_read_unchecked(v) atomic64_read(v)
12318+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12319+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12320+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12321+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12322+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12323+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12324+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12325+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12326+
12327 /* Define this to indicate that cmpxchg is an efficient operation. */
12328 #define __HAVE_ARCH_CMPXCHG
12329
12330diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12331index 6160761..00cac88 100644
12332--- a/arch/tile/include/asm/cache.h
12333+++ b/arch/tile/include/asm/cache.h
12334@@ -15,11 +15,12 @@
12335 #ifndef _ASM_TILE_CACHE_H
12336 #define _ASM_TILE_CACHE_H
12337
12338+#include <linux/const.h>
12339 #include <arch/chip.h>
12340
12341 /* bytes per L1 data cache line */
12342 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12345
12346 /* bytes per L2 cache line */
12347 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
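
The cache.h change turns L1_CACHE_BYTES from an int expression into an unsigned long one. _AC(X,Y) from include/uapi/linux/const.h pastes the suffix when compiling C but drops it under __ASSEMBLY__, so the same header serves both contexts while C-side arithmetic on the constant happens in unsigned long. The mechanics:

#include <stdio.h>

/* as defined in include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

int main(void)
{
	printf("sizeof(1 << 5)         = %zu\n", sizeof(1 << 5));
	printf("sizeof(_AC(1,UL) << 5) = %zu\n", sizeof(_AC(1, UL) << 5));
	return 0;
}

On an LP64 target this prints 4 and 8: shifts and products involving L1_CACHE_BYTES no longer risk overflowing int.
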
12348diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12349index f41cb53..31d3ab4 100644
12350--- a/arch/tile/include/asm/uaccess.h
12351+++ b/arch/tile/include/asm/uaccess.h
12352@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12353 const void __user *from,
12354 unsigned long n)
12355 {
12356- int sz = __compiletime_object_size(to);
12357+ size_t sz = __compiletime_object_size(to);
12358
12359- if (likely(sz == -1 || sz >= n))
12360+ if (likely(sz == (size_t)-1 || sz >= n))
12361 n = _copy_from_user(to, from, n);
12362 else
12363 copy_from_user_overflow();
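
The uaccess.h change keeps the object size returned by __compiletime_object_size() in a size_t and spells the "unknown" sentinel as (size_t)-1, instead of routing both the sentinel and the `sz >= n` comparison through implicit int/size_t conversions. What the implicit route relies on:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	int sz_int = -1;	/* "unknown size" sentinel stored in an int */
	size_t n = 16;

	/* sz_int is converted to size_t (SIZE_MAX) by the usual
	 * arithmetic conversions, so the test passes only by accident
	 * of that conversion. */
	printf("%d\n", sz_int >= n);		/* 1 */
	printf("%d\n", (size_t)-1 >= n);	/* 1, and says what it means */
	return 0;
}
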
12364diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12365index 8416240..a012fb7 100644
12366--- a/arch/tile/mm/hugetlbpage.c
12367+++ b/arch/tile/mm/hugetlbpage.c
12368@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12369 info.high_limit = TASK_SIZE;
12370 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12371 info.align_offset = 0;
12372+ info.threadstack_offset = 0;
12373 return vm_unmapped_area(&info);
12374 }
12375
12376@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12377 info.high_limit = current->mm->mmap_base;
12378 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12379 info.align_offset = 0;
12380+ info.threadstack_offset = 0;
12381 addr = vm_unmapped_area(&info);
12382
12383 /*
12384diff --git a/arch/um/Makefile b/arch/um/Makefile
12385index e4b1a96..16162f8 100644
12386--- a/arch/um/Makefile
12387+++ b/arch/um/Makefile
12388@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12389 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12390 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12391
12392+ifdef CONSTIFY_PLUGIN
12393+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12394+endif
12395+
12396 #This will adjust *FLAGS accordingly to the platform.
12397 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12398
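
The constify gcc plugin implicitly makes ops-like structures (those consisting of function pointers) const so they end up in read-only memory; the USER_CFLAGS objects above are ordinary userspace code in UML's build, so the plugin is switched off for them. Hand-written, this is roughly the transformation the plugin applies (the struct is illustrative):

/* an ops table the plugin would constify automatically */
struct file_ops_stub {
	int (*open)(const char *path);
	int (*close)(int fd);
};

static int my_open(const char *path) { (void)path; return 3; }
static int my_close(int fd) { (void)fd; return 0; }

/* "const" is what constify adds: the pointers become unwritable */
static const struct file_ops_stub my_ops = {
	.open	= my_open,
	.close	= my_close,
};
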
12399diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12400index 19e1bdd..3665b77 100644
12401--- a/arch/um/include/asm/cache.h
12402+++ b/arch/um/include/asm/cache.h
12403@@ -1,6 +1,7 @@
12404 #ifndef __UM_CACHE_H
12405 #define __UM_CACHE_H
12406
12407+#include <linux/const.h>
12408
12409 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12410 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12411@@ -12,6 +13,6 @@
12412 # define L1_CACHE_SHIFT 5
12413 #endif
12414
12415-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12416+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12417
12418 #endif
12419diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12420index 2e0a6b1..a64d0f5 100644
12421--- a/arch/um/include/asm/kmap_types.h
12422+++ b/arch/um/include/asm/kmap_types.h
12423@@ -8,6 +8,6 @@
12424
12425 /* No more #include "asm/arch/kmap_types.h" ! */
12426
12427-#define KM_TYPE_NR 14
12428+#define KM_TYPE_NR 15
12429
12430 #endif
12431diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12432index 71c5d13..4c7b9f1 100644
12433--- a/arch/um/include/asm/page.h
12434+++ b/arch/um/include/asm/page.h
12435@@ -14,6 +14,9 @@
12436 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12437 #define PAGE_MASK (~(PAGE_SIZE-1))
12438
12439+#define ktla_ktva(addr) (addr)
12440+#define ktva_ktla(addr) (addr)
12441+
12442 #ifndef __ASSEMBLY__
12443
12444 struct page;
12445diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12446index 2b4274e..754fe06 100644
12447--- a/arch/um/include/asm/pgtable-3level.h
12448+++ b/arch/um/include/asm/pgtable-3level.h
12449@@ -58,6 +58,7 @@
12450 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12451 #define pud_populate(mm, pud, pmd) \
12452 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12453+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12454
12455 #ifdef CONFIG_64BIT
12456 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12457diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12458index f17bca8..48adb87 100644
12459--- a/arch/um/kernel/process.c
12460+++ b/arch/um/kernel/process.c
12461@@ -356,22 +356,6 @@ int singlestepping(void * t)
12462 return 2;
12463 }
12464
12465-/*
12466- * Only x86 and x86_64 have an arch_align_stack().
12467- * All other arches have "#define arch_align_stack(x) (x)"
12468- * in their asm/exec.h
12469- * As this is included in UML from asm-um/system-generic.h,
12470- * we can use it to behave as the subarch does.
12471- */
12472-#ifndef arch_align_stack
12473-unsigned long arch_align_stack(unsigned long sp)
12474-{
12475- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12476- sp -= get_random_int() % 8192;
12477- return sp & ~0xf;
12478-}
12479-#endif
12480-
12481 unsigned long get_wchan(struct task_struct *p)
12482 {
12483 unsigned long stack_page, sp, ip;
12484diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12485index ad8f795..2c7eec6 100644
12486--- a/arch/unicore32/include/asm/cache.h
12487+++ b/arch/unicore32/include/asm/cache.h
12488@@ -12,8 +12,10 @@
12489 #ifndef __UNICORE_CACHE_H__
12490 #define __UNICORE_CACHE_H__
12491
12492-#define L1_CACHE_SHIFT (5)
12493-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12494+#include <linux/const.h>
12495+
12496+#define L1_CACHE_SHIFT 5
12497+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12498
12499 /*
12500 * Memory returned by kmalloc() may be used for DMA, so we must make
12501diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12502index b7d31ca..9481ec5 100644
12503--- a/arch/x86/Kconfig
12504+++ b/arch/x86/Kconfig
12505@@ -132,7 +132,7 @@ config X86
12506 select RTC_LIB
12507 select HAVE_DEBUG_STACKOVERFLOW
12508 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12509- select HAVE_CC_STACKPROTECTOR
12510+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12511 select GENERIC_CPU_AUTOPROBE
12512 select HAVE_ARCH_AUDITSYSCALL
12513 select ARCH_SUPPORTS_ATOMIC_RMW
12514@@ -266,7 +266,7 @@ config X86_HT
12515
12516 config X86_32_LAZY_GS
12517 def_bool y
12518- depends on X86_32 && !CC_STACKPROTECTOR
12519+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12520
12521 config ARCH_HWEIGHT_CFLAGS
12522 string
12523@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
12524
12525 menuconfig HYPERVISOR_GUEST
12526 bool "Linux guest support"
12527+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12528 ---help---
12529 Say Y here to enable options for running Linux under various hyper-
12530 visors. This option enables basic hypervisor detection and platform
12531@@ -1013,6 +1014,7 @@ config VM86
12532
12533 config X86_16BIT
12534 bool "Enable support for 16-bit segments" if EXPERT
12535+ depends on !GRKERNSEC
12536 default y
12537 ---help---
12538 This option is required by programs like Wine to run 16-bit
12539@@ -1186,6 +1188,7 @@ choice
12540
12541 config NOHIGHMEM
12542 bool "off"
12543+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12544 ---help---
12545 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12546 However, the address space of 32-bit x86 processors is only 4
12547@@ -1222,6 +1225,7 @@ config NOHIGHMEM
12548
12549 config HIGHMEM4G
12550 bool "4GB"
12551+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12552 ---help---
12553 Select this if you have a 32-bit processor and between 1 and 4
12554 gigabytes of physical RAM.
12555@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
12556 hex
12557 default 0xB0000000 if VMSPLIT_3G_OPT
12558 default 0x80000000 if VMSPLIT_2G
12559- default 0x78000000 if VMSPLIT_2G_OPT
12560+ default 0x70000000 if VMSPLIT_2G_OPT
12561 default 0x40000000 if VMSPLIT_1G
12562 default 0xC0000000
12563 depends on X86_32
12564@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
12565
12566 config KEXEC
12567 bool "kexec system call"
12568+ depends on !GRKERNSEC_KMEM
12569 ---help---
12570 kexec is a system call that implements the ability to shut down your
12571 current kernel, and to start another kernel. It is like a reboot
12572@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
12573
12574 config PHYSICAL_ALIGN
12575 hex "Alignment value to which kernel should be aligned"
12576- default "0x200000"
12577+ default "0x1000000"
12578+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12579+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12580 range 0x2000 0x1000000 if X86_32
12581 range 0x200000 0x1000000 if X86_64
12582 ---help---
12583@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
12584 def_bool n
12585 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12586 depends on X86_32 || IA32_EMULATION
12587+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12588 ---help---
12589 Certain buggy versions of glibc will crash if they are
12590 presented with a 32-bit vDSO that is not mapped at the address
12591diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12592index 6983314..54ad7e8 100644
12593--- a/arch/x86/Kconfig.cpu
12594+++ b/arch/x86/Kconfig.cpu
12595@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12596
12597 config X86_F00F_BUG
12598 def_bool y
12599- depends on M586MMX || M586TSC || M586 || M486
12600+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12601
12602 config X86_INVD_BUG
12603 def_bool y
12604@@ -327,7 +327,7 @@ config X86_INVD_BUG
12605
12606 config X86_ALIGNMENT_16
12607 def_bool y
12608- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12609+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12610
12611 config X86_INTEL_USERCOPY
12612 def_bool y
12613@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12614 # generates cmov.
12615 config X86_CMOV
12616 def_bool y
12617- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12618+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12619
12620 config X86_MINIMUM_CPU_FAMILY
12621 int
12622diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12623index 20028da..88d5946 100644
12624--- a/arch/x86/Kconfig.debug
12625+++ b/arch/x86/Kconfig.debug
12626@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12627 config DEBUG_RODATA
12628 bool "Write protect kernel read-only data structures"
12629 default y
12630- depends on DEBUG_KERNEL
12631+ depends on DEBUG_KERNEL && BROKEN
12632 ---help---
12633 Mark the kernel read-only data as write-protected in the pagetables,
12634 in order to catch accidental (and incorrect) writes to such const
12635@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12636
12637 config DEBUG_SET_MODULE_RONX
12638 bool "Set loadable kernel module data as NX and text as RO"
12639- depends on MODULES
12640+ depends on MODULES && BROKEN
12641 ---help---
12642 This option helps catch unintended modifications to loadable
12643 kernel module's text and read-only data. It also prevents execution
12644diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12645index 5ba2d9c..41e5bb6 100644
12646--- a/arch/x86/Makefile
12647+++ b/arch/x86/Makefile
12648@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12649 # CPU-specific tuning. Anything which can be shared with UML should go here.
12650 include $(srctree)/arch/x86/Makefile_32.cpu
12651 KBUILD_CFLAGS += $(cflags-y)
12652-
12653- # temporary until string.h is fixed
12654- KBUILD_CFLAGS += -ffreestanding
12655 else
12656 BITS := 64
12657 UTS_MACHINE := x86_64
12658@@ -107,6 +104,9 @@ else
12659 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12660 endif
12661
12662+# temporary until string.h is fixed
12663+KBUILD_CFLAGS += -ffreestanding
12664+
12665 # Make sure compiler does not have buggy stack-protector support.
12666 ifdef CONFIG_CC_STACKPROTECTOR
12667 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12668@@ -181,6 +181,7 @@ archheaders:
12669 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12670
12671 archprepare:
12672+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12673 ifeq ($(CONFIG_KEXEC_FILE),y)
12674 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12675 endif
12676@@ -264,3 +265,9 @@ define archhelp
12677 echo ' FDARGS="..." arguments for the booted kernel'
12678 echo ' FDINITRD=file initrd for the booted kernel'
12679 endef
12680+
12681+define OLD_LD
12682+
12683+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12684+*** Please upgrade your binutils to 2.18 or newer
12685+endef
12686diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12687index 57bbf2f..b100fce 100644
12688--- a/arch/x86/boot/Makefile
12689+++ b/arch/x86/boot/Makefile
12690@@ -58,6 +58,9 @@ clean-files += cpustr.h
12691 # ---------------------------------------------------------------------------
12692
12693 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12694+ifdef CONSTIFY_PLUGIN
12695+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12696+endif
12697 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12698 GCOV_PROFILE := n
12699
12700diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12701index 878e4b9..20537ab 100644
12702--- a/arch/x86/boot/bitops.h
12703+++ b/arch/x86/boot/bitops.h
12704@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12705 u8 v;
12706 const u32 *p = (const u32 *)addr;
12707
12708- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12709+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12710 return v;
12711 }
12712
12713@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12714
12715 static inline void set_bit(int nr, void *addr)
12716 {
12717- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12718+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12719 }
12720
12721 #endif /* BOOT_BITOPS_H */
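
The bitops.h hunk (and the boot.h and cpucheck.c hunks below) add volatile to inline asm whose only visible effect is its output operand. Without it, GCC is free to CSE, reorder, or drop such asm when it considers the result unused or unchanged; volatile pins every execution in place. The canonical example of why that matters:

#include <stdint.h>

/* Read the time-stamp counter.  Without "volatile" the compiler could
 * assume two back-to-back calls yield the same value and fold them
 * into a single rdtsc. */
static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}
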
12722diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12723index bd49ec6..94c7f58 100644
12724--- a/arch/x86/boot/boot.h
12725+++ b/arch/x86/boot/boot.h
12726@@ -84,7 +84,7 @@ static inline void io_delay(void)
12727 static inline u16 ds(void)
12728 {
12729 u16 seg;
12730- asm("movw %%ds,%0" : "=rm" (seg));
12731+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12732 return seg;
12733 }
12734
12735diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12736index 0a291cd..9686efc 100644
12737--- a/arch/x86/boot/compressed/Makefile
12738+++ b/arch/x86/boot/compressed/Makefile
12739@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12740 KBUILD_CFLAGS += -mno-mmx -mno-sse
12741 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12742 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12743+ifdef CONSTIFY_PLUGIN
12744+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12745+endif
12746
12747 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12748 GCOV_PROFILE := n
12749diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12750index a53440e..c3dbf1e 100644
12751--- a/arch/x86/boot/compressed/efi_stub_32.S
12752+++ b/arch/x86/boot/compressed/efi_stub_32.S
12753@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12754 * parameter 2, ..., param n. To make things easy, we save the return
12755 * address of efi_call_phys in a global variable.
12756 */
12757- popl %ecx
12758- movl %ecx, saved_return_addr(%edx)
12759- /* get the function pointer into ECX*/
12760- popl %ecx
12761- movl %ecx, efi_rt_function_ptr(%edx)
12762+ popl saved_return_addr(%edx)
12763+ popl efi_rt_function_ptr(%edx)
12764
12765 /*
12766 * 3. Call the physical function.
12767 */
12768- call *%ecx
12769+ call *efi_rt_function_ptr(%edx)
12770
12771 /*
12772 * 4. Balance the stack. And because EAX contains the return value,
12773@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12774 1: popl %edx
12775 subl $1b, %edx
12776
12777- movl efi_rt_function_ptr(%edx), %ecx
12778- pushl %ecx
12779+ pushl efi_rt_function_ptr(%edx)
12780
12781 /*
12782 * 10. Push the saved return address onto the stack and return.
12783 */
12784- movl saved_return_addr(%edx), %ecx
12785- pushl %ecx
12786- ret
12787+ jmpl *saved_return_addr(%edx)
12788 ENDPROC(efi_call_phys)
12789 .previous
12790
12791diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12792index 630384a..278e788 100644
12793--- a/arch/x86/boot/compressed/efi_thunk_64.S
12794+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12795@@ -189,8 +189,8 @@ efi_gdt64:
12796 .long 0 /* Filled out by user */
12797 .word 0
12798 .quad 0x0000000000000000 /* NULL descriptor */
12799- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12800- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12801+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12802+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12803 .quad 0x0080890000000000 /* TS descriptor */
12804 .quad 0x0000000000000000 /* TS continued */
12805 efi_gdt64_end:
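
The GDT constants above change 0x..9a/0x..92 into 0x..9b/0x..93: bit 40 of a descriptor, the low bit of its access byte, is the "accessed" flag, which the CPU otherwise sets itself with a locked write the first time the segment is loaded. Pre-setting it means the hardware never has to write the descriptor, presumably so such tables can live in read-only memory under KERNEXEC. Decoding the two constants:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_cs = 0x00af9a000000ffffULL;
	uint64_t new_cs = 0x00af9b000000ffffULL;

	/* bits 40-47 form the access byte; bit 0 of it is "accessed" */
	uint8_t old_access = (old_cs >> 40) & 0xff;	/* 0x9a */
	uint8_t new_access = (new_cs >> 40) & 0xff;	/* 0x9b */

	printf("accessed: old=%u new=%u\n", old_access & 1u, new_access & 1u);
	return 0;
}
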
12806diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12807index 1d7fbbc..36ecd58 100644
12808--- a/arch/x86/boot/compressed/head_32.S
12809+++ b/arch/x86/boot/compressed/head_32.S
12810@@ -140,10 +140,10 @@ preferred_addr:
12811 addl %eax, %ebx
12812 notl %eax
12813 andl %eax, %ebx
12814- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12815+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12816 jge 1f
12817 #endif
12818- movl $LOAD_PHYSICAL_ADDR, %ebx
12819+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12820 1:
12821
12822 /* Target address to relocate to for decompression */
12823diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12824index 6b1766c..ad465c9 100644
12825--- a/arch/x86/boot/compressed/head_64.S
12826+++ b/arch/x86/boot/compressed/head_64.S
12827@@ -94,10 +94,10 @@ ENTRY(startup_32)
12828 addl %eax, %ebx
12829 notl %eax
12830 andl %eax, %ebx
12831- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12832+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12833 jge 1f
12834 #endif
12835- movl $LOAD_PHYSICAL_ADDR, %ebx
12836+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12837 1:
12838
12839 /* Target address to relocate to for decompression */
12840@@ -322,10 +322,10 @@ preferred_addr:
12841 addq %rax, %rbp
12842 notq %rax
12843 andq %rax, %rbp
12844- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12845+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12846 jge 1f
12847 #endif
12848- movq $LOAD_PHYSICAL_ADDR, %rbp
12849+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12850 1:
12851
12852 /* Target address to relocate to for decompression */
12853@@ -434,8 +434,8 @@ gdt:
12854 .long gdt
12855 .word 0
12856 .quad 0x0000000000000000 /* NULL descriptor */
12857- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12858- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12859+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12860+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12861 .quad 0x0080890000000000 /* TS descriptor */
12862 .quad 0x0000000000000000 /* TS continued */
12863 gdt_end:
12864diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12865index a950864..c710239 100644
12866--- a/arch/x86/boot/compressed/misc.c
12867+++ b/arch/x86/boot/compressed/misc.c
12868@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12869 * Calculate the delta between where vmlinux was linked to load
12870 * and where it was actually loaded.
12871 */
12872- delta = min_addr - LOAD_PHYSICAL_ADDR;
12873+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12874 if (!delta) {
12875 debug_putstr("No relocation needed... ");
12876 return;
12877@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12878 Elf32_Ehdr ehdr;
12879 Elf32_Phdr *phdrs, *phdr;
12880 #endif
12881- void *dest;
12882+ void *dest, *prev;
12883 int i;
12884
12885 memcpy(&ehdr, output, sizeof(ehdr));
12886@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12887 case PT_LOAD:
12888 #ifdef CONFIG_RELOCATABLE
12889 dest = output;
12890- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12891+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12892 #else
12893 dest = (void *)(phdr->p_paddr);
12894 #endif
12895 memcpy(dest,
12896 output + phdr->p_offset,
12897 phdr->p_filesz);
12898+ if (i)
12899+ memset(prev, 0xff, dest - prev);
12900+ prev = dest + phdr->p_filesz;
12901 break;
12902 default: /* Ignore other PT_* */ break;
12903 }
12904@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12905 error("Destination address too large");
12906 #endif
12907 #ifndef CONFIG_RELOCATABLE
12908- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12909+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12910 error("Wrong destination address");
12911 #endif
12912
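
The parse_elf() change above poisons the slack between consecutive PT_LOAD destinations with 0xff instead of leaving whatever the decompression buffer happened to contain there. A userspace model of the prev/dest bookkeeping, with a hypothetical two-segment layout:

#include <stdio.h>
#include <string.h>

struct seg { unsigned long off, filesz; };

int main(void)
{
	unsigned char output[64] = { 0 };
	struct seg segs[] = { { 0, 16 }, { 32, 16 } };	/* two "PT_LOAD"s */
	unsigned char *prev = NULL;

	for (unsigned int i = 0; i < 2; i++) {
		unsigned char *dest = output + segs[i].off;
		memset(dest, 0xaa, segs[i].filesz);	/* stands in for the memcpy */
		if (i)
			memset(prev, 0xff, dest - prev);	/* poison the gap */
		prev = dest + segs[i].filesz;
	}
	printf("byte in the gap: %#x\n", (unsigned int)output[20]);	/* 0xff */
	return 0;
}
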
12913diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12914index 1fd7d57..0f7d096 100644
12915--- a/arch/x86/boot/cpucheck.c
12916+++ b/arch/x86/boot/cpucheck.c
12917@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12918 u32 ecx = MSR_K7_HWCR;
12919 u32 eax, edx;
12920
12921- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12922+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12923 eax &= ~(1 << 15);
12924- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12925+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12926
12927 get_cpuflags(); /* Make sure it really did something */
12928 err = check_cpuflags();
12929@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12930 u32 ecx = MSR_VIA_FCR;
12931 u32 eax, edx;
12932
12933- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12934+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12935 eax |= (1<<1)|(1<<7);
12936- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12937+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12938
12939 set_bit(X86_FEATURE_CX8, cpu.flags);
12940 err = check_cpuflags();
12941@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12942 u32 eax, edx;
12943 u32 level = 1;
12944
12945- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12946- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12947- asm("cpuid"
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12950+ asm volatile("cpuid"
12951 : "+a" (level), "=d" (cpu.flags[0])
12952 : : "ecx", "ebx");
12953- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12954+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12955
12956 err = check_cpuflags();
12957 } else if (err == 0x01 &&
12958diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12959index 16ef025..91e033b 100644
12960--- a/arch/x86/boot/header.S
12961+++ b/arch/x86/boot/header.S
12962@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12963 # single linked list of
12964 # struct setup_data
12965
12966-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12967+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12968
12969 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12970+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12971+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12972+#else
12973 #define VO_INIT_SIZE (VO__end - VO__text)
12974+#endif
12975 #if ZO_INIT_SIZE > VO_INIT_SIZE
12976 #define INIT_SIZE ZO_INIT_SIZE
12977 #else
12978diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12979index db75d07..8e6d0af 100644
12980--- a/arch/x86/boot/memory.c
12981+++ b/arch/x86/boot/memory.c
12982@@ -19,7 +19,7 @@
12983
12984 static int detect_memory_e820(void)
12985 {
12986- int count = 0;
12987+ unsigned int count = 0;
12988 struct biosregs ireg, oreg;
12989 struct e820entry *desc = boot_params.e820_map;
12990 static struct e820entry buf; /* static so it is zeroed */
12991diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12992index ba3e100..6501b8f 100644
12993--- a/arch/x86/boot/video-vesa.c
12994+++ b/arch/x86/boot/video-vesa.c
12995@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
12996
12997 boot_params.screen_info.vesapm_seg = oreg.es;
12998 boot_params.screen_info.vesapm_off = oreg.di;
12999+ boot_params.screen_info.vesapm_size = oreg.cx;
13000 }
13001
13002 /*
13003diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13004index 43eda28..5ab5fdb 100644
13005--- a/arch/x86/boot/video.c
13006+++ b/arch/x86/boot/video.c
13007@@ -96,7 +96,7 @@ static void store_mode_params(void)
13008 static unsigned int get_entry(void)
13009 {
13010 char entry_buf[4];
13011- int i, len = 0;
13012+ unsigned int i, len = 0;
13013 int key;
13014 unsigned int v;
13015
13016diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13017index 9105655..41779c1 100644
13018--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13019+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13020@@ -8,6 +8,8 @@
13021 * including this sentence is retained in full.
13022 */
13023
13024+#include <asm/alternative-asm.h>
13025+
13026 .extern crypto_ft_tab
13027 .extern crypto_it_tab
13028 .extern crypto_fl_tab
13029@@ -70,6 +72,8 @@
13030 je B192; \
13031 leaq 32(r9),r9;
13032
13033+#define ret pax_force_retaddr; ret
13034+
13035 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13036 movq r1,r2; \
13037 movq r3,r4; \
13038diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13039index 6bd2c6c..368c93e 100644
13040--- a/arch/x86/crypto/aesni-intel_asm.S
13041+++ b/arch/x86/crypto/aesni-intel_asm.S
13042@@ -31,6 +31,7 @@
13043
13044 #include <linux/linkage.h>
13045 #include <asm/inst.h>
13046+#include <asm/alternative-asm.h>
13047
13048 /*
13049 * The following macros are used to move an (un)aligned 16 byte value to/from
13050@@ -217,7 +218,7 @@ enc: .octa 0x2
13051 * num_initial_blocks = b mod 4
13052 * encrypt the initial num_initial_blocks blocks and apply ghash on
13053 * the ciphertext
13054-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13055+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13056 * are clobbered
13057 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13058 */
13059@@ -227,8 +228,8 @@ enc: .octa 0x2
13060 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13061 MOVADQ SHUF_MASK(%rip), %xmm14
13062 mov arg7, %r10 # %r10 = AAD
13063- mov arg8, %r12 # %r12 = aadLen
13064- mov %r12, %r11
13065+ mov arg8, %r15 # %r15 = aadLen
13066+ mov %r15, %r11
13067 pxor %xmm\i, %xmm\i
13068
13069 _get_AAD_loop\num_initial_blocks\operation:
13070@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
13071 psrldq $4, %xmm\i
13072 pxor \TMP1, %xmm\i
13073 add $4, %r10
13074- sub $4, %r12
13075+ sub $4, %r15
13076 jne _get_AAD_loop\num_initial_blocks\operation
13077
13078 cmp $16, %r11
13079 je _get_AAD_loop2_done\num_initial_blocks\operation
13080
13081- mov $16, %r12
13082+ mov $16, %r15
13083 _get_AAD_loop2\num_initial_blocks\operation:
13084 psrldq $4, %xmm\i
13085- sub $4, %r12
13086- cmp %r11, %r12
13087+ sub $4, %r15
13088+ cmp %r11, %r15
13089 jne _get_AAD_loop2\num_initial_blocks\operation
13090
13091 _get_AAD_loop2_done\num_initial_blocks\operation:
13092@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13093 * num_initial_blocks = b mod 4
13094 * encrypt the initial num_initial_blocks blocks and apply ghash on
13095 * the ciphertext
13096-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13097+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13098 * are clobbered
13099 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13100 */
13101@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13102 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13103 MOVADQ SHUF_MASK(%rip), %xmm14
13104 mov arg7, %r10 # %r10 = AAD
13105- mov arg8, %r12 # %r12 = aadLen
13106- mov %r12, %r11
13107+ mov arg8, %r15 # %r15 = aadLen
13108+ mov %r15, %r11
13109 pxor %xmm\i, %xmm\i
13110 _get_AAD_loop\num_initial_blocks\operation:
13111 movd (%r10), \TMP1
13112@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13113 psrldq $4, %xmm\i
13114 pxor \TMP1, %xmm\i
13115 add $4, %r10
13116- sub $4, %r12
13117+ sub $4, %r15
13118 jne _get_AAD_loop\num_initial_blocks\operation
13119 cmp $16, %r11
13120 je _get_AAD_loop2_done\num_initial_blocks\operation
13121- mov $16, %r12
13122+ mov $16, %r15
13123 _get_AAD_loop2\num_initial_blocks\operation:
13124 psrldq $4, %xmm\i
13125- sub $4, %r12
13126- cmp %r11, %r12
13127+ sub $4, %r15
13128+ cmp %r11, %r15
13129 jne _get_AAD_loop2\num_initial_blocks\operation
13130 _get_AAD_loop2_done\num_initial_blocks\operation:
13131 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
13132@@ -1280,7 +1281,7 @@ _esb_loop_\@:
13133 *
13134 *****************************************************************************/
13135 ENTRY(aesni_gcm_dec)
13136- push %r12
13137+ push %r15
13138 push %r13
13139 push %r14
13140 mov %rsp, %r14
13141@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
13142 */
13143 sub $VARIABLE_OFFSET, %rsp
13144 and $~63, %rsp # align rsp to 64 bytes
13145- mov %arg6, %r12
13146- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13147+ mov %arg6, %r15
13148+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13149 movdqa SHUF_MASK(%rip), %xmm2
13150 PSHUFB_XMM %xmm2, %xmm13
13151
13152@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
13153 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13154 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13155 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13156- mov %r13, %r12
13157- and $(3<<4), %r12
13158+ mov %r13, %r15
13159+ and $(3<<4), %r15
13160 jz _initial_num_blocks_is_0_decrypt
13161- cmp $(2<<4), %r12
13162+ cmp $(2<<4), %r15
13163 jb _initial_num_blocks_is_1_decrypt
13164 je _initial_num_blocks_is_2_decrypt
13165 _initial_num_blocks_is_3_decrypt:
13166@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
13167 sub $16, %r11
13168 add %r13, %r11
13169 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13170- lea SHIFT_MASK+16(%rip), %r12
13171- sub %r13, %r12
13172+ lea SHIFT_MASK+16(%rip), %r15
13173+ sub %r13, %r15
13174 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13175 # (%r13 is the number of bytes in plaintext mod 16)
13176- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13177+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13178 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13179
13180 movdqa %xmm1, %xmm2
13181 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13182- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13183+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13184 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13185 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13186 pand %xmm1, %xmm2
13187@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
13188 sub $1, %r13
13189 jne _less_than_8_bytes_left_decrypt
13190 _multiple_of_16_bytes_decrypt:
13191- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13192- shl $3, %r12 # convert into number of bits
13193- movd %r12d, %xmm15 # len(A) in %xmm15
13194+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13195+ shl $3, %r15 # convert into number of bits
13196+ movd %r15d, %xmm15 # len(A) in %xmm15
13197 shl $3, %arg4 # len(C) in bits (*128)
13198 MOVQ_R64_XMM %arg4, %xmm1
13199 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13200@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
13201 mov %r14, %rsp
13202 pop %r14
13203 pop %r13
13204- pop %r12
13205+ pop %r15
13206+ pax_force_retaddr
13207 ret
13208 ENDPROC(aesni_gcm_dec)
13209
13210@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
13211 * poly = x^128 + x^127 + x^126 + x^121 + 1
13212 ***************************************************************************/
13213 ENTRY(aesni_gcm_enc)
13214- push %r12
13215+ push %r15
13216 push %r13
13217 push %r14
13218 mov %rsp, %r14
13219@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
13220 #
13221 sub $VARIABLE_OFFSET, %rsp
13222 and $~63, %rsp
13223- mov %arg6, %r12
13224- movdqu (%r12), %xmm13
13225+ mov %arg6, %r15
13226+ movdqu (%r15), %xmm13
13227 movdqa SHUF_MASK(%rip), %xmm2
13228 PSHUFB_XMM %xmm2, %xmm13
13229
13230@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
13231 movdqa %xmm13, HashKey(%rsp)
13232 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13233 and $-16, %r13
13234- mov %r13, %r12
13235+ mov %r13, %r15
13236
13237 # Encrypt first few blocks
13238
13239- and $(3<<4), %r12
13240+ and $(3<<4), %r15
13241 jz _initial_num_blocks_is_0_encrypt
13242- cmp $(2<<4), %r12
13243+ cmp $(2<<4), %r15
13244 jb _initial_num_blocks_is_1_encrypt
13245 je _initial_num_blocks_is_2_encrypt
13246 _initial_num_blocks_is_3_encrypt:
13247@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
13248 sub $16, %r11
13249 add %r13, %r11
13250 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13251- lea SHIFT_MASK+16(%rip), %r12
13252- sub %r13, %r12
13253+ lea SHIFT_MASK+16(%rip), %r15
13254+ sub %r13, %r15
13255 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13256 # (%r13 is the number of bytes in plaintext mod 16)
13257- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13258+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13259 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13260 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13261- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13262+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13263 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13264 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13265 movdqa SHUF_MASK(%rip), %xmm10
13266@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
13267 sub $1, %r13
13268 jne _less_than_8_bytes_left_encrypt
13269 _multiple_of_16_bytes_encrypt:
13270- mov arg8, %r12 # %r12 = addLen (number of bytes)
13271- shl $3, %r12
13272- movd %r12d, %xmm15 # len(A) in %xmm15
13273+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13274+ shl $3, %r15
13275+ movd %r15d, %xmm15 # len(A) in %xmm15
13276 shl $3, %arg4 # len(C) in bits (*128)
13277 MOVQ_R64_XMM %arg4, %xmm1
13278 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13279@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
13280 mov %r14, %rsp
13281 pop %r14
13282 pop %r13
13283- pop %r12
13284+ pop %r15
13285+ pax_force_retaddr
13286 ret
13287 ENDPROC(aesni_gcm_enc)
13288
13289@@ -1733,6 +1736,7 @@ _key_expansion_256a:
13290 pxor %xmm1, %xmm0
13291 movaps %xmm0, (TKEYP)
13292 add $0x10, TKEYP
13293+ pax_force_retaddr
13294 ret
13295 ENDPROC(_key_expansion_128)
13296 ENDPROC(_key_expansion_256a)
13297@@ -1759,6 +1763,7 @@ _key_expansion_192a:
13298 shufps $0b01001110, %xmm2, %xmm1
13299 movaps %xmm1, 0x10(TKEYP)
13300 add $0x20, TKEYP
13301+ pax_force_retaddr
13302 ret
13303 ENDPROC(_key_expansion_192a)
13304
13305@@ -1779,6 +1784,7 @@ _key_expansion_192b:
13306
13307 movaps %xmm0, (TKEYP)
13308 add $0x10, TKEYP
13309+ pax_force_retaddr
13310 ret
13311 ENDPROC(_key_expansion_192b)
13312
13313@@ -1792,6 +1798,7 @@ _key_expansion_256b:
13314 pxor %xmm1, %xmm2
13315 movaps %xmm2, (TKEYP)
13316 add $0x10, TKEYP
13317+ pax_force_retaddr
13318 ret
13319 ENDPROC(_key_expansion_256b)
13320
13321@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
13322 #ifndef __x86_64__
13323 popl KEYP
13324 #endif
13325+ pax_force_retaddr
13326 ret
13327 ENDPROC(aesni_set_key)
13328
13329@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
13330 popl KLEN
13331 popl KEYP
13332 #endif
13333+ pax_force_retaddr
13334 ret
13335 ENDPROC(aesni_enc)
13336
13337@@ -1985,6 +1994,7 @@ _aesni_enc1:
13338 AESENC KEY STATE
13339 movaps 0x70(TKEYP), KEY
13340 AESENCLAST KEY STATE
13341+ pax_force_retaddr
13342 ret
13343 ENDPROC(_aesni_enc1)
13344
13345@@ -2094,6 +2104,7 @@ _aesni_enc4:
13346 AESENCLAST KEY STATE2
13347 AESENCLAST KEY STATE3
13348 AESENCLAST KEY STATE4
13349+ pax_force_retaddr
13350 ret
13351 ENDPROC(_aesni_enc4)
13352
13353@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
13354 popl KLEN
13355 popl KEYP
13356 #endif
13357+ pax_force_retaddr
13358 ret
13359 ENDPROC(aesni_dec)
13360
13361@@ -2175,6 +2187,7 @@ _aesni_dec1:
13362 AESDEC KEY STATE
13363 movaps 0x70(TKEYP), KEY
13364 AESDECLAST KEY STATE
13365+ pax_force_retaddr
13366 ret
13367 ENDPROC(_aesni_dec1)
13368
13369@@ -2284,6 +2297,7 @@ _aesni_dec4:
13370 AESDECLAST KEY STATE2
13371 AESDECLAST KEY STATE3
13372 AESDECLAST KEY STATE4
13373+ pax_force_retaddr
13374 ret
13375 ENDPROC(_aesni_dec4)
13376
13377@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
13378 popl KEYP
13379 popl LEN
13380 #endif
13381+ pax_force_retaddr
13382 ret
13383 ENDPROC(aesni_ecb_enc)
13384
13385@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
13386 popl KEYP
13387 popl LEN
13388 #endif
13389+ pax_force_retaddr
13390 ret
13391 ENDPROC(aesni_ecb_dec)
13392
13393@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
13394 popl LEN
13395 popl IVP
13396 #endif
13397+ pax_force_retaddr
13398 ret
13399 ENDPROC(aesni_cbc_enc)
13400
13401@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
13402 popl LEN
13403 popl IVP
13404 #endif
13405+ pax_force_retaddr
13406 ret
13407 ENDPROC(aesni_cbc_dec)
13408
13409@@ -2561,6 +2579,7 @@ _aesni_inc_init:
13410 mov $1, TCTR_LOW
13411 MOVQ_R64_XMM TCTR_LOW INC
13412 MOVQ_R64_XMM CTR TCTR_LOW
13413+ pax_force_retaddr
13414 ret
13415 ENDPROC(_aesni_inc_init)
13416
13417@@ -2590,6 +2609,7 @@ _aesni_inc:
13418 .Linc_low:
13419 movaps CTR, IV
13420 PSHUFB_XMM BSWAP_MASK IV
13421+ pax_force_retaddr
13422 ret
13423 ENDPROC(_aesni_inc)
13424
13425@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
13426 .Lctr_enc_ret:
13427 movups IV, (IVP)
13428 .Lctr_enc_just_ret:
13429+ pax_force_retaddr
13430 ret
13431 ENDPROC(aesni_ctr_enc)
13432
13433@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
13434 pxor INC, STATE4
13435 movdqu STATE4, 0x70(OUTP)
13436
13437+ pax_force_retaddr
13438 ret
13439 ENDPROC(aesni_xts_crypt8)
13440
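
The wholesale %r12 -> %r15 renames in this file (and the %r12 -> %r14 renames in the cast5, cast6 and twofish files further down) are not cosmetic: the KERNEXEC "OR" method defined in the arch/x86/include/asm/alternative-asm.h hunk near the end of this section has pax_set_fptr_mask park the constant 0x8000000000000000 in %r12 and pax_force_retaddr OR it into the saved return address, so %r12 is no longer free as scratch. The arithmetic behind the mask, as a plain userspace illustration (not the kernel mechanism itself):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t mask = UINT64_C(0x8000000000000000); /* kept in %r12 */
        uint64_t user_ret   = UINT64_C(0x00007f0012345678); /* userland text */
        uint64_t kern_ret   = UINT64_C(0xffffffff81000000); /* kernel text */

        /* ORing bit 63 into a userland address yields a non-canonical
           address, so a hijacked return into userland faults; kernel
           addresses already have bit 63 set and pass through unchanged. */
        printf("user %#" PRIx64 " -> %#" PRIx64 "\n", user_ret, user_ret | mask);
        printf("kern %#" PRIx64 " -> %#" PRIx64 "\n", kern_ret, kern_ret | mask);
        return 0;
    }
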
13441diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13442index 246c670..466e2d6 100644
13443--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13444+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13445@@ -21,6 +21,7 @@
13446 */
13447
13448 #include <linux/linkage.h>
13449+#include <asm/alternative-asm.h>
13450
13451 .file "blowfish-x86_64-asm.S"
13452 .text
13453@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13454 jnz .L__enc_xor;
13455
13456 write_block();
13457+ pax_force_retaddr
13458 ret;
13459 .L__enc_xor:
13460 xor_block();
13461+ pax_force_retaddr
13462 ret;
13463 ENDPROC(__blowfish_enc_blk)
13464
13465@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13466
13467 movq %r11, %rbp;
13468
13469+ pax_force_retaddr
13470 ret;
13471 ENDPROC(blowfish_dec_blk)
13472
13473@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13474
13475 popq %rbx;
13476 popq %rbp;
13477+ pax_force_retaddr
13478 ret;
13479
13480 .L__enc_xor4:
13481@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13482
13483 popq %rbx;
13484 popq %rbp;
13485+ pax_force_retaddr
13486 ret;
13487 ENDPROC(__blowfish_enc_blk_4way)
13488
13489@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13490 popq %rbx;
13491 popq %rbp;
13492
13493+ pax_force_retaddr
13494 ret;
13495 ENDPROC(blowfish_dec_blk_4way)
13496diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13497index ce71f92..1dce7ec 100644
13498--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13499+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13500@@ -16,6 +16,7 @@
13501 */
13502
13503 #include <linux/linkage.h>
13504+#include <asm/alternative-asm.h>
13505
13506 #define CAMELLIA_TABLE_BYTE_LEN 272
13507
13508@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13509 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13510 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13511 %rcx, (%r9));
13512+ pax_force_retaddr
13513 ret;
13514 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13515
13516@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13517 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13518 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13519 %rax, (%r9));
13520+ pax_force_retaddr
13521 ret;
13522 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13523
13524@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13525 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13526 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13527
13528+ pax_force_retaddr
13529 ret;
13530
13531 .align 8
13532@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13533 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13534 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13535
13536+ pax_force_retaddr
13537 ret;
13538
13539 .align 8
13540@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13541 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13542 %xmm8, %rsi);
13543
13544+ pax_force_retaddr
13545 ret;
13546 ENDPROC(camellia_ecb_enc_16way)
13547
13548@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13549 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13550 %xmm8, %rsi);
13551
13552+ pax_force_retaddr
13553 ret;
13554 ENDPROC(camellia_ecb_dec_16way)
13555
13556@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13557 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13558 %xmm8, %rsi);
13559
13560+ pax_force_retaddr
13561 ret;
13562 ENDPROC(camellia_cbc_dec_16way)
13563
13564@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13565 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13566 %xmm8, %rsi);
13567
13568+ pax_force_retaddr
13569 ret;
13570 ENDPROC(camellia_ctr_16way)
13571
13572@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13573 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13574 %xmm8, %rsi);
13575
13576+ pax_force_retaddr
13577 ret;
13578 ENDPROC(camellia_xts_crypt_16way)
13579
13580diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13581index 0e0b886..5a3123c 100644
13582--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13583+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13584@@ -11,6 +11,7 @@
13585 */
13586
13587 #include <linux/linkage.h>
13588+#include <asm/alternative-asm.h>
13589
13590 #define CAMELLIA_TABLE_BYTE_LEN 272
13591
13592@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13593 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13594 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13595 %rcx, (%r9));
13596+ pax_force_retaddr
13597 ret;
13598 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13599
13600@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13601 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13602 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13603 %rax, (%r9));
13604+ pax_force_retaddr
13605 ret;
13606 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13607
13608@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13609 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13610 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13611
13612+ pax_force_retaddr
13613 ret;
13614
13615 .align 8
13616@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13617 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13618 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13619
13620+ pax_force_retaddr
13621 ret;
13622
13623 .align 8
13624@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13625
13626 vzeroupper;
13627
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(camellia_ecb_enc_32way)
13631
13632@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13633
13634 vzeroupper;
13635
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(camellia_ecb_dec_32way)
13639
13640@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13641
13642 vzeroupper;
13643
13644+ pax_force_retaddr
13645 ret;
13646 ENDPROC(camellia_cbc_dec_32way)
13647
13648@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13649
13650 vzeroupper;
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(camellia_ctr_32way)
13655
13656@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13657
13658 vzeroupper;
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_xts_crypt_32way)
13663
13664diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13665index 310319c..db3d7b5 100644
13666--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13667+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13668@@ -21,6 +21,7 @@
13669 */
13670
13671 #include <linux/linkage.h>
13672+#include <asm/alternative-asm.h>
13673
13674 .file "camellia-x86_64-asm_64.S"
13675 .text
13676@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13677 enc_outunpack(mov, RT1);
13678
13679 movq RRBP, %rbp;
13680+ pax_force_retaddr
13681 ret;
13682
13683 .L__enc_xor:
13684 enc_outunpack(xor, RT1);
13685
13686 movq RRBP, %rbp;
13687+ pax_force_retaddr
13688 ret;
13689 ENDPROC(__camellia_enc_blk)
13690
13691@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13692 dec_outunpack();
13693
13694 movq RRBP, %rbp;
13695+ pax_force_retaddr
13696 ret;
13697 ENDPROC(camellia_dec_blk)
13698
13699@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13700
13701 movq RRBP, %rbp;
13702 popq %rbx;
13703+ pax_force_retaddr
13704 ret;
13705
13706 .L__enc2_xor:
13707@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13708
13709 movq RRBP, %rbp;
13710 popq %rbx;
13711+ pax_force_retaddr
13712 ret;
13713 ENDPROC(__camellia_enc_blk_2way)
13714
13715@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13716
13717 movq RRBP, %rbp;
13718 movq RXOR, %rbx;
13719+ pax_force_retaddr
13720 ret;
13721 ENDPROC(camellia_dec_blk_2way)
13722diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13723index c35fd5d..2d8c7db 100644
13724--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13725+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13726@@ -24,6 +24,7 @@
13727 */
13728
13729 #include <linux/linkage.h>
13730+#include <asm/alternative-asm.h>
13731
13732 .file "cast5-avx-x86_64-asm_64.S"
13733
13734@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13735 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13736 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13737
13738+ pax_force_retaddr
13739 ret;
13740 ENDPROC(__cast5_enc_blk16)
13741
13742@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13743 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13744 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13745
13746+ pax_force_retaddr
13747 ret;
13748
13749 .L__skip_dec:
13750@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13751 vmovdqu RR4, (6*4*4)(%r11);
13752 vmovdqu RL4, (7*4*4)(%r11);
13753
13754+ pax_force_retaddr
13755 ret;
13756 ENDPROC(cast5_ecb_enc_16way)
13757
13758@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13759 vmovdqu RR4, (6*4*4)(%r11);
13760 vmovdqu RL4, (7*4*4)(%r11);
13761
13762+ pax_force_retaddr
13763 ret;
13764 ENDPROC(cast5_ecb_dec_16way)
13765
13766@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13767 * %rdx: src
13768 */
13769
13770- pushq %r12;
13771+ pushq %r14;
13772
13773 movq %rsi, %r11;
13774- movq %rdx, %r12;
13775+ movq %rdx, %r14;
13776
13777 vmovdqu (0*16)(%rdx), RL1;
13778 vmovdqu (1*16)(%rdx), RR1;
13779@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13780 call __cast5_dec_blk16;
13781
13782 /* xor with src */
13783- vmovq (%r12), RX;
13784+ vmovq (%r14), RX;
13785 vpshufd $0x4f, RX, RX;
13786 vpxor RX, RR1, RR1;
13787- vpxor 0*16+8(%r12), RL1, RL1;
13788- vpxor 1*16+8(%r12), RR2, RR2;
13789- vpxor 2*16+8(%r12), RL2, RL2;
13790- vpxor 3*16+8(%r12), RR3, RR3;
13791- vpxor 4*16+8(%r12), RL3, RL3;
13792- vpxor 5*16+8(%r12), RR4, RR4;
13793- vpxor 6*16+8(%r12), RL4, RL4;
13794+ vpxor 0*16+8(%r14), RL1, RL1;
13795+ vpxor 1*16+8(%r14), RR2, RR2;
13796+ vpxor 2*16+8(%r14), RL2, RL2;
13797+ vpxor 3*16+8(%r14), RR3, RR3;
13798+ vpxor 4*16+8(%r14), RL3, RL3;
13799+ vpxor 5*16+8(%r14), RR4, RR4;
13800+ vpxor 6*16+8(%r14), RL4, RL4;
13801
13802 vmovdqu RR1, (0*16)(%r11);
13803 vmovdqu RL1, (1*16)(%r11);
13804@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13805 vmovdqu RR4, (6*16)(%r11);
13806 vmovdqu RL4, (7*16)(%r11);
13807
13808- popq %r12;
13809+ popq %r14;
13810
13811+ pax_force_retaddr
13812 ret;
13813 ENDPROC(cast5_cbc_dec_16way)
13814
13815@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13816 * %rcx: iv (big endian, 64bit)
13817 */
13818
13819- pushq %r12;
13820+ pushq %r14;
13821
13822 movq %rsi, %r11;
13823- movq %rdx, %r12;
13824+ movq %rdx, %r14;
13825
13826 vpcmpeqd RTMP, RTMP, RTMP;
13827 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13828@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13829 call __cast5_enc_blk16;
13830
13831 /* dst = src ^ iv */
13832- vpxor (0*16)(%r12), RR1, RR1;
13833- vpxor (1*16)(%r12), RL1, RL1;
13834- vpxor (2*16)(%r12), RR2, RR2;
13835- vpxor (3*16)(%r12), RL2, RL2;
13836- vpxor (4*16)(%r12), RR3, RR3;
13837- vpxor (5*16)(%r12), RL3, RL3;
13838- vpxor (6*16)(%r12), RR4, RR4;
13839- vpxor (7*16)(%r12), RL4, RL4;
13840+ vpxor (0*16)(%r14), RR1, RR1;
13841+ vpxor (1*16)(%r14), RL1, RL1;
13842+ vpxor (2*16)(%r14), RR2, RR2;
13843+ vpxor (3*16)(%r14), RL2, RL2;
13844+ vpxor (4*16)(%r14), RR3, RR3;
13845+ vpxor (5*16)(%r14), RL3, RL3;
13846+ vpxor (6*16)(%r14), RR4, RR4;
13847+ vpxor (7*16)(%r14), RL4, RL4;
13848 vmovdqu RR1, (0*16)(%r11);
13849 vmovdqu RL1, (1*16)(%r11);
13850 vmovdqu RR2, (2*16)(%r11);
13851@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13852 vmovdqu RR4, (6*16)(%r11);
13853 vmovdqu RL4, (7*16)(%r11);
13854
13855- popq %r12;
13856+ popq %r14;
13857
13858+ pax_force_retaddr
13859 ret;
13860 ENDPROC(cast5_ctr_16way)
13861diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13862index e3531f8..e123f35 100644
13863--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13864+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13865@@ -24,6 +24,7 @@
13866 */
13867
13868 #include <linux/linkage.h>
13869+#include <asm/alternative-asm.h>
13870 #include "glue_helper-asm-avx.S"
13871
13872 .file "cast6-avx-x86_64-asm_64.S"
13873@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13874 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13875 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13876
13877+ pax_force_retaddr
13878 ret;
13879 ENDPROC(__cast6_enc_blk8)
13880
13881@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13882 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13883 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13884
13885+ pax_force_retaddr
13886 ret;
13887 ENDPROC(__cast6_dec_blk8)
13888
13889@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13890
13891 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13892
13893+ pax_force_retaddr
13894 ret;
13895 ENDPROC(cast6_ecb_enc_8way)
13896
13897@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13898
13899 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13900
13901+ pax_force_retaddr
13902 ret;
13903 ENDPROC(cast6_ecb_dec_8way)
13904
13905@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13906 * %rdx: src
13907 */
13908
13909- pushq %r12;
13910+ pushq %r14;
13911
13912 movq %rsi, %r11;
13913- movq %rdx, %r12;
13914+ movq %rdx, %r14;
13915
13916 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13917
13918 call __cast6_dec_blk8;
13919
13920- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13921+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13922
13923- popq %r12;
13924+ popq %r14;
13925
13926+ pax_force_retaddr
13927 ret;
13928 ENDPROC(cast6_cbc_dec_8way)
13929
13930@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13931 * %rcx: iv (little endian, 128bit)
13932 */
13933
13934- pushq %r12;
13935+ pushq %r14;
13936
13937 movq %rsi, %r11;
13938- movq %rdx, %r12;
13939+ movq %rdx, %r14;
13940
13941 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13942 RD2, RX, RKR, RKM);
13943
13944 call __cast6_enc_blk8;
13945
13946- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13947+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13948
13949- popq %r12;
13950+ popq %r14;
13951
13952+ pax_force_retaddr
13953 ret;
13954 ENDPROC(cast6_ctr_8way)
13955
13956@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13957 /* dst <= regs xor IVs(in dst) */
13958 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13959
13960+ pax_force_retaddr
13961 ret;
13962 ENDPROC(cast6_xts_enc_8way)
13963
13964@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13965 /* dst <= regs xor IVs(in dst) */
13966 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13967
13968+ pax_force_retaddr
13969 ret;
13970 ENDPROC(cast6_xts_dec_8way)
13971diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13972index 26d49eb..c0a8c84 100644
13973--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13974+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13975@@ -45,6 +45,7 @@
13976
13977 #include <asm/inst.h>
13978 #include <linux/linkage.h>
13979+#include <asm/alternative-asm.h>
13980
13981 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13982
13983@@ -309,6 +310,7 @@ do_return:
13984 popq %rsi
13985 popq %rdi
13986 popq %rbx
13987+ pax_force_retaddr
13988 ret
13989
13990 ################################################################
13991diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13992index 5d1e007..098cb4f 100644
13993--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13994+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13995@@ -18,6 +18,7 @@
13996
13997 #include <linux/linkage.h>
13998 #include <asm/inst.h>
13999+#include <asm/alternative-asm.h>
14000
14001 .data
14002
14003@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14004 psrlq $1, T2
14005 pxor T2, T1
14006 pxor T1, DATA
14007+ pax_force_retaddr
14008 ret
14009 ENDPROC(__clmul_gf128mul_ble)
14010
14011@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14012 call __clmul_gf128mul_ble
14013 PSHUFB_XMM BSWAP DATA
14014 movups DATA, (%rdi)
14015+ pax_force_retaddr
14016 ret
14017 ENDPROC(clmul_ghash_mul)
14018
14019@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14020 PSHUFB_XMM BSWAP DATA
14021 movups DATA, (%rdi)
14022 .Lupdate_just_ret:
14023+ pax_force_retaddr
14024 ret
14025 ENDPROC(clmul_ghash_update)
14026diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14027index 9279e0b..c4b3d2c 100644
14028--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14029+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14030@@ -1,4 +1,5 @@
14031 #include <linux/linkage.h>
14032+#include <asm/alternative-asm.h>
14033
14034 # enter salsa20_encrypt_bytes
14035 ENTRY(salsa20_encrypt_bytes)
14036@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14037 add %r11,%rsp
14038 mov %rdi,%rax
14039 mov %rsi,%rdx
14040+ pax_force_retaddr
14041 ret
14042 # bytesatleast65:
14043 ._bytesatleast65:
14044@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14045 add %r11,%rsp
14046 mov %rdi,%rax
14047 mov %rsi,%rdx
14048+ pax_force_retaddr
14049 ret
14050 ENDPROC(salsa20_keysetup)
14051
14052@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14053 add %r11,%rsp
14054 mov %rdi,%rax
14055 mov %rsi,%rdx
14056+ pax_force_retaddr
14057 ret
14058 ENDPROC(salsa20_ivsetup)
14059diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14060index 2f202f4..d9164d6 100644
14061--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14062+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14063@@ -24,6 +24,7 @@
14064 */
14065
14066 #include <linux/linkage.h>
14067+#include <asm/alternative-asm.h>
14068 #include "glue_helper-asm-avx.S"
14069
14070 .file "serpent-avx-x86_64-asm_64.S"
14071@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14072 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14073 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14074
14075+ pax_force_retaddr
14076 ret;
14077 ENDPROC(__serpent_enc_blk8_avx)
14078
14079@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14080 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14081 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14082
14083+ pax_force_retaddr
14084 ret;
14085 ENDPROC(__serpent_dec_blk8_avx)
14086
14087@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14088
14089 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14090
14091+ pax_force_retaddr
14092 ret;
14093 ENDPROC(serpent_ecb_enc_8way_avx)
14094
14095@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14096
14097 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14098
14099+ pax_force_retaddr
14100 ret;
14101 ENDPROC(serpent_ecb_dec_8way_avx)
14102
14103@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14104
14105 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14106
14107+ pax_force_retaddr
14108 ret;
14109 ENDPROC(serpent_cbc_dec_8way_avx)
14110
14111@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14112
14113 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14114
14115+ pax_force_retaddr
14116 ret;
14117 ENDPROC(serpent_ctr_8way_avx)
14118
14119@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14120 /* dst <= regs xor IVs(in dst) */
14121 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14122
14123+ pax_force_retaddr
14124 ret;
14125 ENDPROC(serpent_xts_enc_8way_avx)
14126
14127@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14128 /* dst <= regs xor IVs(in dst) */
14129 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14130
14131+ pax_force_retaddr
14132 ret;
14133 ENDPROC(serpent_xts_dec_8way_avx)
14134diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14135index b222085..abd483c 100644
14136--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14137+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14138@@ -15,6 +15,7 @@
14139 */
14140
14141 #include <linux/linkage.h>
14142+#include <asm/alternative-asm.h>
14143 #include "glue_helper-asm-avx2.S"
14144
14145 .file "serpent-avx2-asm_64.S"
14146@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14147 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14148 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14149
14150+ pax_force_retaddr
14151 ret;
14152 ENDPROC(__serpent_enc_blk16)
14153
14154@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14155 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14156 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14157
14158+ pax_force_retaddr
14159 ret;
14160 ENDPROC(__serpent_dec_blk16)
14161
14162@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14163
14164 vzeroupper;
14165
14166+ pax_force_retaddr
14167 ret;
14168 ENDPROC(serpent_ecb_enc_16way)
14169
14170@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14171
14172 vzeroupper;
14173
14174+ pax_force_retaddr
14175 ret;
14176 ENDPROC(serpent_ecb_dec_16way)
14177
14178@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14179
14180 vzeroupper;
14181
14182+ pax_force_retaddr
14183 ret;
14184 ENDPROC(serpent_cbc_dec_16way)
14185
14186@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14187
14188 vzeroupper;
14189
14190+ pax_force_retaddr
14191 ret;
14192 ENDPROC(serpent_ctr_16way)
14193
14194@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14195
14196 vzeroupper;
14197
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(serpent_xts_enc_16way)
14201
14202@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14203
14204 vzeroupper;
14205
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(serpent_xts_dec_16way)
14209diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14210index acc066c..1559cc4 100644
14211--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14212+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14213@@ -25,6 +25,7 @@
14214 */
14215
14216 #include <linux/linkage.h>
14217+#include <asm/alternative-asm.h>
14218
14219 .file "serpent-sse2-x86_64-asm_64.S"
14220 .text
14221@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14222 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14223 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14224
14225+ pax_force_retaddr
14226 ret;
14227
14228 .L__enc_xor8:
14229 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14230 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14231
14232+ pax_force_retaddr
14233 ret;
14234 ENDPROC(__serpent_enc_blk_8way)
14235
14236@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14237 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14238 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14239
14240+ pax_force_retaddr
14241 ret;
14242 ENDPROC(serpent_dec_blk_8way)
14243diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14244index a410950..9dfe7ad 100644
14245--- a/arch/x86/crypto/sha1_ssse3_asm.S
14246+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14247@@ -29,6 +29,7 @@
14248 */
14249
14250 #include <linux/linkage.h>
14251+#include <asm/alternative-asm.h>
14252
14253 #define CTX %rdi // arg1
14254 #define BUF %rsi // arg2
14255@@ -75,9 +76,9 @@
14256
14257 push %rbx
14258 push %rbp
14259- push %r12
14260+ push %r14
14261
14262- mov %rsp, %r12
14263+ mov %rsp, %r14
14264 sub $64, %rsp # allocate workspace
14265 and $~15, %rsp # align stack
14266
14267@@ -99,11 +100,12 @@
14268 xor %rax, %rax
14269 rep stosq
14270
14271- mov %r12, %rsp # deallocate workspace
14272+ mov %r14, %rsp # deallocate workspace
14273
14274- pop %r12
14275+ pop %r14
14276 pop %rbp
14277 pop %rbx
14278+ pax_force_retaddr
14279 ret
14280
14281 ENDPROC(\name)
14282diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14283index 642f156..51a513c 100644
14284--- a/arch/x86/crypto/sha256-avx-asm.S
14285+++ b/arch/x86/crypto/sha256-avx-asm.S
14286@@ -49,6 +49,7 @@
14287
14288 #ifdef CONFIG_AS_AVX
14289 #include <linux/linkage.h>
14290+#include <asm/alternative-asm.h>
14291
14292 ## assume buffers not aligned
14293 #define VMOVDQ vmovdqu
14294@@ -460,6 +461,7 @@ done_hash:
14295 popq %r13
14296 popq %rbp
14297 popq %rbx
14298+ pax_force_retaddr
14299 ret
14300 ENDPROC(sha256_transform_avx)
14301
14302diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14303index 9e86944..3795e6a 100644
14304--- a/arch/x86/crypto/sha256-avx2-asm.S
14305+++ b/arch/x86/crypto/sha256-avx2-asm.S
14306@@ -50,6 +50,7 @@
14307
14308 #ifdef CONFIG_AS_AVX2
14309 #include <linux/linkage.h>
14310+#include <asm/alternative-asm.h>
14311
14312 ## assume buffers not aligned
14313 #define VMOVDQ vmovdqu
14314@@ -720,6 +721,7 @@ done_hash:
14315 popq %r12
14316 popq %rbp
14317 popq %rbx
14318+ pax_force_retaddr
14319 ret
14320 ENDPROC(sha256_transform_rorx)
14321
14322diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14323index f833b74..8c62a9e 100644
14324--- a/arch/x86/crypto/sha256-ssse3-asm.S
14325+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14326@@ -47,6 +47,7 @@
14327 ########################################################################
14328
14329 #include <linux/linkage.h>
14330+#include <asm/alternative-asm.h>
14331
14332 ## assume buffers not aligned
14333 #define MOVDQ movdqu
14334@@ -471,6 +472,7 @@ done_hash:
14335 popq %rbp
14336 popq %rbx
14337
14338+ pax_force_retaddr
14339 ret
14340 ENDPROC(sha256_transform_ssse3)
14341
14342diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14343index 974dde9..a823ff9 100644
14344--- a/arch/x86/crypto/sha512-avx-asm.S
14345+++ b/arch/x86/crypto/sha512-avx-asm.S
14346@@ -49,6 +49,7 @@
14347
14348 #ifdef CONFIG_AS_AVX
14349 #include <linux/linkage.h>
14350+#include <asm/alternative-asm.h>
14351
14352 .text
14353
14354@@ -364,6 +365,7 @@ updateblock:
14355 mov frame_RSPSAVE(%rsp), %rsp
14356
14357 nowork:
14358+ pax_force_retaddr
14359 ret
14360 ENDPROC(sha512_transform_avx)
14361
14362diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14363index 568b961..ed20c37 100644
14364--- a/arch/x86/crypto/sha512-avx2-asm.S
14365+++ b/arch/x86/crypto/sha512-avx2-asm.S
14366@@ -51,6 +51,7 @@
14367
14368 #ifdef CONFIG_AS_AVX2
14369 #include <linux/linkage.h>
14370+#include <asm/alternative-asm.h>
14371
14372 .text
14373
14374@@ -678,6 +679,7 @@ done_hash:
14375
14376 # Restore Stack Pointer
14377 mov frame_RSPSAVE(%rsp), %rsp
14378+ pax_force_retaddr
14379 ret
14380 ENDPROC(sha512_transform_rorx)
14381
14382diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14383index fb56855..6edd768 100644
14384--- a/arch/x86/crypto/sha512-ssse3-asm.S
14385+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14386@@ -48,6 +48,7 @@
14387 ########################################################################
14388
14389 #include <linux/linkage.h>
14390+#include <asm/alternative-asm.h>
14391
14392 .text
14393
14394@@ -363,6 +364,7 @@ updateblock:
14395 mov frame_RSPSAVE(%rsp), %rsp
14396
14397 nowork:
14398+ pax_force_retaddr
14399 ret
14400 ENDPROC(sha512_transform_ssse3)
14401
14402diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14403index 0505813..b067311 100644
14404--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14405+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14406@@ -24,6 +24,7 @@
14407 */
14408
14409 #include <linux/linkage.h>
14410+#include <asm/alternative-asm.h>
14411 #include "glue_helper-asm-avx.S"
14412
14413 .file "twofish-avx-x86_64-asm_64.S"
14414@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14415 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14416 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14417
14418+ pax_force_retaddr
14419 ret;
14420 ENDPROC(__twofish_enc_blk8)
14421
14422@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14423 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14424 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14425
14426+ pax_force_retaddr
14427 ret;
14428 ENDPROC(__twofish_dec_blk8)
14429
14430@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14431
14432 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14433
14434+ pax_force_retaddr
14435 ret;
14436 ENDPROC(twofish_ecb_enc_8way)
14437
14438@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14439
14440 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14441
14442+ pax_force_retaddr
14443 ret;
14444 ENDPROC(twofish_ecb_dec_8way)
14445
14446@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14447 * %rdx: src
14448 */
14449
14450- pushq %r12;
14451+ pushq %r14;
14452
14453 movq %rsi, %r11;
14454- movq %rdx, %r12;
14455+ movq %rdx, %r14;
14456
14457 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14458
14459 call __twofish_dec_blk8;
14460
14461- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14462+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14463
14464- popq %r12;
14465+ popq %r14;
14466
14467+ pax_force_retaddr
14468 ret;
14469 ENDPROC(twofish_cbc_dec_8way)
14470
14471@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14472 * %rcx: iv (little endian, 128bit)
14473 */
14474
14475- pushq %r12;
14476+ pushq %r14;
14477
14478 movq %rsi, %r11;
14479- movq %rdx, %r12;
14480+ movq %rdx, %r14;
14481
14482 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14483 RD2, RX0, RX1, RY0);
14484
14485 call __twofish_enc_blk8;
14486
14487- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14488+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14489
14490- popq %r12;
14491+ popq %r14;
14492
14493+ pax_force_retaddr
14494 ret;
14495 ENDPROC(twofish_ctr_8way)
14496
14497@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14498 /* dst <= regs xor IVs(in dst) */
14499 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500
14501+ pax_force_retaddr
14502 ret;
14503 ENDPROC(twofish_xts_enc_8way)
14504
14505@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14506 /* dst <= regs xor IVs(in dst) */
14507 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14508
14509+ pax_force_retaddr
14510 ret;
14511 ENDPROC(twofish_xts_dec_8way)
14512diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14513index 1c3b7ce..02f578d 100644
14514--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14515+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14516@@ -21,6 +21,7 @@
14517 */
14518
14519 #include <linux/linkage.h>
14520+#include <asm/alternative-asm.h>
14521
14522 .file "twofish-x86_64-asm-3way.S"
14523 .text
14524@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14525 popq %r13;
14526 popq %r14;
14527 popq %r15;
14528+ pax_force_retaddr
14529 ret;
14530
14531 .L__enc_xor3:
14532@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14533 popq %r13;
14534 popq %r14;
14535 popq %r15;
14536+ pax_force_retaddr
14537 ret;
14538 ENDPROC(__twofish_enc_blk_3way)
14539
14540@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14541 popq %r13;
14542 popq %r14;
14543 popq %r15;
14544+ pax_force_retaddr
14545 ret;
14546 ENDPROC(twofish_dec_blk_3way)
14547diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14548index a039d21..524b8b2 100644
14549--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14550+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14551@@ -22,6 +22,7 @@
14552
14553 #include <linux/linkage.h>
14554 #include <asm/asm-offsets.h>
14555+#include <asm/alternative-asm.h>
14556
14557 #define a_offset 0
14558 #define b_offset 4
14559@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14560
14561 popq R1
14562 movq $1,%rax
14563+ pax_force_retaddr
14564 ret
14565 ENDPROC(twofish_enc_blk)
14566
14567@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14568
14569 popq R1
14570 movq $1,%rax
14571+ pax_force_retaddr
14572 ret
14573 ENDPROC(twofish_dec_blk)
14574diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14575index ae6aad1..719d6d9 100644
14576--- a/arch/x86/ia32/ia32_aout.c
14577+++ b/arch/x86/ia32/ia32_aout.c
14578@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14579 unsigned long dump_start, dump_size;
14580 struct user32 dump;
14581
14582+ memset(&dump, 0, sizeof(dump));
14583+
14584 fs = get_fs();
14585 set_fs(KERNEL_DS);
14586 has_dumped = 1;
14587diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14588index d0165c9..0d5639b 100644
14589--- a/arch/x86/ia32/ia32_signal.c
14590+++ b/arch/x86/ia32/ia32_signal.c
14591@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14592 if (__get_user(set.sig[0], &frame->sc.oldmask)
14593 || (_COMPAT_NSIG_WORDS > 1
14594 && __copy_from_user((((char *) &set.sig) + 4),
14595- &frame->extramask,
14596+ frame->extramask,
14597 sizeof(frame->extramask))))
14598 goto badframe;
14599
14600@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14601 sp -= frame_size;
14602 /* Align the stack pointer according to the i386 ABI,
14603 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14604- sp = ((sp + 4) & -16ul) - 4;
14605+ sp = ((sp - 12) & -16ul) - 4;
14606 return (void __user *) sp;
14607 }
14608
14609@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14610 } else {
14611 /* Return stub is in 32bit vsyscall page */
14612 if (current->mm->context.vdso)
14613- restorer = current->mm->context.vdso +
14614- selected_vdso32->sym___kernel_sigreturn;
14615+ restorer = (void __force_user *)(current->mm->context.vdso +
14616+ selected_vdso32->sym___kernel_sigreturn);
14617 else
14618- restorer = &frame->retcode;
14619+ restorer = frame->retcode;
14620 }
14621
14622 put_user_try {
14623@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14624 * These are actually not used anymore, but left because some
14625 * gdb versions depend on them as a marker.
14626 */
14627- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14628+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14629 } put_user_catch(err);
14630
14631 if (err)
14632@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14633 0xb8,
14634 __NR_ia32_rt_sigreturn,
14635 0x80cd,
14636- 0,
14637+ 0
14638 };
14639
14640 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14641@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14642
14643 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14644 restorer = ksig->ka.sa.sa_restorer;
14645+ else if (current->mm->context.vdso)
14646+ /* Return stub is in 32bit vsyscall page */
14647+ restorer = (void __force_user *)(current->mm->context.vdso +
14648+ selected_vdso32->sym___kernel_rt_sigreturn);
14649 else
14650- restorer = current->mm->context.vdso +
14651- selected_vdso32->sym___kernel_rt_sigreturn;
14652+ restorer = frame->retcode;
14653 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14654
14655 /*
14656 * Not actually used anymore, but left because some gdb
14657 * versions need it.
14658 */
14659- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14660+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14661 } put_user_catch(err);
14662
14663 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14664diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14665index 156ebca..9591cf0 100644
14666--- a/arch/x86/ia32/ia32entry.S
14667+++ b/arch/x86/ia32/ia32entry.S
14668@@ -15,8 +15,10 @@
14669 #include <asm/irqflags.h>
14670 #include <asm/asm.h>
14671 #include <asm/smap.h>
14672+#include <asm/pgtable.h>
14673 #include <linux/linkage.h>
14674 #include <linux/err.h>
14675+#include <asm/alternative-asm.h>
14676
14677 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14678 #include <linux/elf-em.h>
14679@@ -62,12 +64,12 @@
14680 */
14681 .macro LOAD_ARGS32 offset, _r9=0
14682 .if \_r9
14683- movl \offset+16(%rsp),%r9d
14684+ movl \offset+R9(%rsp),%r9d
14685 .endif
14686- movl \offset+40(%rsp),%ecx
14687- movl \offset+48(%rsp),%edx
14688- movl \offset+56(%rsp),%esi
14689- movl \offset+64(%rsp),%edi
14690+ movl \offset+RCX(%rsp),%ecx
14691+ movl \offset+RDX(%rsp),%edx
14692+ movl \offset+RSI(%rsp),%esi
14693+ movl \offset+RDI(%rsp),%edi
14694 movl %eax,%eax /* zero extension */
14695 .endm
14696
14697@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14698 ENDPROC(native_irq_enable_sysexit)
14699 #endif
14700
14701+ .macro pax_enter_kernel_user
14702+ pax_set_fptr_mask
14703+#ifdef CONFIG_PAX_MEMORY_UDEREF
14704+ call pax_enter_kernel_user
14705+#endif
14706+ .endm
14707+
14708+ .macro pax_exit_kernel_user
14709+#ifdef CONFIG_PAX_MEMORY_UDEREF
14710+ call pax_exit_kernel_user
14711+#endif
14712+#ifdef CONFIG_PAX_RANDKSTACK
14713+ pushq %rax
14714+ pushq %r11
14715+ call pax_randomize_kstack
14716+ popq %r11
14717+ popq %rax
14718+#endif
14719+ .endm
14720+
14721+ .macro pax_erase_kstack
14722+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14723+ call pax_erase_kstack
14724+#endif
14725+ .endm
14726+
14727 /*
14728 * 32bit SYSENTER instruction entry.
14729 *
14730@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14731 CFI_REGISTER rsp,rbp
14732 SWAPGS_UNSAFE_STACK
14733 movq PER_CPU_VAR(kernel_stack), %rsp
14734- addq $(KERNEL_STACK_OFFSET),%rsp
14735- /*
14736- * No need to follow this irqs on/off section: the syscall
14737- * disabled irqs, here we enable it straight after entry:
14738- */
14739- ENABLE_INTERRUPTS(CLBR_NONE)
14740 movl %ebp,%ebp /* zero extension */
14741 pushq_cfi $__USER32_DS
14742 /*CFI_REL_OFFSET ss,0*/
14743@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14744 CFI_REL_OFFSET rsp,0
14745 pushfq_cfi
14746 /*CFI_REL_OFFSET rflags,0*/
14747- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14748- CFI_REGISTER rip,r10
14749+ orl $X86_EFLAGS_IF,(%rsp)
14750+ GET_THREAD_INFO(%r11)
14751+ movl TI_sysenter_return(%r11), %r11d
14752+ CFI_REGISTER rip,r11
14753 pushq_cfi $__USER32_CS
14754 /*CFI_REL_OFFSET cs,0*/
14755 movl %eax, %eax
14756- pushq_cfi %r10
14757+ pushq_cfi %r11
14758 CFI_REL_OFFSET rip,0
14759 pushq_cfi %rax
14760 cld
14761 SAVE_ARGS 0,1,0
14762+ pax_enter_kernel_user
14763+
14764+#ifdef CONFIG_PAX_RANDKSTACK
14765+ pax_erase_kstack
14766+#endif
14767+
14768+ /*
14769+ * No need to follow this irqs on/off section: the syscall
14770+ * disabled irqs, here we enable it straight after entry:
14771+ */
14772+ ENABLE_INTERRUPTS(CLBR_NONE)
14773 /* no need to do an access_ok check here because rbp has been
14774 32bit zero extended */
14775+
14776+#ifdef CONFIG_PAX_MEMORY_UDEREF
14777+ addq pax_user_shadow_base,%rbp
14778+ ASM_PAX_OPEN_USERLAND
14779+#endif
14780+
14781 ASM_STAC
14782 1: movl (%rbp),%ebp
14783 _ASM_EXTABLE(1b,ia32_badarg)
14784 ASM_CLAC
14785
14786+#ifdef CONFIG_PAX_MEMORY_UDEREF
14787+ ASM_PAX_CLOSE_USERLAND
14788+#endif
14789+
14790 /*
14791 * Sysenter doesn't filter flags, so we need to clear NT
14792 * ourselves. To save a few cycles, we can check whether
14793@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14794 jnz sysenter_fix_flags
14795 sysenter_flags_fixed:
14796
14797- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14798- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14799+ GET_THREAD_INFO(%r11)
14800+ orl $TS_COMPAT,TI_status(%r11)
14801+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14802 CFI_REMEMBER_STATE
14803 jnz sysenter_tracesys
14804 cmpq $(IA32_NR_syscalls-1),%rax
14805@@ -172,14 +218,17 @@ sysenter_do_call:
14806 sysenter_dispatch:
14807 call *ia32_sys_call_table(,%rax,8)
14808 movq %rax,RAX-ARGOFFSET(%rsp)
14809+ GET_THREAD_INFO(%r11)
14810 DISABLE_INTERRUPTS(CLBR_NONE)
14811 TRACE_IRQS_OFF
14812- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14813+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14814 jnz sysexit_audit
14815 sysexit_from_sys_call:
14816- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14817+ pax_exit_kernel_user
14818+ pax_erase_kstack
14819+ andl $~TS_COMPAT,TI_status(%r11)
14820 /* clear IF, that popfq doesn't enable interrupts early */
14821- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
14822+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
14823 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
14824 CFI_REGISTER rip,rdx
14825 RESTORE_ARGS 0,24,0,0,0,0
14826@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14827 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14828 movl %eax,%edi /* 1st arg: syscall number */
14829 call __audit_syscall_entry
14830+
14831+ pax_erase_kstack
14832+
14833 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14834 cmpq $(IA32_NR_syscalls-1),%rax
14835 ja ia32_badsys
14836@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14837 .endm
14838
14839 .macro auditsys_exit exit
14840- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14841+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14842 jnz ia32_ret_from_sys_call
14843 TRACE_IRQS_ON
14844 ENABLE_INTERRUPTS(CLBR_NONE)
14845@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14846 1: setbe %al /* 1 if error, 0 if not */
14847 movzbl %al,%edi /* zero-extend that into %edi */
14848 call __audit_syscall_exit
14849+ GET_THREAD_INFO(%r11)
14850 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14851 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14852 DISABLE_INTERRUPTS(CLBR_NONE)
14853 TRACE_IRQS_OFF
14854- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14855+ testl %edi,TI_flags(%r11)
14856 jz \exit
14857 CLEAR_RREGS -ARGOFFSET
14858 jmp int_with_check
14859@@ -253,7 +306,7 @@ sysenter_fix_flags:
14860
14861 sysenter_tracesys:
14862 #ifdef CONFIG_AUDITSYSCALL
14863- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14864+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14865 jz sysenter_auditsys
14866 #endif
14867 SAVE_REST
14868@@ -265,6 +318,9 @@ sysenter_tracesys:
14869 RESTORE_REST
14870 cmpq $(IA32_NR_syscalls-1),%rax
14871 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14872+
14873+ pax_erase_kstack
14874+
14875 jmp sysenter_do_call
14876 CFI_ENDPROC
14877 ENDPROC(ia32_sysenter_target)
14878@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14879 ENTRY(ia32_cstar_target)
14880 CFI_STARTPROC32 simple
14881 CFI_SIGNAL_FRAME
14882- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14883+ CFI_DEF_CFA rsp,0
14884 CFI_REGISTER rip,rcx
14885 /*CFI_REGISTER rflags,r11*/
14886 SWAPGS_UNSAFE_STACK
14887 movl %esp,%r8d
14888 CFI_REGISTER rsp,r8
14889 movq PER_CPU_VAR(kernel_stack),%rsp
14890+ SAVE_ARGS 8*6,0,0
14891+ pax_enter_kernel_user
14892+
14893+#ifdef CONFIG_PAX_RANDKSTACK
14894+ pax_erase_kstack
14895+#endif
14896+
14897 /*
14898 * No need to follow this irqs on/off section: the syscall
14899 * disabled irqs and here we enable it straight after entry:
14900 */
14901 ENABLE_INTERRUPTS(CLBR_NONE)
14902- SAVE_ARGS 8,0,0
14903 movl %eax,%eax /* zero extension */
14904 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14905 movq %rcx,RIP-ARGOFFSET(%rsp)
14906@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14907 /* no need to do an access_ok check here because r8 has been
14908 32bit zero extended */
14909 /* hardware stack frame is complete now */
14910+
14911+#ifdef CONFIG_PAX_MEMORY_UDEREF
14912+ ASM_PAX_OPEN_USERLAND
14913+ movq pax_user_shadow_base,%r8
14914+ addq RSP-ARGOFFSET(%rsp),%r8
14915+#endif
14916+
14917 ASM_STAC
14918 1: movl (%r8),%r9d
14919 _ASM_EXTABLE(1b,ia32_badarg)
14920 ASM_CLAC
14921- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14922- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14923+
14924+#ifdef CONFIG_PAX_MEMORY_UDEREF
14925+ ASM_PAX_CLOSE_USERLAND
14926+#endif
14927+
14928+ GET_THREAD_INFO(%r11)
14929+ orl $TS_COMPAT,TI_status(%r11)
14930+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14931 CFI_REMEMBER_STATE
14932 jnz cstar_tracesys
14933 cmpq $IA32_NR_syscalls-1,%rax
14934@@ -335,13 +410,16 @@ cstar_do_call:
14935 cstar_dispatch:
14936 call *ia32_sys_call_table(,%rax,8)
14937 movq %rax,RAX-ARGOFFSET(%rsp)
14938+ GET_THREAD_INFO(%r11)
14939 DISABLE_INTERRUPTS(CLBR_NONE)
14940 TRACE_IRQS_OFF
14941- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14942+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14943 jnz sysretl_audit
14944 sysretl_from_sys_call:
14945- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14946- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14947+ pax_exit_kernel_user
14948+ pax_erase_kstack
14949+ andl $~TS_COMPAT,TI_status(%r11)
14950+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14951 movl RIP-ARGOFFSET(%rsp),%ecx
14952 CFI_REGISTER rip,rcx
14953 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14954@@ -368,7 +446,7 @@ sysretl_audit:
14955
14956 cstar_tracesys:
14957 #ifdef CONFIG_AUDITSYSCALL
14958- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14959+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14960 jz cstar_auditsys
14961 #endif
14962 xchgl %r9d,%ebp
14963@@ -382,11 +460,19 @@ cstar_tracesys:
14964 xchgl %ebp,%r9d
14965 cmpq $(IA32_NR_syscalls-1),%rax
14966 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14967+
14968+ pax_erase_kstack
14969+
14970 jmp cstar_do_call
14971 END(ia32_cstar_target)
14972
14973 ia32_badarg:
14974 ASM_CLAC
14975+
14976+#ifdef CONFIG_PAX_MEMORY_UDEREF
14977+ ASM_PAX_CLOSE_USERLAND
14978+#endif
14979+
14980 movq $-EFAULT,%rax
14981 jmp ia32_sysret
14982 CFI_ENDPROC
14983@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14984 CFI_REL_OFFSET rip,RIP-RIP
14985 PARAVIRT_ADJUST_EXCEPTION_FRAME
14986 SWAPGS
14987- /*
14988- * No need to follow this irqs on/off section: the syscall
14989- * disabled irqs and here we enable it straight after entry:
14990- */
14991- ENABLE_INTERRUPTS(CLBR_NONE)
14992 movl %eax,%eax
14993 pushq_cfi %rax
14994 cld
14995 /* note the registers are not zero extended to the sf.
14996 this could be a problem. */
14997 SAVE_ARGS 0,1,0
14998- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14999- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15000+ pax_enter_kernel_user
15001+
15002+#ifdef CONFIG_PAX_RANDKSTACK
15003+ pax_erase_kstack
15004+#endif
15005+
15006+ /*
15007+ * No need to follow this irqs on/off section: the syscall
15008+ * disabled irqs and here we enable it straight after entry:
15009+ */
15010+ ENABLE_INTERRUPTS(CLBR_NONE)
15011+ GET_THREAD_INFO(%r11)
15012+ orl $TS_COMPAT,TI_status(%r11)
15013+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15014 jnz ia32_tracesys
15015 cmpq $(IA32_NR_syscalls-1),%rax
15016 ja ia32_badsys
15017@@ -458,6 +551,9 @@ ia32_tracesys:
15018 RESTORE_REST
15019 cmpq $(IA32_NR_syscalls-1),%rax
15020 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15021+
15022+ pax_erase_kstack
15023+
15024 jmp ia32_do_call
15025 END(ia32_syscall)
15026
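
The UDEREF hunks above are the core of this entry-path rework: before the cstar path dereferences the 32-bit user stack through %r8, it adds pax_user_shadow_base so the access goes through the kernel's shifted alias of userland rather than the raw user address, with ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND bracketing the window. A minimal user-space sketch of just that rebasing arithmetic, using an illustrative base constant rather than the real kernel value:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: under PAX_MEMORY_UDEREF the kernel reaches
     * userland through an aliased mapping at a fixed offset
     * (pax_user_shadow_base); SHADOW_BASE here is an assumed example
     * value, not the real kernel constant. */
    #define SHADOW_BASE 0xffff880000000000ULL

    static uint64_t rebase_user_ptr(uint64_t user_ptr)
    {
        return user_ptr + SHADOW_BASE;  /* the same add the asm does in %r8 */
    }

    int main(void)
    {
        uint64_t user_sp = 0x00007ffdeadb0000ULL;  /* hypothetical user %rsp */
        printf("user %#llx -> shadow %#llx\n",
               (unsigned long long)user_sp,
               (unsigned long long)rebase_user_ptr(user_sp));
        return 0;
    }
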
15027diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15028index 8e0ceec..af13504 100644
15029--- a/arch/x86/ia32/sys_ia32.c
15030+++ b/arch/x86/ia32/sys_ia32.c
15031@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15032 */
15033 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15034 {
15035- typeof(ubuf->st_uid) uid = 0;
15036- typeof(ubuf->st_gid) gid = 0;
15037+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15038+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15039 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15040 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15041 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
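
The cp_stat64() change swaps typeof(ubuf->st_uid) for typeof(((struct stat64 *)0)->st_uid): the member type is now taken from a null struct pointer, a purely compile-time construct that never goes near the __user pointer. A small stand-alone sketch, with a hypothetical stat64_like stand-in for struct stat64, showing that no dereference happens at run time:

    #include <stdio.h>

    struct stat64_like {          /* hypothetical stand-in for struct stat64 */
        unsigned int st_uid;
        unsigned int st_gid;
    };

    int main(void)
    {
        /* typeof on a member of a null pointer is resolved entirely at
         * compile time; the pointer is never dereferenced. */
        typeof(((struct stat64_like *)0)->st_uid) uid = 0;
        typeof(((struct stat64_like *)0)->st_gid) gid = 0;
        printf("sizeof(uid)=%zu sizeof(gid)=%zu\n", sizeof uid, sizeof gid);
        return (int)(uid + gid);
    }
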
15042diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15043index 372231c..51b537d 100644
15044--- a/arch/x86/include/asm/alternative-asm.h
15045+++ b/arch/x86/include/asm/alternative-asm.h
15046@@ -18,6 +18,45 @@
15047 .endm
15048 #endif
15049
15050+#ifdef KERNEXEC_PLUGIN
15051+ .macro pax_force_retaddr_bts rip=0
15052+ btsq $63,\rip(%rsp)
15053+ .endm
15054+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15055+ .macro pax_force_retaddr rip=0, reload=0
15056+ btsq $63,\rip(%rsp)
15057+ .endm
15058+ .macro pax_force_fptr ptr
15059+ btsq $63,\ptr
15060+ .endm
15061+ .macro pax_set_fptr_mask
15062+ .endm
15063+#endif
15064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15065+ .macro pax_force_retaddr rip=0, reload=0
15066+ .if \reload
15067+ pax_set_fptr_mask
15068+ .endif
15069+ orq %r12,\rip(%rsp)
15070+ .endm
15071+ .macro pax_force_fptr ptr
15072+ orq %r12,\ptr
15073+ .endm
15074+ .macro pax_set_fptr_mask
15075+ movabs $0x8000000000000000,%r12
15076+ .endm
15077+#endif
15078+#else
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .endm
15081+ .macro pax_force_fptr ptr
15082+ .endm
15083+ .macro pax_force_retaddr_bts rip=0
15084+ .endm
15085+ .macro pax_set_fptr_mask
15086+ .endm
15087+#endif
15088+
15089 .macro altinstruction_entry orig alt feature orig_len alt_len
15090 .long \orig - .
15091 .long \alt - .
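
The pax_force_retaddr/pax_force_fptr macros added above implement the two KERNEXEC plugin methods: saved return addresses and function pointers get bit 63 set (via btsq, or orq with a mask kept in %r12), so a legitimate kernel-text address is unchanged while a smuggled userland address becomes non-canonical and faults on use. A hedged C sketch of the OR method's arithmetic, with example addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define KERNEXEC_MASK 0x8000000000000000ULL  /* bit 63, as in pax_set_fptr_mask */

    /* Kernel text lives in the upper canonical half, so OR-ing in bit 63
     * is a no-op for real kernel pointers but corrupts a userland one. */
    static uint64_t force_fptr(uint64_t ptr)
    {
        return ptr | KERNEXEC_MASK;
    }

    int main(void)
    {
        uint64_t kernel_text = 0xffffffff81000000ULL;  /* example kernel address */
        uint64_t user_addr   = 0x0000000000401000ULL;  /* example user address */
        printf("kernel %#llx -> %#llx (unchanged)\n",
               (unsigned long long)kernel_text,
               (unsigned long long)force_fptr(kernel_text));
        printf("user   %#llx -> %#llx (non-canonical)\n",
               (unsigned long long)user_addr,
               (unsigned long long)force_fptr(user_addr));
        return 0;
    }
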
15092diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15093index 473bdbe..b1e3377 100644
15094--- a/arch/x86/include/asm/alternative.h
15095+++ b/arch/x86/include/asm/alternative.h
15096@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15097 ".pushsection .discard,\"aw\",@progbits\n" \
15098 DISCARD_ENTRY(1) \
15099 ".popsection\n" \
15100- ".pushsection .altinstr_replacement, \"ax\"\n" \
15101+ ".pushsection .altinstr_replacement, \"a\"\n" \
15102 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15103 ".popsection"
15104
15105@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15106 DISCARD_ENTRY(1) \
15107 DISCARD_ENTRY(2) \
15108 ".popsection\n" \
15109- ".pushsection .altinstr_replacement, \"ax\"\n" \
15110+ ".pushsection .altinstr_replacement, \"a\"\n" \
15111 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15112 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15113 ".popsection"
15114diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15115index efc3b22..85c4f3a 100644
15116--- a/arch/x86/include/asm/apic.h
15117+++ b/arch/x86/include/asm/apic.h
15118@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15119
15120 #ifdef CONFIG_X86_LOCAL_APIC
15121
15122-extern unsigned int apic_verbosity;
15123+extern int apic_verbosity;
15124 extern int local_apic_timer_c2_ok;
15125
15126 extern int disable_apic;
15127diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15128index 20370c6..a2eb9b0 100644
15129--- a/arch/x86/include/asm/apm.h
15130+++ b/arch/x86/include/asm/apm.h
15131@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15132 __asm__ __volatile__(APM_DO_ZERO_SEGS
15133 "pushl %%edi\n\t"
15134 "pushl %%ebp\n\t"
15135- "lcall *%%cs:apm_bios_entry\n\t"
15136+ "lcall *%%ss:apm_bios_entry\n\t"
15137 "setc %%al\n\t"
15138 "popl %%ebp\n\t"
15139 "popl %%edi\n\t"
15140@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15141 __asm__ __volatile__(APM_DO_ZERO_SEGS
15142 "pushl %%edi\n\t"
15143 "pushl %%ebp\n\t"
15144- "lcall *%%cs:apm_bios_entry\n\t"
15145+ "lcall *%%ss:apm_bios_entry\n\t"
15146 "setc %%bl\n\t"
15147 "popl %%ebp\n\t"
15148 "popl %%edi\n\t"
15149diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15150index 5e5cd12..51cdc93 100644
15151--- a/arch/x86/include/asm/atomic.h
15152+++ b/arch/x86/include/asm/atomic.h
15153@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15154 }
15155
15156 /**
15157+ * atomic_read_unchecked - read atomic variable
15158+ * @v: pointer of type atomic_unchecked_t
15159+ *
15160+ * Atomically reads the value of @v.
15161+ */
15162+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15163+{
15164+ return ACCESS_ONCE((v)->counter);
15165+}
15166+
15167+/**
15168 * atomic_set - set atomic variable
15169 * @v: pointer of type atomic_t
15170 * @i: required value
15171@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15172 }
15173
15174 /**
15175+ * atomic_set_unchecked - set atomic variable
15176+ * @v: pointer of type atomic_unchecked_t
15177+ * @i: required value
15178+ *
15179+ * Atomically sets the value of @v to @i.
15180+ */
15181+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15182+{
15183+ v->counter = i;
15184+}
15185+
15186+/**
15187 * atomic_add - add integer to atomic variable
15188 * @i: integer value to add
15189 * @v: pointer of type atomic_t
15190@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15191 */
15192 static inline void atomic_add(int i, atomic_t *v)
15193 {
15194- asm volatile(LOCK_PREFIX "addl %1,%0"
15195+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15196+
15197+#ifdef CONFIG_PAX_REFCOUNT
15198+ "jno 0f\n"
15199+ LOCK_PREFIX "subl %1,%0\n"
15200+ "int $4\n0:\n"
15201+ _ASM_EXTABLE(0b, 0b)
15202+#endif
15203+
15204+ : "+m" (v->counter)
15205+ : "ir" (i));
15206+}
15207+
15208+/**
15209+ * atomic_add_unchecked - add integer to atomic variable
15210+ * @i: integer value to add
15211+ * @v: pointer of type atomic_unchecked_t
15212+ *
15213+ * Atomically adds @i to @v.
15214+ */
15215+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15216+{
15217+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15218 : "+m" (v->counter)
15219 : "ir" (i));
15220 }
15221@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15222 */
15223 static inline void atomic_sub(int i, atomic_t *v)
15224 {
15225- asm volatile(LOCK_PREFIX "subl %1,%0"
15226+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15227+
15228+#ifdef CONFIG_PAX_REFCOUNT
15229+ "jno 0f\n"
15230+ LOCK_PREFIX "addl %1,%0\n"
15231+ "int $4\n0:\n"
15232+ _ASM_EXTABLE(0b, 0b)
15233+#endif
15234+
15235+ : "+m" (v->counter)
15236+ : "ir" (i));
15237+}
15238+
15239+/**
15240+ * atomic_sub_unchecked - subtract integer from atomic variable
15241+ * @i: integer value to subtract
15242+ * @v: pointer of type atomic_unchecked_t
15243+ *
15244+ * Atomically subtracts @i from @v.
15245+ */
15246+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15247+{
15248+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15249 : "+m" (v->counter)
15250 : "ir" (i));
15251 }
15252@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15253 */
15254 static inline int atomic_sub_and_test(int i, atomic_t *v)
15255 {
15256- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15257+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15258 }
15259
15260 /**
15261@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15262 */
15263 static inline void atomic_inc(atomic_t *v)
15264 {
15265- asm volatile(LOCK_PREFIX "incl %0"
15266+ asm volatile(LOCK_PREFIX "incl %0\n"
15267+
15268+#ifdef CONFIG_PAX_REFCOUNT
15269+ "jno 0f\n"
15270+ LOCK_PREFIX "decl %0\n"
15271+ "int $4\n0:\n"
15272+ _ASM_EXTABLE(0b, 0b)
15273+#endif
15274+
15275+ : "+m" (v->counter));
15276+}
15277+
15278+/**
15279+ * atomic_inc_unchecked - increment atomic variable
15280+ * @v: pointer of type atomic_unchecked_t
15281+ *
15282+ * Atomically increments @v by 1.
15283+ */
15284+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15285+{
15286+ asm volatile(LOCK_PREFIX "incl %0\n"
15287 : "+m" (v->counter));
15288 }
15289
15290@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15291 */
15292 static inline void atomic_dec(atomic_t *v)
15293 {
15294- asm volatile(LOCK_PREFIX "decl %0"
15295+ asm volatile(LOCK_PREFIX "decl %0\n"
15296+
15297+#ifdef CONFIG_PAX_REFCOUNT
15298+ "jno 0f\n"
15299+ LOCK_PREFIX "incl %0\n"
15300+ "int $4\n0:\n"
15301+ _ASM_EXTABLE(0b, 0b)
15302+#endif
15303+
15304+ : "+m" (v->counter));
15305+}
15306+
15307+/**
15308+ * atomic_dec_unchecked - decrement atomic variable
15309+ * @v: pointer of type atomic_unchecked_t
15310+ *
15311+ * Atomically decrements @v by 1.
15312+ */
15313+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15314+{
15315+ asm volatile(LOCK_PREFIX "decl %0\n"
15316 : "+m" (v->counter));
15317 }
15318
15319@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15320 */
15321 static inline int atomic_dec_and_test(atomic_t *v)
15322 {
15323- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15324+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15325 }
15326
15327 /**
15328@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15329 */
15330 static inline int atomic_inc_and_test(atomic_t *v)
15331 {
15332- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15333+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15334+}
15335+
15336+/**
15337+ * atomic_inc_and_test_unchecked - increment and test
15338+ * @v: pointer of type atomic_unchecked_t
15339+ *
15340+ * Atomically increments @v by 1
15341+ * and returns true if the result is zero, or false for all
15342+ * other cases.
15343+ */
15344+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15345+{
15346+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347 }
15348
15349 /**
15350@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15351 */
15352 static inline int atomic_add_negative(int i, atomic_t *v)
15353 {
15354- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15355+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15356 }
15357
15358 /**
15359@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15360 *
15361 * Atomically adds @i to @v and returns @i + @v
15362 */
15363-static inline int atomic_add_return(int i, atomic_t *v)
15364+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15365+{
15366+ return i + xadd_check_overflow(&v->counter, i);
15367+}
15368+
15369+/**
15370+ * atomic_add_return_unchecked - add integer and return
15371+ * @i: integer value to add
15372+ * @v: pointer of type atomic_unchecked_t
15373+ *
15374+ * Atomically adds @i to @v and returns @i + @v
15375+ */
15376+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15377 {
15378 return i + xadd(&v->counter, i);
15379 }
15380@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15381 *
15382 * Atomically subtracts @i from @v and returns @v - @i
15383 */
15384-static inline int atomic_sub_return(int i, atomic_t *v)
15385+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15386 {
15387 return atomic_add_return(-i, v);
15388 }
15389
15390 #define atomic_inc_return(v) (atomic_add_return(1, v))
15391+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15392+{
15393+ return atomic_add_return_unchecked(1, v);
15394+}
15395 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15396
15397-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15398+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15399+{
15400+ return cmpxchg(&v->counter, old, new);
15401+}
15402+
15403+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15404 {
15405 return cmpxchg(&v->counter, old, new);
15406 }
15407@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15408 return xchg(&v->counter, new);
15409 }
15410
15411+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15412+{
15413+ return xchg(&v->counter, new);
15414+}
15415+
15416 /**
15417 * __atomic_add_unless - add unless the number is already a given value
15418 * @v: pointer of type atomic_t
15419@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15420 */
15421 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15422 {
15423- int c, old;
15424+ int c, old, new;
15425 c = atomic_read(v);
15426 for (;;) {
15427- if (unlikely(c == (u)))
15428+ if (unlikely(c == u))
15429 break;
15430- old = atomic_cmpxchg((v), c, c + (a));
15431+
15432+ asm volatile("addl %2,%0\n"
15433+
15434+#ifdef CONFIG_PAX_REFCOUNT
15435+ "jno 0f\n"
15436+ "subl %2,%0\n"
15437+ "int $4\n0:\n"
15438+ _ASM_EXTABLE(0b, 0b)
15439+#endif
15440+
15441+ : "=r" (new)
15442+ : "0" (c), "ir" (a));
15443+
15444+ old = atomic_cmpxchg(v, c, new);
15445 if (likely(old == c))
15446 break;
15447 c = old;
15448@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15449 }
15450
15451 /**
15452+ * atomic_inc_not_zero_hint - increment if not zero
15453+ * @v: pointer of type atomic_t
15454+ * @hint: probable value of the atomic before the increment
15455+ *
15456+ * This version of atomic_inc_not_zero() gives a hint of the probable
15457+ * value of the atomic. This helps the processor avoid reading the memory
15458+ * before doing the atomic read/modify/write cycle, lowering the
15459+ * number of bus transactions on some arches.
15460+ *
15461+ * Returns: 0 if increment was not done, 1 otherwise.
15462+ */
15463+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15464+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15465+{
15466+ int val, c = hint, new;
15467+
15468+ /* sanity test, should be removed by compiler if hint is a constant */
15469+ if (!hint)
15470+ return __atomic_add_unless(v, 1, 0);
15471+
15472+ do {
15473+ asm volatile("incl %0\n"
15474+
15475+#ifdef CONFIG_PAX_REFCOUNT
15476+ "jno 0f\n"
15477+ "decl %0\n"
15478+ "int $4\n0:\n"
15479+ _ASM_EXTABLE(0b, 0b)
15480+#endif
15481+
15482+ : "=r" (new)
15483+ : "0" (c));
15484+
15485+ val = atomic_cmpxchg(v, c, new);
15486+ if (val == c)
15487+ return 1;
15488+ c = val;
15489+ } while (c);
15490+
15491+ return 0;
15492+}
15493+
15494+/**
15495 * atomic_inc_short - increment of a short integer
15496 * @v: pointer to type int
15497 *
15498@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15499 }
15500
15501 /* These are x86-specific, used by some header files */
15502-#define atomic_clear_mask(mask, addr) \
15503- asm volatile(LOCK_PREFIX "andl %0,%1" \
15504- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15505+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15506+{
15507+ asm volatile(LOCK_PREFIX "andl %1,%0"
15508+ : "+m" (v->counter)
15509+ : "r" (~(mask))
15510+ : "memory");
15511+}
15512
15513-#define atomic_set_mask(mask, addr) \
15514- asm volatile(LOCK_PREFIX "orl %0,%1" \
15515- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15516- : "memory")
15517+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15518+{
15519+ asm volatile(LOCK_PREFIX "andl %1,%0"
15520+ : "+m" (v->counter)
15521+ : "r" (~(mask))
15522+ : "memory");
15523+}
15524+
15525+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15526+{
15527+ asm volatile(LOCK_PREFIX "orl %1,%0"
15528+ : "+m" (v->counter)
15529+ : "r" (mask)
15530+ : "memory");
15531+}
15532+
15533+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15534+{
15535+ asm volatile(LOCK_PREFIX "orl %1,%0"
15536+ : "+m" (v->counter)
15537+ : "r" (mask)
15538+ : "memory");
15539+}
15540
15541 #ifdef CONFIG_X86_32
15542 # include <asm/atomic64_32.h>
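
The recurring pattern in the atomic.h hunks above is PAX_REFCOUNT: each write-side atomic op gains a "jno 0f; <undo>; int $4" tail that reverses the operation and traps on signed overflow, while the new *_unchecked variants keep raw wrap-around semantics for counters that may legitimately overflow (statistics and the like). A portable, non-atomic sketch of the checked-increment idea, assuming __builtin_add_overflow, with abort() standing in for the int $4 trap the kernel handler turns into a controlled kill:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch only: the real version is a single lock-prefixed incl with an
     * overflow-trap tail; this models the semantics, not the atomicity. */
    static void refcount_inc_checked(int *counter)
    {
        int newval;
        if (__builtin_add_overflow(*counter, 1, &newval))
            abort();            /* models "int $4" under CONFIG_PAX_REFCOUNT */
        *counter = newval;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;
        refcount_inc_checked(&refs);        /* fine: reaches INT_MAX */
        printf("refs = %d\n", refs);
        /* one more refcount_inc_checked(&refs) would overflow and abort() */
        return 0;
    }

The design choice is that a trapped overflow turns a refcount-overflow exploit primitive (use-after-free via a wrapped counter) into a detectable denial of that one task.
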
15543diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15544index b154de7..bf18a5a 100644
15545--- a/arch/x86/include/asm/atomic64_32.h
15546+++ b/arch/x86/include/asm/atomic64_32.h
15547@@ -12,6 +12,14 @@ typedef struct {
15548 u64 __aligned(8) counter;
15549 } atomic64_t;
15550
15551+#ifdef CONFIG_PAX_REFCOUNT
15552+typedef struct {
15553+ u64 __aligned(8) counter;
15554+} atomic64_unchecked_t;
15555+#else
15556+typedef atomic64_t atomic64_unchecked_t;
15557+#endif
15558+
15559 #define ATOMIC64_INIT(val) { (val) }
15560
15561 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15562@@ -37,21 +45,31 @@ typedef struct {
15563 ATOMIC64_DECL_ONE(sym##_386)
15564
15565 ATOMIC64_DECL_ONE(add_386);
15566+ATOMIC64_DECL_ONE(add_unchecked_386);
15567 ATOMIC64_DECL_ONE(sub_386);
15568+ATOMIC64_DECL_ONE(sub_unchecked_386);
15569 ATOMIC64_DECL_ONE(inc_386);
15570+ATOMIC64_DECL_ONE(inc_unchecked_386);
15571 ATOMIC64_DECL_ONE(dec_386);
15572+ATOMIC64_DECL_ONE(dec_unchecked_386);
15573 #endif
15574
15575 #define alternative_atomic64(f, out, in...) \
15576 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15577
15578 ATOMIC64_DECL(read);
15579+ATOMIC64_DECL(read_unchecked);
15580 ATOMIC64_DECL(set);
15581+ATOMIC64_DECL(set_unchecked);
15582 ATOMIC64_DECL(xchg);
15583 ATOMIC64_DECL(add_return);
15584+ATOMIC64_DECL(add_return_unchecked);
15585 ATOMIC64_DECL(sub_return);
15586+ATOMIC64_DECL(sub_return_unchecked);
15587 ATOMIC64_DECL(inc_return);
15588+ATOMIC64_DECL(inc_return_unchecked);
15589 ATOMIC64_DECL(dec_return);
15590+ATOMIC64_DECL(dec_return_unchecked);
15591 ATOMIC64_DECL(dec_if_positive);
15592 ATOMIC64_DECL(inc_not_zero);
15593 ATOMIC64_DECL(add_unless);
15594@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15595 }
15596
15597 /**
15598+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15599+ * @v: pointer to type atomic64_unchecked_t
15600+ * @o: expected value
15601+ * @n: new value
15602+ *
15603+ * Atomically sets @v to @n if it was equal to @o and returns
15604+ * the old value.
15605+ */
15606+
15607+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15608+{
15609+ return cmpxchg64(&v->counter, o, n);
15610+}
15611+
15612+/**
15613 * atomic64_xchg - xchg atomic64 variable
15614 * @v: pointer to type atomic64_t
15615 * @n: value to assign
15616@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15617 }
15618
15619 /**
15620+ * atomic64_set_unchecked - set atomic64 variable
15621+ * @v: pointer to type atomic64_unchecked_t
15622+ * @i: value to assign
15623+ *
15624+ * Atomically sets the value of @v to @i.
15625+ */
15626+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15627+{
15628+ unsigned high = (unsigned)(i >> 32);
15629+ unsigned low = (unsigned)i;
15630+ alternative_atomic64(set, /* no output */,
15631+ "S" (v), "b" (low), "c" (high)
15632+ : "eax", "edx", "memory");
15633+}
15634+
15635+/**
15636 * atomic64_read - read atomic64 variable
15637 * @v: pointer to type atomic64_t
15638 *
15639@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15640 }
15641
15642 /**
15643+ * atomic64_read_unchecked - read atomic64 variable
15644+ * @v: pointer to type atomic64_unchecked_t
15645+ *
15646+ * Atomically reads the value of @v and returns it.
15647+ */
15648+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15649+{
15650+ long long r;
15651+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15652+ return r;
15653+}
15654+
15655+/**
15656 * atomic64_add_return - add and return
15657 * @i: integer value to add
15658 * @v: pointer to type atomic64_t
15659@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15660 return i;
15661 }
15662
15663+/**
15664+ * atomic64_add_return_unchecked - add and return
15665+ * @i: integer value to add
15666+ * @v: pointer to type atomic64_unchecked_t
15667+ *
15668+ * Atomically adds @i to @v and returns @i + *@v
15669+ */
15670+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15671+{
15672+ alternative_atomic64(add_return_unchecked,
15673+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15674+ ASM_NO_INPUT_CLOBBER("memory"));
15675+ return i;
15676+}
15677+
15678 /*
15679 * Other variants with different arithmetic operators:
15680 */
15681@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15682 return a;
15683 }
15684
15685+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15686+{
15687+ long long a;
15688+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15689+ "S" (v) : "memory", "ecx");
15690+ return a;
15691+}
15692+
15693 static inline long long atomic64_dec_return(atomic64_t *v)
15694 {
15695 long long a;
15696@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15697 }
15698
15699 /**
15700+ * atomic64_add_unchecked - add integer to atomic64 variable
15701+ * @i: integer value to add
15702+ * @v: pointer to type atomic64_unchecked_t
15703+ *
15704+ * Atomically adds @i to @v.
15705+ */
15706+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15707+{
15708+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15709+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15710+ ASM_NO_INPUT_CLOBBER("memory"));
15711+ return i;
15712+}
15713+
15714+/**
15715 * atomic64_sub - subtract the atomic64 variable
15716 * @i: integer value to subtract
15717 * @v: pointer to type atomic64_t
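
On 32-bit x86 the 64-bit atomics above are out-of-line helpers selected at boot by the alternatives machinery (a cmpxchg8b implementation, or a fallback for 386-class CPUs, per the ATOMIC64_DECL_ONE(add_386)-style declarations), and the patch simply declares _unchecked twins of each. An illustrative-only model of the compare-and-swap loop such helpers boil down to, written with the GCC __atomic builtins rather than the kernel's hand-written assembly:

    #include <stdio.h>

    /* A 64-bit atomic add built from a 64-bit compare-and-swap: read the
     * current value, attempt to install old + i, retry on contention. */
    static long long atomic64_add_cas(long long i, long long *v)
    {
        long long old = __atomic_load_n(v, __ATOMIC_RELAXED);
        while (!__atomic_compare_exchange_n(v, &old, old + i, 0,
                                            __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
            ;   /* old was reloaded with the current value; retry */
        return old + i;
    }

    int main(void)
    {
        long long v = 1;
        printf("result = %lld\n", atomic64_add_cas(41, &v));
        return 0;
    }
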
15718diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15719index f8d273e..02f39f3 100644
15720--- a/arch/x86/include/asm/atomic64_64.h
15721+++ b/arch/x86/include/asm/atomic64_64.h
15722@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15723 }
15724
15725 /**
15726+ * atomic64_read_unchecked - read atomic64 variable
15727+ * @v: pointer of type atomic64_unchecked_t
15728+ *
15729+ * Atomically reads the value of @v.
15730+ * Doesn't imply a read memory barrier.
15731+ */
15732+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15733+{
15734+ return ACCESS_ONCE((v)->counter);
15735+}
15736+
15737+/**
15738 * atomic64_set - set atomic64 variable
15739 * @v: pointer to type atomic64_t
15740 * @i: required value
15741@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15742 }
15743
15744 /**
15745+ * atomic64_set_unchecked - set atomic64 variable
15746+ * @v: pointer to type atomic64_unchecked_t
15747+ * @i: required value
15748+ *
15749+ * Atomically sets the value of @v to @i.
15750+ */
15751+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15752+{
15753+ v->counter = i;
15754+}
15755+
15756+/**
15757 * atomic64_add - add integer to atomic64 variable
15758 * @i: integer value to add
15759 * @v: pointer to type atomic64_t
15760@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15761 */
15762 static inline void atomic64_add(long i, atomic64_t *v)
15763 {
15764+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15765+
15766+#ifdef CONFIG_PAX_REFCOUNT
15767+ "jno 0f\n"
15768+ LOCK_PREFIX "subq %1,%0\n"
15769+ "int $4\n0:\n"
15770+ _ASM_EXTABLE(0b, 0b)
15771+#endif
15772+
15773+ : "=m" (v->counter)
15774+ : "er" (i), "m" (v->counter));
15775+}
15776+
15777+/**
15778+ * atomic64_add_unchecked - add integer to atomic64 variable
15779+ * @i: integer value to add
15780+ * @v: pointer to type atomic64_unchecked_t
15781+ *
15782+ * Atomically adds @i to @v.
15783+ */
15784+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15785+{
15786 asm volatile(LOCK_PREFIX "addq %1,%0"
15787 : "=m" (v->counter)
15788 : "er" (i), "m" (v->counter));
15789@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15790 */
15791 static inline void atomic64_sub(long i, atomic64_t *v)
15792 {
15793- asm volatile(LOCK_PREFIX "subq %1,%0"
15794+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15795+
15796+#ifdef CONFIG_PAX_REFCOUNT
15797+ "jno 0f\n"
15798+ LOCK_PREFIX "addq %1,%0\n"
15799+ "int $4\n0:\n"
15800+ _ASM_EXTABLE(0b, 0b)
15801+#endif
15802+
15803+ : "=m" (v->counter)
15804+ : "er" (i), "m" (v->counter));
15805+}
15806+
15807+/**
15808+ * atomic64_sub_unchecked - subtract the atomic64 variable
15809+ * @i: integer value to subtract
15810+ * @v: pointer to type atomic64_unchecked_t
15811+ *
15812+ * Atomically subtracts @i from @v.
15813+ */
15814+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15815+{
15816+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15817 : "=m" (v->counter)
15818 : "er" (i), "m" (v->counter));
15819 }
15820@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15821 */
15822 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15823 {
15824- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15825+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15826 }
15827
15828 /**
15829@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15830 */
15831 static inline void atomic64_inc(atomic64_t *v)
15832 {
15833+ asm volatile(LOCK_PREFIX "incq %0\n"
15834+
15835+#ifdef CONFIG_PAX_REFCOUNT
15836+ "jno 0f\n"
15837+ LOCK_PREFIX "decq %0\n"
15838+ "int $4\n0:\n"
15839+ _ASM_EXTABLE(0b, 0b)
15840+#endif
15841+
15842+ : "=m" (v->counter)
15843+ : "m" (v->counter));
15844+}
15845+
15846+/**
15847+ * atomic64_inc_unchecked - increment atomic64 variable
15848+ * @v: pointer to type atomic64_unchecked_t
15849+ *
15850+ * Atomically increments @v by 1.
15851+ */
15852+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15853+{
15854 asm volatile(LOCK_PREFIX "incq %0"
15855 : "=m" (v->counter)
15856 : "m" (v->counter));
15857@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15858 */
15859 static inline void atomic64_dec(atomic64_t *v)
15860 {
15861- asm volatile(LOCK_PREFIX "decq %0"
15862+ asm volatile(LOCK_PREFIX "decq %0\n"
15863+
15864+#ifdef CONFIG_PAX_REFCOUNT
15865+ "jno 0f\n"
15866+ LOCK_PREFIX "incq %0\n"
15867+ "int $4\n0:\n"
15868+ _ASM_EXTABLE(0b, 0b)
15869+#endif
15870+
15871+ : "=m" (v->counter)
15872+ : "m" (v->counter));
15873+}
15874+
15875+/**
15876+ * atomic64_dec_unchecked - decrement atomic64 variable
15877+ * @v: pointer to type atomic64_unchecked_t
15878+ *
15879+ * Atomically decrements @v by 1.
15880+ */
15881+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15882+{
15883+ asm volatile(LOCK_PREFIX "decq %0\n"
15884 : "=m" (v->counter)
15885 : "m" (v->counter));
15886 }
15887@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15888 */
15889 static inline int atomic64_dec_and_test(atomic64_t *v)
15890 {
15891- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15892+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15893 }
15894
15895 /**
15896@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15897 */
15898 static inline int atomic64_inc_and_test(atomic64_t *v)
15899 {
15900- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15901+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15902 }
15903
15904 /**
15905@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15906 */
15907 static inline int atomic64_add_negative(long i, atomic64_t *v)
15908 {
15909- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15910+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15911 }
15912
15913 /**
15914@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15915 */
15916 static inline long atomic64_add_return(long i, atomic64_t *v)
15917 {
15918+ return i + xadd_check_overflow(&v->counter, i);
15919+}
15920+
15921+/**
15922+ * atomic64_add_return_unchecked - add and return
15923+ * @i: integer value to add
15924+ * @v: pointer to type atomic64_unchecked_t
15925+ *
15926+ * Atomically adds @i to @v and returns @i + @v
15927+ */
15928+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15929+{
15930 return i + xadd(&v->counter, i);
15931 }
15932
15933@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15934 }
15935
15936 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15937+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15938+{
15939+ return atomic64_add_return_unchecked(1, v);
15940+}
15941 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15942
15943 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15944@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15945 return cmpxchg(&v->counter, old, new);
15946 }
15947
15948+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15949+{
15950+ return cmpxchg(&v->counter, old, new);
15951+}
15952+
15953 static inline long atomic64_xchg(atomic64_t *v, long new)
15954 {
15955 return xchg(&v->counter, new);
15956@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15957 */
15958 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15959 {
15960- long c, old;
15961+ long c, old, new;
15962 c = atomic64_read(v);
15963 for (;;) {
15964- if (unlikely(c == (u)))
15965+ if (unlikely(c == u))
15966 break;
15967- old = atomic64_cmpxchg((v), c, c + (a));
15968+
15969+ asm volatile("add %2,%0\n"
15970+
15971+#ifdef CONFIG_PAX_REFCOUNT
15972+ "jno 0f\n"
15973+ "sub %2,%0\n"
15974+ "int $4\n0:\n"
15975+ _ASM_EXTABLE(0b, 0b)
15976+#endif
15977+
15978+ : "=r" (new)
15979+ : "0" (c), "ir" (a));
15980+
15981+ old = atomic64_cmpxchg(v, c, new);
15982 if (likely(old == c))
15983 break;
15984 c = old;
15985 }
15986- return c != (u);
15987+ return c != u;
15988 }
15989
15990 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
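
The atomic64_add_unless() rewrite above keeps the classic read/compare/cmpxchg retry loop but routes the addition through an overflow-checked asm fragment. A non-kernel sketch of that loop, assuming __builtin_add_overflow, with abort() again standing in for the int $4 trap:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Read the counter, bail out if it already equals u, otherwise try to
     * install c + a with a compare-and-swap; retry on contention.  Under
     * CONFIG_PAX_REFCOUNT the add is checked before the cmpxchg. */
    static bool add_unless(long *v, long a, long u)
    {
        long c = __atomic_load_n(v, __ATOMIC_RELAXED);
        for (;;) {
            long sum;
            if (c == u)
                return false;
            if (__builtin_add_overflow(c, a, &sum))
                abort();        /* models the PAX_REFCOUNT trap */
            /* on failure, c is reloaded with the current value and we retry */
            if (__atomic_compare_exchange_n(v, &c, sum, 0,
                                            __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                return true;    /* matches the final "return c != u" above */
        }
    }

    int main(void)
    {
        long v = 5;
        bool done = add_unless(&v, 1, 5);   /* equals u: no increment */
        printf("%d %ld\n", done, v);
        done = add_unless(&v, 1, 9);        /* differs from u: increments */
        printf("%d %ld\n", done, v);
        return 0;
    }
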
15991diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
15992index 2ab1eb3..1e8cc5d 100644
15993--- a/arch/x86/include/asm/barrier.h
15994+++ b/arch/x86/include/asm/barrier.h
15995@@ -57,7 +57,7 @@
15996 do { \
15997 compiletime_assert_atomic_type(*p); \
15998 smp_mb(); \
15999- ACCESS_ONCE(*p) = (v); \
16000+ ACCESS_ONCE_RW(*p) = (v); \
16001 } while (0)
16002
16003 #define smp_load_acquire(p) \
16004@@ -74,7 +74,7 @@ do { \
16005 do { \
16006 compiletime_assert_atomic_type(*p); \
16007 barrier(); \
16008- ACCESS_ONCE(*p) = (v); \
16009+ ACCESS_ONCE_RW(*p) = (v); \
16010 } while (0)
16011
16012 #define smp_load_acquire(p) \
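
smp_store_release() above now stores through ACCESS_ONCE_RW() instead of ACCESS_ONCE(). The distinction only exists under grsecurity, where ACCESS_ONCE() yields a const-qualified volatile view (so stray writes through it fail to compile once the constify plugin is in play) and the _RW form stays writable for intentional stores. A minimal sketch, assuming definitions along those lines:

    #include <stdio.h>

    /* Assumed shapes of the two macros under this patch; illustrative. */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    int main(void)
    {
        int flag = 0;
        ACCESS_ONCE_RW(flag) = 1;            /* intentional store: compiles */
        printf("%d\n", ACCESS_ONCE(flag));   /* read through the const view */
        /* ACCESS_ONCE(flag) = 2; */         /* would be a compile error */
        return 0;
    }
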
16013diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16014index cfe3b95..d01b118 100644
16015--- a/arch/x86/include/asm/bitops.h
16016+++ b/arch/x86/include/asm/bitops.h
16017@@ -50,7 +50,7 @@
16018 * a mask operation on a byte.
16019 */
16020 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16021-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16022+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16023 #define CONST_MASK(nr) (1 << ((nr) & 7))
16024
16025 /**
16026@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16027 */
16028 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16029 {
16030- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16031+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16032 }
16033
16034 /**
16035@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16036 */
16037 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16038 {
16039- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16040+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16041 }
16042
16043 /**
16044@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16045 */
16046 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16047 {
16048- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16049+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16050 }
16051
16052 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16053@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16054 *
16055 * Undefined if no bit exists, so code should check against 0 first.
16056 */
16057-static inline unsigned long __ffs(unsigned long word)
16058+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16059 {
16060 asm("rep; bsf %1,%0"
16061 : "=r" (word)
16062@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16063 *
16064 * Undefined if no zero exists, so code should check against ~0UL first.
16065 */
16066-static inline unsigned long ffz(unsigned long word)
16067+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16068 {
16069 asm("rep; bsf %1,%0"
16070 : "=r" (word)
16071@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16072 *
16073 * Undefined if no set bit exists, so code should check against 0 first.
16074 */
16075-static inline unsigned long __fls(unsigned long word)
16076+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16077 {
16078 asm("bsr %1,%0"
16079 : "=r" (word)
16080@@ -434,7 +434,7 @@ static inline int ffs(int x)
16081 * set bit if value is nonzero. The last (most significant) bit is
16082 * at position 32.
16083 */
16084-static inline int fls(int x)
16085+static inline int __intentional_overflow(-1) fls(int x)
16086 {
16087 int r;
16088
16089@@ -476,7 +476,7 @@ static inline int fls(int x)
16090 * at position 64.
16091 */
16092 #ifdef CONFIG_X86_64
16093-static __always_inline int fls64(__u64 x)
16094+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16095 {
16096 int bitpos = -1;
16097 /*
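
The __intentional_overflow(-1) annotations sprinkled over __ffs/ffz/__fls/fls/fls64 above only tell the PaX size_overflow GCC plugin not to instrument these helpers; they change no code generation. For orientation, user-space analogs of two of them (the __builtin_ctzl/__builtin_clz intrinsics compile to the same bsf/bsr family the asm uses on x86):

    #include <stdio.h>

    static unsigned long my_ffs(unsigned long word)  /* undefined for 0 */
    {
        return (unsigned long)__builtin_ctzl(word);  /* ~ "rep; bsf" */
    }

    static int my_fls(int x)  /* 0 for 0, 1-based position of the MSB otherwise */
    {
        return x ? 32 - __builtin_clz((unsigned int)x) : 0;  /* ~ "bsr" */
    }

    int main(void)
    {
        printf("__ffs(0x18) = %lu\n", my_ffs(0x18));  /* 3 */
        printf("fls(0x18)   = %d\n",  my_fls(0x18));  /* 5 */
        return 0;
    }
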
16098diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16099index 4fa687a..60f2d39 100644
16100--- a/arch/x86/include/asm/boot.h
16101+++ b/arch/x86/include/asm/boot.h
16102@@ -6,10 +6,15 @@
16103 #include <uapi/asm/boot.h>
16104
16105 /* Physical address where kernel should be loaded. */
16106-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16107+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16108 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16109 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16110
16111+#ifndef __ASSEMBLY__
16112+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16113+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16114+#endif
16115+
16116 /* Minimum kernel alignment, as a power of two */
16117 #ifdef CONFIG_X86_64
16118 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16119diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16120index 48f99f1..d78ebf9 100644
16121--- a/arch/x86/include/asm/cache.h
16122+++ b/arch/x86/include/asm/cache.h
16123@@ -5,12 +5,13 @@
16124
16125 /* L1 cache line size */
16126 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16127-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16128+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16129
16130 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16131+#define __read_only __attribute__((__section__(".data..read_only")))
16132
16133 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16134-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16135+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16136
16137 #ifdef CONFIG_X86_VSMP
16138 #ifdef CONFIG_SMP
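
The new __read_only define above simply drops a variable into a dedicated .data..read_only section; the actual write protection comes from the patch's linker-script and KERNEXEC changes elsewhere, which map that section read-only after init. In user space only the section placement itself can be demonstrated:

    #include <stdio.h>

    /* Same attribute as the macro above; in a plain ELF executable the
     * section is created but not write-protected, so this only shows the
     * placement mechanism, not the enforcement. */
    #define __read_only __attribute__((__section__(".data..read_only")))

    static int important_setting __read_only = 42;

    int main(void)
    {
        printf("%d\n", important_setting);
        return 0;
    }
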
16139diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16140index 1f1297b..72b8439 100644
16141--- a/arch/x86/include/asm/calling.h
16142+++ b/arch/x86/include/asm/calling.h
16143@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16144 #define RSP 152
16145 #define SS 160
16146
16147-#define ARGOFFSET R11
16148+#define ARGOFFSET R15
16149
16150 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16151- subq $9*8+\addskip, %rsp
16152- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16153- movq_cfi rdi, 8*8
16154- movq_cfi rsi, 7*8
16155- movq_cfi rdx, 6*8
16156+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16157+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16158+ movq_cfi rdi, RDI
16159+ movq_cfi rsi, RSI
16160+ movq_cfi rdx, RDX
16161
16162 .if \save_rcx
16163- movq_cfi rcx, 5*8
16164+ movq_cfi rcx, RCX
16165 .endif
16166
16167 .if \rax_enosys
16168- movq $-ENOSYS, 4*8(%rsp)
16169+ movq $-ENOSYS, RAX(%rsp)
16170 .else
16171- movq_cfi rax, 4*8
16172+ movq_cfi rax, RAX
16173 .endif
16174
16175 .if \save_r891011
16176- movq_cfi r8, 3*8
16177- movq_cfi r9, 2*8
16178- movq_cfi r10, 1*8
16179- movq_cfi r11, 0*8
16180+ movq_cfi r8, R8
16181+ movq_cfi r9, R9
16182+ movq_cfi r10, R10
16183+ movq_cfi r11, R11
16184 .endif
16185
16186+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16187+ movq_cfi r12, R12
16188+#endif
16189+
16190 .endm
16191
16192-#define ARG_SKIP (9*8)
16193+#define ARG_SKIP ORIG_RAX
16194
16195 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16196 rstor_r8910=1, rstor_rdx=1
16197+
16198+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16199+ movq_cfi_restore R12, r12
16200+#endif
16201+
16202 .if \rstor_r11
16203- movq_cfi_restore 0*8, r11
16204+ movq_cfi_restore R11, r11
16205 .endif
16206
16207 .if \rstor_r8910
16208- movq_cfi_restore 1*8, r10
16209- movq_cfi_restore 2*8, r9
16210- movq_cfi_restore 3*8, r8
16211+ movq_cfi_restore R10, r10
16212+ movq_cfi_restore R9, r9
16213+ movq_cfi_restore R8, r8
16214 .endif
16215
16216 .if \rstor_rax
16217- movq_cfi_restore 4*8, rax
16218+ movq_cfi_restore RAX, rax
16219 .endif
16220
16221 .if \rstor_rcx
16222- movq_cfi_restore 5*8, rcx
16223+ movq_cfi_restore RCX, rcx
16224 .endif
16225
16226 .if \rstor_rdx
16227- movq_cfi_restore 6*8, rdx
16228+ movq_cfi_restore RDX, rdx
16229 .endif
16230
16231- movq_cfi_restore 7*8, rsi
16232- movq_cfi_restore 8*8, rdi
16233+ movq_cfi_restore RSI, rsi
16234+ movq_cfi_restore RDI, rdi
16235
16236- .if ARG_SKIP+\addskip > 0
16237- addq $ARG_SKIP+\addskip, %rsp
16238- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16239+ .if ORIG_RAX+\addskip > 0
16240+ addq $ORIG_RAX+\addskip, %rsp
16241+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16242 .endif
16243 .endm
16244
16245- .macro LOAD_ARGS offset, skiprax=0
16246- movq \offset(%rsp), %r11
16247- movq \offset+8(%rsp), %r10
16248- movq \offset+16(%rsp), %r9
16249- movq \offset+24(%rsp), %r8
16250- movq \offset+40(%rsp), %rcx
16251- movq \offset+48(%rsp), %rdx
16252- movq \offset+56(%rsp), %rsi
16253- movq \offset+64(%rsp), %rdi
16254+ .macro LOAD_ARGS skiprax=0
16255+ movq R11(%rsp), %r11
16256+ movq R10(%rsp), %r10
16257+ movq R9(%rsp), %r9
16258+ movq R8(%rsp), %r8
16259+ movq RCX(%rsp), %rcx
16260+ movq RDX(%rsp), %rdx
16261+ movq RSI(%rsp), %rsi
16262+ movq RDI(%rsp), %rdi
16263 .if \skiprax
16264 .else
16265- movq \offset+72(%rsp), %rax
16266+ movq ORIG_RAX(%rsp), %rax
16267 .endif
16268 .endm
16269
16270-#define REST_SKIP (6*8)
16271-
16272 .macro SAVE_REST
16273- subq $REST_SKIP, %rsp
16274- CFI_ADJUST_CFA_OFFSET REST_SKIP
16275- movq_cfi rbx, 5*8
16276- movq_cfi rbp, 4*8
16277- movq_cfi r12, 3*8
16278- movq_cfi r13, 2*8
16279- movq_cfi r14, 1*8
16280- movq_cfi r15, 0*8
16281+ movq_cfi rbx, RBX
16282+ movq_cfi rbp, RBP
16283+
16284+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16285+ movq_cfi r12, R12
16286+#endif
16287+
16288+ movq_cfi r13, R13
16289+ movq_cfi r14, R14
16290+ movq_cfi r15, R15
16291 .endm
16292
16293 .macro RESTORE_REST
16294- movq_cfi_restore 0*8, r15
16295- movq_cfi_restore 1*8, r14
16296- movq_cfi_restore 2*8, r13
16297- movq_cfi_restore 3*8, r12
16298- movq_cfi_restore 4*8, rbp
16299- movq_cfi_restore 5*8, rbx
16300- addq $REST_SKIP, %rsp
16301- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16302+ movq_cfi_restore R15, r15
16303+ movq_cfi_restore R14, r14
16304+ movq_cfi_restore R13, r13
16305+
16306+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16307+ movq_cfi_restore R12, r12
16308+#endif
16309+
16310+ movq_cfi_restore RBP, rbp
16311+ movq_cfi_restore RBX, rbx
16312 .endm
16313
16314 .macro SAVE_ALL
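
The calling.h rewrite replaces the magic slot numbers (0*8, 1*8, ...) in SAVE_ARGS/RESTORE_ARGS with the named pt_regs offsets (R11, R10, ..., plus R12 handling for the KERNEXEC OR method), so the frame layout is defined once and save and restore cannot silently drift apart. A loose C analog of that design choice, deriving offsets from a hypothetical miniature frame with offsetof instead of hard-coding them:

    #include <stddef.h>
    #include <stdio.h>

    struct frame {  /* hypothetical miniature register frame, not pt_regs */
        unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
    };

    int main(void)
    {
        /* reordering struct frame automatically updates every user */
        printf("R11 offset = %zu\n", offsetof(struct frame, r11));
        printf("RDI offset = %zu\n", offsetof(struct frame, rdi));
        return 0;
    }
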
16315diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16316index f50de69..2b0a458 100644
16317--- a/arch/x86/include/asm/checksum_32.h
16318+++ b/arch/x86/include/asm/checksum_32.h
16319@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16320 int len, __wsum sum,
16321 int *src_err_ptr, int *dst_err_ptr);
16322
16323+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16324+ int len, __wsum sum,
16325+ int *src_err_ptr, int *dst_err_ptr);
16326+
16327+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16328+ int len, __wsum sum,
16329+ int *src_err_ptr, int *dst_err_ptr);
16330+
16331 /*
16332 * Note: when you get a NULL pointer exception here this means someone
16333 * passed in an incorrect kernel address to one of these functions.
16334@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16335
16336 might_sleep();
16337 stac();
16338- ret = csum_partial_copy_generic((__force void *)src, dst,
16339+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16340 len, sum, err_ptr, NULL);
16341 clac();
16342
16343@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16344 might_sleep();
16345 if (access_ok(VERIFY_WRITE, dst, len)) {
16346 stac();
16347- ret = csum_partial_copy_generic(src, (__force void *)dst,
16348+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16349 len, sum, NULL, err_ptr);
16350 clac();
16351 return ret;
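
csum_partial_copy_from_user() and csum_and_copy_to_user() above are redirected to new _from_user/_to_user asm entry points so the UDEREF logic inside them can treat exactly one side of the copy as a userland pointer; the checksum arithmetic itself is untouched. A rough, non-optimized C model of checksumming-while-copying (an illustration of the idea, not the kernel routine):

    #include <stdint.h>
    #include <stdio.h>

    /* Copy src to dst while folding the bytes into a ones'-complement sum
     * of little-endian 16-bit words, carries folded back at the end. */
    static uint32_t csum_copy(const uint8_t *src, uint8_t *dst, int len,
                              uint32_t sum)
    {
        int i;
        for (i = 0; i + 1 < len; i += 2) {
            dst[i] = src[i];
            dst[i + 1] = src[i + 1];
            sum += (uint32_t)src[i] | ((uint32_t)src[i + 1] << 8);
        }
        if (i < len) {              /* trailing odd byte */
            dst[i] = src[i];
            sum += src[i];
        }
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);  /* fold carries */
        return sum;
    }

    int main(void)
    {
        uint8_t in[] = "example payload", out[sizeof(in)];
        printf("csum = %#x\n", csum_copy(in, out, sizeof(in), 0));
        printf("copy = %s\n", out);
        return 0;
    }
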
16352diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16353index 99c105d7..2f667ac 100644
16354--- a/arch/x86/include/asm/cmpxchg.h
16355+++ b/arch/x86/include/asm/cmpxchg.h
16356@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16357 __compiletime_error("Bad argument size for cmpxchg");
16358 extern void __xadd_wrong_size(void)
16359 __compiletime_error("Bad argument size for xadd");
16360+extern void __xadd_check_overflow_wrong_size(void)
16361+ __compiletime_error("Bad argument size for xadd_check_overflow");
16362 extern void __add_wrong_size(void)
16363 __compiletime_error("Bad argument size for add");
16364+extern void __add_check_overflow_wrong_size(void)
16365+ __compiletime_error("Bad argument size for add_check_overflow");
16366
16367 /*
16368 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16369@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16370 __ret; \
16371 })
16372
16373+#ifdef CONFIG_PAX_REFCOUNT
16374+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16375+ ({ \
16376+ __typeof__ (*(ptr)) __ret = (arg); \
16377+ switch (sizeof(*(ptr))) { \
16378+ case __X86_CASE_L: \
16379+ asm volatile (lock #op "l %0, %1\n" \
16380+ "jno 0f\n" \
16381+ "mov %0,%1\n" \
16382+ "int $4\n0:\n" \
16383+ _ASM_EXTABLE(0b, 0b) \
16384+ : "+r" (__ret), "+m" (*(ptr)) \
16385+ : : "memory", "cc"); \
16386+ break; \
16387+ case __X86_CASE_Q: \
16388+ asm volatile (lock #op "q %q0, %1\n" \
16389+ "jno 0f\n" \
16390+ "mov %0,%1\n" \
16391+ "int $4\n0:\n" \
16392+ _ASM_EXTABLE(0b, 0b) \
16393+ : "+r" (__ret), "+m" (*(ptr)) \
16394+ : : "memory", "cc"); \
16395+ break; \
16396+ default: \
16397+ __ ## op ## _check_overflow_wrong_size(); \
16398+ } \
16399+ __ret; \
16400+ })
16401+#else
16402+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16403+#endif
16404+
16405 /*
16406 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16407 * Since this is generally used to protect other memory information, we
16408@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16409 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16410 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16411
16412+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16413+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16414+
16415 #define __add(ptr, inc, lock) \
16416 ({ \
16417 __typeof__ (*(ptr)) __ret = (inc); \
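
xadd_check_overflow() defined above is the workhorse behind the checked atomic_add_return() family: a lock-prefixed xadd whose flags are tested with jno; on signed overflow the "mov %0,%1" writes the old value back and int $4 traps. A user-space sketch, assuming __atomic_fetch_add as a stand-in for xadd and abort() for the trap (note the undo here is not atomic, unlike the asm):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int xadd_checked(int *ptr, int inc)
    {
        int old = __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
        int sum;
        if (__builtin_add_overflow(old, inc, &sum)) {
            *ptr = old;     /* models the "mov %0,%1" undo */
            abort();        /* models the "int $4" trap */
        }
        return old;         /* xadd returns the pre-add value */
    }

    int main(void)
    {
        int v = INT_MAX - 2;
        printf("old = %d, now = %d\n", xadd_checked(&v, 1), v);
        return 0;
    }
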
16418diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16419index 59c6c40..5e0b22c 100644
16420--- a/arch/x86/include/asm/compat.h
16421+++ b/arch/x86/include/asm/compat.h
16422@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16423 typedef u32 compat_uint_t;
16424 typedef u32 compat_ulong_t;
16425 typedef u64 __attribute__((aligned(4))) compat_u64;
16426-typedef u32 compat_uptr_t;
16427+typedef u32 __user compat_uptr_t;
16428
16429 struct compat_timespec {
16430 compat_time_t tv_sec;
16431diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16432index 90a5485..43b6211 100644
16433--- a/arch/x86/include/asm/cpufeature.h
16434+++ b/arch/x86/include/asm/cpufeature.h
16435@@ -213,7 +213,7 @@
16436 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16437 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16438 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16439-
16440+#define X86_FEATURE_STRONGUDEREF ( 8*32+31) /* PaX PCID based strong UDEREF */
16441
16442 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16443 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16444@@ -221,7 +221,7 @@
16445 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16446 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16447 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16448-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16449+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16450 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16451 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16452 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16453@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16454 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16455 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16456 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16457+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16458
16459 #if __GNUC__ >= 4
16460 extern void warn_pre_alternatives(void);
16461@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16462
16463 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16464 t_warn:
16465- warn_pre_alternatives();
16466+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16467+ warn_pre_alternatives();
16468 return false;
16469 #endif
16470
16471@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16472 ".section .discard,\"aw\",@progbits\n"
16473 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16474 ".previous\n"
16475- ".section .altinstr_replacement,\"ax\"\n"
16476+ ".section .altinstr_replacement,\"a\"\n"
16477 "3: movb $1,%0\n"
16478 "4:\n"
16479 ".previous\n"
16480@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16481 " .byte 2b - 1b\n" /* src len */
16482 " .byte 4f - 3f\n" /* repl len */
16483 ".previous\n"
16484- ".section .altinstr_replacement,\"ax\"\n"
16485+ ".section .altinstr_replacement,\"a\"\n"
16486 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16487 "4:\n"
16488 ".previous\n"
16489@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16490 ".section .discard,\"aw\",@progbits\n"
16491 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16492 ".previous\n"
16493- ".section .altinstr_replacement,\"ax\"\n"
16494+ ".section .altinstr_replacement,\"a\"\n"
16495 "3: movb $0,%0\n"
16496 "4:\n"
16497 ".previous\n"
16498@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16499 ".section .discard,\"aw\",@progbits\n"
16500 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16501 ".previous\n"
16502- ".section .altinstr_replacement,\"ax\"\n"
16503+ ".section .altinstr_replacement,\"a\"\n"
16504 "5: movb $1,%0\n"
16505 "6:\n"
16506 ".previous\n"
16507diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16508index a94b82e..59ecefa 100644
16509--- a/arch/x86/include/asm/desc.h
16510+++ b/arch/x86/include/asm/desc.h
16511@@ -4,6 +4,7 @@
16512 #include <asm/desc_defs.h>
16513 #include <asm/ldt.h>
16514 #include <asm/mmu.h>
16515+#include <asm/pgtable.h>
16516
16517 #include <linux/smp.h>
16518 #include <linux/percpu.h>
16519@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16520
16521 desc->type = (info->read_exec_only ^ 1) << 1;
16522 desc->type |= info->contents << 2;
16523+ desc->type |= info->seg_not_present ^ 1;
16524
16525 desc->s = 1;
16526 desc->dpl = 0x3;
16527@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16528 }
16529
16530 extern struct desc_ptr idt_descr;
16531-extern gate_desc idt_table[];
16532-extern struct desc_ptr debug_idt_descr;
16533-extern gate_desc debug_idt_table[];
16534-
16535-struct gdt_page {
16536- struct desc_struct gdt[GDT_ENTRIES];
16537-} __attribute__((aligned(PAGE_SIZE)));
16538-
16539-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16540+extern gate_desc idt_table[IDT_ENTRIES];
16541+extern const struct desc_ptr debug_idt_descr;
16542+extern gate_desc debug_idt_table[IDT_ENTRIES];
16543
16544+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16545 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16546 {
16547- return per_cpu(gdt_page, cpu).gdt;
16548+ return cpu_gdt_table[cpu];
16549 }
16550
16551 #ifdef CONFIG_X86_64
16552@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16553 unsigned long base, unsigned dpl, unsigned flags,
16554 unsigned short seg)
16555 {
16556- gate->a = (seg << 16) | (base & 0xffff);
16557- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16558+ gate->gate.offset_low = base;
16559+ gate->gate.seg = seg;
16560+ gate->gate.reserved = 0;
16561+ gate->gate.type = type;
16562+ gate->gate.s = 0;
16563+ gate->gate.dpl = dpl;
16564+ gate->gate.p = 1;
16565+ gate->gate.offset_high = base >> 16;
16566 }
16567
16568 #endif
16569@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16570
16571 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16572 {
16573+ pax_open_kernel();
16574 memcpy(&idt[entry], gate, sizeof(*gate));
16575+ pax_close_kernel();
16576 }
16577
16578 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16579 {
16580+ pax_open_kernel();
16581 memcpy(&ldt[entry], desc, 8);
16582+ pax_close_kernel();
16583 }
16584
16585 static inline void
16586@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16587 default: size = sizeof(*gdt); break;
16588 }
16589
16590+ pax_open_kernel();
16591 memcpy(&gdt[entry], desc, size);
16592+ pax_close_kernel();
16593 }
16594
16595 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16596@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16597
16598 static inline void native_load_tr_desc(void)
16599 {
16600+ pax_open_kernel();
16601 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16602+ pax_close_kernel();
16603 }
16604
16605 static inline void native_load_gdt(const struct desc_ptr *dtr)
16606@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16607 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16608 unsigned int i;
16609
16610+ pax_open_kernel();
16611 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16612 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16613+ pax_close_kernel();
16614 }
16615
16616 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16617@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16618 preempt_enable();
16619 }
16620
16621-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16622+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16623 {
16624 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16625 }
16626@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16627 }
16628
16629 #ifdef CONFIG_X86_64
16630-static inline void set_nmi_gate(int gate, void *addr)
16631+static inline void set_nmi_gate(int gate, const void *addr)
16632 {
16633 gate_desc s;
16634
16635@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16636 #endif
16637
16638 #ifdef CONFIG_TRACING
16639-extern struct desc_ptr trace_idt_descr;
16640-extern gate_desc trace_idt_table[];
16641+extern const struct desc_ptr trace_idt_descr;
16642+extern gate_desc trace_idt_table[IDT_ENTRIES];
16643 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16644 {
16645 write_idt_entry(trace_idt_table, entry, gate);
16646 }
16647
16648-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16649+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16650 unsigned dpl, unsigned ist, unsigned seg)
16651 {
16652 gate_desc s;
16653@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16654 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16655 #endif
16656
16657-static inline void _set_gate(int gate, unsigned type, void *addr,
16658+static inline void _set_gate(int gate, unsigned type, const void *addr,
16659 unsigned dpl, unsigned ist, unsigned seg)
16660 {
16661 gate_desc s;
16662@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16663 #define set_intr_gate(n, addr) \
16664 do { \
16665 BUG_ON((unsigned)n > 0xFF); \
16666- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16667+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16668 __KERNEL_CS); \
16669- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16670+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16671 0, 0, __KERNEL_CS); \
16672 } while (0)
16673
16674@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16675 /*
16676 * This routine sets up an interrupt gate at directory privilege level 3.
16677 */
16678-static inline void set_system_intr_gate(unsigned int n, void *addr)
16679+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16680 {
16681 BUG_ON((unsigned)n > 0xFF);
16682 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16683 }
16684
16685-static inline void set_system_trap_gate(unsigned int n, void *addr)
16686+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16687 {
16688 BUG_ON((unsigned)n > 0xFF);
16689 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16690 }
16691
16692-static inline void set_trap_gate(unsigned int n, void *addr)
16693+static inline void set_trap_gate(unsigned int n, const void *addr)
16694 {
16695 BUG_ON((unsigned)n > 0xFF);
16696 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16697@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16698 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16699 {
16700 BUG_ON((unsigned)n > 0xFF);
16701- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16702+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16703 }
16704
16705-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16706+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16707 {
16708 BUG_ON((unsigned)n > 0xFF);
16709 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16710 }
16711
16712-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16713+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16717@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16718 else
16719 load_idt((const struct desc_ptr *)&idt_descr);
16720 }
16721+
16722+#ifdef CONFIG_X86_32
16723+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16724+{
16725+ struct desc_struct d;
16726+
16727+ if (likely(limit))
16728+ limit = (limit - 1UL) >> PAGE_SHIFT;
16729+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16730+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16731+}
16732+#endif
16733+
16734 #endif /* _ASM_X86_DESC_H */
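
The set_user_cs() helper added above lets PaX reload the default user code segment with a per-task base and limit, which is how 32-bit PAGEEXEC/SEGMEXEC fence off non-executable regions without NX hardware. A minimal userspace sketch of the architectural descriptor packing it depends on; the helper name mirrors the kernel's pack_descriptor(), but this is a from-scratch reconstruction of the layout, and the test values (base 0, 3 GB limit, type 0xFB, flags 0xC) are illustrative:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_descriptor(uint32_t base, uint32_t limit,
                                uint8_t type, uint8_t flags)
{
    uint64_t d;

    d  = (uint64_t)(limit & 0x0000ffffu);        /* limit 15:0            */
    d |= (uint64_t)(base  & 0x00ffffffu) << 16;  /* base 23:0             */
    d |= (uint64_t)type << 40;                   /* type, S, DPL, P       */
    d |= (uint64_t)(limit & 0x000f0000u) << 32;  /* limit 19:16 -> 51:48  */
    d |= (uint64_t)(flags & 0x0fu) << 52;        /* AVL, L, D/B, G        */
    d |= (uint64_t)(base  & 0xff000000u) << 32;  /* base 31:24 -> 63:56   */
    return d;
}

int main(void)
{
    /* 0xFB: present, DPL 3, code, read/exec, accessed; 0xC: 32-bit, 4K
     * granular.  set_user_cs() turns a byte limit into pages first:
     * (limit - 1) >> PAGE_SHIFT. */
    uint32_t limit = (0xc0000000u - 1) >> 12;    /* 3 GB, in pages */

    printf("user CS descriptor: %016llx\n",
           (unsigned long long)pack_descriptor(0, limit, 0xFB, 0xC));
    return 0;
}
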
16735diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16736index 278441f..b95a174 100644
16737--- a/arch/x86/include/asm/desc_defs.h
16738+++ b/arch/x86/include/asm/desc_defs.h
16739@@ -31,6 +31,12 @@ struct desc_struct {
16740 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16741 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16742 };
16743+ struct {
16744+ u16 offset_low;
16745+ u16 seg;
16746+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16747+ unsigned offset_high: 16;
16748+ } gate;
16749 };
16750 } __attribute__((packed));
16751
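
The new gate member overlays the same eight descriptor bytes as an interrupt/trap gate, so descriptor-walking code can read the split handler offset by name. A quick layout check, assuming a GCC-style compiler; field names and widths are copied from the hunk:

#include <stdint.h>
#include <stdio.h>

struct gate32 {
    uint16_t offset_low;                 /* handler offset, bits 15:0  */
    uint16_t seg;                        /* code segment selector      */
    unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
    unsigned offset_high: 16;            /* handler offset, bits 31:16 */
} __attribute__((packed));

int main(void)
{
    /* Must alias the 8-byte desc_struct exactly. */
    printf("sizeof(struct gate32) = %zu\n", sizeof(struct gate32));
    return 0;
}
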
16752diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16753index ced283a..ffe04cc 100644
16754--- a/arch/x86/include/asm/div64.h
16755+++ b/arch/x86/include/asm/div64.h
16756@@ -39,7 +39,7 @@
16757 __mod; \
16758 })
16759
16760-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16761+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16762 {
16763 union {
16764 u64 v64;
16765diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16766index ca3347a..1a5082a 100644
16767--- a/arch/x86/include/asm/elf.h
16768+++ b/arch/x86/include/asm/elf.h
16769@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16770
16771 #include <asm/vdso.h>
16772
16773-#ifdef CONFIG_X86_64
16774-extern unsigned int vdso64_enabled;
16775-#endif
16776 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16777 extern unsigned int vdso32_enabled;
16778 #endif
16779@@ -249,7 +246,25 @@ extern int force_personality32;
16780 the loader. We need to make sure that it is out of the way of the program
16781 that it will "exec", and that there is sufficient room for the brk. */
16782
16783+#ifdef CONFIG_PAX_SEGMEXEC
16784+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16785+#else
16786 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16787+#endif
16788+
16789+#ifdef CONFIG_PAX_ASLR
16790+#ifdef CONFIG_X86_32
16791+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16792+
16793+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16794+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16795+#else
16796+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16797+
16798+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16799+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16800+#endif
16801+#endif
16802
16803 /* This yields a mask that user programs can use to figure out what
16804 instruction set this CPU supports. This could be done in user space,
16805@@ -298,17 +313,13 @@ do { \
16806
16807 #define ARCH_DLINFO \
16808 do { \
16809- if (vdso64_enabled) \
16810- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16811- (unsigned long __force)current->mm->context.vdso); \
16812+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16813 } while (0)
16814
16815 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16816 #define ARCH_DLINFO_X32 \
16817 do { \
16818- if (vdso64_enabled) \
16819- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16820- (unsigned long __force)current->mm->context.vdso); \
16821+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16822 } while (0)
16823
16824 #define AT_SYSINFO 32
16825@@ -323,10 +334,10 @@ else \
16826
16827 #endif /* !CONFIG_X86_32 */
16828
16829-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16830+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16831
16832 #define VDSO_ENTRY \
16833- ((unsigned long)current->mm->context.vdso + \
16834+ (current->mm->context.vdso + \
16835 selected_vdso32->sym___kernel_vsyscall)
16836
16837 struct linux_binprm;
16838@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16839 int uses_interp);
16840 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16841
16842-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16843-#define arch_randomize_brk arch_randomize_brk
16844-
16845 /*
16846 * True on X86_32 or when emulating IA32 on X86_64
16847 */
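
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are bit counts of page-granular randomization; SEGMEXEC drops one bit because it halves the address space. A toy rendering of how such a delta widens into an address offset, with PAGE_SHIFT, the base, and the random value all assumed for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12

/* 'len' bits of entropy, applied at page granularity. */
static unsigned long pax_randomize(unsigned long base, unsigned long rnd,
                                   unsigned int len)
{
    return base + ((rnd & ((1UL << len) - 1)) << PAGE_SHIFT);
}

int main(void)
{
    unsigned long base = 0x10000000UL;   /* PAX_ELF_ET_DYN_BASE, 32-bit */
    unsigned long rnd  = 0x1a2b3cUL;     /* stand-in for a random draw  */

    printf("16-bit delta: %#lx (spread %#lx)\n",
           pax_randomize(base, rnd, 16), (1UL << 16) << PAGE_SHIFT);
    printf("15-bit delta (SEGMEXEC): %#lx\n", pax_randomize(base, rnd, 15));
    return 0;
}
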
16848diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16849index 77a99ac..39ff7f5 100644
16850--- a/arch/x86/include/asm/emergency-restart.h
16851+++ b/arch/x86/include/asm/emergency-restart.h
16852@@ -1,6 +1,6 @@
16853 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16854 #define _ASM_X86_EMERGENCY_RESTART_H
16855
16856-extern void machine_emergency_restart(void);
16857+extern void machine_emergency_restart(void) __noreturn;
16858
16859 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16860diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16861index 1c7eefe..d0e4702 100644
16862--- a/arch/x86/include/asm/floppy.h
16863+++ b/arch/x86/include/asm/floppy.h
16864@@ -229,18 +229,18 @@ static struct fd_routine_l {
16865 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16866 } fd_routine[] = {
16867 {
16868- request_dma,
16869- free_dma,
16870- get_dma_residue,
16871- dma_mem_alloc,
16872- hard_dma_setup
16873+ ._request_dma = request_dma,
16874+ ._free_dma = free_dma,
16875+ ._get_dma_residue = get_dma_residue,
16876+ ._dma_mem_alloc = dma_mem_alloc,
16877+ ._dma_setup = hard_dma_setup
16878 },
16879 {
16880- vdma_request_dma,
16881- vdma_nop,
16882- vdma_get_dma_residue,
16883- vdma_mem_alloc,
16884- vdma_dma_setup
16885+ ._request_dma = vdma_request_dma,
16886+ ._free_dma = vdma_nop,
16887+ ._get_dma_residue = vdma_get_dma_residue,
16888+ ._dma_mem_alloc = vdma_mem_alloc,
16889+ ._dma_setup = vdma_dma_setup
16890 }
16891 };
16892
16893diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16894index 72ba21a..79f3f66 100644
16895--- a/arch/x86/include/asm/fpu-internal.h
16896+++ b/arch/x86/include/asm/fpu-internal.h
16897@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16898 #define user_insn(insn, output, input...) \
16899 ({ \
16900 int err; \
16901+ pax_open_userland(); \
16902 asm volatile(ASM_STAC "\n" \
16903- "1:" #insn "\n\t" \
16904+ "1:" \
16905+ __copyuser_seg \
16906+ #insn "\n\t" \
16907 "2: " ASM_CLAC "\n" \
16908 ".section .fixup,\"ax\"\n" \
16909 "3: movl $-1,%[err]\n" \
16910@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16911 _ASM_EXTABLE(1b, 3b) \
16912 : [err] "=r" (err), output \
16913 : "0"(0), input); \
16914+ pax_close_userland(); \
16915 err; \
16916 })
16917
16918@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16919 "fnclex\n\t"
16920 "emms\n\t"
16921 "fildl %P[addr]" /* set F?P to defined value */
16922- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16923+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16924 }
16925
16926 return fpu_restore_checking(&tsk->thread.fpu);
16927diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16928index b4c1f54..e290c08 100644
16929--- a/arch/x86/include/asm/futex.h
16930+++ b/arch/x86/include/asm/futex.h
16931@@ -12,6 +12,7 @@
16932 #include <asm/smap.h>
16933
16934 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16935+ typecheck(u32 __user *, uaddr); \
16936 asm volatile("\t" ASM_STAC "\n" \
16937 "1:\t" insn "\n" \
16938 "2:\t" ASM_CLAC "\n" \
16939@@ -20,15 +21,16 @@
16940 "\tjmp\t2b\n" \
16941 "\t.previous\n" \
16942 _ASM_EXTABLE(1b, 3b) \
16943- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16944+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16945 : "i" (-EFAULT), "0" (oparg), "1" (0))
16946
16947 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16948+ typecheck(u32 __user *, uaddr); \
16949 asm volatile("\t" ASM_STAC "\n" \
16950 "1:\tmovl %2, %0\n" \
16951 "\tmovl\t%0, %3\n" \
16952 "\t" insn "\n" \
16953- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16954+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16955 "\tjnz\t1b\n" \
16956 "3:\t" ASM_CLAC "\n" \
16957 "\t.section .fixup,\"ax\"\n" \
16958@@ -38,7 +40,7 @@
16959 _ASM_EXTABLE(1b, 4b) \
16960 _ASM_EXTABLE(2b, 4b) \
16961 : "=&a" (oldval), "=&r" (ret), \
16962- "+m" (*uaddr), "=&r" (tem) \
16963+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16964 : "r" (oparg), "i" (-EFAULT), "1" (0))
16965
16966 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16967@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16968
16969 pagefault_disable();
16970
16971+ pax_open_userland();
16972 switch (op) {
16973 case FUTEX_OP_SET:
16974- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16975+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16976 break;
16977 case FUTEX_OP_ADD:
16978- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16979+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16980 uaddr, oparg);
16981 break;
16982 case FUTEX_OP_OR:
16983@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16984 default:
16985 ret = -ENOSYS;
16986 }
16987+ pax_close_userland();
16988
16989 pagefault_enable();
16990
16991diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16992index 9662290..49ca5e5 100644
16993--- a/arch/x86/include/asm/hw_irq.h
16994+++ b/arch/x86/include/asm/hw_irq.h
16995@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
16996 #endif /* CONFIG_X86_LOCAL_APIC */
16997
16998 /* Statistics */
16999-extern atomic_t irq_err_count;
17000-extern atomic_t irq_mis_count;
17001+extern atomic_unchecked_t irq_err_count;
17002+extern atomic_unchecked_t irq_mis_count;
17003
17004 /* EISA */
17005 extern void eisa_set_level_irq(unsigned int irq);
17006diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17007index ccffa53..3c90c87 100644
17008--- a/arch/x86/include/asm/i8259.h
17009+++ b/arch/x86/include/asm/i8259.h
17010@@ -62,7 +62,7 @@ struct legacy_pic {
17011 void (*init)(int auto_eoi);
17012 int (*irq_pending)(unsigned int irq);
17013 void (*make_irq)(unsigned int irq);
17014-};
17015+} __do_const;
17016
17017 extern struct legacy_pic *legacy_pic;
17018 extern struct legacy_pic null_legacy_pic;
17019diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17020index 34a5b93..27e40a6 100644
17021--- a/arch/x86/include/asm/io.h
17022+++ b/arch/x86/include/asm/io.h
17023@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17024 "m" (*(volatile type __force *)addr) barrier); }
17025
17026 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17027-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17028-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17029+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17030+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17031
17032 build_mmio_read(__readb, "b", unsigned char, "=q", )
17033-build_mmio_read(__readw, "w", unsigned short, "=r", )
17034-build_mmio_read(__readl, "l", unsigned int, "=r", )
17035+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17036+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17037
17038 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17039 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17040@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17041 * this function
17042 */
17043
17044-static inline phys_addr_t virt_to_phys(volatile void *address)
17045+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17046 {
17047 return __pa(address);
17048 }
17049@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17050 return ioremap_nocache(offset, size);
17051 }
17052
17053-extern void iounmap(volatile void __iomem *addr);
17054+extern void iounmap(const volatile void __iomem *addr);
17055
17056 extern void set_iounmap_nonlazy(void);
17057
17058@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17059
17060 #include <linux/vmalloc.h>
17061
17062+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17063+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17064+{
17065+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17066+}
17067+
17068+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17069+{
17070+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17071+}
17072+
17073 /*
17074 * Convert a virtual cached pointer to an uncached pointer
17075 */
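
valid_phys_addr_range() and valid_mmap_phys_addr_range() above refuse /dev/mem-style accesses that would run past the CPU's reported physical address width. The check is plain shift arithmetic; a userspace rendering with x86_phys_bits stubbed to 36, an assumed value:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned int x86_phys_bits = 36;   /* stand-in for boot_cpu_data */

static int valid_phys_addr_range(unsigned long long addr, size_t count)
{
    /* Round the end up to a page and compare in pages against 2^phys_bits. */
    return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
           (1ULL << (x86_phys_bits - PAGE_SHIFT));
}

int main(void)
{
    printf("%d\n", valid_phys_addr_range(0xFFFFF000ULL, PAGE_SIZE)); /* 1 */
    printf("%d\n", valid_phys_addr_range(1ULL << 36, PAGE_SIZE));    /* 0 */
    return 0;
}
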
17076diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17077index 0a8b519..80e7d5b 100644
17078--- a/arch/x86/include/asm/irqflags.h
17079+++ b/arch/x86/include/asm/irqflags.h
17080@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17081 sti; \
17082 sysexit
17083
17084+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17085+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17086+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17087+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17088+
17089 #else
17090 #define INTERRUPT_RETURN iret
17091 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17092diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17093index 4421b5d..8543006 100644
17094--- a/arch/x86/include/asm/kprobes.h
17095+++ b/arch/x86/include/asm/kprobes.h
17096@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17097 #define RELATIVEJUMP_SIZE 5
17098 #define RELATIVECALL_OPCODE 0xe8
17099 #define RELATIVE_ADDR_SIZE 4
17100-#define MAX_STACK_SIZE 64
17101-#define MIN_STACK_SIZE(ADDR) \
17102- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17103- THREAD_SIZE - (unsigned long)(ADDR))) \
17104- ? (MAX_STACK_SIZE) \
17105- : (((unsigned long)current_thread_info()) + \
17106- THREAD_SIZE - (unsigned long)(ADDR)))
17107+#define MAX_STACK_SIZE 64UL
17108+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17109
17110 #define flush_insn_slot(p) do { } while (0)
17111
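
The rewritten MIN_STACK_SIZE() collapses the open-coded conditional into min() and measures remaining stack against thread.sp0, the top of the kernel stack, instead of current_thread_info(), which GRKERNSEC_KSTACKOVERFLOW relocates off-stack. The same computation in isolation; sp0 and the probed addresses are invented:

#include <stdio.h>

#define MAX_STACK_SIZE 64UL
#define min(a, b) ((a) < (b) ? (a) : (b))

/* Bytes kprobes may safely copy: never more than MAX_STACK_SIZE, and
 * never past the top of the kernel stack (sp0). */
static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
    return min(MAX_STACK_SIZE, sp0 - addr);
}

int main(void)
{
    unsigned long sp0 = 0xffff880000010000UL;        /* invented stack top */

    printf("%lu\n", min_stack_size(sp0, sp0 - 200)); /* 64: capped        */
    printf("%lu\n", min_stack_size(sp0, sp0 - 24));  /* 24: near the top  */
    return 0;
}
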
17112diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17113index 4ad6560..75c7bdd 100644
17114--- a/arch/x86/include/asm/local.h
17115+++ b/arch/x86/include/asm/local.h
17116@@ -10,33 +10,97 @@ typedef struct {
17117 atomic_long_t a;
17118 } local_t;
17119
17120+typedef struct {
17121+ atomic_long_unchecked_t a;
17122+} local_unchecked_t;
17123+
17124 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17125
17126 #define local_read(l) atomic_long_read(&(l)->a)
17127+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17128 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17129+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17130
17131 static inline void local_inc(local_t *l)
17132 {
17133- asm volatile(_ASM_INC "%0"
17134+ asm volatile(_ASM_INC "%0\n"
17135+
17136+#ifdef CONFIG_PAX_REFCOUNT
17137+ "jno 0f\n"
17138+ _ASM_DEC "%0\n"
17139+ "int $4\n0:\n"
17140+ _ASM_EXTABLE(0b, 0b)
17141+#endif
17142+
17143+ : "+m" (l->a.counter));
17144+}
17145+
17146+static inline void local_inc_unchecked(local_unchecked_t *l)
17147+{
17148+ asm volatile(_ASM_INC "%0\n"
17149 : "+m" (l->a.counter));
17150 }
17151
17152 static inline void local_dec(local_t *l)
17153 {
17154- asm volatile(_ASM_DEC "%0"
17155+ asm volatile(_ASM_DEC "%0\n"
17156+
17157+#ifdef CONFIG_PAX_REFCOUNT
17158+ "jno 0f\n"
17159+ _ASM_INC "%0\n"
17160+ "int $4\n0:\n"
17161+ _ASM_EXTABLE(0b, 0b)
17162+#endif
17163+
17164+ : "+m" (l->a.counter));
17165+}
17166+
17167+static inline void local_dec_unchecked(local_unchecked_t *l)
17168+{
17169+ asm volatile(_ASM_DEC "%0\n"
17170 : "+m" (l->a.counter));
17171 }
17172
17173 static inline void local_add(long i, local_t *l)
17174 {
17175- asm volatile(_ASM_ADD "%1,%0"
17176+ asm volatile(_ASM_ADD "%1,%0\n"
17177+
17178+#ifdef CONFIG_PAX_REFCOUNT
17179+ "jno 0f\n"
17180+ _ASM_SUB "%1,%0\n"
17181+ "int $4\n0:\n"
17182+ _ASM_EXTABLE(0b, 0b)
17183+#endif
17184+
17185+ : "+m" (l->a.counter)
17186+ : "ir" (i));
17187+}
17188+
17189+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17190+{
17191+ asm volatile(_ASM_ADD "%1,%0\n"
17192 : "+m" (l->a.counter)
17193 : "ir" (i));
17194 }
17195
17196 static inline void local_sub(long i, local_t *l)
17197 {
17198- asm volatile(_ASM_SUB "%1,%0"
17199+ asm volatile(_ASM_SUB "%1,%0\n"
17200+
17201+#ifdef CONFIG_PAX_REFCOUNT
17202+ "jno 0f\n"
17203+ _ASM_ADD "%1,%0\n"
17204+ "int $4\n0:\n"
17205+ _ASM_EXTABLE(0b, 0b)
17206+#endif
17207+
17208+ : "+m" (l->a.counter)
17209+ : "ir" (i));
17210+}
17211+
17212+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17213+{
17214+ asm volatile(_ASM_SUB "%1,%0\n"
17215 : "+m" (l->a.counter)
17216 : "ir" (i));
17217 }
17218@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17219 */
17220 static inline int local_sub_and_test(long i, local_t *l)
17221 {
17222- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17223+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17224 }
17225
17226 /**
17227@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17228 */
17229 static inline int local_dec_and_test(local_t *l)
17230 {
17231- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17232+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17233 }
17234
17235 /**
17236@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17237 */
17238 static inline int local_inc_and_test(local_t *l)
17239 {
17240- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17241+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17242 }
17243
17244 /**
17245@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17246 */
17247 static inline int local_add_negative(long i, local_t *l)
17248 {
17249- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17250+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17251 }
17252
17253 /**
17254@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17255 static inline long local_add_return(long i, local_t *l)
17256 {
17257 long __i = i;
17258+ asm volatile(_ASM_XADD "%0, %1\n"
17259+
17260+#ifdef CONFIG_PAX_REFCOUNT
17261+ "jno 0f\n"
17262+ _ASM_MOV "%0,%1\n"
17263+ "int $4\n0:\n"
17264+ _ASM_EXTABLE(0b, 0b)
17265+#endif
17266+
17267+ : "+r" (i), "+m" (l->a.counter)
17268+ : : "memory");
17269+ return i + __i;
17270+}
17271+
17272+/**
17273+ * local_add_return_unchecked - add and return
17274+ * @i: integer value to add
17275+ * @l: pointer to type local_unchecked_t
17276+ *
17277+ * Atomically adds @i to @l and returns @i + @l
17278+ */
17279+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17280+{
17281+ long __i = i;
17282 asm volatile(_ASM_XADD "%0, %1;"
17283 : "+r" (i), "+m" (l->a.counter)
17284 : : "memory");
17285@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17286
17287 #define local_cmpxchg(l, o, n) \
17288 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17289+#define local_cmpxchg_unchecked(l, o, n) \
17290+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17291 /* Always has a lock prefix */
17292 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17293
17294diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17295new file mode 100644
17296index 0000000..2bfd3ba
17297--- /dev/null
17298+++ b/arch/x86/include/asm/mman.h
17299@@ -0,0 +1,15 @@
17300+#ifndef _X86_MMAN_H
17301+#define _X86_MMAN_H
17302+
17303+#include <uapi/asm/mman.h>
17304+
17305+#ifdef __KERNEL__
17306+#ifndef __ASSEMBLY__
17307+#ifdef CONFIG_X86_32
17308+#define arch_mmap_check i386_mmap_check
17309+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17310+#endif
17311+#endif
17312+#endif
17313+
17314+#endif /* X86_MMAN_H */
17315diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17316index 09b9620..923aecd 100644
17317--- a/arch/x86/include/asm/mmu.h
17318+++ b/arch/x86/include/asm/mmu.h
17319@@ -9,7 +9,7 @@
17320 * we put the segment information here.
17321 */
17322 typedef struct {
17323- void *ldt;
17324+ struct desc_struct *ldt;
17325 int size;
17326
17327 #ifdef CONFIG_X86_64
17328@@ -18,7 +18,19 @@ typedef struct {
17329 #endif
17330
17331 struct mutex lock;
17332- void __user *vdso;
17333+ unsigned long vdso;
17334+
17335+#ifdef CONFIG_X86_32
17336+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17337+ unsigned long user_cs_base;
17338+ unsigned long user_cs_limit;
17339+
17340+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17341+ cpumask_t cpu_user_cs_mask;
17342+#endif
17343+
17344+#endif
17345+#endif
17346
17347 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17348 } mm_context_t;
17349diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17350index 883f6b93..6869d96 100644
17351--- a/arch/x86/include/asm/mmu_context.h
17352+++ b/arch/x86/include/asm/mmu_context.h
17353@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17354
17355 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17356 {
17357+
17358+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17359+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17360+ unsigned int i;
17361+ pgd_t *pgd;
17362+
17363+ pax_open_kernel();
17364+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17365+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17366+ set_pgd_batched(pgd+i, native_make_pgd(0));
17367+ pax_close_kernel();
17368+ }
17369+#endif
17370+
17371 #ifdef CONFIG_SMP
17372 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17373 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17374@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17375 struct task_struct *tsk)
17376 {
17377 unsigned cpu = smp_processor_id();
17378+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17379+ int tlbstate = TLBSTATE_OK;
17380+#endif
17381
17382 if (likely(prev != next)) {
17383 #ifdef CONFIG_SMP
17384+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17385+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17386+#endif
17387 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17388 this_cpu_write(cpu_tlbstate.active_mm, next);
17389 #endif
17390 cpumask_set_cpu(cpu, mm_cpumask(next));
17391
17392 /* Re-load page tables */
17393+#ifdef CONFIG_PAX_PER_CPU_PGD
17394+ pax_open_kernel();
17395+
17396+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17397+ if (static_cpu_has(X86_FEATURE_PCID))
17398+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17399+ else
17400+#endif
17401+
17402+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17403+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17404+ pax_close_kernel();
17405+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17406+
17407+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17408+ if (static_cpu_has(X86_FEATURE_PCID)) {
17409+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17410+ u64 descriptor[2];
17411+ descriptor[0] = PCID_USER;
17412+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17413+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17414+ descriptor[0] = PCID_KERNEL;
17415+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17416+ }
17417+ } else {
17418+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17419+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17420+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17421+ else
17422+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17423+ }
17424+ } else
17425+#endif
17426+
17427+ load_cr3(get_cpu_pgd(cpu, kernel));
17428+#else
17429 load_cr3(next->pgd);
17430+#endif
17431 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17432
17433 /* Stop flush ipis for the previous mm */
17434@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17435 */
17436 if (unlikely(prev->context.ldt != next->context.ldt))
17437 load_LDT_nolock(&next->context);
17438+
17439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17440+ if (!(__supported_pte_mask & _PAGE_NX)) {
17441+ smp_mb__before_atomic();
17442+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17443+ smp_mb__after_atomic();
17444+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17445+ }
17446+#endif
17447+
17448+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17449+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17450+ prev->context.user_cs_limit != next->context.user_cs_limit))
17451+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17452+#ifdef CONFIG_SMP
17453+ else if (unlikely(tlbstate != TLBSTATE_OK))
17454+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17455+#endif
17456+#endif
17457+
17458 }
17459+ else {
17460+
17461+#ifdef CONFIG_PAX_PER_CPU_PGD
17462+ pax_open_kernel();
17463+
17464+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17465+ if (static_cpu_has(X86_FEATURE_PCID))
17466+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17467+ else
17468+#endif
17469+
17470+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17471+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17472+ pax_close_kernel();
17473+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17474+
17475+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17476+ if (static_cpu_has(X86_FEATURE_PCID)) {
17477+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17478+ u64 descriptor[2];
17479+ descriptor[0] = PCID_USER;
17480+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17481+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17482+ descriptor[0] = PCID_KERNEL;
17483+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17484+ }
17485+ } else {
17486+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17487+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17488+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17489+ else
17490+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17491+ }
17492+ } else
17493+#endif
17494+
17495+ load_cr3(get_cpu_pgd(cpu, kernel));
17496+#endif
17497+
17498 #ifdef CONFIG_SMP
17499- else {
17500 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17501 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17502
17503@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17504 * tlb flush IPI delivery. We must reload CR3
17505 * to make sure to use no freed page tables.
17506 */
17507+
17508+#ifndef CONFIG_PAX_PER_CPU_PGD
17509 load_cr3(next->pgd);
17510 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17511+#endif
17512+
17513 load_mm_cr4(next);
17514 load_LDT_nolock(&next->context);
17515+
17516+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17517+ if (!(__supported_pte_mask & _PAGE_NX))
17518+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17519+#endif
17520+
17521+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17522+#ifdef CONFIG_PAX_PAGEEXEC
17523+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17524+#endif
17525+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17526+#endif
17527+
17528 }
17529+#endif
17530 }
17531-#endif
17532 }
17533
17534 #define activate_mm(prev, next) \
17535diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17536index e3b7819..b257c64 100644
17537--- a/arch/x86/include/asm/module.h
17538+++ b/arch/x86/include/asm/module.h
17539@@ -5,6 +5,7 @@
17540
17541 #ifdef CONFIG_X86_64
17542 /* X86_64 does not define MODULE_PROC_FAMILY */
17543+#define MODULE_PROC_FAMILY ""
17544 #elif defined CONFIG_M486
17545 #define MODULE_PROC_FAMILY "486 "
17546 #elif defined CONFIG_M586
17547@@ -57,8 +58,20 @@
17548 #error unknown processor family
17549 #endif
17550
17551-#ifdef CONFIG_X86_32
17552-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17553+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17554+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17555+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17556+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17557+#else
17558+#define MODULE_PAX_KERNEXEC ""
17559 #endif
17560
17561+#ifdef CONFIG_PAX_MEMORY_UDEREF
17562+#define MODULE_PAX_UDEREF "UDEREF "
17563+#else
17564+#define MODULE_PAX_UDEREF ""
17565+#endif
17566+
17567+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17568+
17569 #endif /* _ASM_X86_MODULE_H */
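
MODULE_ARCH_VERMAGIC now pastes KERNEXEC and UDEREF markers into every module's vermagic string, so modules built against mismatched PaX configurations fail to load. Only adjacent-string-literal concatenation is involved; the config selections below are hard-coded as an assumption:

#include <stdio.h>

/* Pretend CONFIG_M686, KERNEXEC_OR and UDEREF were selected. */
#define MODULE_PROC_FAMILY  "686 "
#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
#define MODULE_PAX_UDEREF   "UDEREF "

#define MODULE_ARCH_VERMAGIC \
    MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
    /* Adjacent literals concatenate: "686 KERNEXEC_OR UDEREF " */
    printf("vermagic suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
    return 0;
}
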
17570diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17571index 5f2fc44..106caa6 100644
17572--- a/arch/x86/include/asm/nmi.h
17573+++ b/arch/x86/include/asm/nmi.h
17574@@ -36,26 +36,35 @@ enum {
17575
17576 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17577
17578+struct nmiaction;
17579+
17580+struct nmiwork {
17581+ const struct nmiaction *action;
17582+ u64 max_duration;
17583+ struct irq_work irq_work;
17584+};
17585+
17586 struct nmiaction {
17587 struct list_head list;
17588 nmi_handler_t handler;
17589- u64 max_duration;
17590- struct irq_work irq_work;
17591 unsigned long flags;
17592 const char *name;
17593-};
17594+ struct nmiwork *work;
17595+} __do_const;
17596
17597 #define register_nmi_handler(t, fn, fg, n, init...) \
17598 ({ \
17599- static struct nmiaction init fn##_na = { \
17600+ static struct nmiwork fn##_nw; \
17601+ static const struct nmiaction init fn##_na = { \
17602 .handler = (fn), \
17603 .name = (n), \
17604 .flags = (fg), \
17605+ .work = &fn##_nw, \
17606 }; \
17607 __register_nmi_handler((t), &fn##_na); \
17608 })
17609
17610-int __register_nmi_handler(unsigned int, struct nmiaction *);
17611+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17612
17613 void unregister_nmi_handler(unsigned int, const char *);
17614
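
Splitting max_duration and irq_work out of struct nmiaction into struct nmiwork is what allows the action itself to be const (__do_const) and sit in read-only memory: the writable runtime state is reached through one pointer. The same pattern in plain C; the handler and field types are simplified stand-ins:

#include <stdio.h>

typedef int (*nmi_handler_t)(unsigned int, void *);

struct nmiwork {                 /* mutable runtime state, writable */
    unsigned long long max_duration;
};

struct nmiaction {               /* registration record, can be const/RO */
    nmi_handler_t handler;
    const char *name;
    struct nmiwork *work;        /* the one writable part */
};

static int my_handler(unsigned int type, void *regs)
{
    (void)type; (void)regs;
    return 1;
}

static struct nmiwork my_nw;
static const struct nmiaction my_na = {
    .handler = my_handler,
    .name    = "demo",
    .work    = &my_nw,
};

int main(void)
{
    my_na.work->max_duration = 42;   /* only the side table is written */
    printf("%s: %llu\n", my_na.name, my_na.work->max_duration);
    return 0;
}
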
17615diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17616index 802dde3..9183e68 100644
17617--- a/arch/x86/include/asm/page.h
17618+++ b/arch/x86/include/asm/page.h
17619@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17620 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17621
17622 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17623+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17624
17625 #define __boot_va(x) __va(x)
17626 #define __boot_pa(x) __pa(x)
17627@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17628 * virt_to_page(kaddr) returns a valid pointer if and only if
17629 * virt_addr_valid(kaddr) returns true.
17630 */
17631-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17632 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17633 extern bool __virt_addr_valid(unsigned long kaddr);
17634 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17635
17636+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17637+#define virt_to_page(kaddr) \
17638+ ({ \
17639+ const void *__kaddr = (const void *)(kaddr); \
17640+ BUG_ON(!virt_addr_valid(__kaddr)); \
17641+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17642+ })
17643+#else
17644+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17645+#endif
17646+
17647 #endif /* __ASSEMBLY__ */
17648
17649 #include <asm-generic/memory_model.h>
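
Under GRKERNSEC_KSTACKOVERFLOW the virt_to_page() above BUGs on any address the linear map does not cover, instead of fabricating a struct page for, say, a vmalloc'ed kernel stack. A sketch of the checked statement-expression macro shape, with the validator and page lookup stubbed out as assumptions:

#include <stdio.h>
#include <stdlib.h>

#define BUG_ON(cond) do { if (cond) { \
        fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
        abort(); } } while (0)

/* Stand-ins: accept only one fake "linear map" region. */
static int virt_addr_valid(const void *p)
{
    return (unsigned long)p >= 0x1000 && (unsigned long)p < 0x2000;
}
static unsigned long to_page(const void *p) { return (unsigned long)p >> 12; }

/* The checked-macro shape from the hunk: evaluate the argument once,
 * validate it, then translate. */
#define virt_to_page(kaddr) \
    ({ \
        const void *__kaddr = (const void *)(kaddr); \
        BUG_ON(!virt_addr_valid(__kaddr)); \
        to_page(__kaddr); \
    })

int main(void)
{
    printf("page %lu\n", virt_to_page((void *)0x1800));  /* fine   */
    printf("page %lu\n", virt_to_page((void *)0x9000));  /* aborts */
    return 0;
}
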
17650diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17651index b3bebf9..13ac22e 100644
17652--- a/arch/x86/include/asm/page_64.h
17653+++ b/arch/x86/include/asm/page_64.h
17654@@ -7,9 +7,9 @@
17655
17656 /* duplicated to the one in bootmem.h */
17657 extern unsigned long max_pfn;
17658-extern unsigned long phys_base;
17659+extern const unsigned long phys_base;
17660
17661-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17662+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17663 {
17664 unsigned long y = x - __START_KERNEL_map;
17665
17666@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17667 }
17668
17669 #ifdef CONFIG_DEBUG_VIRTUAL
17670-extern unsigned long __phys_addr(unsigned long);
17671-extern unsigned long __phys_addr_symbol(unsigned long);
17672+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17673+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17674 #else
17675 #define __phys_addr(x) __phys_addr_nodebug(x)
17676 #define __phys_addr_symbol(x) \
17677diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17678index 965c47d..ffe0af8 100644
17679--- a/arch/x86/include/asm/paravirt.h
17680+++ b/arch/x86/include/asm/paravirt.h
17681@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17682 return (pmd_t) { ret };
17683 }
17684
17685-static inline pmdval_t pmd_val(pmd_t pmd)
17686+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17687 {
17688 pmdval_t ret;
17689
17690@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17691 val);
17692 }
17693
17694+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17695+{
17696+ pgdval_t val = native_pgd_val(pgd);
17697+
17698+ if (sizeof(pgdval_t) > sizeof(long))
17699+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17700+ val, (u64)val >> 32);
17701+ else
17702+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17703+ val);
17704+}
17705+
17706 static inline void pgd_clear(pgd_t *pgdp)
17707 {
17708 set_pgd(pgdp, __pgd(0));
17709@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17710 pv_mmu_ops.set_fixmap(idx, phys, flags);
17711 }
17712
17713+#ifdef CONFIG_PAX_KERNEXEC
17714+static inline unsigned long pax_open_kernel(void)
17715+{
17716+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17717+}
17718+
17719+static inline unsigned long pax_close_kernel(void)
17720+{
17721+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17722+}
17723+#else
17724+static inline unsigned long pax_open_kernel(void) { return 0; }
17725+static inline unsigned long pax_close_kernel(void) { return 0; }
17726+#endif
17727+
17728 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17729
17730 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17731@@ -906,7 +933,7 @@ extern void default_banner(void);
17732
17733 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17734 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17735-#define PARA_INDIRECT(addr) *%cs:addr
17736+#define PARA_INDIRECT(addr) *%ss:addr
17737 #endif
17738
17739 #define INTERRUPT_RETURN \
17740@@ -981,6 +1008,21 @@ extern void default_banner(void);
17741 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17742 CLBR_NONE, \
17743 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17744+
17745+#define GET_CR0_INTO_RDI \
17746+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17747+ mov %rax,%rdi
17748+
17749+#define SET_RDI_INTO_CR0 \
17750+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17751+
17752+#define GET_CR3_INTO_RDI \
17753+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17754+ mov %rax,%rdi
17755+
17756+#define SET_RDI_INTO_CR3 \
17757+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17758+
17759 #endif /* CONFIG_X86_32 */
17760
17761 #endif /* __ASSEMBLY__ */
17762diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17763index 7549b8b..f0edfda 100644
17764--- a/arch/x86/include/asm/paravirt_types.h
17765+++ b/arch/x86/include/asm/paravirt_types.h
17766@@ -84,7 +84,7 @@ struct pv_init_ops {
17767 */
17768 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17769 unsigned long addr, unsigned len);
17770-};
17771+} __no_const __no_randomize_layout;
17772
17773
17774 struct pv_lazy_ops {
17775@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17776 void (*enter)(void);
17777 void (*leave)(void);
17778 void (*flush)(void);
17779-};
17780+} __no_randomize_layout;
17781
17782 struct pv_time_ops {
17783 unsigned long long (*sched_clock)(void);
17784 unsigned long long (*steal_clock)(int cpu);
17785 unsigned long (*get_tsc_khz)(void);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789 struct pv_cpu_ops {
17790 /* hooks for various privileged instructions */
17791@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17792
17793 void (*start_context_switch)(struct task_struct *prev);
17794 void (*end_context_switch)(struct task_struct *next);
17795-};
17796+} __no_const __no_randomize_layout;
17797
17798 struct pv_irq_ops {
17799 /*
17800@@ -215,7 +215,7 @@ struct pv_irq_ops {
17801 #ifdef CONFIG_X86_64
17802 void (*adjust_exception_frame)(void);
17803 #endif
17804-};
17805+} __no_randomize_layout;
17806
17807 struct pv_apic_ops {
17808 #ifdef CONFIG_X86_LOCAL_APIC
17809@@ -223,7 +223,7 @@ struct pv_apic_ops {
17810 unsigned long start_eip,
17811 unsigned long start_esp);
17812 #endif
17813-};
17814+} __no_const __no_randomize_layout;
17815
17816 struct pv_mmu_ops {
17817 unsigned long (*read_cr2)(void);
17818@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17819 struct paravirt_callee_save make_pud;
17820
17821 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17822+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17823 #endif /* PAGETABLE_LEVELS == 4 */
17824 #endif /* PAGETABLE_LEVELS >= 3 */
17825
17826@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17827 an mfn. We can tell which is which from the index. */
17828 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17829 phys_addr_t phys, pgprot_t flags);
17830-};
17831+
17832+#ifdef CONFIG_PAX_KERNEXEC
17833+ unsigned long (*pax_open_kernel)(void);
17834+ unsigned long (*pax_close_kernel)(void);
17835+#endif
17836+
17837+} __no_randomize_layout;
17838
17839 struct arch_spinlock;
17840 #ifdef CONFIG_SMP
17841@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17842 struct pv_lock_ops {
17843 struct paravirt_callee_save lock_spinning;
17844 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17845-};
17846+} __no_randomize_layout;
17847
17848 /* This contains all the paravirt structures: we get a convenient
17849 * number for each function using the offset which we use to indicate
17850- * what to patch. */
17851+ * what to patch.
17852+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17853+ */
17854+
17855 struct paravirt_patch_template {
17856 struct pv_init_ops pv_init_ops;
17857 struct pv_time_ops pv_time_ops;
17858@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17859 struct pv_apic_ops pv_apic_ops;
17860 struct pv_mmu_ops pv_mmu_ops;
17861 struct pv_lock_ops pv_lock_ops;
17862-};
17863+} __no_randomize_layout;
17864
17865 extern struct pv_info pv_info;
17866 extern struct pv_init_ops pv_init_ops;
17867diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17868index c4412e9..90e88c5 100644
17869--- a/arch/x86/include/asm/pgalloc.h
17870+++ b/arch/x86/include/asm/pgalloc.h
17871@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17872 pmd_t *pmd, pte_t *pte)
17873 {
17874 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17875+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17876+}
17877+
17878+static inline void pmd_populate_user(struct mm_struct *mm,
17879+ pmd_t *pmd, pte_t *pte)
17880+{
17881+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17882 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17883 }
17884
17885@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17886
17887 #ifdef CONFIG_X86_PAE
17888 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17889+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17890+{
17891+ pud_populate(mm, pudp, pmd);
17892+}
17893 #else /* !CONFIG_X86_PAE */
17894 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17895 {
17896 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17897 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17898 }
17899+
17900+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17901+{
17902+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17903+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17904+}
17905 #endif /* CONFIG_X86_PAE */
17906
17907 #if PAGETABLE_LEVELS > 3
17908@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17909 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17910 }
17911
17912+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17913+{
17914+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17915+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17916+}
17917+
17918 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17919 {
17920 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
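
The *_populate_kernel() variants above differ from the stock populate helpers only in writing _KERNPG_TABLE rather than _PAGE_TABLE, i.e. a table entry without the User bit, which matters once UDEREF maintains distinct kernel and user page-table trees. The flag difference spelled out, using the conventional x86 bit values as an assumption:

#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

#define _PAGE_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                       _PAGE_ACCESSED | _PAGE_DIRTY)      /* 0x67 */
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | \
                       _PAGE_ACCESSED | _PAGE_DIRTY)      /* 0x63 */

int main(void)
{
    printf("_PAGE_TABLE   = %#lx\n", _PAGE_TABLE);
    printf("_KERNPG_TABLE = %#lx\n", _KERNPG_TABLE);
    printf("difference    = %#lx (the User bit)\n",
           _PAGE_TABLE ^ _KERNPG_TABLE);
    return 0;
}
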
17921diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17922index fd74a11..35fd5af 100644
17923--- a/arch/x86/include/asm/pgtable-2level.h
17924+++ b/arch/x86/include/asm/pgtable-2level.h
17925@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17926
17927 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17928 {
17929+ pax_open_kernel();
17930 *pmdp = pmd;
17931+ pax_close_kernel();
17932 }
17933
17934 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17935diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17936index cdaa58c..e61122b 100644
17937--- a/arch/x86/include/asm/pgtable-3level.h
17938+++ b/arch/x86/include/asm/pgtable-3level.h
17939@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17940
17941 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17942 {
17943+ pax_open_kernel();
17944 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17945+ pax_close_kernel();
17946 }
17947
17948 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17949 {
17950+ pax_open_kernel();
17951 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17952+ pax_close_kernel();
17953 }
17954
17955 /*
17956diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17957index a0c35bf..3647d79 100644
17958--- a/arch/x86/include/asm/pgtable.h
17959+++ b/arch/x86/include/asm/pgtable.h
17960@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17961
17962 #ifndef __PAGETABLE_PUD_FOLDED
17963 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17964+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17965 #define pgd_clear(pgd) native_pgd_clear(pgd)
17966 #endif
17967
17968@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17969
17970 #define arch_end_context_switch(prev) do {} while(0)
17971
17972+#define pax_open_kernel() native_pax_open_kernel()
17973+#define pax_close_kernel() native_pax_close_kernel()
17974 #endif /* CONFIG_PARAVIRT */
17975
17976+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17977+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17978+
17979+#ifdef CONFIG_PAX_KERNEXEC
17980+static inline unsigned long native_pax_open_kernel(void)
17981+{
17982+ unsigned long cr0;
17983+
17984+ preempt_disable();
17985+ barrier();
17986+ cr0 = read_cr0() ^ X86_CR0_WP;
17987+ BUG_ON(cr0 & X86_CR0_WP);
17988+ write_cr0(cr0);
17989+ barrier();
17990+ return cr0 ^ X86_CR0_WP;
17991+}
17992+
17993+static inline unsigned long native_pax_close_kernel(void)
17994+{
17995+ unsigned long cr0;
17996+
17997+ barrier();
17998+ cr0 = read_cr0() ^ X86_CR0_WP;
17999+ BUG_ON(!(cr0 & X86_CR0_WP));
18000+ write_cr0(cr0);
18001+ barrier();
18002+ preempt_enable_no_resched();
18003+ return cr0 ^ X86_CR0_WP;
18004+}
18005+#else
18006+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18007+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18008+#endif
18009+
18010 /*
18011 * The following only work if pte_present() is true.
18012 * Undefined behaviour if not..
18013 */
18014+static inline int pte_user(pte_t pte)
18015+{
18016+ return pte_val(pte) & _PAGE_USER;
18017+}
18018+
18019 static inline int pte_dirty(pte_t pte)
18020 {
18021 return pte_flags(pte) & _PAGE_DIRTY;
18022@@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18023 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18024 }
18025
18026+static inline unsigned long pgd_pfn(pgd_t pgd)
18027+{
18028+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18029+}
18030+
18031 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18032
18033 static inline int pmd_large(pmd_t pte)
18034@@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18035 return pte_clear_flags(pte, _PAGE_RW);
18036 }
18037
18038+static inline pte_t pte_mkread(pte_t pte)
18039+{
18040+ return __pte(pte_val(pte) | _PAGE_USER);
18041+}
18042+
18043 static inline pte_t pte_mkexec(pte_t pte)
18044 {
18045- return pte_clear_flags(pte, _PAGE_NX);
18046+#ifdef CONFIG_X86_PAE
18047+ if (__supported_pte_mask & _PAGE_NX)
18048+ return pte_clear_flags(pte, _PAGE_NX);
18049+ else
18050+#endif
18051+ return pte_set_flags(pte, _PAGE_USER);
18052+}
18053+
18054+static inline pte_t pte_exprotect(pte_t pte)
18055+{
18056+#ifdef CONFIG_X86_PAE
18057+ if (__supported_pte_mask & _PAGE_NX)
18058+ return pte_set_flags(pte, _PAGE_NX);
18059+ else
18060+#endif
18061+ return pte_clear_flags(pte, _PAGE_USER);
18062 }
18063
18064 static inline pte_t pte_mkdirty(pte_t pte)
18065@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18066 #endif
18067
18068 #ifndef __ASSEMBLY__
18069+
18070+#ifdef CONFIG_PAX_PER_CPU_PGD
18071+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18072+enum cpu_pgd_type {kernel = 0, user = 1};
18073+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18074+{
18075+ return cpu_pgd[cpu][type];
18076+}
18077+#endif
18078+
18079 #include <linux/mm_types.h>
18080 #include <linux/mmdebug.h>
18081 #include <linux/log2.h>
18082@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18083 * Currently stuck as a macro due to indirect forward reference to
18084 * linux/mmzone.h's __section_mem_map_addr() definition:
18085 */
18086-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18087+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18088
18089 /* Find an entry in the second-level page table.. */
18090 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18091@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18092 * Currently stuck as a macro due to indirect forward reference to
18093 * linux/mmzone.h's __section_mem_map_addr() definition:
18094 */
18095-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18096+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18097
18098 /* to find an entry in a page-table-directory. */
18099 static inline unsigned long pud_index(unsigned long address)
18100@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18101
18102 static inline int pgd_bad(pgd_t pgd)
18103 {
18104- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18105+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18106 }
18107
18108 static inline int pgd_none(pgd_t pgd)
18109@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18110 * pgd_offset() returns a (pgd_t *)
18111 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18112 */
18113-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18114+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18115+
18116+#ifdef CONFIG_PAX_PER_CPU_PGD
18117+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18118+#endif
18119+
18120 /*
18121 * a shortcut which implies the use of the kernel's pgd, instead
18122 * of a process's
18123@@ -660,6 +742,23 @@ static inline int pgd_none(pgd_t pgd)
18124 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18125 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18126
18127+#ifdef CONFIG_X86_32
18128+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18129+#else
18130+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18131+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18132+
18133+#ifdef CONFIG_PAX_MEMORY_UDEREF
18134+#ifdef __ASSEMBLY__
18135+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18136+#else
18137+extern unsigned long pax_user_shadow_base;
18138+extern pgdval_t clone_pgd_mask;
18139+#endif
18140+#endif
18141+
18142+#endif
18143+
18144 #ifndef __ASSEMBLY__
18145
18146 extern int direct_gbpages;
18147@@ -826,11 +925,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18148 * dst and src can be on the same page, but the range must not overlap,
18149 * and must not cross a page boundary.
18150 */
18151-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18152+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18153 {
18154- memcpy(dst, src, count * sizeof(pgd_t));
18155+ pax_open_kernel();
18156+ while (count--)
18157+ *dst++ = *src++;
18158+ pax_close_kernel();
18159 }
18160
18161+#ifdef CONFIG_PAX_PER_CPU_PGD
18162+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18163+#endif
18164+
18165+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18166+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18167+#else
18168+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18169+#endif
18170+
18171 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18172 static inline int page_level_shift(enum pg_level level)
18173 {
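
Among the pgtable.h changes, pte_mkexec()/pte_exprotect() fall back to toggling _PAGE_USER when the CPU lacks NX: software PAGEEXEC treats supervisor-only as non-executable for userland. The two-path flag logic in isolation; the __supported_pte_mask handling and bit values are stand-ins:

#include <stdio.h>

typedef unsigned long long pteval_t;

#define _PAGE_USER 0x004ULL
#define _PAGE_NX   (1ULL << 63)

static pteval_t __supported_pte_mask;  /* has _PAGE_NX iff the CPU has NX */

static pteval_t pte_mkexec(pteval_t pte)
{
    if (__supported_pte_mask & _PAGE_NX)
        return pte & ~_PAGE_NX;        /* real NX: clear the bit */
    return pte | _PAGE_USER;           /* fallback: user-accessible == exec */
}

static pteval_t pte_exprotect(pteval_t pte)
{
    if (__supported_pte_mask & _PAGE_NX)
        return pte | _PAGE_NX;
    return pte & ~_PAGE_USER;          /* supervisor-only == non-exec */
}

int main(void)
{
    __supported_pte_mask = _PAGE_NX;
    printf("hw NX : exec=%#llx noexec=%#llx\n",
           pte_mkexec(_PAGE_NX), pte_exprotect(0));
    __supported_pte_mask = 0;
    printf("no NX : exec=%#llx noexec=%#llx\n",
           pte_mkexec(0), pte_exprotect(_PAGE_USER));
    return 0;
}
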
18174diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18175index b6c0b40..3535d47 100644
18176--- a/arch/x86/include/asm/pgtable_32.h
18177+++ b/arch/x86/include/asm/pgtable_32.h
18178@@ -25,9 +25,6 @@
18179 struct mm_struct;
18180 struct vm_area_struct;
18181
18182-extern pgd_t swapper_pg_dir[1024];
18183-extern pgd_t initial_page_table[1024];
18184-
18185 static inline void pgtable_cache_init(void) { }
18186 static inline void check_pgt_cache(void) { }
18187 void paging_init(void);
18188@@ -45,6 +42,12 @@ void paging_init(void);
18189 # include <asm/pgtable-2level.h>
18190 #endif
18191
18192+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18193+extern pgd_t initial_page_table[PTRS_PER_PGD];
18194+#ifdef CONFIG_X86_PAE
18195+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18196+#endif
18197+
18198 #if defined(CONFIG_HIGHPTE)
18199 #define pte_offset_map(dir, address) \
18200 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18201@@ -59,12 +62,17 @@ void paging_init(void);
18202 /* Clear a kernel PTE and flush it from the TLB */
18203 #define kpte_clear_flush(ptep, vaddr) \
18204 do { \
18205+ pax_open_kernel(); \
18206 pte_clear(&init_mm, (vaddr), (ptep)); \
18207+ pax_close_kernel(); \
18208 __flush_tlb_one((vaddr)); \
18209 } while (0)
18210
18211 #endif /* !__ASSEMBLY__ */
18212
18213+#define HAVE_ARCH_UNMAPPED_AREA
18214+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18215+
18216 /*
18217 * kern_addr_valid() is (1) for FLATMEM and (0) for
18218 * SPARSEMEM and DISCONTIGMEM
18219diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18220index 9fb2f2b..b04b4bf 100644
18221--- a/arch/x86/include/asm/pgtable_32_types.h
18222+++ b/arch/x86/include/asm/pgtable_32_types.h
18223@@ -8,7 +8,7 @@
18224 */
18225 #ifdef CONFIG_X86_PAE
18226 # include <asm/pgtable-3level_types.h>
18227-# define PMD_SIZE (1UL << PMD_SHIFT)
18228+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18229 # define PMD_MASK (~(PMD_SIZE - 1))
18230 #else
18231 # include <asm/pgtable-2level_types.h>
18232@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18233 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18234 #endif
18235
18236+#ifdef CONFIG_PAX_KERNEXEC
18237+#ifndef __ASSEMBLY__
18238+extern unsigned char MODULES_EXEC_VADDR[];
18239+extern unsigned char MODULES_EXEC_END[];
18240+#endif
18241+#include <asm/boot.h>
18242+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18243+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18244+#else
18245+#define ktla_ktva(addr) (addr)
18246+#define ktva_ktla(addr) (addr)
18247+#endif
18248+
18249 #define MODULES_VADDR VMALLOC_START
18250 #define MODULES_END VMALLOC_END
18251 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
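
ktla_ktva()/ktva_ktla() translate between the kernel text's linear-alias address and its execution address once KERNEXEC separates the two mappings (the 64-bit variants in the next file are identities). The arithmetic with LOAD_PHYSICAL_ADDR and PAGE_OFFSET plugged in as typical 32-bit defaults, purely for illustration:

#include <stdio.h>

/* Typical 32-bit defaults, assumed only for this sketch. */
#define LOAD_PHYSICAL_ADDR 0x1000000UL   /* 16 MB      */
#define PAGE_OFFSET        0xC0000000UL  /* 3 GB split */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long ktla = 0x00100000UL;   /* some text address */
    unsigned long ktva = ktla_ktva(ktla);

    printf("ktla %#lx -> ktva %#lx -> back %#lx\n",
           ktla, ktva, ktva_ktla(ktva));
    return 0;
}
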
18252diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18253index 2ee7811..db41d8c 100644
18254--- a/arch/x86/include/asm/pgtable_64.h
18255+++ b/arch/x86/include/asm/pgtable_64.h
18256@@ -16,11 +16,16 @@
18257
18258 extern pud_t level3_kernel_pgt[512];
18259 extern pud_t level3_ident_pgt[512];
18260+extern pud_t level3_vmalloc_start_pgt[512];
18261+extern pud_t level3_vmalloc_end_pgt[512];
18262+extern pud_t level3_vmemmap_pgt[512];
18263+extern pud_t level2_vmemmap_pgt[512];
18264 extern pmd_t level2_kernel_pgt[512];
18265 extern pmd_t level2_fixmap_pgt[512];
18266-extern pmd_t level2_ident_pgt[512];
18267+extern pmd_t level2_ident_pgt[512*2];
18268 extern pte_t level1_fixmap_pgt[512];
18269-extern pgd_t init_level4_pgt[];
18270+extern pte_t level1_vsyscall_pgt[512];
18271+extern pgd_t init_level4_pgt[512];
18272
18273 #define swapper_pg_dir init_level4_pgt
18274
18275@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18276
18277 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18278 {
18279+ pax_open_kernel();
18280 *pmdp = pmd;
18281+ pax_close_kernel();
18282 }
18283
18284 static inline void native_pmd_clear(pmd_t *pmd)
18285@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18286
18287 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18288 {
18289+ pax_open_kernel();
18290 *pudp = pud;
18291+ pax_close_kernel();
18292 }
18293
18294 static inline void native_pud_clear(pud_t *pud)
18295@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18296
18297 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18298 {
18299+ pax_open_kernel();
18300+ *pgdp = pgd;
18301+ pax_close_kernel();
18302+}
18303+
18304+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18305+{
18306 *pgdp = pgd;
18307 }
18308
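
The bracketing added to native_set_pmd()/native_set_pud()/native_set_pgd() is the KERNEXEC write-protection pattern: page tables stay read-only except for the one store, while native_set_pgd_batched() omits the bracket so a caller can open the window once around many updates. A conceptual sketch — the CR0.WP toggle the real pax_open_kernel()/pax_close_kernel() perform cannot be reproduced in userspace and is modeled here as a plain flag:

#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;

static int kernel_writable;                    /* stand-in for CR0.WP being clear */

static void pax_open_kernel(void)  { kernel_writable = 1; }
static void pax_close_kernel(void) { kernel_writable = 0; }

static void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
    pax_open_kernel();      /* make the read-only page tables writable */
    *pmdp = pmd;            /* the single protected store */
    pax_close_kernel();     /* re-arm the protection immediately */
}

int main(void)
{
    pmd_t dir = { 0 }, val = { 0x1003 };
    native_set_pmd(&dir, val);
    printf("pmd=%#lx writable=%d\n", dir.pmd, kernel_writable);
    return 0;
}
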
18309diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18310index 602b602..acb53ed 100644
18311--- a/arch/x86/include/asm/pgtable_64_types.h
18312+++ b/arch/x86/include/asm/pgtable_64_types.h
18313@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18314 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18315 #define MODULES_END _AC(0xffffffffff000000, UL)
18316 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18317+#define MODULES_EXEC_VADDR MODULES_VADDR
18318+#define MODULES_EXEC_END MODULES_END
18319 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18320 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18321 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18322 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18323
18324+#define ktla_ktva(addr) (addr)
18325+#define ktva_ktla(addr) (addr)
18326+
18327 #define EARLY_DYNAMIC_PAGE_TABLES 64
18328
18329 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18330diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18331index 8c7c108..1c1b77f 100644
18332--- a/arch/x86/include/asm/pgtable_types.h
18333+++ b/arch/x86/include/asm/pgtable_types.h
18334@@ -85,8 +85,10 @@
18335
18336 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18337 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18338-#else
18339+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18340 #define _PAGE_NX (_AT(pteval_t, 0))
18341+#else
18342+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18343 #endif
18344
18345 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18346@@ -141,6 +143,9 @@ enum page_cache_mode {
18347 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18348 _PAGE_ACCESSED)
18349
18350+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18351+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18352+
18353 #define __PAGE_KERNEL_EXEC \
18354 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18355 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18356@@ -148,7 +153,7 @@ enum page_cache_mode {
18357 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18358 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18359 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18360-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18361+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18362 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18363 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18364 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18365@@ -194,7 +199,7 @@ enum page_cache_mode {
18366 #ifdef CONFIG_X86_64
18367 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18368 #else
18369-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18370+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18371 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18372 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18373 #endif
18374@@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18375 {
18376 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18377 }
18378+#endif
18379
18380+#if PAGETABLE_LEVELS == 3
18381+#include <asm-generic/pgtable-nopud.h>
18382+#endif
18383+
18384+#if PAGETABLE_LEVELS == 2
18385+#include <asm-generic/pgtable-nopmd.h>
18386+#endif
18387+
18388+#ifndef __ASSEMBLY__
18389 #if PAGETABLE_LEVELS > 3
18390 typedef struct { pudval_t pud; } pud_t;
18391
18392@@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18393 return pud.pud;
18394 }
18395 #else
18396-#include <asm-generic/pgtable-nopud.h>
18397-
18398 static inline pudval_t native_pud_val(pud_t pud)
18399 {
18400 return native_pgd_val(pud.pgd);
18401@@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18402 return pmd.pmd;
18403 }
18404 #else
18405-#include <asm-generic/pgtable-nopmd.h>
18406-
18407 static inline pmdval_t native_pmd_val(pmd_t pmd)
18408 {
18409 return native_pgd_val(pmd.pud.pgd);
18410@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18411
18412 extern pteval_t __supported_pte_mask;
18413 extern void set_nx(void);
18414-extern int nx_enabled;
18415
18416 #define pgprot_writecombine pgprot_writecombine
18417 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18418diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18419index 8f327184..368fb29 100644
18420--- a/arch/x86/include/asm/preempt.h
18421+++ b/arch/x86/include/asm/preempt.h
18422@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18423 */
18424 static __always_inline bool __preempt_count_dec_and_test(void)
18425 {
18426- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18427+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18428 }
18429
18430 /*
18431diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18432index ec1c935..5cc6023 100644
18433--- a/arch/x86/include/asm/processor.h
18434+++ b/arch/x86/include/asm/processor.h
18435@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18436 /* Index into per_cpu list: */
18437 u16 cpu_index;
18438 u32 microcode;
18439-};
18440+} __randomize_layout;
18441
18442 #define X86_VENDOR_INTEL 0
18443 #define X86_VENDOR_CYRIX 1
18444@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18445 : "memory");
18446 }
18447
18448+/* invpcid (%rdx),%rax */
18449+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18450+
18451+#define INVPCID_SINGLE_ADDRESS 0UL
18452+#define INVPCID_SINGLE_CONTEXT 1UL
18453+#define INVPCID_ALL_GLOBAL 2UL
18454+#define INVPCID_ALL_NONGLOBAL 3UL
18455+
18456+#define PCID_KERNEL 0UL
18457+#define PCID_USER 1UL
18458+#define PCID_NOFLUSH (1UL << 63)
18459+
18460 static inline void load_cr3(pgd_t *pgdir)
18461 {
18462- write_cr3(__pa(pgdir));
18463+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18464 }
18465
18466 #ifdef CONFIG_X86_32
18467@@ -282,7 +294,7 @@ struct tss_struct {
18468
18469 } ____cacheline_aligned;
18470
18471-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18472+extern struct tss_struct init_tss[NR_CPUS];
18473
18474 /*
18475 * Save the original ist values for checking stack pointers during debugging
18476@@ -479,6 +491,7 @@ struct thread_struct {
18477 unsigned short ds;
18478 unsigned short fsindex;
18479 unsigned short gsindex;
18480+ unsigned short ss;
18481 #endif
18482 #ifdef CONFIG_X86_32
18483 unsigned long ip;
18484@@ -805,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
18485 */
18486 #define TASK_SIZE PAGE_OFFSET
18487 #define TASK_SIZE_MAX TASK_SIZE
18488+
18489+#ifdef CONFIG_PAX_SEGMEXEC
18490+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18491+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18492+#else
18493 #define STACK_TOP TASK_SIZE
18494-#define STACK_TOP_MAX STACK_TOP
18495+#endif
18496+
18497+#define STACK_TOP_MAX TASK_SIZE
18498
18499 #define INIT_THREAD { \
18500- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18501+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18502 .vm86_info = NULL, \
18503 .sysenter_cs = __KERNEL_CS, \
18504 .io_bitmap_ptr = NULL, \
18505@@ -823,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
18506 */
18507 #define INIT_TSS { \
18508 .x86_tss = { \
18509- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18510+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18511 .ss0 = __KERNEL_DS, \
18512 .ss1 = __KERNEL_CS, \
18513 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18514@@ -834,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
18515 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18516
18517 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18518-#define KSTK_TOP(info) \
18519-({ \
18520- unsigned long *__ptr = (unsigned long *)(info); \
18521- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18522-})
18523+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18524
18525 /*
18526 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18527@@ -853,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18528 #define task_pt_regs(task) \
18529 ({ \
18530 struct pt_regs *__regs__; \
18531- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18532+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18533 __regs__ - 1; \
18534 })
18535
18536@@ -869,13 +885,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18537 * particular problem by preventing anything from being mapped
18538 * at the maximum canonical address.
18539 */
18540-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18541+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18542
18543 /* This decides where the kernel will search for a free chunk of vm
18544 * space during mmap's.
18545 */
18546 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18547- 0xc0000000 : 0xFFFFe000)
18548+ 0xc0000000 : 0xFFFFf000)
18549
18550 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18551 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18552@@ -886,11 +902,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18553 #define STACK_TOP_MAX TASK_SIZE_MAX
18554
18555 #define INIT_THREAD { \
18556- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18557+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18558 }
18559
18560 #define INIT_TSS { \
18561- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18562+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18563 }
18564
18565 /*
18566@@ -918,6 +934,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18567 */
18568 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18569
18570+#ifdef CONFIG_PAX_SEGMEXEC
18571+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18572+#endif
18573+
18574 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18575
18576 /* Get/set a process' ability to use the timestamp counter instruction */
18577@@ -962,7 +982,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18578 return 0;
18579 }
18580
18581-extern unsigned long arch_align_stack(unsigned long sp);
18582+#define arch_align_stack(x) ((x) & ~0xfUL)
18583 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18584
18585 void default_idle(void);
18586@@ -972,6 +992,6 @@ bool xen_set_default_idle(void);
18587 #define xen_set_default_idle 0
18588 #endif
18589
18590-void stop_this_cpu(void *dummy);
18591+void stop_this_cpu(void *dummy) __noreturn;
18592 void df_debug(struct pt_regs *regs, long error_code);
18593 #endif /* _ASM_X86_PROCESSOR_H */
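
The PCID_* constants defined above are OR'd straight into CR3: the low 12 bits select an address-space tag, and bit 63 (PCID_NOFLUSH) asks the CPU to keep that PCID's TLB entries across the write — the tlbflush.h hunk further down relies on both. A small sketch of the resulting CR3 values, with an assumed pgd address:

#include <stdint.h>
#include <stdio.h>

#define PCID_KERNEL  0ULL
#define PCID_USER    1ULL
#define PCID_NOFLUSH (1ULL << 63)

int main(void)
{
    uint64_t pgd_phys = 0x1234000ULL;              /* assumed page-aligned pgd */
    printf("kernel CR3:         %#018llx\n",
           (unsigned long long)(pgd_phys | PCID_KERNEL));
    printf("user CR3, no flush: %#018llx\n",
           (unsigned long long)(pgd_phys | PCID_USER | PCID_NOFLUSH));
    return 0;
}
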
18594diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18595index 86fc2bb..bd5049a 100644
18596--- a/arch/x86/include/asm/ptrace.h
18597+++ b/arch/x86/include/asm/ptrace.h
18598@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18599 }
18600
18601 /*
18602- * user_mode_vm(regs) determines whether a register set came from user mode.
18603+ * user_mode(regs) determines whether a register set came from user mode.
18604 * This is true if V8086 mode was enabled OR if the register set was from
18605 * protected mode with RPL-3 CS value. This tricky test checks that with
18606 * one comparison. Many places in the kernel can bypass this full check
18607- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18608+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18609+ * be used.
18610 */
18611-static inline int user_mode(struct pt_regs *regs)
18612+static inline int user_mode_novm(struct pt_regs *regs)
18613 {
18614 #ifdef CONFIG_X86_32
18615 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18616 #else
18617- return !!(regs->cs & 3);
18618+ return !!(regs->cs & SEGMENT_RPL_MASK);
18619 #endif
18620 }
18621
18622-static inline int user_mode_vm(struct pt_regs *regs)
18623+static inline int user_mode(struct pt_regs *regs)
18624 {
18625 #ifdef CONFIG_X86_32
18626 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18627 USER_RPL;
18628 #else
18629- return user_mode(regs);
18630+ return user_mode_novm(regs);
18631 #endif
18632 }
18633
18634@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18635 #ifdef CONFIG_X86_64
18636 static inline bool user_64bit_mode(struct pt_regs *regs)
18637 {
18638+ unsigned long cs = regs->cs & 0xffff;
18639 #ifndef CONFIG_PARAVIRT
18640 /*
18641 * On non-paravirt systems, this is the only long mode CPL 3
18642 * selector. We do not allow long mode selectors in the LDT.
18643 */
18644- return regs->cs == __USER_CS;
18645+ return cs == __USER_CS;
18646 #else
18647 /* Headers are too twisted for this to go in paravirt.h. */
18648- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18649+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18650 #endif
18651 }
18652
18653@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18654 * Traps from the kernel do not save sp and ss.
18655 * Use the helper function to retrieve sp.
18656 */
18657- if (offset == offsetof(struct pt_regs, sp) &&
18658- regs->cs == __KERNEL_CS)
18659- return kernel_stack_pointer(regs);
18660+ if (offset == offsetof(struct pt_regs, sp)) {
18661+ unsigned long cs = regs->cs & 0xffff;
18662+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18663+ return kernel_stack_pointer(regs);
18664+ }
18665 #endif
18666 return *(unsigned long *)((unsigned long)regs + offset);
18667 }
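
The renamed user_mode() above keeps the one-comparison trick the comment describes: OR-ing the RPL bits of CS with the EFLAGS VM bit catches both ring-3 protected mode and V8086 mode in a single test. A userspace illustration with the same constants:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      0x00020000   /* EFLAGS.VM */

static int user_mode(unsigned long cs, unsigned long flags)
{
    return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    printf("%d\n", user_mode(0x73, 0));             /* ring-3 CS -> 1 */
    printf("%d\n", user_mode(0x10, 0));             /* kernel CS -> 0 */
    printf("%d\n", user_mode(0x10, X86_VM_MASK));   /* vm86 -> 1 */
    return 0;
}
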
18668diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18669index ae0e241..e80b10b 100644
18670--- a/arch/x86/include/asm/qrwlock.h
18671+++ b/arch/x86/include/asm/qrwlock.h
18672@@ -7,8 +7,8 @@
18673 #define queue_write_unlock queue_write_unlock
18674 static inline void queue_write_unlock(struct qrwlock *lock)
18675 {
18676- barrier();
18677- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18678+ barrier();
18679+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18680 }
18681 #endif
18682
18683diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18684index 9c6b890..5305f53 100644
18685--- a/arch/x86/include/asm/realmode.h
18686+++ b/arch/x86/include/asm/realmode.h
18687@@ -22,16 +22,14 @@ struct real_mode_header {
18688 #endif
18689 /* APM/BIOS reboot */
18690 u32 machine_real_restart_asm;
18691-#ifdef CONFIG_X86_64
18692 u32 machine_real_restart_seg;
18693-#endif
18694 };
18695
18696 /* This must match data at trampoline_32/64.S */
18697 struct trampoline_header {
18698 #ifdef CONFIG_X86_32
18699 u32 start;
18700- u16 gdt_pad;
18701+ u16 boot_cs;
18702 u16 gdt_limit;
18703 u32 gdt_base;
18704 #else
18705diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18706index a82c4f1..ac45053 100644
18707--- a/arch/x86/include/asm/reboot.h
18708+++ b/arch/x86/include/asm/reboot.h
18709@@ -6,13 +6,13 @@
18710 struct pt_regs;
18711
18712 struct machine_ops {
18713- void (*restart)(char *cmd);
18714- void (*halt)(void);
18715- void (*power_off)(void);
18716+ void (* __noreturn restart)(char *cmd);
18717+ void (* __noreturn halt)(void);
18718+ void (* __noreturn power_off)(void);
18719 void (*shutdown)(void);
18720 void (*crash_shutdown)(struct pt_regs *);
18721- void (*emergency_restart)(void);
18722-};
18723+ void (* __noreturn emergency_restart)(void);
18724+} __no_const;
18725
18726 extern struct machine_ops machine_ops;
18727
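
Two grsecurity annotations appear above: __no_const comes from the constify GCC plugin, while __noreturn is the standard noreturn attribute, applied through the pointer declarator so the compiler may assume the call never returns. A minimal sketch of that placement using plain GCC attribute syntax (names hypothetical):

#include <stdlib.h>

struct machine_ops {
    void (*__attribute__((noreturn)) power_off)(void);   /* pointee never returns */
};

static void my_power_off(void) __attribute__((noreturn));
static void my_power_off(void) { exit(0); }

int main(void)
{
    struct machine_ops ops = { .power_off = my_power_off };
    ops.power_off();    /* compiler may treat anything after this as unreachable */
}
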
18728diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18729index 8f7866a..e442f20 100644
18730--- a/arch/x86/include/asm/rmwcc.h
18731+++ b/arch/x86/include/asm/rmwcc.h
18732@@ -3,7 +3,34 @@
18733
18734 #ifdef CC_HAVE_ASM_GOTO
18735
18736-#define __GEN_RMWcc(fullop, var, cc, ...) \
18737+#ifdef CONFIG_PAX_REFCOUNT
18738+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18739+do { \
18740+ asm_volatile_goto (fullop \
18741+ ";jno 0f\n" \
18742+ fullantiop \
18743+ ";int $4\n0:\n" \
18744+ _ASM_EXTABLE(0b, 0b) \
18745+ ";j" cc " %l[cc_label]" \
18746+ : : "m" (var), ## __VA_ARGS__ \
18747+ : "memory" : cc_label); \
18748+ return 0; \
18749+cc_label: \
18750+ return 1; \
18751+} while (0)
18752+#else
18753+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18754+do { \
18755+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18756+ : : "m" (var), ## __VA_ARGS__ \
18757+ : "memory" : cc_label); \
18758+ return 0; \
18759+cc_label: \
18760+ return 1; \
18761+} while (0)
18762+#endif
18763+
18764+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18765 do { \
18766 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18767 : : "m" (var), ## __VA_ARGS__ \
18768@@ -13,15 +40,46 @@ cc_label: \
18769 return 1; \
18770 } while (0)
18771
18772-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18773- __GEN_RMWcc(op " " arg0, var, cc)
18774+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18775+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18776
18777-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18778- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18779+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18780+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18781+
18782+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18783+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18784+
18785+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18786+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18787
18788 #else /* !CC_HAVE_ASM_GOTO */
18789
18790-#define __GEN_RMWcc(fullop, var, cc, ...) \
18791+#ifdef CONFIG_PAX_REFCOUNT
18792+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18793+do { \
18794+ char c; \
18795+ asm volatile (fullop \
18796+ ";jno 0f\n" \
18797+ fullantiop \
18798+ ";int $4\n0:\n" \
18799+ _ASM_EXTABLE(0b, 0b) \
18800+ "; set" cc " %1" \
18801+ : "+m" (var), "=qm" (c) \
18802+ : __VA_ARGS__ : "memory"); \
18803+ return c != 0; \
18804+} while (0)
18805+#else
18806+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18807+do { \
18808+ char c; \
18809+ asm volatile (fullop "; set" cc " %1" \
18810+ : "+m" (var), "=qm" (c) \
18811+ : __VA_ARGS__ : "memory"); \
18812+ return c != 0; \
18813+} while (0)
18814+#endif
18815+
18816+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18817 do { \
18818 char c; \
18819 asm volatile (fullop "; set" cc " %1" \
18820@@ -30,11 +88,17 @@ do { \
18821 return c != 0; \
18822 } while (0)
18823
18824-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18825- __GEN_RMWcc(op " " arg0, var, cc)
18826+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18827+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18828+
18829+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18830+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18831+
18832+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18833+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18834
18835-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18836- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18837+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18838+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18839
18840 #endif /* CC_HAVE_ASM_GOTO */
18841
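
The PAX_REFCOUNT variant of __GEN_RMWcc above follows one shape: perform the operation, and if the signed result overflowed (OF set), undo it with the anti-op and raise int $4, which the exception-table entry turns into a report. A runnable userspace sketch of the non-asm-goto form, with the trap replaced by a flag (the asm-goto form jumps straight to a C label instead of materializing the sete):

#include <limits.h>
#include <stdio.h>

static int overflowed;   /* stand-in for the int $4 overflow trap */

static int checked_dec_and_test(int *v)
{
    unsigned char zero;
    asm volatile("decl %0\n\t"
                 "jno 1f\n\t"
                 "incl %0\n\t"        /* anti-op: undo the overflowing decrement */
                 "movl $1, %2\n"      /* in the kernel: int $4 + extable fixup */
                 "1:\n\t"
                 "sete %1"
                 : "+m" (*v), "=q" (zero), "+m" (overflowed)
                 :
                 : "cc", "memory");
    return zero;
}

int main(void)
{
    int refs = INT_MIN;                       /* decrement would underflow */
    checked_dec_and_test(&refs);
    printf("overflowed=%d refs=%d\n", overflowed, refs);   /* 1, INT_MIN */
    return 0;
}
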
18842diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18843index cad82c9..2e5c5c1 100644
18844--- a/arch/x86/include/asm/rwsem.h
18845+++ b/arch/x86/include/asm/rwsem.h
18846@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18847 {
18848 asm volatile("# beginning down_read\n\t"
18849 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18850+
18851+#ifdef CONFIG_PAX_REFCOUNT
18852+ "jno 0f\n"
18853+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18854+ "int $4\n0:\n"
18855+ _ASM_EXTABLE(0b, 0b)
18856+#endif
18857+
18858 /* adds 0x00000001 */
18859 " jns 1f\n"
18860 " call call_rwsem_down_read_failed\n"
18861@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18862 "1:\n\t"
18863 " mov %1,%2\n\t"
18864 " add %3,%2\n\t"
18865+
18866+#ifdef CONFIG_PAX_REFCOUNT
18867+ "jno 0f\n"
18868+ "sub %3,%2\n"
18869+ "int $4\n0:\n"
18870+ _ASM_EXTABLE(0b, 0b)
18871+#endif
18872+
18873 " jle 2f\n\t"
18874 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18875 " jnz 1b\n\t"
18876@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18877 long tmp;
18878 asm volatile("# beginning down_write\n\t"
18879 LOCK_PREFIX " xadd %1,(%2)\n\t"
18880+
18881+#ifdef CONFIG_PAX_REFCOUNT
18882+ "jno 0f\n"
18883+ "mov %1,(%2)\n"
18884+ "int $4\n0:\n"
18885+ _ASM_EXTABLE(0b, 0b)
18886+#endif
18887+
18888 /* adds 0xffff0001, returns the old value */
18889 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18890 /* was the active mask 0 before? */
18891@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18892 long tmp;
18893 asm volatile("# beginning __up_read\n\t"
18894 LOCK_PREFIX " xadd %1,(%2)\n\t"
18895+
18896+#ifdef CONFIG_PAX_REFCOUNT
18897+ "jno 0f\n"
18898+ "mov %1,(%2)\n"
18899+ "int $4\n0:\n"
18900+ _ASM_EXTABLE(0b, 0b)
18901+#endif
18902+
18903 /* subtracts 1, returns the old value */
18904 " jns 1f\n\t"
18905 " call call_rwsem_wake\n" /* expects old value in %edx */
18906@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18907 long tmp;
18908 asm volatile("# beginning __up_write\n\t"
18909 LOCK_PREFIX " xadd %1,(%2)\n\t"
18910+
18911+#ifdef CONFIG_PAX_REFCOUNT
18912+ "jno 0f\n"
18913+ "mov %1,(%2)\n"
18914+ "int $4\n0:\n"
18915+ _ASM_EXTABLE(0b, 0b)
18916+#endif
18917+
18918 /* subtracts 0xffff0001, returns the old value */
18919 " jns 1f\n\t"
18920 " call call_rwsem_wake\n" /* expects old value in %edx */
18921@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18922 {
18923 asm volatile("# beginning __downgrade_write\n\t"
18924 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18925+
18926+#ifdef CONFIG_PAX_REFCOUNT
18927+ "jno 0f\n"
18928+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18929+ "int $4\n0:\n"
18930+ _ASM_EXTABLE(0b, 0b)
18931+#endif
18932+
18933 /*
18934 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18935 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18936@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18937 */
18938 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18939 {
18940- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18941+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18942+
18943+#ifdef CONFIG_PAX_REFCOUNT
18944+ "jno 0f\n"
18945+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18946+ "int $4\n0:\n"
18947+ _ASM_EXTABLE(0b, 0b)
18948+#endif
18949+
18950 : "+m" (sem->count)
18951 : "er" (delta));
18952 }
18953@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18954 */
18955 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18956 {
18957- return delta + xadd(&sem->count, delta);
18958+ return delta + xadd_check_overflow(&sem->count, delta);
18959 }
18960
18961 #endif /* __KERNEL__ */
18962diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18963index db257a5..b91bc77 100644
18964--- a/arch/x86/include/asm/segment.h
18965+++ b/arch/x86/include/asm/segment.h
18966@@ -73,10 +73,15 @@
18967 * 26 - ESPFIX small SS
18968 * 27 - per-cpu [ offset to per-cpu data area ]
18969 * 28 - stack_canary-20 [ for stack protector ]
18970- * 29 - unused
18971- * 30 - unused
18972+ * 29 - PCI BIOS CS
18973+ * 30 - PCI BIOS DS
18974 * 31 - TSS for double fault handler
18975 */
18976+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18977+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18978+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18979+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18980+
18981 #define GDT_ENTRY_TLS_MIN 6
18982 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18983
18984@@ -88,6 +93,8 @@
18985
18986 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18987
18988+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18989+
18990 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18991
18992 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18993@@ -113,6 +120,12 @@
18994 #define __KERNEL_STACK_CANARY 0
18995 #endif
18996
18997+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
18998+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
18999+
19000+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19001+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19002+
19003 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19004
19005 /*
19006@@ -140,7 +153,7 @@
19007 */
19008
19009 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19010-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19011+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19012
19013
19014 #else
19015@@ -164,6 +177,8 @@
19016 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19017 #define __USER32_DS __USER_DS
19018
19019+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19020+
19021 #define GDT_ENTRY_TSS 8 /* needs two entries */
19022 #define GDT_ENTRY_LDT 10 /* needs two entries */
19023 #define GDT_ENTRY_TLS_MIN 12
19024@@ -172,6 +187,8 @@
19025 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19026 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19027
19028+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19029+
19030 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19031 #define FS_TLS 0
19032 #define GS_TLS 1
19033@@ -179,12 +196,14 @@
19034 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19035 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19036
19037-#define GDT_ENTRIES 16
19038+#define GDT_ENTRIES 17
19039
19040 #endif
19041
19042 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19043+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19044 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19045+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19046 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19047 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19048 #ifndef CONFIG_PARAVIRT
19049@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19050 {
19051 unsigned long __limit;
19052 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19053- return __limit + 1;
19054+ return __limit;
19055 }
19056
19057 #endif /* !__ASSEMBLY__ */
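
All the selector macros above follow the same x86 arithmetic: a selector is (GDT index << 3) | TI | RPL, so kernel selectors are entry*8 and user selectors entry*8+3. A sketch, assuming upstream's GDT_ENTRY_KERNEL_BASE of 12 for the 32-bit layout:

#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE        12   /* upstream's 32-bit value */
#define GDT_ENTRY_KERNEXEC_KERNEL_CS 4
#define GDT_ENTRY_PCIBIOS_CS         (GDT_ENTRY_KERNEL_BASE + 17)

#define SEL(entry, rpl) (((entry) << 3) | (rpl))

int main(void)
{
    printf("__KERNEXEC_KERNEL_CS = %#x\n", SEL(GDT_ENTRY_KERNEXEC_KERNEL_CS, 0));
    printf("__PCIBIOS_CS         = %#x\n", SEL(GDT_ENTRY_PCIBIOS_CS, 0));
    printf("__USER_DS            = %#x\n", SEL(GDT_ENTRY_KERNEL_BASE + 3, 3));
    return 0;
}
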
19058diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19059index 8d3120f..352b440 100644
19060--- a/arch/x86/include/asm/smap.h
19061+++ b/arch/x86/include/asm/smap.h
19062@@ -25,11 +25,40 @@
19063
19064 #include <asm/alternative-asm.h>
19065
19066+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19067+#define ASM_PAX_OPEN_USERLAND \
19068+ 661: jmp 663f; \
19069+ .pushsection .altinstr_replacement, "a" ; \
19070+ 662: pushq %rax; nop; \
19071+ .popsection ; \
19072+ .pushsection .altinstructions, "a" ; \
19073+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19074+ .popsection ; \
19075+ call __pax_open_userland; \
19076+ popq %rax; \
19077+ 663:
19078+
19079+#define ASM_PAX_CLOSE_USERLAND \
19080+ 661: jmp 663f; \
19081+ .pushsection .altinstr_replacement, "a" ; \
19082+ 662: pushq %rax; nop; \
19083+ .popsection; \
19084+ .pushsection .altinstructions, "a" ; \
19085+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19086+ .popsection; \
19087+ call __pax_close_userland; \
19088+ popq %rax; \
19089+ 663:
19090+#else
19091+#define ASM_PAX_OPEN_USERLAND
19092+#define ASM_PAX_CLOSE_USERLAND
19093+#endif
19094+
19095 #ifdef CONFIG_X86_SMAP
19096
19097 #define ASM_CLAC \
19098 661: ASM_NOP3 ; \
19099- .pushsection .altinstr_replacement, "ax" ; \
19100+ .pushsection .altinstr_replacement, "a" ; \
19101 662: __ASM_CLAC ; \
19102 .popsection ; \
19103 .pushsection .altinstructions, "a" ; \
19104@@ -38,7 +67,7 @@
19105
19106 #define ASM_STAC \
19107 661: ASM_NOP3 ; \
19108- .pushsection .altinstr_replacement, "ax" ; \
19109+ .pushsection .altinstr_replacement, "a" ; \
19110 662: __ASM_STAC ; \
19111 .popsection ; \
19112 .pushsection .altinstructions, "a" ; \
19113@@ -56,6 +85,37 @@
19114
19115 #include <asm/alternative.h>
19116
19117+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19118+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19119+
19120+extern void __pax_open_userland(void);
19121+static __always_inline unsigned long pax_open_userland(void)
19122+{
19123+
19124+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19125+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19126+ :
19127+ : [open] "i" (__pax_open_userland)
19128+ : "memory", "rax");
19129+#endif
19130+
19131+ return 0;
19132+}
19133+
19134+extern void __pax_close_userland(void);
19135+static __always_inline unsigned long pax_close_userland(void)
19136+{
19137+
19138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19139+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19140+ :
19141+ : [close] "i" (__pax_close_userland)
19142+ : "memory", "rax");
19143+#endif
19144+
19145+ return 0;
19146+}
19147+
19148 #ifdef CONFIG_X86_SMAP
19149
19150 static __always_inline void clac(void)
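
pax_open_userland()/pax_close_userland() above use ALTERNATIVE() so the call only exists on CPUs with the PaX-defined STRONGUDEREF feature: the 5-byte NOP is patched into a direct call at boot. A userspace analogy that models the boot-time patching with a function pointer selected once at startup:

#include <stdbool.h>
#include <stdio.h>

static void nop5(void) { /* patched-out path: do nothing */ }
static void __pax_open_userland(void) { puts("userland window opened"); }

static void (*open_userland)(void) = nop5;

static void init_alternatives(bool has_stronguderef)
{
    if (has_stronguderef)
        open_userland = __pax_open_userland;   /* "patch" the call site */
}

int main(void)
{
    init_alternatives(true);
    open_userland();
    return 0;
}
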
19151diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19152index 8cd1cc3..827e09e 100644
19153--- a/arch/x86/include/asm/smp.h
19154+++ b/arch/x86/include/asm/smp.h
19155@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19156 /* cpus sharing the last level cache: */
19157 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19158 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19159-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19160+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19161
19162 static inline struct cpumask *cpu_sibling_mask(int cpu)
19163 {
19164@@ -78,7 +78,7 @@ struct smp_ops {
19165
19166 void (*send_call_func_ipi)(const struct cpumask *mask);
19167 void (*send_call_func_single_ipi)(int cpu);
19168-};
19169+} __no_const;
19170
19171 /* Globals due to paravirt */
19172 extern void set_cpu_sibling_map(int cpu);
19173@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19174 extern int safe_smp_processor_id(void);
19175
19176 #elif defined(CONFIG_X86_64_SMP)
19177-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19178-
19179-#define stack_smp_processor_id() \
19180-({ \
19181- struct thread_info *ti; \
19182- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19183- ti->cpu; \
19184-})
19185+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19186+#define stack_smp_processor_id() raw_smp_processor_id()
19187 #define safe_smp_processor_id() smp_processor_id()
19188
19189 #endif
19190diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19191index 6a99859..03cb807 100644
19192--- a/arch/x86/include/asm/stackprotector.h
19193+++ b/arch/x86/include/asm/stackprotector.h
19194@@ -47,7 +47,7 @@
19195 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19196 */
19197 #define GDT_STACK_CANARY_INIT \
19198- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19199+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19200
19201 /*
19202 * Initialize the stackprotector canary value.
19203@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19204
19205 static inline void load_stack_canary_segment(void)
19206 {
19207-#ifdef CONFIG_X86_32
19208+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19209 asm volatile ("mov %0, %%gs" : : "r" (0));
19210 #endif
19211 }
19212diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19213index 70bbe39..4ae2bd4 100644
19214--- a/arch/x86/include/asm/stacktrace.h
19215+++ b/arch/x86/include/asm/stacktrace.h
19216@@ -11,28 +11,20 @@
19217
19218 extern int kstack_depth_to_print;
19219
19220-struct thread_info;
19221+struct task_struct;
19222 struct stacktrace_ops;
19223
19224-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19225- unsigned long *stack,
19226- unsigned long bp,
19227- const struct stacktrace_ops *ops,
19228- void *data,
19229- unsigned long *end,
19230- int *graph);
19231+typedef unsigned long walk_stack_t(struct task_struct *task,
19232+ void *stack_start,
19233+ unsigned long *stack,
19234+ unsigned long bp,
19235+ const struct stacktrace_ops *ops,
19236+ void *data,
19237+ unsigned long *end,
19238+ int *graph);
19239
19240-extern unsigned long
19241-print_context_stack(struct thread_info *tinfo,
19242- unsigned long *stack, unsigned long bp,
19243- const struct stacktrace_ops *ops, void *data,
19244- unsigned long *end, int *graph);
19245-
19246-extern unsigned long
19247-print_context_stack_bp(struct thread_info *tinfo,
19248- unsigned long *stack, unsigned long bp,
19249- const struct stacktrace_ops *ops, void *data,
19250- unsigned long *end, int *graph);
19251+extern walk_stack_t print_context_stack;
19252+extern walk_stack_t print_context_stack_bp;
19253
19254 /* Generic stack tracer with callbacks */
19255
19256@@ -40,7 +32,7 @@ struct stacktrace_ops {
19257 void (*address)(void *data, unsigned long address, int reliable);
19258 /* On negative return stop dumping */
19259 int (*stack)(void *data, char *name);
19260- walk_stack_t walk_stack;
19261+ walk_stack_t *walk_stack;
19262 };
19263
19264 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
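
The stacktrace.h hunk replaces a pointer typedef and two full prototypes with a single function typedef, so print_context_stack and print_context_stack_bp are declared as functions of that type and the ops struct holds a walk_stack_t *. The underlying C idiom, in a runnable sketch:

#include <stdio.h>

/* A function typedef names a function *type*; 'extern walk_stack_t f;'
 * declares a function with exactly that prototype, and 'walk_stack_t *'
 * is a pointer to one. */
typedef unsigned long walk_stack_t(unsigned long *stack, int depth);

static unsigned long print_context_stack(unsigned long *stack, int depth)
{
    printf("walking %d frames from %p\n", depth, (void *)stack);
    return 0;
}

struct stacktrace_ops {
    walk_stack_t *walk_stack;
};

int main(void)
{
    unsigned long frame[4] = { 0 };
    struct stacktrace_ops ops = { .walk_stack = print_context_stack };
    ops.walk_stack(frame, 4);
    return 0;
}
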
19265diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19266index 751bf4b..a1278b5 100644
19267--- a/arch/x86/include/asm/switch_to.h
19268+++ b/arch/x86/include/asm/switch_to.h
19269@@ -112,7 +112,7 @@ do { \
19270 "call __switch_to\n\t" \
19271 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19272 __switch_canary \
19273- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19274+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19275 "movq %%rax,%%rdi\n\t" \
19276 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19277 "jnz ret_from_fork\n\t" \
19278@@ -123,7 +123,7 @@ do { \
19279 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19280 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19281 [_tif_fork] "i" (_TIF_FORK), \
19282- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19283+ [thread_info] "m" (current_tinfo), \
19284 [current_task] "m" (current_task) \
19285 __switch_canary_iparam \
19286 : "memory", "cc" __EXTRA_CLOBBER)
19287diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19288index 1d4e4f2..506db18 100644
19289--- a/arch/x86/include/asm/thread_info.h
19290+++ b/arch/x86/include/asm/thread_info.h
19291@@ -24,7 +24,6 @@ struct exec_domain;
19292 #include <linux/atomic.h>
19293
19294 struct thread_info {
19295- struct task_struct *task; /* main task structure */
19296 struct exec_domain *exec_domain; /* execution domain */
19297 __u32 flags; /* low level flags */
19298 __u32 status; /* thread synchronous flags */
19299@@ -32,13 +31,13 @@ struct thread_info {
19300 int saved_preempt_count;
19301 mm_segment_t addr_limit;
19302 void __user *sysenter_return;
19303+ unsigned long lowest_stack;
19304 unsigned int sig_on_uaccess_error:1;
19305 unsigned int uaccess_err:1; /* uaccess failed */
19306 };
19307
19308-#define INIT_THREAD_INFO(tsk) \
19309+#define INIT_THREAD_INFO \
19310 { \
19311- .task = &tsk, \
19312 .exec_domain = &default_exec_domain, \
19313 .flags = 0, \
19314 .cpu = 0, \
19315@@ -46,7 +45,7 @@ struct thread_info {
19316 .addr_limit = KERNEL_DS, \
19317 }
19318
19319-#define init_thread_info (init_thread_union.thread_info)
19320+#define init_thread_info (init_thread_union.stack)
19321 #define init_stack (init_thread_union.stack)
19322
19323 #else /* !__ASSEMBLY__ */
19324@@ -86,6 +85,7 @@ struct thread_info {
19325 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19326 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19327 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19328+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19329
19330 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19331 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19332@@ -109,17 +109,18 @@ struct thread_info {
19333 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19334 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19335 #define _TIF_X32 (1 << TIF_X32)
19336+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19337
19338 /* work to do in syscall_trace_enter() */
19339 #define _TIF_WORK_SYSCALL_ENTRY \
19340 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19341 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19342- _TIF_NOHZ)
19343+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19344
19345 /* work to do in syscall_trace_leave() */
19346 #define _TIF_WORK_SYSCALL_EXIT \
19347 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19348- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19349+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19350
19351 /* work to do on interrupt/exception return */
19352 #define _TIF_WORK_MASK \
19353@@ -130,7 +131,7 @@ struct thread_info {
19354 /* work to do on any return to user space */
19355 #define _TIF_ALLWORK_MASK \
19356 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19357- _TIF_NOHZ)
19358+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19359
19360 /* Only used for 64 bit */
19361 #define _TIF_DO_NOTIFY_MASK \
19362@@ -145,7 +146,6 @@ struct thread_info {
19363 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19364
19365 #define STACK_WARN (THREAD_SIZE/8)
19366-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19367
19368 /*
19369 * macros/functions for gaining access to the thread information structure
19370@@ -156,12 +156,11 @@ struct thread_info {
19371
19372 DECLARE_PER_CPU(unsigned long, kernel_stack);
19373
19374+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19375+
19376 static inline struct thread_info *current_thread_info(void)
19377 {
19378- struct thread_info *ti;
19379- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19380- KERNEL_STACK_OFFSET - THREAD_SIZE);
19381- return ti;
19382+ return this_cpu_read_stable(current_tinfo);
19383 }
19384
19385 static inline unsigned long current_stack_pointer(void)
19386@@ -179,14 +178,7 @@ static inline unsigned long current_stack_pointer(void)
19387
19388 /* how to get the thread information struct from ASM */
19389 #define GET_THREAD_INFO(reg) \
19390- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19391- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19392-
19393-/*
19394- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19395- * a certain register (to be used in assembler memory operands).
19396- */
19397-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19398+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19399
19400 #endif
19401
19402@@ -242,5 +234,12 @@ static inline bool is_ia32_task(void)
19403 extern void arch_task_cache_init(void);
19404 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19405 extern void arch_release_task_struct(struct task_struct *tsk);
19406+
19407+#define __HAVE_THREAD_FUNCTIONS
19408+#define task_thread_info(task) (&(task)->tinfo)
19409+#define task_stack_page(task) ((task)->stack)
19410+#define setup_thread_stack(p, org) do {} while (0)
19411+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19412+
19413 #endif
19414 #endif /* _ASM_X86_THREAD_INFO_H */
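
current_thread_info() above stops deriving the thread_info address from kernel_stack minus KERNEL_STACK_OFFSET and instead reads a dedicated per-CPU pointer maintained at context switch — which is also why struct thread_info could drop its task back-pointer. A userspace analogy, with a thread-local standing in for the per-CPU variable:

#include <stdio.h>

struct thread_info { unsigned flags; };

/* Stand-in for DECLARE_PER_CPU(struct thread_info *, current_tinfo):
 * one pointer per CPU (here, per thread), written at context switch. */
static __thread struct thread_info *current_tinfo;

static struct thread_info *current_thread_info(void)
{
    return current_tinfo;            /* one load, no stack-pointer masking */
}

int main(void)
{
    struct thread_info ti = { .flags = 0 };
    current_tinfo = &ti;             /* what the context-switch path would do */
    printf("flags=%u\n", current_thread_info()->flags);
    return 0;
}
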
19415diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19416index cd79194..e7a9491 100644
19417--- a/arch/x86/include/asm/tlbflush.h
19418+++ b/arch/x86/include/asm/tlbflush.h
19419@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19420
19421 static inline void __native_flush_tlb(void)
19422 {
19423+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19424+ u64 descriptor[2];
19425+
19426+ descriptor[0] = PCID_KERNEL;
19427+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19428+ return;
19429+ }
19430+
19431+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19432+ if (static_cpu_has(X86_FEATURE_PCID)) {
19433+ unsigned int cpu = raw_get_cpu();
19434+
19435+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19436+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19437+ raw_put_cpu_no_resched();
19438+ return;
19439+ }
19440+#endif
19441+
19442 native_write_cr3(native_read_cr3());
19443 }
19444
19445 static inline void __native_flush_tlb_global_irq_disabled(void)
19446 {
19447- unsigned long cr4;
19448+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19449+ u64 descriptor[2];
19450
19451- cr4 = this_cpu_read(cpu_tlbstate.cr4);
19452- /* clear PGE */
19453- native_write_cr4(cr4 & ~X86_CR4_PGE);
19454- /* write old PGE again and flush TLBs */
19455- native_write_cr4(cr4);
19456+ descriptor[0] = PCID_KERNEL;
19457+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19458+ } else {
19459+ unsigned long cr4;
19460+
19461+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
19462+ /* clear PGE */
19463+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19464+ /* write old PGE again and flush TLBs */
19465+ native_write_cr4(cr4);
19466+ }
19467 }
19468
19469 static inline void __native_flush_tlb_global(void)
19470@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19471
19472 static inline void __native_flush_tlb_single(unsigned long addr)
19473 {
19474+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19475+ u64 descriptor[2];
19476+
19477+ descriptor[0] = PCID_KERNEL;
19478+ descriptor[1] = addr;
19479+
19480+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19481+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19482+ if (addr < TASK_SIZE_MAX)
19483+ descriptor[1] += pax_user_shadow_base;
19484+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19485+ }
19486+
19487+ descriptor[0] = PCID_USER;
19488+ descriptor[1] = addr;
19489+#endif
19490+
19491+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19492+ return;
19493+ }
19494+
19495+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19496+ if (static_cpu_has(X86_FEATURE_PCID)) {
19497+ unsigned int cpu = raw_get_cpu();
19498+
19499+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19500+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19501+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19502+ raw_put_cpu_no_resched();
19503+
19504+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19505+ addr += pax_user_shadow_base;
19506+ }
19507+#endif
19508+
19509 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19510 }
19511
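
The INVPCID paths added above all build the same 16-byte descriptor: the PCID in the low word of descriptor[0], the linear address in descriptor[1], and the invalidation type in a register; the .byte encoding from the processor.h hunk is used because older assemblers lack the mnemonic. A sketch of the descriptor layout — the instruction itself is CPL0-only, so it appears here only as a comment:

#include <stdint.h>
#include <stdio.h>

#define INVPCID_SINGLE_ADDRESS 0ULL
#define PCID_KERNEL            0ULL

int main(void)
{
    /* The kernel would then issue, with the type in RAX and the
     * descriptor address in RDX:
     *   asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"   // invpcid (%rdx),%rax
     *                : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS)
     *                : "memory");
     */
    uint64_t descriptor[2] = { PCID_KERNEL, 0xffffffff81000000ULL };
    printf("type=%llu pcid=%llu addr=%#llx\n",
           (unsigned long long)INVPCID_SINGLE_ADDRESS,
           (unsigned long long)descriptor[0],
           (unsigned long long)descriptor[1]);
    return 0;
}
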
19512diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19513index ace9dec..3f9e253 100644
19514--- a/arch/x86/include/asm/uaccess.h
19515+++ b/arch/x86/include/asm/uaccess.h
19516@@ -7,6 +7,7 @@
19517 #include <linux/compiler.h>
19518 #include <linux/thread_info.h>
19519 #include <linux/string.h>
19520+#include <linux/spinlock.h>
19521 #include <asm/asm.h>
19522 #include <asm/page.h>
19523 #include <asm/smap.h>
19524@@ -29,7 +30,12 @@
19525
19526 #define get_ds() (KERNEL_DS)
19527 #define get_fs() (current_thread_info()->addr_limit)
19528+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19529+void __set_fs(mm_segment_t x);
19530+void set_fs(mm_segment_t x);
19531+#else
19532 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19533+#endif
19534
19535 #define segment_eq(a, b) ((a).seg == (b).seg)
19536
19537@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19538 * checks that the pointer is in the user space range - after calling
19539 * this function, memory access functions may still return -EFAULT.
19540 */
19541-#define access_ok(type, addr, size) \
19542- likely(!__range_not_ok(addr, size, user_addr_max()))
19543+extern int _cond_resched(void);
19544+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19545+#define access_ok(type, addr, size) \
19546+({ \
19547+ unsigned long __size = size; \
19548+ unsigned long __addr = (unsigned long)addr; \
19549+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19550+ if (__ret_ao && __size) { \
19551+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19552+ unsigned long __end_ao = __addr + __size - 1; \
19553+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19554+ while (__addr_ao <= __end_ao) { \
19555+ char __c_ao; \
19556+ __addr_ao += PAGE_SIZE; \
19557+ if (__size > PAGE_SIZE) \
19558+ _cond_resched(); \
19559+ if (__get_user(__c_ao, (char __user *)__addr)) \
19560+ break; \
19561+ if (type != VERIFY_WRITE) { \
19562+ __addr = __addr_ao; \
19563+ continue; \
19564+ } \
19565+ if (__put_user(__c_ao, (char __user *)__addr)) \
19566+ break; \
19567+ __addr = __addr_ao; \
19568+ } \
19569+ } \
19570+ } \
19571+ __ret_ao; \
19572+})
19573
19574 /*
19575 * The exception table consists of pairs of addresses relative to the
19576@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19577 extern int __get_user_bad(void);
19578
19579 /*
19580- * This is a type: either unsigned long, if the argument fits into
19581- * that type, or otherwise unsigned long long.
19582+ * This is a type: either (un)signed int, if the argument fits into
19583+ * that type, or otherwise (un)signed long long.
19584 */
19585 #define __inttype(x) \
19586-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19587+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19588+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19589+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19590
19591 /**
19592 * get_user: - Get a simple variable from user space.
19593@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19594 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19595 __chk_user_ptr(ptr); \
19596 might_fault(); \
19597+ pax_open_userland(); \
19598 asm volatile("call __get_user_%P3" \
19599 : "=a" (__ret_gu), "=r" (__val_gu) \
19600 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19601 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19602+ pax_close_userland(); \
19603 __ret_gu; \
19604 })
19605
19606@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19607 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19608 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19609
19610-
19611+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19612+#define __copyuser_seg "gs;"
19613+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19614+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19615+#else
19616+#define __copyuser_seg
19617+#define __COPYUSER_SET_ES
19618+#define __COPYUSER_RESTORE_ES
19619+#endif
19620
19621 #ifdef CONFIG_X86_32
19622 #define __put_user_asm_u64(x, addr, err, errret) \
19623 asm volatile(ASM_STAC "\n" \
19624- "1: movl %%eax,0(%2)\n" \
19625- "2: movl %%edx,4(%2)\n" \
19626+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19627+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19628 "3: " ASM_CLAC "\n" \
19629 ".section .fixup,\"ax\"\n" \
19630 "4: movl %3,%0\n" \
19631@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19632
19633 #define __put_user_asm_ex_u64(x, addr) \
19634 asm volatile(ASM_STAC "\n" \
19635- "1: movl %%eax,0(%1)\n" \
19636- "2: movl %%edx,4(%1)\n" \
19637+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19638+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19639 "3: " ASM_CLAC "\n" \
19640 _ASM_EXTABLE_EX(1b, 2b) \
19641 _ASM_EXTABLE_EX(2b, 3b) \
19642@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19643 __typeof__(*(ptr)) __pu_val; \
19644 __chk_user_ptr(ptr); \
19645 might_fault(); \
19646- __pu_val = x; \
19647+ __pu_val = (x); \
19648+ pax_open_userland(); \
19649 switch (sizeof(*(ptr))) { \
19650 case 1: \
19651 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19652@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19653 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19654 break; \
19655 } \
19656+ pax_close_userland(); \
19657 __ret_pu; \
19658 })
19659
19660@@ -355,8 +403,10 @@ do { \
19661 } while (0)
19662
19663 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19664+do { \
19665+ pax_open_userland(); \
19666 asm volatile(ASM_STAC "\n" \
19667- "1: mov"itype" %2,%"rtype"1\n" \
19668+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19669 "2: " ASM_CLAC "\n" \
19670 ".section .fixup,\"ax\"\n" \
19671 "3: mov %3,%0\n" \
19672@@ -364,8 +414,10 @@ do { \
19673 " jmp 2b\n" \
19674 ".previous\n" \
19675 _ASM_EXTABLE(1b, 3b) \
19676- : "=r" (err), ltype(x) \
19677- : "m" (__m(addr)), "i" (errret), "0" (err))
19678+ : "=r" (err), ltype (x) \
19679+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19680+ pax_close_userland(); \
19681+} while (0)
19682
19683 #define __get_user_size_ex(x, ptr, size) \
19684 do { \
19685@@ -389,7 +441,7 @@ do { \
19686 } while (0)
19687
19688 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19689- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19690+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19691 "2:\n" \
19692 _ASM_EXTABLE_EX(1b, 2b) \
19693 : ltype(x) : "m" (__m(addr)))
19694@@ -406,13 +458,24 @@ do { \
19695 int __gu_err; \
19696 unsigned long __gu_val; \
19697 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19698- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19699+ (x) = (__typeof__(*(ptr)))__gu_val; \
19700 __gu_err; \
19701 })
19702
19703 /* FIXME: this hack is definitely wrong -AK */
19704 struct __large_struct { unsigned long buf[100]; };
19705-#define __m(x) (*(struct __large_struct __user *)(x))
19706+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19707+#define ____m(x) \
19708+({ \
19709+ unsigned long ____x = (unsigned long)(x); \
19710+ if (____x < pax_user_shadow_base) \
19711+ ____x += pax_user_shadow_base; \
19712+ (typeof(x))____x; \
19713+})
19714+#else
19715+#define ____m(x) (x)
19716+#endif
19717+#define __m(x) (*(struct __large_struct __user *)____m(x))
19718
19719 /*
19720 * Tell gcc we read from memory instead of writing: this is because
19721@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19722 * aliasing issues.
19723 */
19724 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19725+do { \
19726+ pax_open_userland(); \
19727 asm volatile(ASM_STAC "\n" \
19728- "1: mov"itype" %"rtype"1,%2\n" \
19729+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19730 "2: " ASM_CLAC "\n" \
19731 ".section .fixup,\"ax\"\n" \
19732 "3: mov %3,%0\n" \
19733@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19734 ".previous\n" \
19735 _ASM_EXTABLE(1b, 3b) \
19736 : "=r"(err) \
19737- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19738+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19739+ pax_close_userland(); \
19740+} while (0)
19741
19742 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19743- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19744+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19745 "2:\n" \
19746 _ASM_EXTABLE_EX(1b, 2b) \
19747 : : ltype(x), "m" (__m(addr)))
19748@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19749 */
19750 #define uaccess_try do { \
19751 current_thread_info()->uaccess_err = 0; \
19752+ pax_open_userland(); \
19753 stac(); \
19754 barrier();
19755
19756 #define uaccess_catch(err) \
19757 clac(); \
19758+ pax_close_userland(); \
19759 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19760 } while (0)
19761
19762@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19763 * On error, the variable @x is set to zero.
19764 */
19765
19766+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19767+#define __get_user(x, ptr) get_user((x), (ptr))
19768+#else
19769 #define __get_user(x, ptr) \
19770 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19771+#endif
19772
19773 /**
19774 * __put_user: - Write a simple value into user space, with less checking.
19775@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19776 * Returns zero on success, or -EFAULT on error.
19777 */
19778
19779+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19780+#define __put_user(x, ptr) put_user((x), (ptr))
19781+#else
19782 #define __put_user(x, ptr) \
19783 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19784+#endif
19785
19786 #define __get_user_unaligned __get_user
19787 #define __put_user_unaligned __put_user
19788@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19789 #define get_user_ex(x, ptr) do { \
19790 unsigned long __gue_val; \
19791 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19792- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19793+ (x) = (__typeof__(*(ptr)))__gue_val; \
19794 } while (0)
19795
19796 #define put_user_try uaccess_try
19797@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19798 extern __must_check long strnlen_user(const char __user *str, long n);
19799
19800 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19801-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19802+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19803
19804 extern void __cmpxchg_wrong_size(void)
19805 __compiletime_error("Bad argument size for cmpxchg");
19806@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19807 __typeof__(ptr) __uval = (uval); \
19808 __typeof__(*(ptr)) __old = (old); \
19809 __typeof__(*(ptr)) __new = (new); \
19810+ pax_open_userland(); \
19811 switch (size) { \
19812 case 1: \
19813 { \
19814 asm volatile("\t" ASM_STAC "\n" \
19815- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19816+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19817 "2:\t" ASM_CLAC "\n" \
19818 "\t.section .fixup, \"ax\"\n" \
19819 "3:\tmov %3, %0\n" \
19820 "\tjmp 2b\n" \
19821 "\t.previous\n" \
19822 _ASM_EXTABLE(1b, 3b) \
19823- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19824+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19825 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19826 : "memory" \
19827 ); \
19828@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19829 case 2: \
19830 { \
19831 asm volatile("\t" ASM_STAC "\n" \
19832- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19833+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19834 "2:\t" ASM_CLAC "\n" \
19835 "\t.section .fixup, \"ax\"\n" \
19836 "3:\tmov %3, %0\n" \
19837 "\tjmp 2b\n" \
19838 "\t.previous\n" \
19839 _ASM_EXTABLE(1b, 3b) \
19840- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19841+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19842 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19843 : "memory" \
19844 ); \
19845@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19846 case 4: \
19847 { \
19848 asm volatile("\t" ASM_STAC "\n" \
19849- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19850+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19851 "2:\t" ASM_CLAC "\n" \
19852 "\t.section .fixup, \"ax\"\n" \
19853 "3:\tmov %3, %0\n" \
19854 "\tjmp 2b\n" \
19855 "\t.previous\n" \
19856 _ASM_EXTABLE(1b, 3b) \
19857- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19858+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19859 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19860 : "memory" \
19861 ); \
19862@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19863 __cmpxchg_wrong_size(); \
19864 \
19865 asm volatile("\t" ASM_STAC "\n" \
19866- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19867+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19868 "2:\t" ASM_CLAC "\n" \
19869 "\t.section .fixup, \"ax\"\n" \
19870 "3:\tmov %3, %0\n" \
19871 "\tjmp 2b\n" \
19872 "\t.previous\n" \
19873 _ASM_EXTABLE(1b, 3b) \
19874- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19875+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19876 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19877 : "memory" \
19878 ); \
19879@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19880 default: \
19881 __cmpxchg_wrong_size(); \
19882 } \
19883+ pax_close_userland(); \
19884 *__uval = __old; \
19885 __ret; \
19886 })
19887@@ -636,17 +715,6 @@ extern struct movsl_mask {
19888
19889 #define ARCH_HAS_NOCACHE_UACCESS 1
19890
19891-#ifdef CONFIG_X86_32
19892-# include <asm/uaccess_32.h>
19893-#else
19894-# include <asm/uaccess_64.h>
19895-#endif
19896-
19897-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19898- unsigned n);
19899-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19900- unsigned n);
19901-
19902 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19903 # define copy_user_diag __compiletime_error
19904 #else
19905@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19906 extern void copy_user_diag("copy_from_user() buffer size is too small")
19907 copy_from_user_overflow(void);
19908 extern void copy_user_diag("copy_to_user() buffer size is too small")
19909-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19910+copy_to_user_overflow(void);
19911
19912 #undef copy_user_diag
19913
19914@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19915
19916 extern void
19917 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19918-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19919+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19920 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19921
19922 #else
19923@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19924
19925 #endif
19926
19927+#ifdef CONFIG_X86_32
19928+# include <asm/uaccess_32.h>
19929+#else
19930+# include <asm/uaccess_64.h>
19931+#endif
19932+
19933 static inline unsigned long __must_check
19934 copy_from_user(void *to, const void __user *from, unsigned long n)
19935 {
19936- int sz = __compiletime_object_size(to);
19937+ size_t sz = __compiletime_object_size(to);
19938
19939 might_fault();
19940
19941@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19942 * case, and do only runtime checking for non-constant sizes.
19943 */
19944
19945- if (likely(sz < 0 || sz >= n))
19946- n = _copy_from_user(to, from, n);
19947- else if(__builtin_constant_p(n))
19948- copy_from_user_overflow();
19949- else
19950- __copy_from_user_overflow(sz, n);
19951+ if (likely(sz != (size_t)-1 && sz < n)) {
19952+ if(__builtin_constant_p(n))
19953+ copy_from_user_overflow();
19954+ else
19955+ __copy_from_user_overflow(sz, n);
19956+ } else if (access_ok(VERIFY_READ, from, n))
19957+ n = __copy_from_user(to, from, n);
19958+ else if ((long)n > 0)
19959+ memset(to, 0, n);
19960
19961 return n;
19962 }
19963@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19964 static inline unsigned long __must_check
19965 copy_to_user(void __user *to, const void *from, unsigned long n)
19966 {
19967- int sz = __compiletime_object_size(from);
19968+ size_t sz = __compiletime_object_size(from);
19969
19970 might_fault();
19971
19972 /* See the comment in copy_from_user() above. */
19973- if (likely(sz < 0 || sz >= n))
19974- n = _copy_to_user(to, from, n);
19975- else if(__builtin_constant_p(n))
19976- copy_to_user_overflow();
19977- else
19978- __copy_to_user_overflow(sz, n);
19979+ if (likely(sz != (size_t)-1 && sz < n)) {
19980+ if(__builtin_constant_p(n))
19981+ copy_to_user_overflow();
19982+ else
19983+ __copy_to_user_overflow(sz, n);
19984+ } else if (access_ok(VERIFY_WRITE, to, n))
19985+ n = __copy_to_user(to, from, n);
19986
19987 return n;
19988 }
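The copy_from_user()/copy_to_user() rewrite above inverts the old check order: instead of copying whenever the compile-time object size is unknown or large enough, it first rejects any request that provably overflows the object, then validates the user pointer with access_ok(), and on a bad read pointer scrubs the kernel buffer with memset() so stale kernel memory cannot leak through a partially filled destination. A minimal userspace model of that flow, with invented names (bos(), model_copy_from_user()) and __builtin_object_size() standing in for __compiletime_object_size():

#include <stdio.h>
#include <string.h>

#define bos(p) __builtin_object_size(p, 0)   /* (size_t)-1 when unknown */

static size_t model_copy_from_user(void *to, size_t sz,
                                   const void *from, size_t n, int ok_read)
{
    if (sz != (size_t)-1 && sz < n) {
        /* the kernel reports copy_from_user_overflow() here */
    } else if (ok_read) {
        memcpy(to, from, n);      /* stands in for __copy_from_user() */
        n = 0;                    /* everything copied in this model */
    } else if ((long)n > 0) {
        memset(to, 0, n);         /* bad user pointer: scrub the buffer */
    }
    return n;                     /* bytes left uncopied */
}

int main(void)
{
    char buf[16], src[64] = "payload";
    /* fits the destination object: copied */
    printf("left=%zu\n", model_copy_from_user(buf, bos(buf), src, sizeof(buf), 1));
    /* 32 > 16: rejected before any byte moves */
    printf("left=%zu\n", model_copy_from_user(buf, bos(buf), src, 32, 1));
    return 0;
}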
19989diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19990index 3c03a5d..edb68ae 100644
19991--- a/arch/x86/include/asm/uaccess_32.h
19992+++ b/arch/x86/include/asm/uaccess_32.h
19993@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19994 * anything, so this is accurate.
19995 */
19996
19997-static __always_inline unsigned long __must_check
19998+static __always_inline __size_overflow(3) unsigned long __must_check
19999 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20000 {
20001+ if ((long)n < 0)
20002+ return n;
20003+
20004+ check_object_size(from, n, true);
20005+
20006 if (__builtin_constant_p(n)) {
20007 unsigned long ret;
20008
20009@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20010 __copy_to_user(void __user *to, const void *from, unsigned long n)
20011 {
20012 might_fault();
20013+
20014 return __copy_to_user_inatomic(to, from, n);
20015 }
20016
20017-static __always_inline unsigned long
20018+static __always_inline __size_overflow(3) unsigned long
20019 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20020 {
20021+ if ((long)n < 0)
20022+ return n;
20023+
20024 /* Avoid zeroing the tail if the copy fails..
20025 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20026 * but as the zeroing behaviour is only significant when n is not
20027@@ -137,6 +146,12 @@ static __always_inline unsigned long
20028 __copy_from_user(void *to, const void __user *from, unsigned long n)
20029 {
20030 might_fault();
20031+
20032+ if ((long)n < 0)
20033+ return n;
20034+
20035+ check_object_size(to, n, false);
20036+
20037 if (__builtin_constant_p(n)) {
20038 unsigned long ret;
20039
20040@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20041 const void __user *from, unsigned long n)
20042 {
20043 might_fault();
20044+
20045+ if ((long)n < 0)
20046+ return n;
20047+
20048 if (__builtin_constant_p(n)) {
20049 unsigned long ret;
20050
20051@@ -181,7 +200,10 @@ static __always_inline unsigned long
20052 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20053 unsigned long n)
20054 {
20055- return __copy_from_user_ll_nocache_nozero(to, from, n);
20056+ if ((long)n < 0)
20057+ return n;
20058+
20059+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20060 }
20061
20062 #endif /* _ASM_X86_UACCESS_32_H */
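The "(long)n < 0" guards added throughout uaccess_32.h catch length underflows early: a length computed with signed arithmetic that goes negative becomes an enormous unsigned count, and the copy is refused before touching memory. A minimal sketch of the idea, not kernel code:

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
    if ((long)n < 0)    /* top bit set: implausible multi-gigabyte request */
        return n;       /* report everything as uncopied */
    /* ... the real __copy_* body would run here ... */
    return 0;
}

int main(void)
{
    unsigned long have = 8, want = 12;
    unsigned long n = have - want;            /* wraps to a huge value */
    printf("left=%lu\n", guarded_copy(n));    /* rejected, nothing copied */
    return 0;
}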
20063diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20064index f2f9b39..2ae1bf8 100644
20065--- a/arch/x86/include/asm/uaccess_64.h
20066+++ b/arch/x86/include/asm/uaccess_64.h
20067@@ -10,6 +10,9 @@
20068 #include <asm/alternative.h>
20069 #include <asm/cpufeature.h>
20070 #include <asm/page.h>
20071+#include <asm/pgtable.h>
20072+
20073+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20074
20075 /*
20076 * Copy To/From Userspace
20077@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20078 __must_check unsigned long
20079 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20080
20081-static __always_inline __must_check unsigned long
20082-copy_user_generic(void *to, const void *from, unsigned len)
20083+static __always_inline __must_check __size_overflow(3) unsigned long
20084+copy_user_generic(void *to, const void *from, unsigned long len)
20085 {
20086 unsigned ret;
20087
20088@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20089 }
20090
20091 __must_check unsigned long
20092-copy_in_user(void __user *to, const void __user *from, unsigned len);
20093+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20094
20095 static __always_inline __must_check
20096-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20097+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20098 {
20099- int ret = 0;
20100+ size_t sz = __compiletime_object_size(dst);
20101+ unsigned ret = 0;
20102+
20103+ if (size > INT_MAX)
20104+ return size;
20105+
20106+ check_object_size(dst, size, false);
20107+
20108+#ifdef CONFIG_PAX_MEMORY_UDEREF
20109+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20110+ return size;
20111+#endif
20112+
20113+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20114+ if(__builtin_constant_p(size))
20115+ copy_from_user_overflow();
20116+ else
20117+ __copy_from_user_overflow(sz, size);
20118+ return size;
20119+ }
20120
20121 if (!__builtin_constant_p(size))
20122- return copy_user_generic(dst, (__force void *)src, size);
20123+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20124 switch (size) {
20125- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20126+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20127 ret, "b", "b", "=q", 1);
20128 return ret;
20129- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20130+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20131 ret, "w", "w", "=r", 2);
20132 return ret;
20133- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20134+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20135 ret, "l", "k", "=r", 4);
20136 return ret;
20137- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20138+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20139 ret, "q", "", "=r", 8);
20140 return ret;
20141 case 10:
20142- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20143+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20144 ret, "q", "", "=r", 10);
20145 if (unlikely(ret))
20146 return ret;
20147 __get_user_asm(*(u16 *)(8 + (char *)dst),
20148- (u16 __user *)(8 + (char __user *)src),
20149+ (const u16 __user *)(8 + (const char __user *)src),
20150 ret, "w", "w", "=r", 2);
20151 return ret;
20152 case 16:
20153- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20154+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20155 ret, "q", "", "=r", 16);
20156 if (unlikely(ret))
20157 return ret;
20158 __get_user_asm(*(u64 *)(8 + (char *)dst),
20159- (u64 __user *)(8 + (char __user *)src),
20160+ (const u64 __user *)(8 + (const char __user *)src),
20161 ret, "q", "", "=r", 8);
20162 return ret;
20163 default:
20164- return copy_user_generic(dst, (__force void *)src, size);
20165+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20166 }
20167 }
20168
20169 static __always_inline __must_check
20170-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20171+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20172 {
20173 might_fault();
20174 return __copy_from_user_nocheck(dst, src, size);
20175 }
20176
20177 static __always_inline __must_check
20178-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20179+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20180 {
20181- int ret = 0;
20182+ size_t sz = __compiletime_object_size(src);
20183+ unsigned ret = 0;
20184+
20185+ if (size > INT_MAX)
20186+ return size;
20187+
20188+ check_object_size(src, size, true);
20189+
20190+#ifdef CONFIG_PAX_MEMORY_UDEREF
20191+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20192+ return size;
20193+#endif
20194+
20195+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20196+ if(__builtin_constant_p(size))
20197+ copy_to_user_overflow();
20198+ else
20199+ __copy_to_user_overflow(sz, size);
20200+ return size;
20201+ }
20202
20203 if (!__builtin_constant_p(size))
20204- return copy_user_generic((__force void *)dst, src, size);
20205+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20206 switch (size) {
20207- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20208+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20209 ret, "b", "b", "iq", 1);
20210 return ret;
20211- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20212+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20213 ret, "w", "w", "ir", 2);
20214 return ret;
20215- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20216+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20217 ret, "l", "k", "ir", 4);
20218 return ret;
20219- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20220+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20221 ret, "q", "", "er", 8);
20222 return ret;
20223 case 10:
20224- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20225+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20226 ret, "q", "", "er", 10);
20227 if (unlikely(ret))
20228 return ret;
20229 asm("":::"memory");
20230- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20231+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20232 ret, "w", "w", "ir", 2);
20233 return ret;
20234 case 16:
20235- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20236+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20237 ret, "q", "", "er", 16);
20238 if (unlikely(ret))
20239 return ret;
20240 asm("":::"memory");
20241- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20242+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20243 ret, "q", "", "er", 8);
20244 return ret;
20245 default:
20246- return copy_user_generic((__force void *)dst, src, size);
20247+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20248 }
20249 }
20250
20251 static __always_inline __must_check
20252-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20253+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20254 {
20255 might_fault();
20256 return __copy_to_user_nocheck(dst, src, size);
20257 }
20258
20259 static __always_inline __must_check
20260-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20261+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20262 {
20263- int ret = 0;
20264+ unsigned ret = 0;
20265
20266 might_fault();
20267+
20268+ if (size > INT_MAX)
20269+ return size;
20270+
20271+#ifdef CONFIG_PAX_MEMORY_UDEREF
20272+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20273+ return size;
20274+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20275+ return size;
20276+#endif
20277+
20278 if (!__builtin_constant_p(size))
20279- return copy_user_generic((__force void *)dst,
20280- (__force void *)src, size);
20281+ return copy_user_generic((__force_kernel void *)____m(dst),
20282+ (__force_kernel const void *)____m(src), size);
20283 switch (size) {
20284 case 1: {
20285 u8 tmp;
20286- __get_user_asm(tmp, (u8 __user *)src,
20287+ __get_user_asm(tmp, (const u8 __user *)src,
20288 ret, "b", "b", "=q", 1);
20289 if (likely(!ret))
20290 __put_user_asm(tmp, (u8 __user *)dst,
20291@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20292 }
20293 case 2: {
20294 u16 tmp;
20295- __get_user_asm(tmp, (u16 __user *)src,
20296+ __get_user_asm(tmp, (const u16 __user *)src,
20297 ret, "w", "w", "=r", 2);
20298 if (likely(!ret))
20299 __put_user_asm(tmp, (u16 __user *)dst,
20300@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20301
20302 case 4: {
20303 u32 tmp;
20304- __get_user_asm(tmp, (u32 __user *)src,
20305+ __get_user_asm(tmp, (const u32 __user *)src,
20306 ret, "l", "k", "=r", 4);
20307 if (likely(!ret))
20308 __put_user_asm(tmp, (u32 __user *)dst,
20309@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20310 }
20311 case 8: {
20312 u64 tmp;
20313- __get_user_asm(tmp, (u64 __user *)src,
20314+ __get_user_asm(tmp, (const u64 __user *)src,
20315 ret, "q", "", "=r", 8);
20316 if (likely(!ret))
20317 __put_user_asm(tmp, (u64 __user *)dst,
20318@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20319 return ret;
20320 }
20321 default:
20322- return copy_user_generic((__force void *)dst,
20323- (__force void *)src, size);
20324+ return copy_user_generic((__force_kernel void *)____m(dst),
20325+ (__force_kernel const void *)____m(src), size);
20326 }
20327 }
20328
20329-static __must_check __always_inline int
20330-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20331+static __must_check __always_inline unsigned long
20332+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20333 {
20334 return __copy_from_user_nocheck(dst, src, size);
20335 }
20336
20337-static __must_check __always_inline int
20338-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20339+static __must_check __always_inline unsigned long
20340+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20341 {
20342 return __copy_to_user_nocheck(dst, src, size);
20343 }
20344
20345-extern long __copy_user_nocache(void *dst, const void __user *src,
20346- unsigned size, int zerorest);
20347+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20348+ unsigned long size, int zerorest);
20349
20350-static inline int
20351-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20352+static inline unsigned long
20353+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20354 {
20355 might_fault();
20356+
20357+ if (size > INT_MAX)
20358+ return size;
20359+
20360+#ifdef CONFIG_PAX_MEMORY_UDEREF
20361+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20362+ return size;
20363+#endif
20364+
20365 return __copy_user_nocache(dst, src, size, 1);
20366 }
20367
20368-static inline int
20369+static inline unsigned long
20370 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20371- unsigned size)
20372+ unsigned long size)
20373 {
20374+ if (size > INT_MAX)
20375+ return size;
20376+
20377+#ifdef CONFIG_PAX_MEMORY_UDEREF
20378+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20379+ return size;
20380+#endif
20381+
20382 return __copy_user_nocache(dst, src, size, 0);
20383 }
20384
20385 unsigned long
20386-copy_user_handle_tail(char *to, char *from, unsigned len);
20387+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20388
20389 #endif /* _ASM_X86_UACCESS_64_H */
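Note that the widened __copy_*_nocheck() helpers above keep the kernel's constant-size fast path: when the compiler can prove the size, a single fixed-width access replaces the generic copier. A standalone model of that dispatch, using GNU C statement expressions as the kernel does, with copy_generic() as a stand-in for copy_user_generic():

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static unsigned long copy_generic(void *dst, const void *src, unsigned long n)
{
    memcpy(dst, src, n);
    return 0;
}

#define copy_fixed(dst, src, size) ({                                  \
    unsigned long __ret = 0;                                           \
    if (!__builtin_constant_p(size))                                   \
        __ret = copy_generic((dst), (src), (size));                    \
    else switch (size) {                                               \
    case 1: *(uint8_t  *)(dst) = *(const uint8_t  *)(src); break;      \
    case 2: *(uint16_t *)(dst) = *(const uint16_t *)(src); break;      \
    case 4: *(uint32_t *)(dst) = *(const uint32_t *)(src); break;      \
    case 8: *(uint64_t *)(dst) = *(const uint64_t *)(src); break;      \
    default: __ret = copy_generic((dst), (src), (size));               \
    }                                                                   \
    __ret; })

int main(void)
{
    uint64_t a = 0x1122334455667788ull, b = 0;
    copy_fixed(&b, &a, sizeof(b));   /* constant 8: single 64-bit move */
    printf("%llx\n", (unsigned long long)b);
    return 0;
}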
20390diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20391index 5b238981..77fdd78 100644
20392--- a/arch/x86/include/asm/word-at-a-time.h
20393+++ b/arch/x86/include/asm/word-at-a-time.h
20394@@ -11,7 +11,7 @@
20395 * and shift, for example.
20396 */
20397 struct word_at_a_time {
20398- const unsigned long one_bits, high_bits;
20399+ unsigned long one_bits, high_bits;
20400 };
20401
20402 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20403diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20404index f58a9c7..dc378042a 100644
20405--- a/arch/x86/include/asm/x86_init.h
20406+++ b/arch/x86/include/asm/x86_init.h
20407@@ -129,7 +129,7 @@ struct x86_init_ops {
20408 struct x86_init_timers timers;
20409 struct x86_init_iommu iommu;
20410 struct x86_init_pci pci;
20411-};
20412+} __no_const;
20413
20414 /**
20415 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20416@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20417 void (*setup_percpu_clockev)(void);
20418 void (*early_percpu_clock_init)(void);
20419 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20420-};
20421+} __no_const;
20422
20423 struct timespec;
20424
20425@@ -168,7 +168,7 @@ struct x86_platform_ops {
20426 void (*save_sched_clock_state)(void);
20427 void (*restore_sched_clock_state)(void);
20428 void (*apic_post_init)(void);
20429-};
20430+} __no_const;
20431
20432 struct pci_dev;
20433 struct msi_msg;
20434@@ -182,7 +182,7 @@ struct x86_msi_ops {
20435 void (*teardown_msi_irqs)(struct pci_dev *dev);
20436 void (*restore_msi_irqs)(struct pci_dev *dev);
20437 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20438-};
20439+} __no_const;
20440
20441 struct IO_APIC_route_entry;
20442 struct io_apic_irq_attr;
20443@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20444 unsigned int destination, int vector,
20445 struct io_apic_irq_attr *attr);
20446 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20447-};
20448+} __no_const;
20449
20450 extern struct x86_init_ops x86_init;
20451 extern struct x86_cpuinit_ops x86_cpuinit;
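The `__no_const` markers above are the escape hatch for grsecurity's constify gcc plugin, which treats structs consisting of function pointers as implicitly const; the x86_init/x86_cpuinit ops are legitimately rewritten during boot, so they opt out. A plain-C illustration of the default the plugin enforces (stub names, not the kernel's):

struct ops {
    void (*handler)(void);
};

static void real_handler(void) { }

/* what constify gives ops structs by default: the instance lands in
 * .rodata and retargeting the pointer is a compile-time error */
static const struct ops fixed_ops = {
    .handler = real_handler,
};

/* fixed_ops.handler = hijack;   -- rejected by the compiler */

int main(void)
{
    fixed_ops.handler();
    return 0;
}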
20452diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20453index 358dcd3..23c0bf1 100644
20454--- a/arch/x86/include/asm/xen/page.h
20455+++ b/arch/x86/include/asm/xen/page.h
20456@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20457 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20458 * cases needing an extended handling.
20459 */
20460-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20461+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20462 {
20463 unsigned long mfn;
20464
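The __size_overflow()/__intentional_overflow() markers sprinkled through these headers feed grsecurity's size_overflow gcc plugin: the first requests overflow instrumentation of the numbered argument(s), the second suppresses it for values that are meant to wrap. The stubs below only show where the attributes attach; a plugin-enabled build supplies the real expansions, and the my_* names are demo-only:

#include <stdio.h>

#define __size_overflow(...)
#define __intentional_overflow(...)

unsigned long my_clear_user(void *mem, unsigned long len) __size_overflow(2);

static inline unsigned long __intentional_overflow(-1)
my_pfn_to_mfn(unsigned long pfn)
{
    return pfn;   /* identity here; the real lookup may wrap by design */
}

int main(void)
{
    printf("%lu\n", my_pfn_to_mfn(42));
    return 0;
}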
20465diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20466index c9a6d68..cb57f42 100644
20467--- a/arch/x86/include/asm/xsave.h
20468+++ b/arch/x86/include/asm/xsave.h
20469@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20470 if (unlikely(err))
20471 return -EFAULT;
20472
20473+ pax_open_userland();
20474 __asm__ __volatile__(ASM_STAC "\n"
20475- "1:"XSAVE"\n"
20476+ "1:"
20477+ __copyuser_seg
20478+ XSAVE"\n"
20479 "2: " ASM_CLAC "\n"
20480 xstate_fault
20481 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20482 : "memory");
20483+ pax_close_userland();
20484 return err;
20485 }
20486
20487@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20488 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20489 {
20490 int err = 0;
20491- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20492+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20493 u32 lmask = mask;
20494 u32 hmask = mask >> 32;
20495
20496+ pax_open_userland();
20497 __asm__ __volatile__(ASM_STAC "\n"
20498- "1:"XRSTOR"\n"
20499+ "1:"
20500+ __copyuser_seg
20501+ XRSTOR"\n"
20502 "2: " ASM_CLAC "\n"
20503 xstate_fault
20504 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20505 : "memory"); /* memory required? */
20506+ pax_close_userland();
20507 return err;
20508 }
20509
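Under UDEREF userland is not normally accessible while the kernel runs, so instructions that touch user buffers directly (XSAVE/XRSTOR here, the cmpxchg hunks earlier) are bracketed by pax_open_userland()/pax_close_userland() and given the __copyuser_seg override. The recurring shape, with stub bodies for illustration only:

static inline void pax_open_userland(void)  { /* make userland accessible */ }
static inline void pax_close_userland(void) { /* revoke access again */ }

#define user_access(stmt) do {   \
    pax_open_userland();         \
    stmt;                        \
    pax_close_userland();        \
} while (0)

int main(void)
{
    int err = 0;
    user_access(err = 0 /* the XSAVE to the user buffer would run here */);
    return err;
}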
20510diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20511index d993e33..8db1b18 100644
20512--- a/arch/x86/include/uapi/asm/e820.h
20513+++ b/arch/x86/include/uapi/asm/e820.h
20514@@ -58,7 +58,7 @@ struct e820map {
20515 #define ISA_START_ADDRESS 0xa0000
20516 #define ISA_END_ADDRESS 0x100000
20517
20518-#define BIOS_BEGIN 0x000a0000
20519+#define BIOS_BEGIN 0x000c0000
20520 #define BIOS_END 0x00100000
20521
20522 #define BIOS_ROM_BASE 0xffe00000
20523diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20524index 7b0a55a..ad115bf 100644
20525--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20526+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20527@@ -49,7 +49,6 @@
20528 #define EFLAGS 144
20529 #define RSP 152
20530 #define SS 160
20531-#define ARGOFFSET R11
20532 #endif /* __ASSEMBLY__ */
20533
20534 /* top of stack page */
20535diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20536index cdb1b70..426434c 100644
20537--- a/arch/x86/kernel/Makefile
20538+++ b/arch/x86/kernel/Makefile
20539@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20540 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20541 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20542 obj-y += probe_roms.o
20543-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20544+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20545 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20546 obj-$(CONFIG_X86_64) += mcount_64.o
20547 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20548diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20549index 803b684..68c64f1 100644
20550--- a/arch/x86/kernel/acpi/boot.c
20551+++ b/arch/x86/kernel/acpi/boot.c
20552@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20553 * If your system is blacklisted here, but you find that acpi=force
20554 * works for you, please contact linux-acpi@vger.kernel.org
20555 */
20556-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20557+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20558 /*
20559 * Boxes that need ACPI disabled
20560 */
20561@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20562 };
20563
20564 /* second table for DMI checks that should run after early-quirks */
20565-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20566+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20567 /*
20568 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20569 * which includes some code which overrides all temperature
20570diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20571index d1daead..acd77e2 100644
20572--- a/arch/x86/kernel/acpi/sleep.c
20573+++ b/arch/x86/kernel/acpi/sleep.c
20574@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20575 #else /* CONFIG_64BIT */
20576 #ifdef CONFIG_SMP
20577 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20578+
20579+ pax_open_kernel();
20580 early_gdt_descr.address =
20581 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20582+ pax_close_kernel();
20583+
20584 initial_gs = per_cpu_offset(smp_processor_id());
20585 #endif
20586 initial_code = (unsigned long)wakeup_long64;
20587diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20588index 665c6b7..eae4d56 100644
20589--- a/arch/x86/kernel/acpi/wakeup_32.S
20590+++ b/arch/x86/kernel/acpi/wakeup_32.S
20591@@ -29,13 +29,11 @@ wakeup_pmode_return:
20592 # and restore the stack ... but you need gdt for this to work
20593 movl saved_context_esp, %esp
20594
20595- movl %cs:saved_magic, %eax
20596- cmpl $0x12345678, %eax
20597+ cmpl $0x12345678, saved_magic
20598 jne bogus_magic
20599
20600 # jump to place where we left off
20601- movl saved_eip, %eax
20602- jmp *%eax
20603+ jmp *(saved_eip)
20604
20605 bogus_magic:
20606 jmp bogus_magic
20607diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20608index 703130f..27a155d 100644
20609--- a/arch/x86/kernel/alternative.c
20610+++ b/arch/x86/kernel/alternative.c
20611@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20612 */
20613 for (a = start; a < end; a++) {
20614 instr = (u8 *)&a->instr_offset + a->instr_offset;
20615+
20616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20617+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20618+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20619+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20620+#endif
20621+
20622 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20623 BUG_ON(a->replacementlen > a->instrlen);
20624 BUG_ON(a->instrlen > sizeof(insnbuf));
20625@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20626 add_nops(insnbuf + a->replacementlen,
20627 a->instrlen - a->replacementlen);
20628
20629+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20630+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20631+ instr = ktva_ktla(instr);
20632+#endif
20633+
20634 text_poke_early(instr, insnbuf, a->instrlen);
20635 }
20636 }
20637@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20638 for (poff = start; poff < end; poff++) {
20639 u8 *ptr = (u8 *)poff + *poff;
20640
20641+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20642+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20643+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20644+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20645+#endif
20646+
20647 if (!*poff || ptr < text || ptr >= text_end)
20648 continue;
20649 /* turn DS segment override prefix into lock prefix */
20650- if (*ptr == 0x3e)
20651+ if (*ktla_ktva(ptr) == 0x3e)
20652 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20653 }
20654 mutex_unlock(&text_mutex);
20655@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20656 for (poff = start; poff < end; poff++) {
20657 u8 *ptr = (u8 *)poff + *poff;
20658
20659+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20660+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20661+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20662+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20663+#endif
20664+
20665 if (!*poff || ptr < text || ptr >= text_end)
20666 continue;
20667 /* turn lock prefix into DS segment override prefix */
20668- if (*ptr == 0xf0)
20669+ if (*ktla_ktva(ptr) == 0xf0)
20670 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20671 }
20672 mutex_unlock(&text_mutex);
20673@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20674
20675 BUG_ON(p->len > MAX_PATCH_LEN);
20676 /* prep the buffer with the original instructions */
20677- memcpy(insnbuf, p->instr, p->len);
20678+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20679 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20680 (unsigned long)p->instr, p->len);
20681
20682@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20683 if (!uniproc_patched || num_possible_cpus() == 1)
20684 free_init_pages("SMP alternatives",
20685 (unsigned long)__smp_locks,
20686- (unsigned long)__smp_locks_end);
20687+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20688 #endif
20689
20690 apply_paravirt(__parainstructions, __parainstructions_end);
20691@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20692 * instructions. And on the local CPU you need to be protected again NMI or MCE
20693 * handlers seeing an inconsistent instruction while you patch.
20694 */
20695-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20696+void *__kprobes text_poke_early(void *addr, const void *opcode,
20697 size_t len)
20698 {
20699 unsigned long flags;
20700 local_irq_save(flags);
20701- memcpy(addr, opcode, len);
20702+
20703+ pax_open_kernel();
20704+ memcpy(ktla_ktva(addr), opcode, len);
20705 sync_core();
20706+ pax_close_kernel();
20707+
20708 local_irq_restore(flags);
20709 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20710 that causes hangs on some VIA CPUs. */
20711@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20712 */
20713 void *text_poke(void *addr, const void *opcode, size_t len)
20714 {
20715- unsigned long flags;
20716- char *vaddr;
20717+ unsigned char *vaddr = ktla_ktva(addr);
20718 struct page *pages[2];
20719- int i;
20720+ size_t i;
20721
20722 if (!core_kernel_text((unsigned long)addr)) {
20723- pages[0] = vmalloc_to_page(addr);
20724- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20725+ pages[0] = vmalloc_to_page(vaddr);
20726+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20727 } else {
20728- pages[0] = virt_to_page(addr);
20729+ pages[0] = virt_to_page(vaddr);
20730 WARN_ON(!PageReserved(pages[0]));
20731- pages[1] = virt_to_page(addr + PAGE_SIZE);
20732+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20733 }
20734 BUG_ON(!pages[0]);
20735- local_irq_save(flags);
20736- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20737- if (pages[1])
20738- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20739- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20740- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20741- clear_fixmap(FIX_TEXT_POKE0);
20742- if (pages[1])
20743- clear_fixmap(FIX_TEXT_POKE1);
20744- local_flush_tlb();
20745- sync_core();
20746- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20747- that causes hangs on some VIA CPUs. */
20748+ text_poke_early(addr, opcode, len);
20749 for (i = 0; i < len; i++)
20750- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20751- local_irq_restore(flags);
20752+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20753 return addr;
20754 }
20755
20756@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20757 if (likely(!bp_patching_in_progress))
20758 return 0;
20759
20760- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20761+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20762 return 0;
20763
20764 /* set up the specified breakpoint handler */
20765@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20766 */
20767 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20768 {
20769- unsigned char int3 = 0xcc;
20770+ const unsigned char int3 = 0xcc;
20771
20772 bp_int3_handler = handler;
20773 bp_int3_addr = (u8 *)addr + sizeof(int3);
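The alternative.c hunks funnel every patch site through ktla_ktva()/ktva_ktla(). The reading here, offered as interpretation rather than taken from the patch: under 32-bit KERNEXEC the kernel image is reachable at two addresses, a read-only executable mapping and a writable alias, so text_poke()/text_poke_early() must write through the alias while the address fixups above re-bias recorded pointers between the two views. A toy round-trip model with an invented delta:

#include <stdint.h>

#define KERNEXEC_DELTA 0x10000000UL   /* illustrative constant only */

static inline void *ktla_ktva(void *addr)  /* text address -> writable alias */
{
    return (void *)((uintptr_t)addr + KERNEXEC_DELTA);
}

static inline void *ktva_ktla(void *addr)  /* writable alias -> text address */
{
    return (void *)((uintptr_t)addr - KERNEXEC_DELTA);
}

int main(void)
{
    char c;
    return ktva_ktla(ktla_ktva(&c)) == &c ? 0 : 1;
}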
20774diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20775index ad3639a..bd4253c 100644
20776--- a/arch/x86/kernel/apic/apic.c
20777+++ b/arch/x86/kernel/apic/apic.c
20778@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20779 /*
20780 * Debug level, exported for io_apic.c
20781 */
20782-unsigned int apic_verbosity;
20783+int apic_verbosity;
20784
20785 int pic_mode;
20786
20787@@ -1918,7 +1918,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20788 apic_write(APIC_ESR, 0);
20789 v = apic_read(APIC_ESR);
20790 ack_APIC_irq();
20791- atomic_inc(&irq_err_count);
20792+ atomic_inc_unchecked(&irq_err_count);
20793
20794 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20795 smp_processor_id(), v);
20796diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20797index de918c4..32eed23 100644
20798--- a/arch/x86/kernel/apic/apic_flat_64.c
20799+++ b/arch/x86/kernel/apic/apic_flat_64.c
20800@@ -154,7 +154,7 @@ static int flat_probe(void)
20801 return 1;
20802 }
20803
20804-static struct apic apic_flat = {
20805+static struct apic apic_flat __read_only = {
20806 .name = "flat",
20807 .probe = flat_probe,
20808 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20809@@ -260,7 +260,7 @@ static int physflat_probe(void)
20810 return 0;
20811 }
20812
20813-static struct apic apic_physflat = {
20814+static struct apic apic_physflat __read_only = {
20815
20816 .name = "physical flat",
20817 .probe = physflat_probe,
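This and the following apic_* hunks all apply one pattern: driver structs full of function pointers get `__read_only`, placing them in a section the kernel write-protects once boot finishes so probe/IPI pointers cannot be retargeted afterwards. A userspace-flavored sketch; the attribute and section name are illustrative:

#define __read_only __attribute__((section(".data..read_only")))

struct apic_like {
    const char *name;
    int (*probe)(void);
};

static int flat_probe_stub(void) { return 1; }

static struct apic_like sketch_apic __read_only = {
    .name  = "flat",
    .probe = flat_probe_stub,
};

int main(void)
{
    return sketch_apic.probe() ? 0 : 1;
}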
20818diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20819index b205cdb..d8503ff 100644
20820--- a/arch/x86/kernel/apic/apic_noop.c
20821+++ b/arch/x86/kernel/apic/apic_noop.c
20822@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20823 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20824 }
20825
20826-struct apic apic_noop = {
20827+struct apic apic_noop __read_only = {
20828 .name = "noop",
20829 .probe = noop_probe,
20830 .acpi_madt_oem_check = NULL,
20831diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20832index c4a8d63..fe893ac 100644
20833--- a/arch/x86/kernel/apic/bigsmp_32.c
20834+++ b/arch/x86/kernel/apic/bigsmp_32.c
20835@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20836 return dmi_bigsmp;
20837 }
20838
20839-static struct apic apic_bigsmp = {
20840+static struct apic apic_bigsmp __read_only = {
20841
20842 .name = "bigsmp",
20843 .probe = probe_bigsmp,
20844diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20845index f4dc246..fbab133 100644
20846--- a/arch/x86/kernel/apic/io_apic.c
20847+++ b/arch/x86/kernel/apic/io_apic.c
20848@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20849 return ret;
20850 }
20851
20852-atomic_t irq_mis_count;
20853+atomic_unchecked_t irq_mis_count;
20854
20855 #ifdef CONFIG_GENERIC_PENDING_IRQ
20856 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20857@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20858 * at the cpu.
20859 */
20860 if (!(v & (1 << (i & 0x1f)))) {
20861- atomic_inc(&irq_mis_count);
20862+ atomic_inc_unchecked(&irq_mis_count);
20863
20864 eoi_ioapic_irq(irq, cfg);
20865 }
20866@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20867 ioapic_irqd_unmask(data, cfg, masked);
20868 }
20869
20870-static struct irq_chip ioapic_chip __read_mostly = {
20871+static struct irq_chip ioapic_chip = {
20872 .name = "IO-APIC",
20873 .irq_startup = startup_ioapic_irq,
20874 .irq_mask = mask_ioapic_irq,
20875@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20876 ack_APIC_irq();
20877 }
20878
20879-static struct irq_chip lapic_chip __read_mostly = {
20880+static struct irq_chip lapic_chip = {
20881 .name = "local-APIC",
20882 .irq_mask = mask_lapic_irq,
20883 .irq_unmask = unmask_lapic_irq,
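irq_mis_count and irq_err_count become atomic_unchecked_t because PAX_REFCOUNT makes ordinary atomic_t trap on overflow; pure statistics counters that may legitimately wrap opt out through the _unchecked variant. A stub model of the split (the real types carry inline-asm overflow checks):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    /* plain wrapping increment; a checked atomic_t would trap on overflow */
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_unchecked_t mis = { 0 };
    atomic_inc_unchecked(&mis);
    printf("%d\n", mis.counter);
    return 0;
}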
20884diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20885index bda4886..f9c7195 100644
20886--- a/arch/x86/kernel/apic/probe_32.c
20887+++ b/arch/x86/kernel/apic/probe_32.c
20888@@ -72,7 +72,7 @@ static int probe_default(void)
20889 return 1;
20890 }
20891
20892-static struct apic apic_default = {
20893+static struct apic apic_default __read_only = {
20894
20895 .name = "default",
20896 .probe = probe_default,
20897diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20898index 6cedd79..023ff8e 100644
20899--- a/arch/x86/kernel/apic/vector.c
20900+++ b/arch/x86/kernel/apic/vector.c
20901@@ -21,7 +21,7 @@
20902
20903 static DEFINE_RAW_SPINLOCK(vector_lock);
20904
20905-void lock_vector_lock(void)
20906+void lock_vector_lock(void) __acquires(vector_lock)
20907 {
20908 /* Used to the online set of cpus does not change
20909 * during assign_irq_vector.
20910@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20911 raw_spin_lock(&vector_lock);
20912 }
20913
20914-void unlock_vector_lock(void)
20915+void unlock_vector_lock(void) __releases(vector_lock)
20916 {
20917 raw_spin_unlock(&vector_lock);
20918 }
20919diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20920index e658f21..b695a1a 100644
20921--- a/arch/x86/kernel/apic/x2apic_cluster.c
20922+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20923@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20924 return notifier_from_errno(err);
20925 }
20926
20927-static struct notifier_block __refdata x2apic_cpu_notifier = {
20928+static struct notifier_block x2apic_cpu_notifier = {
20929 .notifier_call = update_clusterinfo,
20930 };
20931
20932@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20933 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20934 }
20935
20936-static struct apic apic_x2apic_cluster = {
20937+static struct apic apic_x2apic_cluster __read_only = {
20938
20939 .name = "cluster x2apic",
20940 .probe = x2apic_cluster_probe,
20941diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20942index 6fae733..5ca17af 100644
20943--- a/arch/x86/kernel/apic/x2apic_phys.c
20944+++ b/arch/x86/kernel/apic/x2apic_phys.c
20945@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20946 return apic == &apic_x2apic_phys;
20947 }
20948
20949-static struct apic apic_x2apic_phys = {
20950+static struct apic apic_x2apic_phys __read_only = {
20951
20952 .name = "physical x2apic",
20953 .probe = x2apic_phys_probe,
20954diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20955index 8e9dcfd..c61b3e4 100644
20956--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20957+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20958@@ -348,7 +348,7 @@ static int uv_probe(void)
20959 return apic == &apic_x2apic_uv_x;
20960 }
20961
20962-static struct apic __refdata apic_x2apic_uv_x = {
20963+static struct apic apic_x2apic_uv_x __read_only = {
20964
20965 .name = "UV large system",
20966 .probe = uv_probe,
20967diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20968index 927ec92..de68f32 100644
20969--- a/arch/x86/kernel/apm_32.c
20970+++ b/arch/x86/kernel/apm_32.c
20971@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20972 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20973 * even though they are called in protected mode.
20974 */
20975-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20976+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20977 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20978
20979 static const char driver_version[] = "1.16ac"; /* no spaces */
20980@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20981 BUG_ON(cpu != 0);
20982 gdt = get_cpu_gdt_table(cpu);
20983 save_desc_40 = gdt[0x40 / 8];
20984+
20985+ pax_open_kernel();
20986 gdt[0x40 / 8] = bad_bios_desc;
20987+ pax_close_kernel();
20988
20989 apm_irq_save(flags);
20990 APM_DO_SAVE_SEGS;
20991@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
20992 &call->esi);
20993 APM_DO_RESTORE_SEGS;
20994 apm_irq_restore(flags);
20995+
20996+ pax_open_kernel();
20997 gdt[0x40 / 8] = save_desc_40;
20998+ pax_close_kernel();
20999+
21000 put_cpu();
21001
21002 return call->eax & 0xff;
21003@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21004 BUG_ON(cpu != 0);
21005 gdt = get_cpu_gdt_table(cpu);
21006 save_desc_40 = gdt[0x40 / 8];
21007+
21008+ pax_open_kernel();
21009 gdt[0x40 / 8] = bad_bios_desc;
21010+ pax_close_kernel();
21011
21012 apm_irq_save(flags);
21013 APM_DO_SAVE_SEGS;
21014@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21015 &call->eax);
21016 APM_DO_RESTORE_SEGS;
21017 apm_irq_restore(flags);
21018+
21019+ pax_open_kernel();
21020 gdt[0x40 / 8] = save_desc_40;
21021+ pax_close_kernel();
21022+
21023 put_cpu();
21024 return error;
21025 }
21026@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21027 return 0;
21028 }
21029
21030-static struct dmi_system_id __initdata apm_dmi_table[] = {
21031+static const struct dmi_system_id __initconst apm_dmi_table[] = {
21032 {
21033 print_if_true,
21034 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21035@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21036 * code to that CPU.
21037 */
21038 gdt = get_cpu_gdt_table(0);
21039+
21040+ pax_open_kernel();
21041 set_desc_base(&gdt[APM_CS >> 3],
21042 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21043 set_desc_base(&gdt[APM_CS_16 >> 3],
21044 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21045 set_desc_base(&gdt[APM_DS >> 3],
21046 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21047+ pax_close_kernel();
21048
21049 proc_create("apm", 0, NULL, &apm_file_ops);
21050
21051diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21052index 9f6b934..cf5ffb3 100644
21053--- a/arch/x86/kernel/asm-offsets.c
21054+++ b/arch/x86/kernel/asm-offsets.c
21055@@ -32,6 +32,8 @@ void common(void) {
21056 OFFSET(TI_flags, thread_info, flags);
21057 OFFSET(TI_status, thread_info, status);
21058 OFFSET(TI_addr_limit, thread_info, addr_limit);
21059+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21060+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21061
21062 BLANK();
21063 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21064@@ -52,8 +54,26 @@ void common(void) {
21065 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21066 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21067 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21068+
21069+#ifdef CONFIG_PAX_KERNEXEC
21070+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21071 #endif
21072
21073+#ifdef CONFIG_PAX_MEMORY_UDEREF
21074+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21075+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21076+#ifdef CONFIG_X86_64
21077+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21078+#endif
21079+#endif
21080+
21081+#endif
21082+
21083+ BLANK();
21084+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21085+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21086+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21087+
21088 #ifdef CONFIG_XEN
21089 BLANK();
21090 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
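The new OFFSET()/DEFINE() entries above become assembler-visible constants through Kbuild's asm-offsets trick: the file is compiled to assembly and the "->" markers are scraped out of the compiler output into a generated header. A simplified standalone imitation (build with cc -S; struct and symbol names are demo-only):

#include <stddef.h>

#define DEFINE(sym, val) \
    asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

struct thread_info_like {
    unsigned long flags;
    void *lowest_stack;
};

void common(void)
{
    /* each line emits "->SYMBOL <value> <expr>" into the .s output */
    OFFSET(TI_lowest_stack_demo, thread_info_like, lowest_stack);
    DEFINE(PAGE_SIZE_demo, 4096);
}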
21091diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21092index fdcbb4d..036dd93 100644
21093--- a/arch/x86/kernel/asm-offsets_64.c
21094+++ b/arch/x86/kernel/asm-offsets_64.c
21095@@ -80,6 +80,7 @@ int main(void)
21096 BLANK();
21097 #undef ENTRY
21098
21099+ DEFINE(TSS_size, sizeof(struct tss_struct));
21100 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21101 BLANK();
21102
21103diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21104index 80091ae..0c5184f 100644
21105--- a/arch/x86/kernel/cpu/Makefile
21106+++ b/arch/x86/kernel/cpu/Makefile
21107@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21108 CFLAGS_REMOVE_perf_event.o = -pg
21109 endif
21110
21111-# Make sure load_percpu_segment has no stackprotector
21112-nostackp := $(call cc-option, -fno-stack-protector)
21113-CFLAGS_common.o := $(nostackp)
21114-
21115 obj-y := intel_cacheinfo.o scattered.o topology.o
21116 obj-y += common.o
21117 obj-y += rdrand.o
21118diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21119index a220239..607fc38 100644
21120--- a/arch/x86/kernel/cpu/amd.c
21121+++ b/arch/x86/kernel/cpu/amd.c
21122@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21123 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21124 {
21125 /* AMD errata T13 (order #21922) */
21126- if ((c->x86 == 6)) {
21127+ if (c->x86 == 6) {
21128 /* Duron Rev A0 */
21129 if (c->x86_model == 3 && c->x86_mask == 0)
21130 size = 64;
21131diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21132index 2346c95..c061472 100644
21133--- a/arch/x86/kernel/cpu/common.c
21134+++ b/arch/x86/kernel/cpu/common.c
21135@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21136
21137 static const struct cpu_dev *this_cpu = &default_cpu;
21138
21139-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21140-#ifdef CONFIG_X86_64
21141- /*
21142- * We need valid kernel segments for data and code in long mode too
21143- * IRET will check the segment types kkeil 2000/10/28
21144- * Also sysret mandates a special GDT layout
21145- *
21146- * TLS descriptors are currently at a different place compared to i386.
21147- * Hopefully nobody expects them at a fixed place (Wine?)
21148- */
21149- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21150- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21151- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21152- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21153- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21154- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21155-#else
21156- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21157- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21158- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21159- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21160- /*
21161- * Segments used for calling PnP BIOS have byte granularity.
21162- * They code segments and data segments have fixed 64k limits,
21163- * the transfer segment sizes are set at run time.
21164- */
21165- /* 32-bit code */
21166- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21167- /* 16-bit code */
21168- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21169- /* 16-bit data */
21170- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21171- /* 16-bit data */
21172- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21173- /* 16-bit data */
21174- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21175- /*
21176- * The APM segments have byte granularity and their bases
21177- * are set at run time. All have 64k limits.
21178- */
21179- /* 32-bit code */
21180- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21181- /* 16-bit code */
21182- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21183- /* data */
21184- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21185-
21186- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21187- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21188- GDT_STACK_CANARY_INIT
21189-#endif
21190-} };
21191-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21192-
21193 static int __init x86_xsave_setup(char *s)
21194 {
21195 if (strlen(s))
21196@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21197 }
21198 }
21199
21200+#ifdef CONFIG_X86_64
21201+static __init int setup_disable_pcid(char *arg)
21202+{
21203+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21204+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21205+
21206+#ifdef CONFIG_PAX_MEMORY_UDEREF
21207+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21208+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21209+#endif
21210+
21211+ return 1;
21212+}
21213+__setup("nopcid", setup_disable_pcid);
21214+
21215+static void setup_pcid(struct cpuinfo_x86 *c)
21216+{
21217+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21218+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21219+
21220+#ifdef CONFIG_PAX_MEMORY_UDEREF
21221+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21222+ pax_open_kernel();
21223+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21224+ pax_close_kernel();
21225+ printk("PAX: slow and weak UDEREF enabled\n");
21226+ } else
21227+ printk("PAX: UDEREF disabled\n");
21228+#endif
21229+
21230+ return;
21231+ }
21232+
21233+ printk("PAX: PCID detected\n");
21234+ cr4_set_bits(X86_CR4_PCIDE);
21235+
21236+#ifdef CONFIG_PAX_MEMORY_UDEREF
21237+ pax_open_kernel();
21238+ clone_pgd_mask = ~(pgdval_t)0UL;
21239+ pax_close_kernel();
21240+ if (pax_user_shadow_base)
21241+ printk("PAX: weak UDEREF enabled\n");
21242+ else {
21243+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21244+ printk("PAX: strong UDEREF enabled\n");
21245+ }
21246+#endif
21247+
21248+ if (cpu_has(c, X86_FEATURE_INVPCID))
21249+ printk("PAX: INVPCID detected\n");
21250+}
21251+#endif
21252+
21253 /*
21254 * Some CPU features depend on higher CPUID levels, which may not always
21255 * be available due to CPUID level capping or broken virtualization
21256@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21257 {
21258 struct desc_ptr gdt_descr;
21259
21260- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21261+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21262 gdt_descr.size = GDT_SIZE - 1;
21263 load_gdt(&gdt_descr);
21264 /* Reload the per-cpu base */
21265@@ -897,6 +896,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21266 setup_smep(c);
21267 setup_smap(c);
21268
21269+#ifdef CONFIG_X86_32
21270+#ifdef CONFIG_PAX_PAGEEXEC
21271+ if (!(__supported_pte_mask & _PAGE_NX))
21272+ clear_cpu_cap(c, X86_FEATURE_PSE);
21273+#endif
21274+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21275+ clear_cpu_cap(c, X86_FEATURE_SEP);
21276+#endif
21277+#endif
21278+
21279+#ifdef CONFIG_X86_64
21280+ setup_pcid(c);
21281+#endif
21282+
21283 /*
21284 * The vendor-specific functions might have changed features.
21285 * Now we do "generic changes."
21286@@ -979,7 +992,7 @@ static void syscall32_cpu_init(void)
21287 void enable_sep_cpu(void)
21288 {
21289 int cpu = get_cpu();
21290- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21291+ struct tss_struct *tss = init_tss + cpu;
21292
21293 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21294 put_cpu();
21295@@ -1117,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
21296 }
21297 __setup("clearcpuid=", setup_disablecpuid);
21298
21299+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21300+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21301+
21302 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21303- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21304+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21305 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21306
21307 #ifdef CONFIG_X86_64
21308-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21309-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21310- (unsigned long) debug_idt_table };
21311+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21312+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21313
21314 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21315 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21316@@ -1307,7 +1322,7 @@ void cpu_init(void)
21317 */
21318 load_ucode_ap();
21319
21320- t = &per_cpu(init_tss, cpu);
21321+ t = init_tss + cpu;
21322 oist = &per_cpu(orig_ist, cpu);
21323
21324 #ifdef CONFIG_NUMA
21325@@ -1339,7 +1354,6 @@ void cpu_init(void)
21326 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21327 barrier();
21328
21329- x86_configure_nx();
21330 x2apic_setup();
21331
21332 /*
21333@@ -1391,7 +1405,7 @@ void cpu_init(void)
21334 {
21335 int cpu = smp_processor_id();
21336 struct task_struct *curr = current;
21337- struct tss_struct *t = &per_cpu(init_tss, cpu);
21338+ struct tss_struct *t = init_tss + cpu;
21339 struct thread_struct *thread = &curr->thread;
21340
21341 wait_for_master_cpu(cpu);
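setup_pcid() above reduces to a small decision tree: with UDEREF unconfigured there is nothing to pick; without PCID the shadow-pgd fallback yields "slow and weak" UDEREF; with PCID the result is "weak" when a user shadow base is forced (for instance via nopcid) and "strong" otherwise. Restated as plain C for readability, with an invented enum and parameters in place of clone_pgd_mask and pax_user_shadow_base:

#include <stdio.h>

enum uderef_mode { UDEREF_DISABLED, UDEREF_SLOW_WEAK, UDEREF_WEAK, UDEREF_STRONG };

static enum uderef_mode pick_uderef(int uderef_configured, int cpu_has_pcid,
                                    int shadow_base_forced)
{
    if (!uderef_configured)
        return UDEREF_DISABLED;
    if (!cpu_has_pcid)
        return UDEREF_SLOW_WEAK;   /* falls back to the shadow pgd scheme */
    /* PCID gives kernel and user distinct address-space IDs */
    return shadow_base_forced ? UDEREF_WEAK : UDEREF_STRONG;
}

int main(void)
{
    printf("%d\n", pick_uderef(1, 1, 0));   /* UDEREF_STRONG */
    printf("%d\n", pick_uderef(1, 0, 1));   /* UDEREF_SLOW_WEAK */
    return 0;
}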
21342diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21343index 6596433..1ad6eaf 100644
21344--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21345+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21346@@ -1024,6 +1024,22 @@ static struct attribute *default_attrs[] = {
21347 };
21348
21349 #ifdef CONFIG_AMD_NB
21350+static struct attribute *default_attrs_amd_nb[] = {
21351+ &type.attr,
21352+ &level.attr,
21353+ &coherency_line_size.attr,
21354+ &physical_line_partition.attr,
21355+ &ways_of_associativity.attr,
21356+ &number_of_sets.attr,
21357+ &size.attr,
21358+ &shared_cpu_map.attr,
21359+ &shared_cpu_list.attr,
21360+ NULL,
21361+ NULL,
21362+ NULL,
21363+ NULL
21364+};
21365+
21366 static struct attribute **amd_l3_attrs(void)
21367 {
21368 static struct attribute **attrs;
21369@@ -1034,18 +1050,7 @@ static struct attribute **amd_l3_attrs(void)
21370
21371 n = ARRAY_SIZE(default_attrs);
21372
21373- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21374- n += 2;
21375-
21376- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21377- n += 1;
21378-
21379- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21380- if (attrs == NULL)
21381- return attrs = default_attrs;
21382-
21383- for (n = 0; default_attrs[n]; n++)
21384- attrs[n] = default_attrs[n];
21385+ attrs = default_attrs_amd_nb;
21386
21387 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21388 attrs[n++] = &cache_disable_0.attr;
21389@@ -1096,6 +1101,13 @@ static struct kobj_type ktype_cache = {
21390 .default_attrs = default_attrs,
21391 };
21392
21393+#ifdef CONFIG_AMD_NB
21394+static struct kobj_type ktype_cache_amd_nb = {
21395+ .sysfs_ops = &sysfs_ops,
21396+ .default_attrs = default_attrs_amd_nb,
21397+};
21398+#endif
21399+
21400 static struct kobj_type ktype_percpu_entry = {
21401 .sysfs_ops = &sysfs_ops,
21402 };
21403@@ -1161,20 +1173,26 @@ static int cache_add_dev(struct device *dev)
21404 return retval;
21405 }
21406
21407+#ifdef CONFIG_AMD_NB
21408+ amd_l3_attrs();
21409+#endif
21410+
21411 for (i = 0; i < num_cache_leaves; i++) {
21412+ struct kobj_type *ktype;
21413+
21414 this_object = INDEX_KOBJECT_PTR(cpu, i);
21415 this_object->cpu = cpu;
21416 this_object->index = i;
21417
21418 this_leaf = CPUID4_INFO_IDX(cpu, i);
21419
21420- ktype_cache.default_attrs = default_attrs;
21421+ ktype = &ktype_cache;
21422 #ifdef CONFIG_AMD_NB
21423 if (this_leaf->base.nb)
21424- ktype_cache.default_attrs = amd_l3_attrs();
21425+ ktype = &ktype_cache_amd_nb;
21426 #endif
21427 retval = kobject_init_and_add(&(this_object->kobj),
21428- &ktype_cache,
21429+ ktype,
21430 per_cpu(ici_cache_kobject, cpu),
21431 "index%1lu", i);
21432 if (unlikely(retval)) {
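The intel_cacheinfo.c rewrite drops the kzalloc()'ed attribute array in favor of a static default_attrs_amd_nb whose trailing NULL slots reserve room for the optional AMD L3 entries (two index-disable attributes, one partitioning attribute, plus the terminator). A terse model of the intent, with strings standing in for struct attribute pointers:

#include <stdio.h>

static const char *amd_attrs[] = {
    "type", "level", "size",
    NULL, NULL, NULL, NULL   /* up to 3 optional entries + terminator */
};

int main(void)
{
    size_t n = 3;   /* count of the always-present common entries */
    int has_index_disable = 1, has_partitioning = 0;

    if (has_index_disable) {
        amd_attrs[n++] = "cache_disable_0";
        amd_attrs[n++] = "cache_disable_1";
    }
    if (has_partitioning)
        amd_attrs[n++] = "subcaches";

    for (size_t i = 0; amd_attrs[i]; i++)
        puts(amd_attrs[i]);
    return 0;
}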
21433diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21434index 3c036cb..3b5677d 100644
21435--- a/arch/x86/kernel/cpu/mcheck/mce.c
21436+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21437@@ -47,6 +47,7 @@
21438 #include <asm/tlbflush.h>
21439 #include <asm/mce.h>
21440 #include <asm/msr.h>
21441+#include <asm/local.h>
21442
21443 #include "mce-internal.h"
21444
21445@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21446 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21447 m->cs, m->ip);
21448
21449- if (m->cs == __KERNEL_CS)
21450+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21451 print_symbol("{%s}", m->ip);
21452 pr_cont("\n");
21453 }
21454@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21455
21456 #define PANIC_TIMEOUT 5 /* 5 seconds */
21457
21458-static atomic_t mce_panicked;
21459+static atomic_unchecked_t mce_panicked;
21460
21461 static int fake_panic;
21462-static atomic_t mce_fake_panicked;
21463+static atomic_unchecked_t mce_fake_panicked;
21464
21465 /* Panic in progress. Enable interrupts and wait for final IPI */
21466 static void wait_for_panic(void)
21467@@ -318,7 +319,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21468 /*
21469 * Make sure only one CPU runs in machine check panic
21470 */
21471- if (atomic_inc_return(&mce_panicked) > 1)
21472+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21473 wait_for_panic();
21474 barrier();
21475
21476@@ -326,7 +327,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21477 console_verbose();
21478 } else {
21479 /* Don't log too much for fake panic */
21480- if (atomic_inc_return(&mce_fake_panicked) > 1)
21481+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21482 return;
21483 }
21484 /* First print corrected ones that are still unlogged */
21485@@ -365,7 +366,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21486 if (!fake_panic) {
21487 if (panic_timeout == 0)
21488 panic_timeout = mca_cfg.panic_timeout;
21489- panic(msg);
21490+ panic("%s", msg);
21491 } else
21492 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21493 }
21494@@ -743,7 +744,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21495 * might have been modified by someone else.
21496 */
21497 rmb();
21498- if (atomic_read(&mce_panicked))
21499+ if (atomic_read_unchecked(&mce_panicked))
21500 wait_for_panic();
21501 if (!mca_cfg.monarch_timeout)
21502 goto out;
21503@@ -1669,7 +1670,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21504 }
21505
21506 /* Call the installed machine check handler for this CPU setup. */
21507-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21508+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21509 unexpected_machine_check;
21510
21511 /*
21512@@ -1692,7 +1693,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21513 return;
21514 }
21515
21516+ pax_open_kernel();
21517 machine_check_vector = do_machine_check;
21518+ pax_close_kernel();
21519
21520 __mcheck_cpu_init_generic();
21521 __mcheck_cpu_init_vendor(c);
21522@@ -1706,7 +1709,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21523 */
21524
21525 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21526-static int mce_chrdev_open_count; /* #times opened */
21527+static local_t mce_chrdev_open_count; /* #times opened */
21528 static int mce_chrdev_open_exclu; /* already open exclusive? */
21529
21530 static int mce_chrdev_open(struct inode *inode, struct file *file)
21531@@ -1714,7 +1717,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21532 spin_lock(&mce_chrdev_state_lock);
21533
21534 if (mce_chrdev_open_exclu ||
21535- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21536+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21537 spin_unlock(&mce_chrdev_state_lock);
21538
21539 return -EBUSY;
21540@@ -1722,7 +1725,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21541
21542 if (file->f_flags & O_EXCL)
21543 mce_chrdev_open_exclu = 1;
21544- mce_chrdev_open_count++;
21545+ local_inc(&mce_chrdev_open_count);
21546
21547 spin_unlock(&mce_chrdev_state_lock);
21548
21549@@ -1733,7 +1736,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21550 {
21551 spin_lock(&mce_chrdev_state_lock);
21552
21553- mce_chrdev_open_count--;
21554+ local_dec(&mce_chrdev_open_count);
21555 mce_chrdev_open_exclu = 0;
21556
21557 spin_unlock(&mce_chrdev_state_lock);
21558@@ -2408,7 +2411,7 @@ static __init void mce_init_banks(void)
21559
21560 for (i = 0; i < mca_cfg.banks; i++) {
21561 struct mce_bank *b = &mce_banks[i];
21562- struct device_attribute *a = &b->attr;
21563+ device_attribute_no_const *a = &b->attr;
21564
21565 sysfs_attr_init(&a->attr);
21566 a->attr.name = b->attrname;
21567@@ -2515,7 +2518,7 @@ struct dentry *mce_get_debugfs_dir(void)
21568 static void mce_reset(void)
21569 {
21570 cpu_missing = 0;
21571- atomic_set(&mce_fake_panicked, 0);
21572+ atomic_set_unchecked(&mce_fake_panicked, 0);
21573 atomic_set(&mce_executing, 0);
21574 atomic_set(&mce_callin, 0);
21575 atomic_set(&global_nwo, 0);
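Two recurring grsecurity idioms appear in the mce.c hunks above. Counters that are allowed to wrap, such as the panic-entry counters, become atomic_unchecked_t so PaX's REFCOUNT overflow detection does not trip on them, while the atomic_inc_return(...) > 1 test still guarantees only the first caller runs the panic body; panic(msg) becomes panic("%s", msg), closing a format-string hole when msg contains '%'; and the /dev/mcelog open count moves from a plain int to local_t under the existing spinlock. A small userspace analogue of the single-runner gate, using C11 atomics rather than the kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int panic_entries;

    static void panic_path(const char *msg)
    {
        /* only the first thread to arrive runs the body; the kernel
           code parks later arrivals in wait_for_panic() instead */
        if (atomic_fetch_add(&panic_entries, 1) > 0)
            return;

        printf("panic: %s\n", msg);   /* note the literal format string */
    }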
21576diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21577index 737b0ad..09ec66e 100644
21578--- a/arch/x86/kernel/cpu/mcheck/p5.c
21579+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21580@@ -12,6 +12,7 @@
21581 #include <asm/tlbflush.h>
21582 #include <asm/mce.h>
21583 #include <asm/msr.h>
21584+#include <asm/pgtable.h>
21585
21586 /* By default disabled */
21587 int mce_p5_enabled __read_mostly;
21588@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21589 if (!cpu_has(c, X86_FEATURE_MCE))
21590 return;
21591
21592+ pax_open_kernel();
21593 machine_check_vector = pentium_machine_check;
21594+ pax_close_kernel();
21595 /* Make sure the vector pointer is visible before we enable MCEs: */
21596 wmb();
21597
21598diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21599index 44f1382..315b292 100644
21600--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21601+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21602@@ -11,6 +11,7 @@
21603 #include <asm/tlbflush.h>
21604 #include <asm/mce.h>
21605 #include <asm/msr.h>
21606+#include <asm/pgtable.h>
21607
21608 /* Machine check handler for WinChip C6: */
21609 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21610@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21611 {
21612 u32 lo, hi;
21613
21614+ pax_open_kernel();
21615 machine_check_vector = winchip_machine_check;
21616+ pax_close_kernel();
21617 /* Make sure the vector pointer is visible before we enable MCEs: */
21618 wmb();
21619
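Both the P5 and WinChip init paths write machine_check_vector, which the mce.c hunk marked __read_only, so the store is bracketed with pax_open_kernel()/pax_close_kernel() to make it temporarily writable. A loose userspace analogy using mprotect(); the real helpers toggle kernel write protection (e.g. CR0.WP), not page tables like this, and error handling is omitted:

    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    typedef void (*mc_handler_t)(void);

    static void patch_handler(mc_handler_t *slot, mc_handler_t new_fn)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        void *page = (void *)((uintptr_t)slot & ~(uintptr_t)(pagesz - 1));

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* "open" */
        *slot = new_fn;                                 /* the one store */
        mprotect(page, pagesz, PROT_READ);              /* "close" */
    }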
21620diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21621index 36a8361..e7058c2 100644
21622--- a/arch/x86/kernel/cpu/microcode/core.c
21623+++ b/arch/x86/kernel/cpu/microcode/core.c
21624@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21625 return NOTIFY_OK;
21626 }
21627
21628-static struct notifier_block __refdata mc_cpu_notifier = {
21629+static struct notifier_block mc_cpu_notifier = {
21630 .notifier_call = mc_cpu_callback,
21631 };
21632
21633diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21634index 746e7fd..8dc677e 100644
21635--- a/arch/x86/kernel/cpu/microcode/intel.c
21636+++ b/arch/x86/kernel/cpu/microcode/intel.c
21637@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21638
21639 static int get_ucode_user(void *to, const void *from, size_t n)
21640 {
21641- return copy_from_user(to, from, n);
21642+ return copy_from_user(to, (const void __force_user *)from, n);
21643 }
21644
21645 static enum ucode_state
21646 request_microcode_user(int cpu, const void __user *buf, size_t size)
21647 {
21648- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21649+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21650 }
21651
21652 static void microcode_fini_cpu(int cpu)
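The microcode hunks add __force_user/__force_kernel casts where a user pointer travels through a callback typed with plain kernel pointers: under PAX_MEMORY_UDEREF the two address spaces are strictly separated, so the sparse-style annotations are load-bearing and crossings must be explicit. A sketch of the annotation mechanics, assuming a hypothetical copy_in() helper; under a checker the cast is required, under a normal compile the macros vanish:

    #include <string.h>

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* stand-in for copy_from_user(): dst kernel, src user */
    static unsigned long copy_in(void *dst, const void __user *src,
                                 unsigned long n)
    {
        memcpy(dst, (const void __force *)src, n);
        return 0;
    }

    /* the callback is typed (void *, const void *, ...), so the user
       pointer must be re-asserted as __user before the copy */
    static int get_bytes(void *to, const void *from, unsigned long n)
    {
        return (int)copy_in(to, (const void __force __user *)from, n);
    }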
21653diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21654index ea5f363..cb0e905 100644
21655--- a/arch/x86/kernel/cpu/mtrr/main.c
21656+++ b/arch/x86/kernel/cpu/mtrr/main.c
21657@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21658 u64 size_or_mask, size_and_mask;
21659 static bool mtrr_aps_delayed_init;
21660
21661-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21662+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21663
21664 const struct mtrr_ops *mtrr_if;
21665
21666diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21667index df5e41f..816c719 100644
21668--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21669+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21670@@ -25,7 +25,7 @@ struct mtrr_ops {
21671 int (*validate_add_page)(unsigned long base, unsigned long size,
21672 unsigned int type);
21673 int (*have_wrcomb)(void);
21674-};
21675+} __do_const;
21676
21677 extern int generic_get_free_region(unsigned long base, unsigned long size,
21678 int replace_reg);
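Marking struct mtrr_ops with __do_const lets PaX's constify plugin treat every instance as read-only data, and the mtrr_ops[] dispatch table itself becomes __read_only, so a kernel-memory write primitive can no longer redirect these function pointers. The closest plain-C equivalent is simply const-qualifying the ops instances; a sketch with invented names:

    struct ops_like {
        int (*validate_add_page)(unsigned long base, unsigned long size,
                                 unsigned int type);
        int (*have_wrcomb)(void);
    };

    static int no_wrcomb(void) { return 0; }

    /* 'const' places the instance in .rodata, roughly what __do_const
       enforces for every instance of the annotated type */
    static const struct ops_like generic_like_ops = {
        .have_wrcomb = no_wrcomb,
    };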
21679diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21680index b71a7f8..534af0e 100644
21681--- a/arch/x86/kernel/cpu/perf_event.c
21682+++ b/arch/x86/kernel/cpu/perf_event.c
21683@@ -1376,7 +1376,7 @@ static void __init pmu_check_apic(void)
21684
21685 }
21686
21687-static struct attribute_group x86_pmu_format_group = {
21688+static attribute_group_no_const x86_pmu_format_group = {
21689 .name = "format",
21690 .attrs = NULL,
21691 };
21692@@ -1475,7 +1475,7 @@ static struct attribute *events_attr[] = {
21693 NULL,
21694 };
21695
21696-static struct attribute_group x86_pmu_events_group = {
21697+static attribute_group_no_const x86_pmu_events_group = {
21698 .name = "events",
21699 .attrs = events_attr,
21700 };
21701@@ -2037,7 +2037,7 @@ static unsigned long get_segment_base(unsigned int segment)
21702 if (idx > GDT_ENTRIES)
21703 return 0;
21704
21705- desc = raw_cpu_ptr(gdt_page.gdt);
21706+ desc = get_cpu_gdt_table(smp_processor_id());
21707 }
21708
21709 return get_desc_base(desc + idx);
21710@@ -2127,7 +2127,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21711 break;
21712
21713 perf_callchain_store(entry, frame.return_address);
21714- fp = frame.next_frame;
21715+ fp = (const void __force_user *)frame.next_frame;
21716 }
21717 }
21718
21719diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21720index 97242a9..cf9c30e 100644
21721--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21722+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21723@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21724 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21725 {
21726 struct attribute **attrs;
21727- struct attribute_group *attr_group;
21728+ attribute_group_no_const *attr_group;
21729 int i = 0, j;
21730
21731 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21732diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21733index 2589906..1ca1000 100644
21734--- a/arch/x86/kernel/cpu/perf_event_intel.c
21735+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21736@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21737 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21738
21739 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21740- u64 capabilities;
21741+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21742
21743- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21744- x86_pmu.intel_cap.capabilities = capabilities;
21745+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21746+ x86_pmu.intel_cap.capabilities = capabilities;
21747 }
21748
21749 intel_ds_init();
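The intel_pmu_init() hunk above preloads capabilities with the current (default) value and switches the MSR access to rdmsrl_safe(), which returns nonzero if the read faults; on failure the preloaded default is written back instead of keeping whatever a faulting read left behind. The same read-with-fallback shape, sketched with a hypothetical msr_read() stub:

    #include <stdint.h>

    /* stand-in for rdmsrl_safe(): nonzero means the read faulted */
    static int msr_read(uint32_t msr, uint64_t *val)
    {
        (void)msr; (void)val;
        return 1;
    }

    static uint64_t caps;              /* starts at the safe default, 0 */

    static void init_caps(void)
    {
        uint64_t saved = caps;         /* remember the default */

        if (msr_read(0x345, &caps))    /* 0x345: IA32_PERF_CAPABILITIES */
            caps = saved;              /* faulted: restore the default */
    }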
21750diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21751index c4bb8b8..9f7384d 100644
21752--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21753+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21754@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21755 NULL,
21756 };
21757
21758-static struct attribute_group rapl_pmu_events_group = {
21759+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21760 .name = "events",
21761 .attrs = NULL, /* patched at runtime */
21762 };
21763diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21764index c635b8b..b78835e 100644
21765--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21766+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21767@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21768 static int __init uncore_type_init(struct intel_uncore_type *type)
21769 {
21770 struct intel_uncore_pmu *pmus;
21771- struct attribute_group *attr_group;
21772+ attribute_group_no_const *attr_group;
21773 struct attribute **attrs;
21774 int i, j;
21775
21776diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21777index 6c8c1e7..515b98a 100644
21778--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21779+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21780@@ -114,7 +114,7 @@ struct intel_uncore_box {
21781 struct uncore_event_desc {
21782 struct kobj_attribute attr;
21783 const char *config;
21784-};
21785+} __do_const;
21786
21787 ssize_t uncore_event_show(struct kobject *kobj,
21788 struct kobj_attribute *attr, char *buf);
21789diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21790index 83741a7..bd3507d 100644
21791--- a/arch/x86/kernel/cpuid.c
21792+++ b/arch/x86/kernel/cpuid.c
21793@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21794 return notifier_from_errno(err);
21795 }
21796
21797-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21798+static struct notifier_block cpuid_class_cpu_notifier =
21799 {
21800 .notifier_call = cpuid_class_cpu_callback,
21801 };
21802diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21803index aceb2f9..c76d3e3 100644
21804--- a/arch/x86/kernel/crash.c
21805+++ b/arch/x86/kernel/crash.c
21806@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21807 #ifdef CONFIG_X86_32
21808 struct pt_regs fixed_regs;
21809
21810- if (!user_mode_vm(regs)) {
21811+ if (!user_mode(regs)) {
21812 crash_fixup_ss_esp(&fixed_regs, regs);
21813 regs = &fixed_regs;
21814 }
21815diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21816index afa64ad..dce67dd 100644
21817--- a/arch/x86/kernel/crash_dump_64.c
21818+++ b/arch/x86/kernel/crash_dump_64.c
21819@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21820 return -ENOMEM;
21821
21822 if (userbuf) {
21823- if (copy_to_user(buf, vaddr + offset, csize)) {
21824+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21825 iounmap(vaddr);
21826 return -EFAULT;
21827 }
21828diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21829index f6dfd93..892ade4 100644
21830--- a/arch/x86/kernel/doublefault.c
21831+++ b/arch/x86/kernel/doublefault.c
21832@@ -12,7 +12,7 @@
21833
21834 #define DOUBLEFAULT_STACKSIZE (1024)
21835 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21836-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21837+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21838
21839 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21840
21841@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21842 unsigned long gdt, tss;
21843
21844 native_store_gdt(&gdt_desc);
21845- gdt = gdt_desc.address;
21846+ gdt = (unsigned long)gdt_desc.address;
21847
21848 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21849
21850@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21851 /* 0x2 bit is always set */
21852 .flags = X86_EFLAGS_SF | 0x2,
21853 .sp = STACK_START,
21854- .es = __USER_DS,
21855+ .es = __KERNEL_DS,
21856 .cs = __KERNEL_CS,
21857 .ss = __KERNEL_DS,
21858- .ds = __USER_DS,
21859+ .ds = __KERNEL_DS,
21860 .fs = __KERNEL_PERCPU,
21861
21862 .__cr3 = __pa_nodebug(swapper_pg_dir),
21863diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21864index cf3df1d..b637d9a 100644
21865--- a/arch/x86/kernel/dumpstack.c
21866+++ b/arch/x86/kernel/dumpstack.c
21867@@ -2,6 +2,9 @@
21868 * Copyright (C) 1991, 1992 Linus Torvalds
21869 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21870 */
21871+#ifdef CONFIG_GRKERNSEC_HIDESYM
21872+#define __INCLUDED_BY_HIDESYM 1
21873+#endif
21874 #include <linux/kallsyms.h>
21875 #include <linux/kprobes.h>
21876 #include <linux/uaccess.h>
21877@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21878
21879 void printk_address(unsigned long address)
21880 {
21881- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21882+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21883 }
21884
21885 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21886 static void
21887 print_ftrace_graph_addr(unsigned long addr, void *data,
21888 const struct stacktrace_ops *ops,
21889- struct thread_info *tinfo, int *graph)
21890+ struct task_struct *task, int *graph)
21891 {
21892- struct task_struct *task;
21893 unsigned long ret_addr;
21894 int index;
21895
21896 if (addr != (unsigned long)return_to_handler)
21897 return;
21898
21899- task = tinfo->task;
21900 index = task->curr_ret_stack;
21901
21902 if (!task->ret_stack || index < *graph)
21903@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21904 static inline void
21905 print_ftrace_graph_addr(unsigned long addr, void *data,
21906 const struct stacktrace_ops *ops,
21907- struct thread_info *tinfo, int *graph)
21908+ struct task_struct *task, int *graph)
21909 { }
21910 #endif
21911
21912@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21913 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21914 */
21915
21916-static inline int valid_stack_ptr(struct thread_info *tinfo,
21917- void *p, unsigned int size, void *end)
21918+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21919 {
21920- void *t = tinfo;
21921 if (end) {
21922 if (p < end && p >= (end-THREAD_SIZE))
21923 return 1;
21924@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21925 }
21926
21927 unsigned long
21928-print_context_stack(struct thread_info *tinfo,
21929+print_context_stack(struct task_struct *task, void *stack_start,
21930 unsigned long *stack, unsigned long bp,
21931 const struct stacktrace_ops *ops, void *data,
21932 unsigned long *end, int *graph)
21933 {
21934 struct stack_frame *frame = (struct stack_frame *)bp;
21935
21936- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21937+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21938 unsigned long addr;
21939
21940 addr = *stack;
21941@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21942 } else {
21943 ops->address(data, addr, 0);
21944 }
21945- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21946+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21947 }
21948 stack++;
21949 }
21950@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21951 EXPORT_SYMBOL_GPL(print_context_stack);
21952
21953 unsigned long
21954-print_context_stack_bp(struct thread_info *tinfo,
21955+print_context_stack_bp(struct task_struct *task, void *stack_start,
21956 unsigned long *stack, unsigned long bp,
21957 const struct stacktrace_ops *ops, void *data,
21958 unsigned long *end, int *graph)
21959@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21960 struct stack_frame *frame = (struct stack_frame *)bp;
21961 unsigned long *ret_addr = &frame->return_address;
21962
21963- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21964+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21965 unsigned long addr = *ret_addr;
21966
21967 if (!__kernel_text_address(addr))
21968@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21969 ops->address(data, addr, 1);
21970 frame = frame->next_frame;
21971 ret_addr = &frame->return_address;
21972- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21973+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21974 }
21975
21976 return (unsigned long)frame;
21977@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21978 static void print_trace_address(void *data, unsigned long addr, int reliable)
21979 {
21980 touch_nmi_watchdog();
21981- printk(data);
21982+ printk("%s", (char *)data);
21983 printk_stack_address(addr, reliable);
21984 }
21985
21986@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
21987 EXPORT_SYMBOL_GPL(oops_begin);
21988 NOKPROBE_SYMBOL(oops_begin);
21989
21990+extern void gr_handle_kernel_exploit(void);
21991+
21992 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21993 {
21994 if (regs && kexec_should_crash(current))
21995@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21996 panic("Fatal exception in interrupt");
21997 if (panic_on_oops)
21998 panic("Fatal exception");
21999- do_exit(signr);
22000+
22001+ gr_handle_kernel_exploit();
22002+
22003+ do_group_exit(signr);
22004 }
22005 NOKPROBE_SYMBOL(oops_end);
22006
22007@@ -278,7 +282,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22008 print_modules();
22009 show_regs(regs);
22010 #ifdef CONFIG_X86_32
22011- if (user_mode_vm(regs)) {
22012+ if (user_mode(regs)) {
22013 sp = regs->sp;
22014 ss = regs->ss & 0xffff;
22015 } else {
22016@@ -307,7 +311,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22017 unsigned long flags = oops_begin();
22018 int sig = SIGSEGV;
22019
22020- if (!user_mode_vm(regs))
22021+ if (!user_mode(regs))
22022 report_bug(regs->ip, regs);
22023
22024 if (__die(str, regs, err))
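Throughout dumpstack.c the stack walkers stop identifying the stack via thread_info and instead take the owning task plus an explicit stack_start: grsecurity can place thread_info away from the stack, so the old void *t = tinfo trick for locating the stack base no longer holds. Callers derive the base by masking, as the 32-bit code below does; a sketch of that bounds logic, assuming THREAD_SIZE-aligned stacks:

    #define THREAD_SIZE (8 * 1024UL)

    static inline void *stack_base(const void *sp)
    {
        return (void *)((unsigned long)sp & ~(THREAD_SIZE - 1));
    }

    static inline int on_stack(const void *base, const void *p,
                               unsigned int size)
    {
        return p >= base &&
               (const char *)p + size <= (const char *)base + THREAD_SIZE;
    }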
22025diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22026index 5abd4cd..c65733b 100644
22027--- a/arch/x86/kernel/dumpstack_32.c
22028+++ b/arch/x86/kernel/dumpstack_32.c
22029@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22030 bp = stack_frame(task, regs);
22031
22032 for (;;) {
22033- struct thread_info *context;
22034+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22035 void *end_stack;
22036
22037 end_stack = is_hardirq_stack(stack, cpu);
22038 if (!end_stack)
22039 end_stack = is_softirq_stack(stack, cpu);
22040
22041- context = task_thread_info(task);
22042- bp = ops->walk_stack(context, stack, bp, ops, data,
22043+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22044 end_stack, &graph);
22045
22046 /* Stop if not on irq stack */
22047@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22048 int i;
22049
22050 show_regs_print_info(KERN_EMERG);
22051- __show_regs(regs, !user_mode_vm(regs));
22052+ __show_regs(regs, !user_mode(regs));
22053
22054 /*
22055 * When in-kernel, we also print out the stack and code at the
22056 * time of the fault..
22057 */
22058- if (!user_mode_vm(regs)) {
22059+ if (!user_mode(regs)) {
22060 unsigned int code_prologue = code_bytes * 43 / 64;
22061 unsigned int code_len = code_bytes;
22062 unsigned char c;
22063 u8 *ip;
22064+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22065
22066 pr_emerg("Stack:\n");
22067 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22068
22069 pr_emerg("Code:");
22070
22071- ip = (u8 *)regs->ip - code_prologue;
22072+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22073 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22074 /* try starting at IP */
22075- ip = (u8 *)regs->ip;
22076+ ip = (u8 *)regs->ip + cs_base;
22077 code_len = code_len - code_prologue + 1;
22078 }
22079 for (i = 0; i < code_len; i++, ip++) {
22080@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22081 pr_cont(" Bad EIP value.");
22082 break;
22083 }
22084- if (ip == (u8 *)regs->ip)
22085+ if (ip == (u8 *)regs->ip + cs_base)
22086 pr_cont(" <%02x>", c);
22087 else
22088 pr_cont(" %02x", c);
22089@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22090 {
22091 unsigned short ud2;
22092
22093+ ip = ktla_ktva(ip);
22094 if (ip < PAGE_OFFSET)
22095 return 0;
22096 if (probe_kernel_address((unsigned short *)ip, ud2))
22097@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22098
22099 return ud2 == 0x0b0f;
22100 }
22101+
22102+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22103+void pax_check_alloca(unsigned long size)
22104+{
22105+ unsigned long sp = (unsigned long)&sp, stack_left;
22106+
22107+ /* all kernel stacks are of the same size */
22108+ stack_left = sp & (THREAD_SIZE - 1);
22109+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22110+}
22111+EXPORT_SYMBOL(pax_check_alloca);
22112+#endif
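pax_check_alloca() rejects any alloca/VLA that would not leave at least 256 bytes of headroom on the current kernel stack; on 32-bit every kernel stack has the same size and alignment, so the remaining space is just the stack pointer modulo THREAD_SIZE. A userspace model of the guard (a real user stack is not THREAD_SIZE-aligned, so treat this as illustrative only):

    #include <assert.h>

    #define THREAD_SIZE (8 * 1024UL)

    static void check_alloca(unsigned long size)
    {
        unsigned long sp = (unsigned long)&sp;        /* approximate SP */
        unsigned long left = sp & (THREAD_SIZE - 1);  /* bytes below SP */

        assert(left >= 256 && size < left - 256);
    }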
22113diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22114index ff86f19..73eabf4 100644
22115--- a/arch/x86/kernel/dumpstack_64.c
22116+++ b/arch/x86/kernel/dumpstack_64.c
22117@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22118 const struct stacktrace_ops *ops, void *data)
22119 {
22120 const unsigned cpu = get_cpu();
22121- struct thread_info *tinfo;
22122 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22123 unsigned long dummy;
22124 unsigned used = 0;
22125 int graph = 0;
22126 int done = 0;
22127+ void *stack_start;
22128
22129 if (!task)
22130 task = current;
22131@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22132 * current stack address. If the stacks consist of nested
22133 * exceptions
22134 */
22135- tinfo = task_thread_info(task);
22136 while (!done) {
22137 unsigned long *stack_end;
22138 enum stack_type stype;
22139@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22140 if (ops->stack(data, id) < 0)
22141 break;
22142
22143- bp = ops->walk_stack(tinfo, stack, bp, ops,
22144+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22145 data, stack_end, &graph);
22146 ops->stack(data, "<EOE>");
22147 /*
22148@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22149 * second-to-last pointer (index -2 to end) in the
22150 * exception stack:
22151 */
22152+ if ((u16)stack_end[-1] != __KERNEL_DS)
22153+ goto out;
22154 stack = (unsigned long *) stack_end[-2];
22155 done = 0;
22156 break;
22157@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22158
22159 if (ops->stack(data, "IRQ") < 0)
22160 break;
22161- bp = ops->walk_stack(tinfo, stack, bp,
22162+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22163 ops, data, stack_end, &graph);
22164 /*
22165 * We link to the next stack (which would be
22166@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22167 /*
22168 * This handles the process stack:
22169 */
22170- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22171+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22172+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22173+out:
22174 put_cpu();
22175 }
22176 EXPORT_SYMBOL(dump_trace);
22177@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22178 {
22179 unsigned short ud2;
22180
22181- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22182+ if (probe_kernel_address((unsigned short *)ip, ud2))
22183 return 0;
22184
22185 return ud2 == 0x0b0f;
22186 }
22187+
22188+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22189+void pax_check_alloca(unsigned long size)
22190+{
22191+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22192+ unsigned cpu, used;
22193+ char *id;
22194+
22195+ /* check the process stack first */
22196+ stack_start = (unsigned long)task_stack_page(current);
22197+ stack_end = stack_start + THREAD_SIZE;
22198+ if (likely(stack_start <= sp && sp < stack_end)) {
22199+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22200+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22201+ return;
22202+ }
22203+
22204+ cpu = get_cpu();
22205+
22206+ /* check the irq stacks */
22207+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22208+ stack_start = stack_end - IRQ_STACK_SIZE;
22209+ if (stack_start <= sp && sp < stack_end) {
22210+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22211+ put_cpu();
22212+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22213+ return;
22214+ }
22215+
22216+ /* check the exception stacks */
22217+ used = 0;
22218+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22219+ stack_start = stack_end - EXCEPTION_STKSZ;
22220+ if (stack_end && stack_start <= sp && sp < stack_end) {
22221+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22222+ put_cpu();
22223+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22224+ return;
22225+ }
22226+
22227+ put_cpu();
22228+
22229+ /* unknown stack */
22230+ BUG();
22231+}
22232+EXPORT_SYMBOL(pax_check_alloca);
22233+#endif
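The 64-bit variant cannot assume a single stack: it classifies the stack pointer against the process stack, the per-CPU IRQ stack, and the exception stacks in turn, applies the same 256-byte guard with the matching stack's size, and BUG()s if the pointer belongs to none of them. The same hunk also makes is_valid_bugaddr() probe the kernel address with probe_kernel_address() instead of __copy_from_user(), which is the wrong accessor for kernel text once user and kernel spaces are separated. A sketch of the range classification:

    struct stack_range { unsigned long start, end; };

    static int find_stack(const struct stack_range *rs, int n,
                          unsigned long sp)
    {
        int i;

        for (i = 0; i < n; i++)
            if (rs[i].start <= sp && sp < rs[i].end)
                return i;   /* index of the owning stack */
        return -1;          /* unknown stack; the code above BUG()s */
    }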
22234diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22235index 46201de..ebffabf 100644
22236--- a/arch/x86/kernel/e820.c
22237+++ b/arch/x86/kernel/e820.c
22238@@ -794,8 +794,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22239
22240 static void early_panic(char *msg)
22241 {
22242- early_printk(msg);
22243- panic(msg);
22244+ early_printk("%s", msg);
22245+ panic("%s", msg);
22246 }
22247
22248 static int userdef __initdata;
22249diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22250index a62536a..8444df4 100644
22251--- a/arch/x86/kernel/early_printk.c
22252+++ b/arch/x86/kernel/early_printk.c
22253@@ -7,6 +7,7 @@
22254 #include <linux/pci_regs.h>
22255 #include <linux/pci_ids.h>
22256 #include <linux/errno.h>
22257+#include <linux/sched.h>
22258 #include <asm/io.h>
22259 #include <asm/processor.h>
22260 #include <asm/fcntl.h>
22261diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22262index 31e2d5b..b31c76d 100644
22263--- a/arch/x86/kernel/entry_32.S
22264+++ b/arch/x86/kernel/entry_32.S
22265@@ -177,13 +177,154 @@
22266 /*CFI_REL_OFFSET gs, PT_GS*/
22267 .endm
22268 .macro SET_KERNEL_GS reg
22269+
22270+#ifdef CONFIG_CC_STACKPROTECTOR
22271 movl $(__KERNEL_STACK_CANARY), \reg
22272+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22273+ movl $(__USER_DS), \reg
22274+#else
22275+ xorl \reg, \reg
22276+#endif
22277+
22278 movl \reg, %gs
22279 .endm
22280
22281 #endif /* CONFIG_X86_32_LAZY_GS */
22282
22283-.macro SAVE_ALL
22284+.macro pax_enter_kernel
22285+#ifdef CONFIG_PAX_KERNEXEC
22286+ call pax_enter_kernel
22287+#endif
22288+.endm
22289+
22290+.macro pax_exit_kernel
22291+#ifdef CONFIG_PAX_KERNEXEC
22292+ call pax_exit_kernel
22293+#endif
22294+.endm
22295+
22296+#ifdef CONFIG_PAX_KERNEXEC
22297+ENTRY(pax_enter_kernel)
22298+#ifdef CONFIG_PARAVIRT
22299+ pushl %eax
22300+ pushl %ecx
22301+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22302+ mov %eax, %esi
22303+#else
22304+ mov %cr0, %esi
22305+#endif
22306+ bts $16, %esi
22307+ jnc 1f
22308+ mov %cs, %esi
22309+ cmp $__KERNEL_CS, %esi
22310+ jz 3f
22311+ ljmp $__KERNEL_CS, $3f
22312+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22313+2:
22314+#ifdef CONFIG_PARAVIRT
22315+ mov %esi, %eax
22316+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22317+#else
22318+ mov %esi, %cr0
22319+#endif
22320+3:
22321+#ifdef CONFIG_PARAVIRT
22322+ popl %ecx
22323+ popl %eax
22324+#endif
22325+ ret
22326+ENDPROC(pax_enter_kernel)
22327+
22328+ENTRY(pax_exit_kernel)
22329+#ifdef CONFIG_PARAVIRT
22330+ pushl %eax
22331+ pushl %ecx
22332+#endif
22333+ mov %cs, %esi
22334+ cmp $__KERNEXEC_KERNEL_CS, %esi
22335+ jnz 2f
22336+#ifdef CONFIG_PARAVIRT
22337+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22338+ mov %eax, %esi
22339+#else
22340+ mov %cr0, %esi
22341+#endif
22342+ btr $16, %esi
22343+ ljmp $__KERNEL_CS, $1f
22344+1:
22345+#ifdef CONFIG_PARAVIRT
22346+ mov %esi, %eax
22347+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22348+#else
22349+ mov %esi, %cr0
22350+#endif
22351+2:
22352+#ifdef CONFIG_PARAVIRT
22353+ popl %ecx
22354+ popl %eax
22355+#endif
22356+ ret
22357+ENDPROC(pax_exit_kernel)
22358+#endif
22359+
22360+ .macro pax_erase_kstack
22361+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22362+ call pax_erase_kstack
22363+#endif
22364+ .endm
22365+
22366+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22367+/*
22368+ * ebp: thread_info
22369+ */
22370+ENTRY(pax_erase_kstack)
22371+ pushl %edi
22372+ pushl %ecx
22373+ pushl %eax
22374+
22375+ mov TI_lowest_stack(%ebp), %edi
22376+ mov $-0xBEEF, %eax
22377+ std
22378+
22379+1: mov %edi, %ecx
22380+ and $THREAD_SIZE_asm - 1, %ecx
22381+ shr $2, %ecx
22382+ repne scasl
22383+ jecxz 2f
22384+
22385+ cmp $2*16, %ecx
22386+ jc 2f
22387+
22388+ mov $2*16, %ecx
22389+ repe scasl
22390+ jecxz 2f
22391+ jne 1b
22392+
22393+2: cld
22394+ or $2*4, %edi
22395+ mov %esp, %ecx
22396+ sub %edi, %ecx
22397+
22398+ cmp $THREAD_SIZE_asm, %ecx
22399+ jb 3f
22400+ ud2
22401+3:
22402+
22403+ shr $2, %ecx
22404+ rep stosl
22405+
22406+ mov TI_task_thread_sp0(%ebp), %edi
22407+ sub $128, %edi
22408+ mov %edi, TI_lowest_stack(%ebp)
22409+
22410+ popl %eax
22411+ popl %ecx
22412+ popl %edi
22413+ ret
22414+ENDPROC(pax_erase_kstack)
22415+#endif
22416+
22417+.macro __SAVE_ALL _DS
22418 cld
22419 PUSH_GS
22420 pushl_cfi %fs
22421@@ -206,7 +347,7 @@
22422 CFI_REL_OFFSET ecx, 0
22423 pushl_cfi %ebx
22424 CFI_REL_OFFSET ebx, 0
22425- movl $(__USER_DS), %edx
22426+ movl $\_DS, %edx
22427 movl %edx, %ds
22428 movl %edx, %es
22429 movl $(__KERNEL_PERCPU), %edx
22430@@ -214,6 +355,15 @@
22431 SET_KERNEL_GS %edx
22432 .endm
22433
22434+.macro SAVE_ALL
22435+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22436+ __SAVE_ALL __KERNEL_DS
22437+ pax_enter_kernel
22438+#else
22439+ __SAVE_ALL __USER_DS
22440+#endif
22441+.endm
22442+
22443 .macro RESTORE_INT_REGS
22444 popl_cfi %ebx
22445 CFI_RESTORE ebx
22446@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22447 popfl_cfi
22448 jmp syscall_exit
22449 CFI_ENDPROC
22450-END(ret_from_fork)
22451+ENDPROC(ret_from_fork)
22452
22453 ENTRY(ret_from_kernel_thread)
22454 CFI_STARTPROC
22455@@ -340,7 +490,15 @@ ret_from_intr:
22456 andl $SEGMENT_RPL_MASK, %eax
22457 #endif
22458 cmpl $USER_RPL, %eax
22459+
22460+#ifdef CONFIG_PAX_KERNEXEC
22461+ jae resume_userspace
22462+
22463+ pax_exit_kernel
22464+ jmp resume_kernel
22465+#else
22466 jb resume_kernel # not returning to v8086 or userspace
22467+#endif
22468
22469 ENTRY(resume_userspace)
22470 LOCKDEP_SYS_EXIT
22471@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22472 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22473 # int/exception return?
22474 jne work_pending
22475- jmp restore_all
22476-END(ret_from_exception)
22477+ jmp restore_all_pax
22478+ENDPROC(ret_from_exception)
22479
22480 #ifdef CONFIG_PREEMPT
22481 ENTRY(resume_kernel)
22482@@ -365,7 +523,7 @@ need_resched:
22483 jz restore_all
22484 call preempt_schedule_irq
22485 jmp need_resched
22486-END(resume_kernel)
22487+ENDPROC(resume_kernel)
22488 #endif
22489 CFI_ENDPROC
22490
22491@@ -395,30 +553,45 @@ sysenter_past_esp:
22492 /*CFI_REL_OFFSET cs, 0*/
22493 /*
22494 * Push current_thread_info()->sysenter_return to the stack.
22495- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22496- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22497 */
22498- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22499+ pushl_cfi $0
22500 CFI_REL_OFFSET eip, 0
22501
22502 pushl_cfi %eax
22503 SAVE_ALL
22504+ GET_THREAD_INFO(%ebp)
22505+ movl TI_sysenter_return(%ebp),%ebp
22506+ movl %ebp,PT_EIP(%esp)
22507 ENABLE_INTERRUPTS(CLBR_NONE)
22508
22509 /*
22510 * Load the potential sixth argument from user stack.
22511 * Careful about security.
22512 */
22513+ movl PT_OLDESP(%esp),%ebp
22514+
22515+#ifdef CONFIG_PAX_MEMORY_UDEREF
22516+ mov PT_OLDSS(%esp),%ds
22517+1: movl %ds:(%ebp),%ebp
22518+ push %ss
22519+ pop %ds
22520+#else
22521 cmpl $__PAGE_OFFSET-3,%ebp
22522 jae syscall_fault
22523 ASM_STAC
22524 1: movl (%ebp),%ebp
22525 ASM_CLAC
22526+#endif
22527+
22528 movl %ebp,PT_EBP(%esp)
22529 _ASM_EXTABLE(1b,syscall_fault)
22530
22531 GET_THREAD_INFO(%ebp)
22532
22533+#ifdef CONFIG_PAX_RANDKSTACK
22534+ pax_erase_kstack
22535+#endif
22536+
22537 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22538 jnz sysenter_audit
22539 sysenter_do_call:
22540@@ -434,12 +607,24 @@ sysenter_after_call:
22541 testl $_TIF_ALLWORK_MASK, %ecx
22542 jne sysexit_audit
22543 sysenter_exit:
22544+
22545+#ifdef CONFIG_PAX_RANDKSTACK
22546+ pushl_cfi %eax
22547+ movl %esp, %eax
22548+ call pax_randomize_kstack
22549+ popl_cfi %eax
22550+#endif
22551+
22552+ pax_erase_kstack
22553+
22554 /* if something modifies registers it must also disable sysexit */
22555 movl PT_EIP(%esp), %edx
22556 movl PT_OLDESP(%esp), %ecx
22557 xorl %ebp,%ebp
22558 TRACE_IRQS_ON
22559 1: mov PT_FS(%esp), %fs
22560+2: mov PT_DS(%esp), %ds
22561+3: mov PT_ES(%esp), %es
22562 PTGS_TO_GS
22563 ENABLE_INTERRUPTS_SYSEXIT
22564
22565@@ -453,6 +638,9 @@ sysenter_audit:
22566 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22567 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22568 call __audit_syscall_entry
22569+
22570+ pax_erase_kstack
22571+
22572 popl_cfi %ecx /* get that remapped edx off the stack */
22573 popl_cfi %ecx /* get that remapped esi off the stack */
22574 movl PT_EAX(%esp),%eax /* reload syscall number */
22575@@ -479,10 +667,16 @@ sysexit_audit:
22576
22577 CFI_ENDPROC
22578 .pushsection .fixup,"ax"
22579-2: movl $0,PT_FS(%esp)
22580+4: movl $0,PT_FS(%esp)
22581+ jmp 1b
22582+5: movl $0,PT_DS(%esp)
22583+ jmp 1b
22584+6: movl $0,PT_ES(%esp)
22585 jmp 1b
22586 .popsection
22587- _ASM_EXTABLE(1b,2b)
22588+ _ASM_EXTABLE(1b,4b)
22589+ _ASM_EXTABLE(2b,5b)
22590+ _ASM_EXTABLE(3b,6b)
22591 PTGS_TO_GS_EX
22592 ENDPROC(ia32_sysenter_target)
22593
22594@@ -493,6 +687,11 @@ ENTRY(system_call)
22595 pushl_cfi %eax # save orig_eax
22596 SAVE_ALL
22597 GET_THREAD_INFO(%ebp)
22598+
22599+#ifdef CONFIG_PAX_RANDKSTACK
22600+ pax_erase_kstack
22601+#endif
22602+
22603 # system call tracing in operation / emulation
22604 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22605 jnz syscall_trace_entry
22606@@ -512,6 +711,15 @@ syscall_exit:
22607 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22608 jne syscall_exit_work
22609
22610+restore_all_pax:
22611+
22612+#ifdef CONFIG_PAX_RANDKSTACK
22613+ movl %esp, %eax
22614+ call pax_randomize_kstack
22615+#endif
22616+
22617+ pax_erase_kstack
22618+
22619 restore_all:
22620 TRACE_IRQS_IRET
22621 restore_all_notrace:
22622@@ -566,14 +774,34 @@ ldt_ss:
22623 * compensating for the offset by changing to the ESPFIX segment with
22624 * a base address that matches for the difference.
22625 */
22626-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22627+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22628 mov %esp, %edx /* load kernel esp */
22629 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22630 mov %dx, %ax /* eax: new kernel esp */
22631 sub %eax, %edx /* offset (low word is 0) */
22632+#ifdef CONFIG_SMP
22633+ movl PER_CPU_VAR(cpu_number), %ebx
22634+ shll $PAGE_SHIFT_asm, %ebx
22635+ addl $cpu_gdt_table, %ebx
22636+#else
22637+ movl $cpu_gdt_table, %ebx
22638+#endif
22639 shr $16, %edx
22640- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22641- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22642+
22643+#ifdef CONFIG_PAX_KERNEXEC
22644+ mov %cr0, %esi
22645+ btr $16, %esi
22646+ mov %esi, %cr0
22647+#endif
22648+
22649+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22650+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22651+
22652+#ifdef CONFIG_PAX_KERNEXEC
22653+ bts $16, %esi
22654+ mov %esi, %cr0
22655+#endif
22656+
22657 pushl_cfi $__ESPFIX_SS
22658 pushl_cfi %eax /* new kernel esp */
22659 /* Disable interrupts, but do not irqtrace this section: we
22660@@ -603,20 +831,18 @@ work_resched:
22661 movl TI_flags(%ebp), %ecx
22662 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22663 # than syscall tracing?
22664- jz restore_all
22665+ jz restore_all_pax
22666 testb $_TIF_NEED_RESCHED, %cl
22667 jnz work_resched
22668
22669 work_notifysig: # deal with pending signals and
22670 # notify-resume requests
22671+ movl %esp, %eax
22672 #ifdef CONFIG_VM86
22673 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22674- movl %esp, %eax
22675 jne work_notifysig_v86 # returning to kernel-space or
22676 # vm86-space
22677 1:
22678-#else
22679- movl %esp, %eax
22680 #endif
22681 TRACE_IRQS_ON
22682 ENABLE_INTERRUPTS(CLBR_NONE)
22683@@ -637,7 +863,7 @@ work_notifysig_v86:
22684 movl %eax, %esp
22685 jmp 1b
22686 #endif
22687-END(work_pending)
22688+ENDPROC(work_pending)
22689
22690 # perform syscall exit tracing
22691 ALIGN
22692@@ -645,11 +871,14 @@ syscall_trace_entry:
22693 movl $-ENOSYS,PT_EAX(%esp)
22694 movl %esp, %eax
22695 call syscall_trace_enter
22696+
22697+ pax_erase_kstack
22698+
22699 /* What it returned is what we'll actually use. */
22700 cmpl $(NR_syscalls), %eax
22701 jnae syscall_call
22702 jmp syscall_exit
22703-END(syscall_trace_entry)
22704+ENDPROC(syscall_trace_entry)
22705
22706 # perform syscall exit tracing
22707 ALIGN
22708@@ -662,26 +891,30 @@ syscall_exit_work:
22709 movl %esp, %eax
22710 call syscall_trace_leave
22711 jmp resume_userspace
22712-END(syscall_exit_work)
22713+ENDPROC(syscall_exit_work)
22714 CFI_ENDPROC
22715
22716 RING0_INT_FRAME # can't unwind into user space anyway
22717 syscall_fault:
22718+#ifdef CONFIG_PAX_MEMORY_UDEREF
22719+ push %ss
22720+ pop %ds
22721+#endif
22722 ASM_CLAC
22723 GET_THREAD_INFO(%ebp)
22724 movl $-EFAULT,PT_EAX(%esp)
22725 jmp resume_userspace
22726-END(syscall_fault)
22727+ENDPROC(syscall_fault)
22728
22729 syscall_badsys:
22730 movl $-ENOSYS,%eax
22731 jmp syscall_after_call
22732-END(syscall_badsys)
22733+ENDPROC(syscall_badsys)
22734
22735 sysenter_badsys:
22736 movl $-ENOSYS,%eax
22737 jmp sysenter_after_call
22738-END(sysenter_badsys)
22739+ENDPROC(sysenter_badsys)
22740 CFI_ENDPROC
22741
22742 .macro FIXUP_ESPFIX_STACK
22743@@ -694,8 +927,15 @@ END(sysenter_badsys)
22744 */
22745 #ifdef CONFIG_X86_ESPFIX32
22746 /* fixup the stack */
22747- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22748- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22749+#ifdef CONFIG_SMP
22750+ movl PER_CPU_VAR(cpu_number), %ebx
22751+ shll $PAGE_SHIFT_asm, %ebx
22752+ addl $cpu_gdt_table, %ebx
22753+#else
22754+ movl $cpu_gdt_table, %ebx
22755+#endif
22756+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22757+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22758 shl $16, %eax
22759 addl %esp, %eax /* the adjusted stack pointer */
22760 pushl_cfi $__KERNEL_DS
22761@@ -751,7 +991,7 @@ vector=vector+1
22762 .endr
22763 2: jmp common_interrupt
22764 .endr
22765-END(irq_entries_start)
22766+ENDPROC(irq_entries_start)
22767
22768 .previous
22769 END(interrupt)
22770@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22771 pushl_cfi $do_coprocessor_error
22772 jmp error_code
22773 CFI_ENDPROC
22774-END(coprocessor_error)
22775+ENDPROC(coprocessor_error)
22776
22777 ENTRY(simd_coprocessor_error)
22778 RING0_INT_FRAME
22779@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22780 .section .altinstructions,"a"
22781 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22782 .previous
22783-.section .altinstr_replacement,"ax"
22784+.section .altinstr_replacement,"a"
22785 663: pushl $do_simd_coprocessor_error
22786 664:
22787 .previous
22788@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22789 #endif
22790 jmp error_code
22791 CFI_ENDPROC
22792-END(simd_coprocessor_error)
22793+ENDPROC(simd_coprocessor_error)
22794
22795 ENTRY(device_not_available)
22796 RING0_INT_FRAME
22797@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22798 pushl_cfi $do_device_not_available
22799 jmp error_code
22800 CFI_ENDPROC
22801-END(device_not_available)
22802+ENDPROC(device_not_available)
22803
22804 #ifdef CONFIG_PARAVIRT
22805 ENTRY(native_iret)
22806 iret
22807 _ASM_EXTABLE(native_iret, iret_exc)
22808-END(native_iret)
22809+ENDPROC(native_iret)
22810
22811 ENTRY(native_irq_enable_sysexit)
22812 sti
22813 sysexit
22814-END(native_irq_enable_sysexit)
22815+ENDPROC(native_irq_enable_sysexit)
22816 #endif
22817
22818 ENTRY(overflow)
22819@@ -860,7 +1100,7 @@ ENTRY(overflow)
22820 pushl_cfi $do_overflow
22821 jmp error_code
22822 CFI_ENDPROC
22823-END(overflow)
22824+ENDPROC(overflow)
22825
22826 ENTRY(bounds)
22827 RING0_INT_FRAME
22828@@ -869,7 +1109,7 @@ ENTRY(bounds)
22829 pushl_cfi $do_bounds
22830 jmp error_code
22831 CFI_ENDPROC
22832-END(bounds)
22833+ENDPROC(bounds)
22834
22835 ENTRY(invalid_op)
22836 RING0_INT_FRAME
22837@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22838 pushl_cfi $do_invalid_op
22839 jmp error_code
22840 CFI_ENDPROC
22841-END(invalid_op)
22842+ENDPROC(invalid_op)
22843
22844 ENTRY(coprocessor_segment_overrun)
22845 RING0_INT_FRAME
22846@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22847 pushl_cfi $do_coprocessor_segment_overrun
22848 jmp error_code
22849 CFI_ENDPROC
22850-END(coprocessor_segment_overrun)
22851+ENDPROC(coprocessor_segment_overrun)
22852
22853 ENTRY(invalid_TSS)
22854 RING0_EC_FRAME
22855@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22856 pushl_cfi $do_invalid_TSS
22857 jmp error_code
22858 CFI_ENDPROC
22859-END(invalid_TSS)
22860+ENDPROC(invalid_TSS)
22861
22862 ENTRY(segment_not_present)
22863 RING0_EC_FRAME
22864@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22865 pushl_cfi $do_segment_not_present
22866 jmp error_code
22867 CFI_ENDPROC
22868-END(segment_not_present)
22869+ENDPROC(segment_not_present)
22870
22871 ENTRY(stack_segment)
22872 RING0_EC_FRAME
22873@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22874 pushl_cfi $do_stack_segment
22875 jmp error_code
22876 CFI_ENDPROC
22877-END(stack_segment)
22878+ENDPROC(stack_segment)
22879
22880 ENTRY(alignment_check)
22881 RING0_EC_FRAME
22882@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22883 pushl_cfi $do_alignment_check
22884 jmp error_code
22885 CFI_ENDPROC
22886-END(alignment_check)
22887+ENDPROC(alignment_check)
22888
22889 ENTRY(divide_error)
22890 RING0_INT_FRAME
22891@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22892 pushl_cfi $do_divide_error
22893 jmp error_code
22894 CFI_ENDPROC
22895-END(divide_error)
22896+ENDPROC(divide_error)
22897
22898 #ifdef CONFIG_X86_MCE
22899 ENTRY(machine_check)
22900@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22901 pushl_cfi machine_check_vector
22902 jmp error_code
22903 CFI_ENDPROC
22904-END(machine_check)
22905+ENDPROC(machine_check)
22906 #endif
22907
22908 ENTRY(spurious_interrupt_bug)
22909@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22910 pushl_cfi $do_spurious_interrupt_bug
22911 jmp error_code
22912 CFI_ENDPROC
22913-END(spurious_interrupt_bug)
22914+ENDPROC(spurious_interrupt_bug)
22915
22916 #ifdef CONFIG_XEN
22917 /* Xen doesn't set %esp to be precisely what the normal sysenter
22918@@ -1057,7 +1297,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22919
22920 ENTRY(mcount)
22921 ret
22922-END(mcount)
22923+ENDPROC(mcount)
22924
22925 ENTRY(ftrace_caller)
22926 pushl %eax
22927@@ -1087,7 +1327,7 @@ ftrace_graph_call:
22928 .globl ftrace_stub
22929 ftrace_stub:
22930 ret
22931-END(ftrace_caller)
22932+ENDPROC(ftrace_caller)
22933
22934 ENTRY(ftrace_regs_caller)
22935 pushf /* push flags before compare (in cs location) */
22936@@ -1185,7 +1425,7 @@ trace:
22937 popl %ecx
22938 popl %eax
22939 jmp ftrace_stub
22940-END(mcount)
22941+ENDPROC(mcount)
22942 #endif /* CONFIG_DYNAMIC_FTRACE */
22943 #endif /* CONFIG_FUNCTION_TRACER */
22944
22945@@ -1203,7 +1443,7 @@ ENTRY(ftrace_graph_caller)
22946 popl %ecx
22947 popl %eax
22948 ret
22949-END(ftrace_graph_caller)
22950+ENDPROC(ftrace_graph_caller)
22951
22952 .globl return_to_handler
22953 return_to_handler:
22954@@ -1264,15 +1504,18 @@ error_code:
22955 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22956 REG_TO_PTGS %ecx
22957 SET_KERNEL_GS %ecx
22958- movl $(__USER_DS), %ecx
22959+ movl $(__KERNEL_DS), %ecx
22960 movl %ecx, %ds
22961 movl %ecx, %es
22962+
22963+ pax_enter_kernel
22964+
22965 TRACE_IRQS_OFF
22966 movl %esp,%eax # pt_regs pointer
22967 call *%edi
22968 jmp ret_from_exception
22969 CFI_ENDPROC
22970-END(page_fault)
22971+ENDPROC(page_fault)
22972
22973 /*
22974 * Debug traps and NMI can happen at the one SYSENTER instruction
22975@@ -1315,7 +1558,7 @@ debug_stack_correct:
22976 call do_debug
22977 jmp ret_from_exception
22978 CFI_ENDPROC
22979-END(debug)
22980+ENDPROC(debug)
22981
22982 /*
22983 * NMI is doubly nasty. It can happen _while_ we're handling
22984@@ -1355,6 +1598,9 @@ nmi_stack_correct:
22985 xorl %edx,%edx # zero error code
22986 movl %esp,%eax # pt_regs pointer
22987 call do_nmi
22988+
22989+ pax_exit_kernel
22990+
22991 jmp restore_all_notrace
22992 CFI_ENDPROC
22993
22994@@ -1392,13 +1638,16 @@ nmi_espfix_stack:
22995 FIXUP_ESPFIX_STACK # %eax == %esp
22996 xorl %edx,%edx # zero error code
22997 call do_nmi
22998+
22999+ pax_exit_kernel
23000+
23001 RESTORE_REGS
23002 lss 12+4(%esp), %esp # back to espfix stack
23003 CFI_ADJUST_CFA_OFFSET -24
23004 jmp irq_return
23005 #endif
23006 CFI_ENDPROC
23007-END(nmi)
23008+ENDPROC(nmi)
23009
23010 ENTRY(int3)
23011 RING0_INT_FRAME
23012@@ -1411,14 +1660,14 @@ ENTRY(int3)
23013 call do_int3
23014 jmp ret_from_exception
23015 CFI_ENDPROC
23016-END(int3)
23017+ENDPROC(int3)
23018
23019 ENTRY(general_protection)
23020 RING0_EC_FRAME
23021 pushl_cfi $do_general_protection
23022 jmp error_code
23023 CFI_ENDPROC
23024-END(general_protection)
23025+ENDPROC(general_protection)
23026
23027 #ifdef CONFIG_KVM_GUEST
23028 ENTRY(async_page_fault)
23029@@ -1427,6 +1676,6 @@ ENTRY(async_page_fault)
23030 pushl_cfi $do_async_page_fault
23031 jmp error_code
23032 CFI_ENDPROC
23033-END(async_page_fault)
23034+ENDPROC(async_page_fault)
23035 #endif
23036
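Before the entry_64.S diff, a word on pax_erase_kstack, which appears in both entry files: it scans downward from the recorded lowest_stack for a run of the 0xBEEF-based poison value (a run of 2*16 dwords in the 32-bit code, so a stray matching word on the stack does not end the scan early), then refills everything from that watermark up to the live stack pointer with the poison and resets lowest_stack to just under the task's sp0. A simplified C model of the 32-bit loop; the run handling is approximate and the real code works on raw registers:

    #include <stdint.h>

    #define THREAD_SIZE (8 * 1024UL)
    #define POISON      ((uint32_t)-0xBEEF)   /* 0xffff4111 */
    #define RUN         (2 * 16)              /* dwords that must match */

    static void erase_kstack(uint32_t *lowest, uint32_t *sp)
    {
        uint32_t *bottom = (uint32_t *)((uintptr_t)lowest & ~(THREAD_SIZE - 1));
        uint32_t *p = lowest;

        while (p - bottom >= RUN) {           /* scan downward */
            int i;

            for (i = 0; i < RUN && p[-i] == POISON; i++)
                ;
            if (i == RUN)
                break;                        /* genuine watermark found */
            p -= i + 1;                       /* step past the mismatch */
        }

        while (p < sp)                        /* repoison up to live SP */
            *p++ = POISON;
    }

The 64-bit routine below (in entry_64.S) is the same algorithm over qwords, with a 2*8-word run and a 256-byte offset below sp0.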
23037diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23038index f0095a7..ec77893 100644
23039--- a/arch/x86/kernel/entry_64.S
23040+++ b/arch/x86/kernel/entry_64.S
23041@@ -59,6 +59,8 @@
23042 #include <asm/smap.h>
23043 #include <asm/pgtable_types.h>
23044 #include <linux/err.h>
23045+#include <asm/pgtable.h>
23046+#include <asm/alternative-asm.h>
23047
23048 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23049 #include <linux/elf-em.h>
23050@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23051 ENDPROC(native_usergs_sysret64)
23052 #endif /* CONFIG_PARAVIRT */
23053
23054+ .macro ljmpq sel, off
23055+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23056+ .byte 0x48; ljmp *1234f(%rip)
23057+ .pushsection .rodata
23058+ .align 16
23059+ 1234: .quad \off; .word \sel
23060+ .popsection
23061+#else
23062+ pushq $\sel
23063+ pushq $\off
23064+ lretq
23065+#endif
23066+ .endm
23067+
23068+ .macro pax_enter_kernel
23069+ pax_set_fptr_mask
23070+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23071+ call pax_enter_kernel
23072+#endif
23073+ .endm
23074+
23075+ .macro pax_exit_kernel
23076+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23077+ call pax_exit_kernel
23078+#endif
23079+
23080+ .endm
23081+
23082+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23083+ENTRY(pax_enter_kernel)
23084+ pushq %rdi
23085+
23086+#ifdef CONFIG_PARAVIRT
23087+ PV_SAVE_REGS(CLBR_RDI)
23088+#endif
23089+
23090+#ifdef CONFIG_PAX_KERNEXEC
23091+ GET_CR0_INTO_RDI
23092+ bts $16,%rdi
23093+ jnc 3f
23094+ mov %cs,%edi
23095+ cmp $__KERNEL_CS,%edi
23096+ jnz 2f
23097+1:
23098+#endif
23099+
23100+#ifdef CONFIG_PAX_MEMORY_UDEREF
23101+ 661: jmp 111f
23102+ .pushsection .altinstr_replacement, "a"
23103+ 662: ASM_NOP2
23104+ .popsection
23105+ .pushsection .altinstructions, "a"
23106+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23107+ .popsection
23108+ GET_CR3_INTO_RDI
23109+ cmp $0,%dil
23110+ jnz 112f
23111+ mov $__KERNEL_DS,%edi
23112+ mov %edi,%ss
23113+ jmp 111f
23114+112: cmp $1,%dil
23115+ jz 113f
23116+ ud2
23117+113: sub $4097,%rdi
23118+ bts $63,%rdi
23119+ SET_RDI_INTO_CR3
23120+ mov $__UDEREF_KERNEL_DS,%edi
23121+ mov %edi,%ss
23122+111:
23123+#endif
23124+
23125+#ifdef CONFIG_PARAVIRT
23126+ PV_RESTORE_REGS(CLBR_RDI)
23127+#endif
23128+
23129+ popq %rdi
23130+ pax_force_retaddr
23131+ retq
23132+
23133+#ifdef CONFIG_PAX_KERNEXEC
23134+2: ljmpq __KERNEL_CS,1b
23135+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23136+4: SET_RDI_INTO_CR0
23137+ jmp 1b
23138+#endif
23139+ENDPROC(pax_enter_kernel)
23140+
23141+ENTRY(pax_exit_kernel)
23142+ pushq %rdi
23143+
23144+#ifdef CONFIG_PARAVIRT
23145+ PV_SAVE_REGS(CLBR_RDI)
23146+#endif
23147+
23148+#ifdef CONFIG_PAX_KERNEXEC
23149+ mov %cs,%rdi
23150+ cmp $__KERNEXEC_KERNEL_CS,%edi
23151+ jz 2f
23152+ GET_CR0_INTO_RDI
23153+ bts $16,%rdi
23154+ jnc 4f
23155+1:
23156+#endif
23157+
23158+#ifdef CONFIG_PAX_MEMORY_UDEREF
23159+ 661: jmp 111f
23160+ .pushsection .altinstr_replacement, "a"
23161+ 662: ASM_NOP2
23162+ .popsection
23163+ .pushsection .altinstructions, "a"
23164+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23165+ .popsection
23166+ mov %ss,%edi
23167+ cmp $__UDEREF_KERNEL_DS,%edi
23168+ jnz 111f
23169+ GET_CR3_INTO_RDI
23170+ cmp $0,%dil
23171+ jz 112f
23172+ ud2
23173+112: add $4097,%rdi
23174+ bts $63,%rdi
23175+ SET_RDI_INTO_CR3
23176+ mov $__KERNEL_DS,%edi
23177+ mov %edi,%ss
23178+111:
23179+#endif
23180+
23181+#ifdef CONFIG_PARAVIRT
23182+ PV_RESTORE_REGS(CLBR_RDI);
23183+#endif
23184+
23185+ popq %rdi
23186+ pax_force_retaddr
23187+ retq
23188+
23189+#ifdef CONFIG_PAX_KERNEXEC
23190+2: GET_CR0_INTO_RDI
23191+ btr $16,%rdi
23192+ jnc 4f
23193+ ljmpq __KERNEL_CS,3f
23194+3: SET_RDI_INTO_CR0
23195+ jmp 1b
23196+4: ud2
23197+ jmp 4b
23198+#endif
23199+ENDPROC(pax_exit_kernel)
23200+#endif
23201+
23202+ .macro pax_enter_kernel_user
23203+ pax_set_fptr_mask
23204+#ifdef CONFIG_PAX_MEMORY_UDEREF
23205+ call pax_enter_kernel_user
23206+#endif
23207+ .endm
23208+
23209+ .macro pax_exit_kernel_user
23210+#ifdef CONFIG_PAX_MEMORY_UDEREF
23211+ call pax_exit_kernel_user
23212+#endif
23213+#ifdef CONFIG_PAX_RANDKSTACK
23214+ pushq %rax
23215+ pushq %r11
23216+ call pax_randomize_kstack
23217+ popq %r11
23218+ popq %rax
23219+#endif
23220+ .endm
23221+
23222+#ifdef CONFIG_PAX_MEMORY_UDEREF
23223+ENTRY(pax_enter_kernel_user)
23224+ pushq %rdi
23225+ pushq %rbx
23226+
23227+#ifdef CONFIG_PARAVIRT
23228+ PV_SAVE_REGS(CLBR_RDI)
23229+#endif
23230+
23231+ 661: jmp 111f
23232+ .pushsection .altinstr_replacement, "a"
23233+ 662: ASM_NOP2
23234+ .popsection
23235+ .pushsection .altinstructions, "a"
23236+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23237+ .popsection
23238+ GET_CR3_INTO_RDI
23239+ cmp $1,%dil
23240+ jnz 4f
23241+ sub $4097,%rdi
23242+ bts $63,%rdi
23243+ SET_RDI_INTO_CR3
23244+ jmp 3f
23245+111:
23246+
23247+ GET_CR3_INTO_RDI
23248+ mov %rdi,%rbx
23249+ add $__START_KERNEL_map,%rbx
23250+ sub phys_base(%rip),%rbx
23251+
23252+#ifdef CONFIG_PARAVIRT
23253+ cmpl $0, pv_info+PARAVIRT_enabled
23254+ jz 1f
23255+ pushq %rdi
23256+ i = 0
23257+ .rept USER_PGD_PTRS
23258+ mov i*8(%rbx),%rsi
23259+ mov $0,%sil
23260+ lea i*8(%rbx),%rdi
23261+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23262+ i = i + 1
23263+ .endr
23264+ popq %rdi
23265+ jmp 2f
23266+1:
23267+#endif
23268+
23269+ i = 0
23270+ .rept USER_PGD_PTRS
23271+ movb $0,i*8(%rbx)
23272+ i = i + 1
23273+ .endr
23274+
23275+2: SET_RDI_INTO_CR3
23276+
23277+#ifdef CONFIG_PAX_KERNEXEC
23278+ GET_CR0_INTO_RDI
23279+ bts $16,%rdi
23280+ SET_RDI_INTO_CR0
23281+#endif
23282+
23283+3:
23284+
23285+#ifdef CONFIG_PARAVIRT
23286+ PV_RESTORE_REGS(CLBR_RDI)
23287+#endif
23288+
23289+ popq %rbx
23290+ popq %rdi
23291+ pax_force_retaddr
23292+ retq
23293+4: ud2
23294+ENDPROC(pax_enter_kernel_user)
23295+
23296+ENTRY(pax_exit_kernel_user)
23297+ pushq %rdi
23298+ pushq %rbx
23299+
23300+#ifdef CONFIG_PARAVIRT
23301+ PV_SAVE_REGS(CLBR_RDI)
23302+#endif
23303+
23304+ GET_CR3_INTO_RDI
23305+ 661: jmp 1f
23306+ .pushsection .altinstr_replacement, "a"
23307+ 662: ASM_NOP2
23308+ .popsection
23309+ .pushsection .altinstructions, "a"
23310+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23311+ .popsection
23312+ cmp $0,%dil
23313+ jnz 3f
23314+ add $4097,%rdi
23315+ bts $63,%rdi
23316+ SET_RDI_INTO_CR3
23317+ jmp 2f
23318+1:
23319+
23320+ mov %rdi,%rbx
23321+
23322+#ifdef CONFIG_PAX_KERNEXEC
23323+ GET_CR0_INTO_RDI
23324+ btr $16,%rdi
23325+ jnc 3f
23326+ SET_RDI_INTO_CR0
23327+#endif
23328+
23329+ add $__START_KERNEL_map,%rbx
23330+ sub phys_base(%rip),%rbx
23331+
23332+#ifdef CONFIG_PARAVIRT
23333+ cmpl $0, pv_info+PARAVIRT_enabled
23334+ jz 1f
23335+ i = 0
23336+ .rept USER_PGD_PTRS
23337+ mov i*8(%rbx),%rsi
23338+ mov $0x67,%sil
23339+ lea i*8(%rbx),%rdi
23340+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23341+ i = i + 1
23342+ .endr
23343+ jmp 2f
23344+1:
23345+#endif
23346+
23347+ i = 0
23348+ .rept USER_PGD_PTRS
23349+ movb $0x67,i*8(%rbx)
23350+ i = i + 1
23351+ .endr
23352+2:
23353+
23354+#ifdef CONFIG_PARAVIRT
23355+ PV_RESTORE_REGS(CLBR_RDI)
23356+#endif
23357+
23358+ popq %rbx
23359+ popq %rdi
23360+ pax_force_retaddr
23361+ retq
23362+3: ud2
23363+ENDPROC(pax_exit_kernel_user)
23364+#endif
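
The two routines above implement the UDEREF enter/exit flip on the shadow PGD: on kernel entry the attribute byte of each user PGD slot is cleared (movb $0), hiding userland; on exit it is restored to 0x67 (present/RW/user/accessed/dirty). Under paravirt the same loop goes through pv_mmu_ops.set_pgd_batched instead of a raw store. A byte-level C mirror of the non-paravirt loops (USER_PGD_PTRS as half of a 512-entry PGD is an assumption here):

    #include <stdint.h>

    #define USER_PGD_PTRS 256  /* assumption: userland half of the PGD */

    static void uderef_enter(uint64_t *shadow_pgd)
    {
        /* movb $0,i*8(%rbx): clear the attribute byte, hide userland */
        for (int i = 0; i < USER_PGD_PTRS; i++)
            ((uint8_t *)&shadow_pgd[i])[0] = 0;
    }

    static void uderef_exit(uint64_t *shadow_pgd)
    {
        /* movb $0x67,i*8(%rbx): restore present/RW/user/accessed/dirty */
        for (int i = 0; i < USER_PGD_PTRS; i++)
            ((uint8_t *)&shadow_pgd[i])[0] = 0x67;
    }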
23365+
23366+ .macro pax_enter_kernel_nmi
23367+ pax_set_fptr_mask
23368+
23369+#ifdef CONFIG_PAX_KERNEXEC
23370+ GET_CR0_INTO_RDI
23371+ bts $16,%rdi
23372+ jc 110f
23373+ SET_RDI_INTO_CR0
23374+ or $2,%ebx
23375+110:
23376+#endif
23377+
23378+#ifdef CONFIG_PAX_MEMORY_UDEREF
23379+ 661: jmp 111f
23380+ .pushsection .altinstr_replacement, "a"
23381+ 662: ASM_NOP2
23382+ .popsection
23383+ .pushsection .altinstructions, "a"
23384+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23385+ .popsection
23386+ GET_CR3_INTO_RDI
23387+ cmp $0,%dil
23388+ jz 111f
23389+ sub $4097,%rdi
23390+ or $4,%ebx
23391+ bts $63,%rdi
23392+ SET_RDI_INTO_CR3
23393+ mov $__UDEREF_KERNEL_DS,%edi
23394+ mov %edi,%ss
23395+111:
23396+#endif
23397+ .endm
23398+
23399+ .macro pax_exit_kernel_nmi
23400+#ifdef CONFIG_PAX_KERNEXEC
23401+ btr $1,%ebx
23402+ jnc 110f
23403+ GET_CR0_INTO_RDI
23404+ btr $16,%rdi
23405+ SET_RDI_INTO_CR0
23406+110:
23407+#endif
23408+
23409+#ifdef CONFIG_PAX_MEMORY_UDEREF
23410+ btr $2,%ebx
23411+ jnc 111f
23412+ GET_CR3_INTO_RDI
23413+ add $4097,%rdi
23414+ bts $63,%rdi
23415+ SET_RDI_INTO_CR3
23416+ mov $__KERNEL_DS,%edi
23417+ mov %edi,%ss
23418+111:
23419+#endif
23420+ .endm
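
The NMI enter/exit macros above communicate through flag bits in %ebx; a C sketch of that layout, inferred from the or $2 / or $4 and btr $1 / btr $2 pairs here (and from the movl $1 / testl $1 in the paranoid save/exit paths elsewhere in this file):

    enum nmi_state_flags {
        NMI_NO_SWAPGS   = 1 << 0, /* GS was already the kernel's        */
        NMI_KERNEXEC_WP = 1 << 1, /* pax_enter_kernel_nmi set CR0.WP    */
        NMI_UDEREF_CR3  = 1 << 2, /* CR3 was switched to the kernel PGD */
    };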
23421+
23422+ .macro pax_erase_kstack
23423+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23424+ call pax_erase_kstack
23425+#endif
23426+ .endm
23427+
23428+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23429+ENTRY(pax_erase_kstack)
23430+ pushq %rdi
23431+ pushq %rcx
23432+ pushq %rax
23433+ pushq %r11
23434+
23435+ GET_THREAD_INFO(%r11)
23436+ mov TI_lowest_stack(%r11), %rdi
23437+ mov $-0xBEEF, %rax
23438+ std
23439+
23440+1: mov %edi, %ecx
23441+ and $THREAD_SIZE_asm - 1, %ecx
23442+ shr $3, %ecx
23443+ repne scasq
23444+ jecxz 2f
23445+
23446+ cmp $2*8, %ecx
23447+ jc 2f
23448+
23449+ mov $2*8, %ecx
23450+ repe scasq
23451+ jecxz 2f
23452+ jne 1b
23453+
23454+2: cld
23455+ or $2*8, %rdi
23456+ mov %esp, %ecx
23457+ sub %edi, %ecx
23458+
23459+ cmp $THREAD_SIZE_asm, %rcx
23460+ jb 3f
23461+ ud2
23462+3:
23463+
23464+ shr $3, %ecx
23465+ rep stosq
23466+
23467+ mov TI_task_thread_sp0(%r11), %rdi
23468+ sub $256, %rdi
23469+ mov %rdi, TI_lowest_stack(%r11)
23470+
23471+ popq %r11
23472+ popq %rax
23473+ popq %rcx
23474+ popq %rdi
23475+ pax_force_retaddr
23476+ ret
23477+ENDPROC(pax_erase_kstack)
23478+#endif
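
A rough C rendition of what pax_erase_kstack does above: scan downward from the deepest point the stack has reached until two adjacent poison qwords mark the still-clean region, then repoison everything from there up to the live stack pointer. Assumptions carried over from the assembly: the -0xBEEF poison value and 8-byte words; this is an illustrative sketch, not the routine itself:

    #include <stdint.h>

    #define POISON ((uint64_t)-0xBEEF)

    static void erase_kstack(uint64_t *stack_bottom, uint64_t *lowest,
                             uint64_t *sp)
    {
        uint64_t *p = lowest;

        /* find the boundary of the already-poisoned region */
        while (p > stack_bottom + 1 &&
               !(p[0] == POISON && p[-1] == POISON))
            p--;

        /* repoison the span dirtied since the last erase */
        while (p < sp)
            *p++ = POISON;
    }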
23479
23480 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23481 #ifdef CONFIG_TRACE_IRQFLAGS
23482@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23483 .endm
23484
23485 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23486- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23487+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23488 jnc 1f
23489 TRACE_IRQS_ON_DEBUG
23490 1:
23491@@ -243,9 +670,52 @@ ENTRY(save_paranoid)
23492 js 1f /* negative -> in kernel */
23493 SWAPGS
23494 xorl %ebx,%ebx
23495-1: ret
23496+1:
23497+#ifdef CONFIG_PAX_MEMORY_UDEREF
23498+ testb $3, CS+8(%rsp)
23499+ jnz 1f
23500+ pax_enter_kernel
23501+ jmp 2f
23502+1: pax_enter_kernel_user
23503+2:
23504+#else
23505+ pax_enter_kernel
23506+#endif
23507+ pax_force_retaddr
23508+ ret
23509 CFI_ENDPROC
23510-END(save_paranoid)
23511+ENDPROC(save_paranoid)
23512+
23513+ENTRY(save_paranoid_nmi)
23514+ XCPT_FRAME 1 RDI+8
23515+ cld
23516+ movq_cfi rdi, RDI+8
23517+ movq_cfi rsi, RSI+8
23518+ movq_cfi rdx, RDX+8
23519+ movq_cfi rcx, RCX+8
23520+ movq_cfi rax, RAX+8
23521+ movq_cfi r8, R8+8
23522+ movq_cfi r9, R9+8
23523+ movq_cfi r10, R10+8
23524+ movq_cfi r11, R11+8
23525+ movq_cfi rbx, RBX+8
23526+ movq_cfi rbp, RBP+8
23527+ movq_cfi r12, R12+8
23528+ movq_cfi r13, R13+8
23529+ movq_cfi r14, R14+8
23530+ movq_cfi r15, R15+8
23531+ movl $1,%ebx
23532+ movl $MSR_GS_BASE,%ecx
23533+ rdmsr
23534+ testl %edx,%edx
23535+ js 1f /* negative -> in kernel */
23536+ SWAPGS
23537+ xorl %ebx,%ebx
23538+1: pax_enter_kernel_nmi
23539+ pax_force_retaddr
23540+ ret
23541+ CFI_ENDPROC
23542+ENDPROC(save_paranoid_nmi)
23543
23544 /*
23545 * A newly forked process directly context switches into this address.
23546@@ -266,7 +736,7 @@ ENTRY(ret_from_fork)
23547
23548 RESTORE_REST
23549
23550- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23551+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23552 jz 1f
23553
23554 /*
23555@@ -279,15 +749,13 @@ ENTRY(ret_from_fork)
23556 jmp int_ret_from_sys_call
23557
23558 1:
23559- subq $REST_SKIP, %rsp # leave space for volatiles
23560- CFI_ADJUST_CFA_OFFSET REST_SKIP
23561 movq %rbp, %rdi
23562 call *%rbx
23563 movl $0, RAX(%rsp)
23564 RESTORE_REST
23565 jmp int_ret_from_sys_call
23566 CFI_ENDPROC
23567-END(ret_from_fork)
23568+ENDPROC(ret_from_fork)
23569
23570 /*
23571 * System call entry. Up to 6 arguments in registers are supported.
23572@@ -324,7 +792,7 @@ END(ret_from_fork)
23573 ENTRY(system_call)
23574 CFI_STARTPROC simple
23575 CFI_SIGNAL_FRAME
23576- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23577+ CFI_DEF_CFA rsp,0
23578 CFI_REGISTER rip,rcx
23579 /*CFI_REGISTER rflags,r11*/
23580 SWAPGS_UNSAFE_STACK
23581@@ -337,16 +805,23 @@ GLOBAL(system_call_after_swapgs)
23582
23583 movq %rsp,PER_CPU_VAR(old_rsp)
23584 movq PER_CPU_VAR(kernel_stack),%rsp
23585+ SAVE_ARGS 8*6, 0, rax_enosys=1
23586+ pax_enter_kernel_user
23587+
23588+#ifdef CONFIG_PAX_RANDKSTACK
23589+ pax_erase_kstack
23590+#endif
23591+
23592 /*
23593 * No need to follow this irqs off/on section - it's straight
23594 * and short:
23595 */
23596 ENABLE_INTERRUPTS(CLBR_NONE)
23597- SAVE_ARGS 8, 0, rax_enosys=1
23598 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23599 movq %rcx,RIP-ARGOFFSET(%rsp)
23600 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23601- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23602+ GET_THREAD_INFO(%rcx)
23603+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23604 jnz tracesys
23605 system_call_fastpath:
23606 #if __SYSCALL_MASK == ~0
23607@@ -376,10 +851,13 @@ ret_from_sys_call:
23608 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23609 * very bad.
23610 */
23611- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23612+ GET_THREAD_INFO(%rcx)
23613+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23614 	jnz int_ret_from_sys_call_fixup	/* Go to the slow path */
23615
23616 CFI_REMEMBER_STATE
23617+ pax_exit_kernel_user
23618+ pax_erase_kstack
23619 /*
23620 * sysretq will re-enable interrupts:
23621 */
23622@@ -399,12 +877,15 @@ int_ret_from_sys_call_fixup:
23623
23624 /* Do syscall tracing */
23625 tracesys:
23626- leaq -REST_SKIP(%rsp), %rdi
23627+ movq %rsp, %rdi
23628 movq $AUDIT_ARCH_X86_64, %rsi
23629 call syscall_trace_enter_phase1
23630 test %rax, %rax
23631 jnz tracesys_phase2 /* if needed, run the slow path */
23632- LOAD_ARGS 0 /* else restore clobbered regs */
23633+
23634+ pax_erase_kstack
23635+
23636+ LOAD_ARGS /* else restore clobbered regs */
23637 jmp system_call_fastpath /* and return to the fast path */
23638
23639 tracesys_phase2:
23640@@ -415,12 +896,14 @@ tracesys_phase2:
23641 movq %rax,%rdx
23642 call syscall_trace_enter_phase2
23643
23644+ pax_erase_kstack
23645+
23646 /*
23647 * Reload arg registers from stack in case ptrace changed them.
23648 	 * We don't reload %rax because syscall_trace_enter_phase2() returned
23649 * the value it wants us to use in the table lookup.
23650 */
23651- LOAD_ARGS ARGOFFSET, 1
23652+ LOAD_ARGS 1
23653 RESTORE_REST
23654 #if __SYSCALL_MASK == ~0
23655 cmpq $__NR_syscall_max,%rax
23656@@ -451,7 +934,9 @@ GLOBAL(int_with_check)
23657 andl %edi,%edx
23658 jnz int_careful
23659 andl $~TS_COMPAT,TI_status(%rcx)
23660- jmp retint_swapgs
23661+ pax_exit_kernel_user
23662+ pax_erase_kstack
23663+ jmp retint_swapgs_pax
23664
23665 /* Either reschedule or signal or syscall exit tracking needed. */
23666 /* First do a reschedule test. */
23667@@ -497,7 +982,7 @@ int_restore_rest:
23668 TRACE_IRQS_OFF
23669 jmp int_with_check
23670 CFI_ENDPROC
23671-END(system_call)
23672+ENDPROC(system_call)
23673
23674 .macro FORK_LIKE func
23675 ENTRY(stub_\func)
23676@@ -510,9 +995,10 @@ ENTRY(stub_\func)
23677 DEFAULT_FRAME 0 8 /* offset 8: return address */
23678 call sys_\func
23679 RESTORE_TOP_OF_STACK %r11, 8
23680- ret $REST_SKIP /* pop extended registers */
23681+ pax_force_retaddr
23682+ ret
23683 CFI_ENDPROC
23684-END(stub_\func)
23685+ENDPROC(stub_\func)
23686 .endm
23687
23688 .macro FIXED_FRAME label,func
23689@@ -522,9 +1008,10 @@ ENTRY(\label)
23690 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23691 call \func
23692 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23693+ pax_force_retaddr
23694 ret
23695 CFI_ENDPROC
23696-END(\label)
23697+ENDPROC(\label)
23698 .endm
23699
23700 FORK_LIKE clone
23701@@ -543,7 +1030,7 @@ ENTRY(stub_execve)
23702 RESTORE_REST
23703 jmp int_ret_from_sys_call
23704 CFI_ENDPROC
23705-END(stub_execve)
23706+ENDPROC(stub_execve)
23707
23708 ENTRY(stub_execveat)
23709 CFI_STARTPROC
23710@@ -557,7 +1044,7 @@ ENTRY(stub_execveat)
23711 RESTORE_REST
23712 jmp int_ret_from_sys_call
23713 CFI_ENDPROC
23714-END(stub_execveat)
23715+ENDPROC(stub_execveat)
23716
23717 /*
23718 * sigreturn is special because it needs to restore all registers on return.
23719@@ -574,7 +1061,7 @@ ENTRY(stub_rt_sigreturn)
23720 RESTORE_REST
23721 jmp int_ret_from_sys_call
23722 CFI_ENDPROC
23723-END(stub_rt_sigreturn)
23724+ENDPROC(stub_rt_sigreturn)
23725
23726 #ifdef CONFIG_X86_X32_ABI
23727 ENTRY(stub_x32_rt_sigreturn)
23728@@ -588,7 +1075,7 @@ ENTRY(stub_x32_rt_sigreturn)
23729 RESTORE_REST
23730 jmp int_ret_from_sys_call
23731 CFI_ENDPROC
23732-END(stub_x32_rt_sigreturn)
23733+ENDPROC(stub_x32_rt_sigreturn)
23734
23735 ENTRY(stub_x32_execve)
23736 CFI_STARTPROC
23737@@ -602,7 +1089,7 @@ ENTRY(stub_x32_execve)
23738 RESTORE_REST
23739 jmp int_ret_from_sys_call
23740 CFI_ENDPROC
23741-END(stub_x32_execve)
23742+ENDPROC(stub_x32_execve)
23743
23744 ENTRY(stub_x32_execveat)
23745 CFI_STARTPROC
23746@@ -616,7 +1103,7 @@ ENTRY(stub_x32_execveat)
23747 RESTORE_REST
23748 jmp int_ret_from_sys_call
23749 CFI_ENDPROC
23750-END(stub_x32_execveat)
23751+ENDPROC(stub_x32_execveat)
23752
23753 #endif
23754
23755@@ -653,7 +1140,7 @@ vector=vector+1
23756 2: jmp common_interrupt
23757 .endr
23758 CFI_ENDPROC
23759-END(irq_entries_start)
23760+ENDPROC(irq_entries_start)
23761
23762 .previous
23763 END(interrupt)
23764@@ -670,28 +1157,29 @@ END(interrupt)
23765 /* 0(%rsp): ~(interrupt number) */
23766 .macro interrupt func
23767 /* reserve pt_regs for scratch regs and rbp */
23768- subq $ORIG_RAX-RBP, %rsp
23769- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23770+ subq $ORIG_RAX, %rsp
23771+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23772 cld
23773- /* start from rbp in pt_regs and jump over */
23774- movq_cfi rdi, (RDI-RBP)
23775- movq_cfi rsi, (RSI-RBP)
23776- movq_cfi rdx, (RDX-RBP)
23777- movq_cfi rcx, (RCX-RBP)
23778- movq_cfi rax, (RAX-RBP)
23779- movq_cfi r8, (R8-RBP)
23780- movq_cfi r9, (R9-RBP)
23781- movq_cfi r10, (R10-RBP)
23782- movq_cfi r11, (R11-RBP)
23783+ /* start from r15 in pt_regs and jump over */
23784+ movq_cfi rdi, RDI
23785+ movq_cfi rsi, RSI
23786+ movq_cfi rdx, RDX
23787+ movq_cfi rcx, RCX
23788+ movq_cfi rax, RAX
23789+ movq_cfi r8, R8
23790+ movq_cfi r9, R9
23791+ movq_cfi r10, R10
23792+ movq_cfi r11, R11
23793+ movq_cfi r12, R12
23794
23795 /* Save rbp so that we can unwind from get_irq_regs() */
23796- movq_cfi rbp, 0
23797+ movq_cfi rbp, RBP
23798
23799 /* Save previous stack value */
23800 movq %rsp, %rsi
23801
23802- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23803- testl $3, CS-RBP(%rsi)
23804+ movq %rsp,%rdi /* arg1 for handler */
23805+ testb $3, CS(%rsi)
23806 je 1f
23807 SWAPGS
23808 /*
23809@@ -711,6 +1199,18 @@ END(interrupt)
23810 0x06 /* DW_OP_deref */, \
23811 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23812 0x22 /* DW_OP_plus */
23813+
23814+#ifdef CONFIG_PAX_MEMORY_UDEREF
23815+ testb $3, CS(%rdi)
23816+ jnz 1f
23817+ pax_enter_kernel
23818+ jmp 2f
23819+1: pax_enter_kernel_user
23820+2:
23821+#else
23822+ pax_enter_kernel
23823+#endif
23824+
23825 /* We entered an interrupt context - irqs are off: */
23826 TRACE_IRQS_OFF
23827
23828@@ -735,14 +1235,14 @@ ret_from_intr:
23829
23830 /* Restore saved previous stack */
23831 popq %rsi
23832- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23833- leaq ARGOFFSET-RBP(%rsi), %rsp
23834+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23835+ movq %rsi, %rsp
23836 CFI_DEF_CFA_REGISTER rsp
23837- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23838+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23839
23840 exit_intr:
23841 GET_THREAD_INFO(%rcx)
23842- testl $3,CS-ARGOFFSET(%rsp)
23843+ testb $3,CS-ARGOFFSET(%rsp)
23844 je retint_kernel
23845
23846 /* Interrupt came from user space */
23847@@ -764,14 +1264,16 @@ retint_swapgs: /* return to user-space */
23848 * The iretq could re-enable interrupts:
23849 */
23850 DISABLE_INTERRUPTS(CLBR_ANY)
23851+ pax_exit_kernel_user
23852+retint_swapgs_pax:
23853 TRACE_IRQS_IRETQ
23854
23855 /*
23856 * Try to use SYSRET instead of IRET if we're returning to
23857 * a completely clean 64-bit userspace context.
23858 */
23859- movq (RCX-R11)(%rsp), %rcx
23860- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
23861+ movq (RCX-ARGOFFSET)(%rsp), %rcx
23862+ cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
23863 jne opportunistic_sysret_failed
23864
23865 /*
23866@@ -792,7 +1294,7 @@ retint_swapgs: /* return to user-space */
23867 shr $__VIRTUAL_MASK_SHIFT, %rcx
23868 jnz opportunistic_sysret_failed
23869
23870- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
23871+ cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
23872 jne opportunistic_sysret_failed
23873
23874 movq (R11-ARGOFFSET)(%rsp), %r11
23875@@ -838,6 +1340,27 @@ opportunistic_sysret_failed:
23876
23877 retint_restore_args: /* return to kernel space */
23878 DISABLE_INTERRUPTS(CLBR_ANY)
23879+ pax_exit_kernel
23880+
23881+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23882+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23883+	 * namely calling EFI runtime services with a phys mapping. We start
23884+	 * off with NOPs and patch in the real instrumentation (BTS/OR)
23885+	 * before starting any userland process, even before starting up
23886+	 * the APs.
23887+ */
23888+ .pushsection .altinstr_replacement, "a"
23889+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23890+ 602:
23891+ .popsection
23892+ 603: .fill 602b-601b, 1, 0x90
23893+ .pushsection .altinstructions, "a"
23894+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23895+ .popsection
23896+#else
23897+ pax_force_retaddr (RIP-ARGOFFSET)
23898+#endif
23899+
23900 /*
23901 * The iretq could re-enable interrupts:
23902 */
23903@@ -875,15 +1398,15 @@ native_irq_return_ldt:
23904 SWAPGS
23905 movq PER_CPU_VAR(espfix_waddr),%rdi
23906 movq %rax,(0*8)(%rdi) /* RAX */
23907- movq (2*8)(%rsp),%rax /* RIP */
23908+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23909 movq %rax,(1*8)(%rdi)
23910- movq (3*8)(%rsp),%rax /* CS */
23911+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23912 movq %rax,(2*8)(%rdi)
23913- movq (4*8)(%rsp),%rax /* RFLAGS */
23914+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23915 movq %rax,(3*8)(%rdi)
23916- movq (6*8)(%rsp),%rax /* SS */
23917+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23918 movq %rax,(5*8)(%rdi)
23919- movq (5*8)(%rsp),%rax /* RSP */
23920+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23921 movq %rax,(4*8)(%rdi)
23922 andl $0xffff0000,%eax
23923 popq_cfi %rdi
23924@@ -937,7 +1460,7 @@ ENTRY(retint_kernel)
23925 jmp exit_intr
23926 #endif
23927 CFI_ENDPROC
23928-END(common_interrupt)
23929+ENDPROC(common_interrupt)
23930
23931 /*
23932 * APIC interrupts.
23933@@ -951,7 +1474,7 @@ ENTRY(\sym)
23934 interrupt \do_sym
23935 jmp ret_from_intr
23936 CFI_ENDPROC
23937-END(\sym)
23938+ENDPROC(\sym)
23939 .endm
23940
23941 #ifdef CONFIG_TRACING
23942@@ -1024,7 +1547,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23943 /*
23944 * Exception entry points.
23945 */
23946-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23947+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23948
23949 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23950 ENTRY(\sym)
23951@@ -1080,6 +1603,12 @@ ENTRY(\sym)
23952 .endif
23953
23954 .if \shift_ist != -1
23955+#ifdef CONFIG_SMP
23956+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23957+ lea init_tss(%r13), %r13
23958+#else
23959+ lea init_tss(%rip), %r13
23960+#endif
23961 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23962 .endif
23963
23964@@ -1126,7 +1655,7 @@ ENTRY(\sym)
23965 .endif
23966
23967 CFI_ENDPROC
23968-END(\sym)
23969+ENDPROC(\sym)
23970 .endm
23971
23972 #ifdef CONFIG_TRACING
23973@@ -1167,9 +1696,10 @@ gs_change:
23974 2: mfence /* workaround */
23975 SWAPGS
23976 popfq_cfi
23977+ pax_force_retaddr
23978 ret
23979 CFI_ENDPROC
23980-END(native_load_gs_index)
23981+ENDPROC(native_load_gs_index)
23982
23983 _ASM_EXTABLE(gs_change,bad_gs)
23984 .section .fixup,"ax"
23985@@ -1197,9 +1727,10 @@ ENTRY(do_softirq_own_stack)
23986 CFI_DEF_CFA_REGISTER rsp
23987 CFI_ADJUST_CFA_OFFSET -8
23988 decl PER_CPU_VAR(irq_count)
23989+ pax_force_retaddr
23990 ret
23991 CFI_ENDPROC
23992-END(do_softirq_own_stack)
23993+ENDPROC(do_softirq_own_stack)
23994
23995 #ifdef CONFIG_XEN
23996 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
23997@@ -1240,7 +1771,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23998 #endif
23999 jmp error_exit
24000 CFI_ENDPROC
24001-END(xen_do_hypervisor_callback)
24002+ENDPROC(xen_do_hypervisor_callback)
24003
24004 /*
24005 * Hypervisor uses this for application faults while it executes.
24006@@ -1299,7 +1830,7 @@ ENTRY(xen_failsafe_callback)
24007 SAVE_ALL
24008 jmp error_exit
24009 CFI_ENDPROC
24010-END(xen_failsafe_callback)
24011+ENDPROC(xen_failsafe_callback)
24012
24013 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24014 xen_hvm_callback_vector xen_evtchn_do_upcall
24015@@ -1344,18 +1875,25 @@ ENTRY(paranoid_exit)
24016 DEFAULT_FRAME
24017 DISABLE_INTERRUPTS(CLBR_NONE)
24018 TRACE_IRQS_OFF_DEBUG
24019- testl %ebx,%ebx /* swapgs needed? */
24020+ testl $1,%ebx /* swapgs needed? */
24021 jnz paranoid_restore
24022+#ifdef CONFIG_PAX_MEMORY_UDEREF
24023+ pax_exit_kernel_user
24024+#else
24025+ pax_exit_kernel
24026+#endif
24027 TRACE_IRQS_IRETQ 0
24028 SWAPGS_UNSAFE_STACK
24029 RESTORE_ALL 8
24030 INTERRUPT_RETURN
24031 paranoid_restore:
24032+ pax_exit_kernel
24033 TRACE_IRQS_IRETQ_DEBUG 0
24034 RESTORE_ALL 8
24035+ pax_force_retaddr_bts
24036 INTERRUPT_RETURN
24037 CFI_ENDPROC
24038-END(paranoid_exit)
24039+ENDPROC(paranoid_exit)
24040
24041 /*
24042 * Exception entry point. This expects an error code/orig_rax on the stack.
24043@@ -1382,12 +1920,23 @@ ENTRY(error_entry)
24044 movq %r14, R14+8(%rsp)
24045 movq %r15, R15+8(%rsp)
24046 xorl %ebx,%ebx
24047- testl $3,CS+8(%rsp)
24048+ testb $3,CS+8(%rsp)
24049 je error_kernelspace
24050 error_swapgs:
24051 SWAPGS
24052 error_sti:
24053+#ifdef CONFIG_PAX_MEMORY_UDEREF
24054+ testb $3, CS+8(%rsp)
24055+ jnz 1f
24056+ pax_enter_kernel
24057+ jmp 2f
24058+1: pax_enter_kernel_user
24059+2:
24060+#else
24061+ pax_enter_kernel
24062+#endif
24063 TRACE_IRQS_OFF
24064+ pax_force_retaddr
24065 ret
24066
24067 /*
24068@@ -1422,7 +1971,7 @@ error_bad_iret:
24069 decl %ebx /* Return to usergs */
24070 jmp error_sti
24071 CFI_ENDPROC
24072-END(error_entry)
24073+ENDPROC(error_entry)
24074
24075
24076 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24077@@ -1433,7 +1982,7 @@ ENTRY(error_exit)
24078 DISABLE_INTERRUPTS(CLBR_NONE)
24079 TRACE_IRQS_OFF
24080 GET_THREAD_INFO(%rcx)
24081- testl %eax,%eax
24082+ testl $1,%eax
24083 jne retint_kernel
24084 LOCKDEP_SYS_EXIT_IRQ
24085 movl TI_flags(%rcx),%edx
24086@@ -1442,7 +1991,7 @@ ENTRY(error_exit)
24087 jnz retint_careful
24088 jmp retint_swapgs
24089 CFI_ENDPROC
24090-END(error_exit)
24091+ENDPROC(error_exit)
24092
24093 /*
24094 * Test if a given stack is an NMI stack or not.
24095@@ -1500,9 +2049,11 @@ ENTRY(nmi)
24096 * If %cs was not the kernel segment, then the NMI triggered in user
24097 * space, which means it is definitely not nested.
24098 */
24099+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24100+ je 1f
24101 cmpl $__KERNEL_CS, 16(%rsp)
24102 jne first_nmi
24103-
24104+1:
24105 /*
24106 * Check the special variable on the stack to see if NMIs are
24107 * executing.
24108@@ -1536,8 +2087,7 @@ nested_nmi:
24109
24110 1:
24111 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24112- leaq -1*8(%rsp), %rdx
24113- movq %rdx, %rsp
24114+ subq $8, %rsp
24115 CFI_ADJUST_CFA_OFFSET 1*8
24116 leaq -10*8(%rsp), %rdx
24117 pushq_cfi $__KERNEL_DS
24118@@ -1555,6 +2105,7 @@ nested_nmi_out:
24119 CFI_RESTORE rdx
24120
24121 /* No need to check faults here */
24122+# pax_force_retaddr_bts
24123 INTERRUPT_RETURN
24124
24125 CFI_RESTORE_STATE
24126@@ -1651,13 +2202,13 @@ end_repeat_nmi:
24127 subq $ORIG_RAX-R15, %rsp
24128 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24129 /*
24130- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24131+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24132 	 * as we should not be calling schedule in NMI context,
24133 	 * even with normal interrupts enabled. An NMI should not be
24134 * setting NEED_RESCHED or anything that normal interrupts and
24135 * exceptions might do.
24136 */
24137- call save_paranoid
24138+ call save_paranoid_nmi
24139 DEFAULT_FRAME 0
24140
24141 /*
24142@@ -1667,9 +2218,9 @@ end_repeat_nmi:
24143 * NMI itself takes a page fault, the page fault that was preempted
24144 * will read the information from the NMI page fault and not the
24145 * origin fault. Save it off and restore it if it changes.
24146- * Use the r12 callee-saved register.
24147+ * Use the r13 callee-saved register.
24148 */
24149- movq %cr2, %r12
24150+ movq %cr2, %r13
24151
24152 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24153 movq %rsp,%rdi
24154@@ -1678,29 +2229,34 @@ end_repeat_nmi:
24155
24156 /* Did the NMI take a page fault? Restore cr2 if it did */
24157 movq %cr2, %rcx
24158- cmpq %rcx, %r12
24159+ cmpq %rcx, %r13
24160 je 1f
24161- movq %r12, %cr2
24162+ movq %r13, %cr2
24163 1:
24164
24165- testl %ebx,%ebx /* swapgs needed? */
24166+ testl $1,%ebx /* swapgs needed? */
24167 jnz nmi_restore
24168 nmi_swapgs:
24169 SWAPGS_UNSAFE_STACK
24170 nmi_restore:
24171+ pax_exit_kernel_nmi
24172 /* Pop the extra iret frame at once */
24173 RESTORE_ALL 6*8
24174+ testb $3, 8(%rsp)
24175+ jnz 1f
24176+ pax_force_retaddr_bts
24177+1:
24178
24179 /* Clear the NMI executing stack variable */
24180 movq $0, 5*8(%rsp)
24181 jmp irq_return
24182 CFI_ENDPROC
24183-END(nmi)
24184+ENDPROC(nmi)
24185
24186 ENTRY(ignore_sysret)
24187 CFI_STARTPROC
24188 mov $-ENOSYS,%eax
24189 sysret
24190 CFI_ENDPROC
24191-END(ignore_sysret)
24192+ENDPROC(ignore_sysret)
24193
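The pax_force_retaddr / pax_force_retaddr_bts sprinkled before nearly every ret in this file is KERNEXEC's return-address pinning. The bts variant is roughly equivalent to the sketch below; the alternative "or" plugin method uses a per-task mask held in a fixed register instead. A hedged model, not the plugin's actual output:

    #include <stdint.h>

    static inline uint64_t pax_force_retaddr_model(uint64_t retaddr)
    {
        /* bts $63,(%rsp): force the saved RIP into the kernel half of
         * the address space, so a corrupted userland-pointing return
         * address faults instead of executing */
        return retaddr | (1ULL << 63);
    }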
24194diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24195index f5d0730..5bce89c 100644
24196--- a/arch/x86/kernel/espfix_64.c
24197+++ b/arch/x86/kernel/espfix_64.c
24198@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24199 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24200 static void *espfix_pages[ESPFIX_MAX_PAGES];
24201
24202-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24203- __aligned(PAGE_SIZE);
24204+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24205
24206 static unsigned int page_random, slot_random;
24207
24208@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24209 void __init init_espfix_bsp(void)
24210 {
24211 pgd_t *pgd_p;
24212+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24213
24214 /* Install the espfix pud into the kernel page directory */
24215- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24216+ pgd_p = &init_level4_pgt[index];
24217 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24218
24219+#ifdef CONFIG_PAX_PER_CPU_PGD
24220+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24221+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24222+#endif
24223+
24224 /* Randomize the locations */
24225 init_espfix_random();
24226
24227@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24228 set_pte(&pte_p[n*PTE_STRIDE], pte);
24229
24230 /* Job is done for this CPU and any CPU which shares this page */
24231- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24232+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24233
24234 unlock_done:
24235 mutex_unlock(&espfix_init_mutex);
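For reference, clone_pgd_range() as used in the hunk above simply copies PGD slots between page tables; the real definition lives in arch/x86/include/asm/pgtable.h and reduces to a memcpy. A minimal stand-in:

    #include <string.h>
    #include <stdint.h>

    typedef struct { uint64_t pgd; } pgd_t;

    static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
    {
        memcpy(dst, src, count * sizeof(pgd_t));
    }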
24236diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24237index 8b7b0a5..2395f29 100644
24238--- a/arch/x86/kernel/ftrace.c
24239+++ b/arch/x86/kernel/ftrace.c
24240@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24241 * kernel identity mapping to modify code.
24242 */
24243 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24244- ip = (unsigned long)__va(__pa_symbol(ip));
24245+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24246
24247 return ip;
24248 }
24249@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24250 {
24251 unsigned char replaced[MCOUNT_INSN_SIZE];
24252
24253+ ip = ktla_ktva(ip);
24254+
24255 /*
24256 * Note: Due to modules and __init, code can
24257 * disappear and change, we need to protect against faulting
24258@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24259 unsigned char old[MCOUNT_INSN_SIZE];
24260 int ret;
24261
24262- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24263+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24264
24265 ftrace_update_func = ip;
24266 /* Make sure the breakpoints see the ftrace_update_func update */
24267@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24268 unsigned char replaced[MCOUNT_INSN_SIZE];
24269 unsigned char brk = BREAKPOINT_INSTRUCTION;
24270
24271- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24272+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24273 return -EFAULT;
24274
24275 /* Make sure it is what we expect it to be */
24276diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24277index c4f8d46..2d63ae2 100644
24278--- a/arch/x86/kernel/head64.c
24279+++ b/arch/x86/kernel/head64.c
24280@@ -68,12 +68,12 @@ again:
24281 pgd = *pgd_p;
24282
24283 /*
24284- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24285- * critical -- __PAGE_OFFSET would point us back into the dynamic
24286+ * The use of __early_va rather than __va here is critical:
24287+ * __va would point us back into the dynamic
24288 * range and we might end up looping forever...
24289 */
24290 if (pgd)
24291- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24292+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24293 else {
24294 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24295 reset_early_page_tables();
24296@@ -83,13 +83,13 @@ again:
24297 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24298 for (i = 0; i < PTRS_PER_PUD; i++)
24299 pud_p[i] = 0;
24300- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24301+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24302 }
24303 pud_p += pud_index(address);
24304 pud = *pud_p;
24305
24306 if (pud)
24307- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24308+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24309 else {
24310 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24311 reset_early_page_tables();
24312@@ -99,7 +99,7 @@ again:
24313 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24314 for (i = 0; i < PTRS_PER_PMD; i++)
24315 pmd_p[i] = 0;
24316- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24317+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24318 }
24319 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24320 pmd_p[pmd_index(address)] = pmd;
24321@@ -180,7 +180,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24322 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24323 early_printk("Kernel alive\n");
24324
24325- clear_page(init_level4_pgt);
24326 /* set init_level4_pgt kernel high mapping*/
24327 init_level4_pgt[511] = early_level4_pgt[511];
24328
24329diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24330index f36bd42..0ab4474 100644
24331--- a/arch/x86/kernel/head_32.S
24332+++ b/arch/x86/kernel/head_32.S
24333@@ -26,6 +26,12 @@
24334 /* Physical address */
24335 #define pa(X) ((X) - __PAGE_OFFSET)
24336
24337+#ifdef CONFIG_PAX_KERNEXEC
24338+#define ta(X) (X)
24339+#else
24340+#define ta(X) ((X) - __PAGE_OFFSET)
24341+#endif
24342+
24343 /*
24344 * References to members of the new_cpu_data structure.
24345 */
24346@@ -55,11 +61,7 @@
24347  * and smaller than max_low_pfn, otherwise we will waste some page table entries
24348 */
24349
24350-#if PTRS_PER_PMD > 1
24351-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24352-#else
24353-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24354-#endif
24355+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24356
24357 /* Number of possible pages in the lowmem region */
24358 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24359@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24360 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24361
24362 /*
24363+ * Real beginning of normal "text" segment
24364+ */
24365+ENTRY(stext)
24366+ENTRY(_stext)
24367+
24368+/*
24369 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24370 * %esi points to the real-mode code as a 32-bit pointer.
24371 * CS and DS must be 4 GB flat segments, but we don't depend on
24372@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24373 * can.
24374 */
24375 __HEAD
24376+
24377+#ifdef CONFIG_PAX_KERNEXEC
24378+ jmp startup_32
24379+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24380+.fill PAGE_SIZE-5,1,0xcc
24381+#endif
24382+
24383 ENTRY(startup_32)
24384 movl pa(stack_start),%ecx
24385
24386@@ -106,6 +121,59 @@ ENTRY(startup_32)
24387 2:
24388 leal -__PAGE_OFFSET(%ecx),%esp
24389
24390+#ifdef CONFIG_SMP
24391+ movl $pa(cpu_gdt_table),%edi
24392+ movl $__per_cpu_load,%eax
24393+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24394+ rorl $16,%eax
24395+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24396+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24397+ movl $__per_cpu_end - 1,%eax
24398+ subl $__per_cpu_start,%eax
24399+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24400+#endif
24401+
24402+#ifdef CONFIG_PAX_MEMORY_UDEREF
24403+ movl $NR_CPUS,%ecx
24404+ movl $pa(cpu_gdt_table),%edi
24405+1:
24406+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24407+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24408+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24409+ addl $PAGE_SIZE_asm,%edi
24410+ loop 1b
24411+#endif
24412+
24413+#ifdef CONFIG_PAX_KERNEXEC
24414+ movl $pa(boot_gdt),%edi
24415+ movl $__LOAD_PHYSICAL_ADDR,%eax
24416+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24417+ rorl $16,%eax
24418+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24419+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24420+ rorl $16,%eax
24421+
24422+ ljmp $(__BOOT_CS),$1f
24423+1:
24424+
24425+ movl $NR_CPUS,%ecx
24426+ movl $pa(cpu_gdt_table),%edi
24427+ addl $__PAGE_OFFSET,%eax
24428+1:
24429+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24430+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24431+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24432+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24433+ rorl $16,%eax
24434+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24435+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24436+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24437+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24438+ rorl $16,%eax
24439+ addl $PAGE_SIZE_asm,%edi
24440+ loop 1b
24441+#endif
24442+
24443 /*
24444 * Clear BSS first so that there are no surprises...
24445 */
24446@@ -201,8 +269,11 @@ ENTRY(startup_32)
24447 movl %eax, pa(max_pfn_mapped)
24448
24449 /* Do early initialization of the fixmap area */
24450- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24451- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24452+#ifdef CONFIG_COMPAT_VDSO
24453+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24454+#else
24455+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24456+#endif
24457 #else /* Not PAE */
24458
24459 page_pde_offset = (__PAGE_OFFSET >> 20);
24460@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24461 movl %eax, pa(max_pfn_mapped)
24462
24463 /* Do early initialization of the fixmap area */
24464- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24465- movl %eax,pa(initial_page_table+0xffc)
24466+#ifdef CONFIG_COMPAT_VDSO
24467+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24468+#else
24469+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24470+#endif
24471 #endif
24472
24473 #ifdef CONFIG_PARAVIRT
24474@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24475 cmpl $num_subarch_entries, %eax
24476 jae bad_subarch
24477
24478- movl pa(subarch_entries)(,%eax,4), %eax
24479- subl $__PAGE_OFFSET, %eax
24480- jmp *%eax
24481+ jmp *pa(subarch_entries)(,%eax,4)
24482
24483 bad_subarch:
24484 WEAK(lguest_entry)
24485@@ -261,10 +333,10 @@ WEAK(xen_entry)
24486 __INITDATA
24487
24488 subarch_entries:
24489- .long default_entry /* normal x86/PC */
24490- .long lguest_entry /* lguest hypervisor */
24491- .long xen_entry /* Xen hypervisor */
24492- .long default_entry /* Moorestown MID */
24493+ .long ta(default_entry) /* normal x86/PC */
24494+ .long ta(lguest_entry) /* lguest hypervisor */
24495+ .long ta(xen_entry) /* Xen hypervisor */
24496+ .long ta(default_entry) /* Moorestown MID */
24497 num_subarch_entries = (. - subarch_entries) / 4
24498 .previous
24499 #else
24500@@ -354,6 +426,7 @@ default_entry:
24501 movl pa(mmu_cr4_features),%eax
24502 movl %eax,%cr4
24503
24504+#ifdef CONFIG_X86_PAE
24505 testb $X86_CR4_PAE, %al # check if PAE is enabled
24506 jz enable_paging
24507
24508@@ -382,6 +455,9 @@ default_entry:
24509 /* Make changes effective */
24510 wrmsr
24511
24512+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24513+#endif
24514+
24515 enable_paging:
24516
24517 /*
24518@@ -449,14 +525,20 @@ is486:
24519 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24520 movl %eax,%ss # after changing gdt.
24521
24522- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24523+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24524 movl %eax,%ds
24525 movl %eax,%es
24526
24527 movl $(__KERNEL_PERCPU), %eax
24528 movl %eax,%fs # set this cpu's percpu
24529
24530+#ifdef CONFIG_CC_STACKPROTECTOR
24531 movl $(__KERNEL_STACK_CANARY),%eax
24532+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24533+ movl $(__USER_DS),%eax
24534+#else
24535+ xorl %eax,%eax
24536+#endif
24537 movl %eax,%gs
24538
24539 xorl %eax,%eax # Clear LDT
24540@@ -512,8 +594,11 @@ setup_once:
24541 * relocation. Manually set base address in stack canary
24542 * segment descriptor.
24543 */
24544- movl $gdt_page,%eax
24545+ movl $cpu_gdt_table,%eax
24546 movl $stack_canary,%ecx
24547+#ifdef CONFIG_SMP
24548+ addl $__per_cpu_load,%ecx
24549+#endif
24550 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24551 shrl $16, %ecx
24552 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24553@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24554 cmpl $2,(%esp) # X86_TRAP_NMI
24555 je is_nmi # Ignore NMI
24556
24557- cmpl $2,%ss:early_recursion_flag
24558+ cmpl $1,%ss:early_recursion_flag
24559 je hlt_loop
24560 incl %ss:early_recursion_flag
24561
24562@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24563 pushl (20+6*4)(%esp) /* trapno */
24564 pushl $fault_msg
24565 call printk
24566-#endif
24567 call dump_stack
24568+#endif
24569 hlt_loop:
24570 hlt
24571 jmp hlt_loop
24572@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24573 /* This is the default interrupt "handler" :-) */
24574 ALIGN
24575 ignore_int:
24576- cld
24577 #ifdef CONFIG_PRINTK
24578+ cmpl $2,%ss:early_recursion_flag
24579+ je hlt_loop
24580+ incl %ss:early_recursion_flag
24581+ cld
24582 pushl %eax
24583 pushl %ecx
24584 pushl %edx
24585@@ -617,9 +705,6 @@ ignore_int:
24586 movl $(__KERNEL_DS),%eax
24587 movl %eax,%ds
24588 movl %eax,%es
24589- cmpl $2,early_recursion_flag
24590- je hlt_loop
24591- incl early_recursion_flag
24592 pushl 16(%esp)
24593 pushl 24(%esp)
24594 pushl 32(%esp)
24595@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24596 /*
24597 * BSS section
24598 */
24599-__PAGE_ALIGNED_BSS
24600- .align PAGE_SIZE
24601 #ifdef CONFIG_X86_PAE
24602+.section .initial_pg_pmd,"a",@progbits
24603 initial_pg_pmd:
24604 .fill 1024*KPMDS,4,0
24605 #else
24606+.section .initial_page_table,"a",@progbits
24607 ENTRY(initial_page_table)
24608 .fill 1024,4,0
24609 #endif
24610+.section .initial_pg_fixmap,"a",@progbits
24611 initial_pg_fixmap:
24612 .fill 1024,4,0
24613+.section .empty_zero_page,"a",@progbits
24614 ENTRY(empty_zero_page)
24615 .fill 4096,1,0
24616+.section .swapper_pg_dir,"a",@progbits
24617 ENTRY(swapper_pg_dir)
24618+#ifdef CONFIG_X86_PAE
24619+ .fill 4,8,0
24620+#else
24621 .fill 1024,4,0
24622+#endif
24623
24624 /*
24625 * This starts the data section.
24626 */
24627 #ifdef CONFIG_X86_PAE
24628-__PAGE_ALIGNED_DATA
24629- /* Page-aligned for the benefit of paravirt? */
24630- .align PAGE_SIZE
24631+.section .initial_page_table,"a",@progbits
24632 ENTRY(initial_page_table)
24633 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24634 # if KPMDS == 3
24635@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24636 # error "Kernel PMDs should be 1, 2 or 3"
24637 # endif
24638 .align PAGE_SIZE /* needs to be page-sized too */
24639+
24640+#ifdef CONFIG_PAX_PER_CPU_PGD
24641+ENTRY(cpu_pgd)
24642+ .rept 2*NR_CPUS
24643+ .fill 4,8,0
24644+ .endr
24645+#endif
24646+
24647 #endif
24648
24649 .data
24650 .balign 4
24651 ENTRY(stack_start)
24652- .long init_thread_union+THREAD_SIZE
24653+ .long init_thread_union+THREAD_SIZE-8
24654
24655 __INITRODATA
24656 int_msg:
24657@@ -727,7 +825,7 @@ fault_msg:
24658 * segment size, and 32-bit linear address value:
24659 */
24660
24661- .data
24662+.section .rodata,"a",@progbits
24663 .globl boot_gdt_descr
24664 .globl idt_descr
24665
24666@@ -736,7 +834,7 @@ fault_msg:
24667 .word 0 # 32 bit align gdt_desc.address
24668 boot_gdt_descr:
24669 .word __BOOT_DS+7
24670- .long boot_gdt - __PAGE_OFFSET
24671+ .long pa(boot_gdt)
24672
24673 .word 0 # 32-bit align idt_desc.address
24674 idt_descr:
24675@@ -747,7 +845,7 @@ idt_descr:
24676 .word 0 # 32 bit align gdt_desc.address
24677 ENTRY(early_gdt_descr)
24678 .word GDT_ENTRIES*8-1
24679- .long gdt_page /* Overwritten for secondary CPUs */
24680+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24681
24682 /*
24683 * The boot_gdt must mirror the equivalent in setup.S and is
24684@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24685 .align L1_CACHE_BYTES
24686 ENTRY(boot_gdt)
24687 .fill GDT_ENTRY_BOOT_CS,8,0
24688- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24689- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24690+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24691+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24692+
24693+ .align PAGE_SIZE_asm
24694+ENTRY(cpu_gdt_table)
24695+ .rept NR_CPUS
24696+ .quad 0x0000000000000000 /* NULL descriptor */
24697+ .quad 0x0000000000000000 /* 0x0b reserved */
24698+ .quad 0x0000000000000000 /* 0x13 reserved */
24699+ .quad 0x0000000000000000 /* 0x1b reserved */
24700+
24701+#ifdef CONFIG_PAX_KERNEXEC
24702+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24703+#else
24704+ .quad 0x0000000000000000 /* 0x20 unused */
24705+#endif
24706+
24707+ .quad 0x0000000000000000 /* 0x28 unused */
24708+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24709+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24710+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24711+ .quad 0x0000000000000000 /* 0x4b reserved */
24712+ .quad 0x0000000000000000 /* 0x53 reserved */
24713+ .quad 0x0000000000000000 /* 0x5b reserved */
24714+
24715+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24716+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24717+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24718+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24719+
24720+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24721+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24722+
24723+ /*
24724+ * Segments used for calling PnP BIOS have byte granularity.
24725+ * The code segments and data segments have fixed 64k limits,
24726+ * the transfer segment sizes are set at run time.
24727+ */
24728+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24729+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24730+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24731+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24732+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24733+
24734+ /*
24735+ * The APM segments have byte granularity and their bases
24736+ * are set at run time. All have 64k limits.
24737+ */
24738+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24739+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24740+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24741+
24742+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24743+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24744+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24745+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24746+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24747+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24748+
24749+ /* Be sure this is zeroed to avoid false validations in Xen */
24750+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24751+ .endr
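The UDEREF loop in the head_32.S hunk above patches the high dword of the KERNEL_DS / USER_CS / USER_DS descriptors so their segment limit stops at __PAGE_OFFSET, which is what makes userland-pointer dereferences from kernel mode fault on i386. A quick reconstruction of that constant, assuming the usual 3G/1G split (the mask rounds the limit to 256MiB granularity):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t page_offset = 0xc0000000;  /* CONFIG_PAGE_OFFSET = 3G */
        uint32_t limit_hi = ((page_offset - 1) & 0xf0000000) >> 12;

        /* high dword of the KERNEL_DS descriptor: 0x00cb9700.  With
         * limit[15:0] = 0xffff from the low dword and G=1, the limit
         * is 0xbffff pages, ending just below __PAGE_OFFSET. */
        printf("%#x\n", limit_hi | 0x00c09700);
        return 0;
    }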
24752diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24753index 6fd514d9..c4221b8 100644
24754--- a/arch/x86/kernel/head_64.S
24755+++ b/arch/x86/kernel/head_64.S
24756@@ -20,6 +20,8 @@
24757 #include <asm/processor-flags.h>
24758 #include <asm/percpu.h>
24759 #include <asm/nops.h>
24760+#include <asm/cpufeature.h>
24761+#include <asm/alternative-asm.h>
24762
24763 #ifdef CONFIG_PARAVIRT
24764 #include <asm/asm-offsets.h>
24765@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24766 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24767 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24768 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24769+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24770+L3_VMALLOC_START = pud_index(VMALLOC_START)
24771+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24772+L3_VMALLOC_END = pud_index(VMALLOC_END)
24773+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24774+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24775
24776 .text
24777 __HEAD
24778@@ -89,11 +97,24 @@ startup_64:
24779 * Fixup the physical addresses in the page table
24780 */
24781 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24782+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24783+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24784+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24785+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24786+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24787
24788- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24789- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24790+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24791+#ifndef CONFIG_XEN
24792+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24793+#endif
24794+
24795+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24796+
24797+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24798+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24799
24800 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24801+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24802
24803 /*
24804 * Set up the identity mapping for the switchover. These
24805@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24806 * after the boot processor executes this code.
24807 */
24808
24809+ orq $-1, %rbp
24810 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24811 1:
24812
24813- /* Enable PAE mode and PGE */
24814- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24815+ /* Enable PAE mode and PSE/PGE */
24816+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24817 movq %rcx, %cr4
24818
24819 /* Setup early boot stage 4 level pagetables. */
24820@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24821 movl $MSR_EFER, %ecx
24822 rdmsr
24823 btsl $_EFER_SCE, %eax /* Enable System Call */
24824- btl $20,%edi /* No Execute supported? */
24825+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24826 jnc 1f
24827 btsl $_EFER_NX, %eax
24828+ cmpq $-1, %rbp
24829+ je 1f
24830 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24831+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24832+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24833+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24834+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24835+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24836+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24837+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24838 1: wrmsr /* Make changes effective */
24839
24840 /* Setup cr0 */
24841@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24842 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24843 * address given in m16:64.
24844 */
24845+ pax_set_fptr_mask
24846 movq initial_code(%rip),%rax
24847 pushq $0 # fake return address to stop unwinder
24848 pushq $__KERNEL_CS # set correct cs
24849@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24850 .quad INIT_PER_CPU_VAR(irq_stack_union)
24851
24852 GLOBAL(stack_start)
24853- .quad init_thread_union+THREAD_SIZE-8
24854+ .quad init_thread_union+THREAD_SIZE-16
24855 .word 0
24856 __FINITDATA
24857
24858@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24859 call dump_stack
24860 #ifdef CONFIG_KALLSYMS
24861 leaq early_idt_ripmsg(%rip),%rdi
24862- movq 40(%rsp),%rsi # %rip again
24863+ movq 88(%rsp),%rsi # %rip again
24864 call __print_symbol
24865 #endif
24866 #endif /* EARLY_PRINTK */
24867@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
24868 early_recursion_flag:
24869 .long 0
24870
24871+ .section .rodata,"a",@progbits
24872 #ifdef CONFIG_EARLY_PRINTK
24873 early_idt_msg:
24874 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24875@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
24876 NEXT_PAGE(early_dynamic_pgts)
24877 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24878
24879- .data
24880+ .section .rodata,"a",@progbits
24881
24882-#ifndef CONFIG_XEN
24883 NEXT_PAGE(init_level4_pgt)
24884- .fill 512,8,0
24885-#else
24886-NEXT_PAGE(init_level4_pgt)
24887- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24888 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24889 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24890+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24891+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24892+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24893+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24894+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24895+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24896 .org init_level4_pgt + L4_START_KERNEL*8, 0
24897 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24898 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24899
24900+#ifdef CONFIG_PAX_PER_CPU_PGD
24901+NEXT_PAGE(cpu_pgd)
24902+ .rept 2*NR_CPUS
24903+ .fill 512,8,0
24904+ .endr
24905+#endif
24906+
24907 NEXT_PAGE(level3_ident_pgt)
24908 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24909+#ifdef CONFIG_XEN
24910 .fill 511, 8, 0
24911+#else
24912+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24913+ .fill 510,8,0
24914+#endif
24915+
24916+NEXT_PAGE(level3_vmalloc_start_pgt)
24917+ .fill 512,8,0
24918+
24919+NEXT_PAGE(level3_vmalloc_end_pgt)
24920+ .fill 512,8,0
24921+
24922+NEXT_PAGE(level3_vmemmap_pgt)
24923+ .fill L3_VMEMMAP_START,8,0
24924+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24925+
24926 NEXT_PAGE(level2_ident_pgt)
24927- /* Since I easily can, map the first 1G.
24928+ /* Since I easily can, map the first 2G.
24929 * Don't set NX because code runs from these pages.
24930 */
24931- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24932-#endif
24933+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24934
24935 NEXT_PAGE(level3_kernel_pgt)
24936 .fill L3_START_KERNEL,8,0
24937@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
24938 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24939 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24940
24941+NEXT_PAGE(level2_vmemmap_pgt)
24942+ .fill 512,8,0
24943+
24944 NEXT_PAGE(level2_kernel_pgt)
24945 /*
24946 * 512 MB kernel mapping. We spend a full page on this pagetable
24947@@ -494,21 +553,57 @@ NEXT_PAGE(level2_kernel_pgt)
24948 NEXT_PAGE(level2_fixmap_pgt)
24949 .fill 506,8,0
24950 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24951- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24952- .fill 5,8,0
24953+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24954+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24955+ .fill 4,8,0
24956
24957 NEXT_PAGE(level1_fixmap_pgt)
24958 .fill 512,8,0
24959
24960+NEXT_PAGE(level1_vsyscall_pgt)
24961+ .fill 512,8,0
24962+
24963 #undef PMDS
24964
24965- .data
24966+ .align PAGE_SIZE
24967+ENTRY(cpu_gdt_table)
24968+ .rept NR_CPUS
24969+ .quad 0x0000000000000000 /* NULL descriptor */
24970+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24971+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24972+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24973+ .quad 0x00cffb000000ffff /* __USER32_CS */
24974+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24975+ .quad 0x00affb000000ffff /* __USER_CS */
24976+
24977+#ifdef CONFIG_PAX_KERNEXEC
24978+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24979+#else
24980+ .quad 0x0 /* unused */
24981+#endif
24982+
24983+ .quad 0,0 /* TSS */
24984+ .quad 0,0 /* LDT */
24985+ .quad 0,0,0 /* three TLS descriptors */
24986+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24987+ /* asm/segment.h:GDT_ENTRIES must match this */
24988+
24989+#ifdef CONFIG_PAX_MEMORY_UDEREF
24990+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24991+#else
24992+ .quad 0x0 /* unused */
24993+#endif
24994+
24995+ /* zero the remaining page */
24996+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24997+ .endr
24998+
24999 .align 16
25000 .globl early_gdt_descr
25001 early_gdt_descr:
25002 .word GDT_ENTRIES*8-1
25003 early_gdt_descr_base:
25004- .quad INIT_PER_CPU_VAR(gdt_page)
25005+ .quad cpu_gdt_table
25006
25007 ENTRY(phys_base)
25008 /* This must match the first entry in level2_kernel_pgt */
25009@@ -532,8 +627,8 @@ NEXT_PAGE(kasan_zero_pud)
25010
25011
25012 #include "../../x86/xen/xen-head.S"
25013-
25014- __PAGE_ALIGNED_BSS
25015+
25016+ .section .rodata,"a",@progbits
25017 NEXT_PAGE(empty_zero_page)
25018 .skip PAGE_SIZE
25019
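The cpu_gdt_table entries added in the head_64.S hunk above are raw 8-byte segment descriptors. A small decoder for the standard x86 descriptor layout, applied to two of the quads from the table (illustrative; run it to see why 0x00af9b... is a 64-bit ring-0 code segment and 0x00cf93... a flat 4GB data segment):

    #include <stdint.h>
    #include <stdio.h>

    static void decode_desc(uint64_t d)
    {
        uint32_t base  = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
        uint32_t limit = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
        uint8_t  type  = (d >> 40) & 0xf;
        int g  = (d >> 55) & 1;   /* granularity: limit in 4K pages */
        int l  = (d >> 53) & 1;   /* long mode (64-bit code)        */
        int db = (d >> 54) & 1;   /* default operand size           */

        printf("base=%#x limit=%#x type=%#x G=%d L=%d D/B=%d\n",
               base, limit, type, g, l, db);
    }

    int main(void)
    {
        decode_desc(0x00af9b000000ffffULL); /* __KERNEL_CS: 64-bit code */
        decode_desc(0x00cf93000000ffffULL); /* __KERNEL_DS: 4GB data    */
        return 0;
    }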
25020diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25021index 05fd74f..c3548b1 100644
25022--- a/arch/x86/kernel/i386_ksyms_32.c
25023+++ b/arch/x86/kernel/i386_ksyms_32.c
25024@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25025 EXPORT_SYMBOL(cmpxchg8b_emu);
25026 #endif
25027
25028+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25029+
25030 /* Networking helper routines. */
25031 EXPORT_SYMBOL(csum_partial_copy_generic);
25032+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25033+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25034
25035 EXPORT_SYMBOL(__get_user_1);
25036 EXPORT_SYMBOL(__get_user_2);
25037@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25038 EXPORT_SYMBOL(___preempt_schedule_context);
25039 #endif
25040 #endif
25041+
25042+#ifdef CONFIG_PAX_KERNEXEC
25043+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25044+#endif
25045+
25046+#ifdef CONFIG_PAX_PER_CPU_PGD
25047+EXPORT_SYMBOL(cpu_pgd);
25048+#endif
25049diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25050index d5651fc..29c740d 100644
25051--- a/arch/x86/kernel/i387.c
25052+++ b/arch/x86/kernel/i387.c
25053@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25054 static inline bool interrupted_user_mode(void)
25055 {
25056 struct pt_regs *regs = get_irq_regs();
25057- return regs && user_mode_vm(regs);
25058+ return regs && user_mode(regs);
25059 }
25060
25061 /*
25062diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25063index e7cc537..67d7372 100644
25064--- a/arch/x86/kernel/i8259.c
25065+++ b/arch/x86/kernel/i8259.c
25066@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25067 static void make_8259A_irq(unsigned int irq)
25068 {
25069 disable_irq_nosync(irq);
25070- io_apic_irqs &= ~(1<<irq);
25071+ io_apic_irqs &= ~(1UL<<irq);
25072 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25073 enable_irq(irq);
25074 }
25075@@ -208,7 +208,7 @@ spurious_8259A_irq:
25076 "spurious 8259A interrupt: IRQ%d.\n", irq);
25077 spurious_irq_mask |= irqmask;
25078 }
25079- atomic_inc(&irq_err_count);
25080+ atomic_inc_unchecked(&irq_err_count);
25081 /*
25082 * Theoretically we do not have to handle this IRQ,
25083 * but in Linux this does not cause problems and is
25084@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25085 /* (slave's support for AEOI in flat mode is to be investigated) */
25086 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25087
25088+ pax_open_kernel();
25089 if (auto_eoi)
25090 /*
25091 * In AEOI mode we just have to mask the interrupt
25092 * when acking.
25093 */
25094- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25095+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25096 else
25097- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25098+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25099+ pax_close_kernel();
25100
25101 udelay(100); /* wait for 8259A to initialize */
25102
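The i8259 hunk above funnels a write to a now read-only irq_chip through pax_open_kernel()/pax_close_kernel(), which in the real patch toggle CR0.WP (and the KERNEXEC code segment) around the store. A conceptual C model of the idiom, with the PaX primitives stubbed out as assumptions:

    struct irq_chip { void (*irq_mask_ack)(unsigned int); };

    /* stand-ins: the real primitives flip CR0.WP around the write */
    static void pax_open_kernel(void)  { }
    static void pax_close_kernel(void) { }

    static void set_mask_ack(struct irq_chip *chip,
                             void (*fn)(unsigned int))
    {
        pax_open_kernel();
        /* write through a void ** to bypass the const qualifier,
         * exactly as the patch does */
        *(void **)&chip->irq_mask_ack = (void *)fn;
        pax_close_kernel();
    }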
25103diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25104index a979b5b..1d6db75 100644
25105--- a/arch/x86/kernel/io_delay.c
25106+++ b/arch/x86/kernel/io_delay.c
25107@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25108 * Quirk table for systems that misbehave (lock up, etc.) if port
25109 * 0x80 is used:
25110 */
25111-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25112+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25113 {
25114 .callback = dmi_io_delay_0xed_port,
25115 .ident = "Compaq Presario V6000",
25116diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25117index 4ddaf66..49d5c18 100644
25118--- a/arch/x86/kernel/ioport.c
25119+++ b/arch/x86/kernel/ioport.c
25120@@ -6,6 +6,7 @@
25121 #include <linux/sched.h>
25122 #include <linux/kernel.h>
25123 #include <linux/capability.h>
25124+#include <linux/security.h>
25125 #include <linux/errno.h>
25126 #include <linux/types.h>
25127 #include <linux/ioport.h>
25128@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25129 return -EINVAL;
25130 if (turn_on && !capable(CAP_SYS_RAWIO))
25131 return -EPERM;
25132+#ifdef CONFIG_GRKERNSEC_IO
25133+ if (turn_on && grsec_disable_privio) {
25134+ gr_handle_ioperm();
25135+ return -ENODEV;
25136+ }
25137+#endif
25138
25139 /*
25140 * If it's the first ioperm() call in this thread's lifetime, set the
25141@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25142 * because the ->io_bitmap_max value must match the bitmap
25143 * contents:
25144 */
25145- tss = &per_cpu(init_tss, get_cpu());
25146+ tss = init_tss + get_cpu();
25147
25148 if (turn_on)
25149 bitmap_clear(t->io_bitmap_ptr, from, num);
25150@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25151 if (level > old) {
25152 if (!capable(CAP_SYS_RAWIO))
25153 return -EPERM;
25154+#ifdef CONFIG_GRKERNSEC_IO
25155+ if (grsec_disable_privio) {
25156+ gr_handle_iopl();
25157+ return -ENODEV;
25158+ }
25159+#endif
25160 }
25161 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25162 t->iopl = level << 12;
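
The two ioperm/iopl hunks above layer a grsecurity policy gate behind the usual CAP_SYS_RAWIO test: even a capable task is refused raw port access when grsec_disable_privio is set, and the denial is logged via gr_handle_ioperm()/gr_handle_iopl(). A userspace-shaped sketch of that check ordering; privio_disabled and audit_denial() are illustrative stand-ins, not the kernel's names:

/*
 * Sketch of the layered permission check: the capability test runs
 * first, then a policy knob can refuse raw I/O even to privileged
 * callers.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool privio_disabled = true;    /* models grsec_disable_privio */

static void audit_denial(const char *what)
{
    fprintf(stderr, "denied raw I/O request: %s\n", what);
}

static int request_ioperm(bool has_rawio_cap)
{
    if (!has_rawio_cap)
        return -EPERM;        /* the upstream capability check */
    if (privio_disabled) {    /* the added hardening gate */
        audit_denial("ioperm");
        return -ENODEV;       /* matches the patch's errno choice */
    }
    return 0;
}

int main(void)
{
    printf("capable, privio disabled -> %d\n", request_ioperm(true));
    privio_disabled = false;
    printf("capable, privio enabled  -> %d\n", request_ioperm(true));
    return 0;
}
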
25163diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25164index 67b1cbe..6ad4cbc 100644
25165--- a/arch/x86/kernel/irq.c
25166+++ b/arch/x86/kernel/irq.c
25167@@ -22,7 +22,7 @@
25168 #define CREATE_TRACE_POINTS
25169 #include <asm/trace/irq_vectors.h>
25170
25171-atomic_t irq_err_count;
25172+atomic_unchecked_t irq_err_count;
25173
25174 /* Function pointer for generic interrupt vector handling */
25175 void (*x86_platform_ipi_callback)(void) = NULL;
25176@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25177 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25178 seq_puts(p, " Hypervisor callback interrupts\n");
25179 #endif
25180- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25181+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25182 #if defined(CONFIG_X86_IO_APIC)
25183- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25184+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25185 #endif
25186 return 0;
25187 }
25188@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25189
25190 u64 arch_irq_stat(void)
25191 {
25192- u64 sum = atomic_read(&irq_err_count);
25193+ u64 sum = atomic_read_unchecked(&irq_err_count);
25194 return sum;
25195 }
25196
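
atomic_unchecked_t is the PaX/grsecurity escape hatch from REFCOUNT overflow detection: ordinary atomic_t increments trap on signed overflow (usually a refcount bug), while statistics counters such as irq_err_count are allowed to wrap silently. A toy userspace model of the split, assuming nothing about the real implementation beyond "same storage, overflow permitted":

/*
 * Toy model of the checked/unchecked atomic split; the check-then-add
 * below only models the overflow trap, it is not itself atomic.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_model_t;
typedef struct { volatile int counter; } atomic_unchecked_model_t;

static void atomic_inc_model(atomic_model_t *v)
{
    if (v->counter == INT_MAX) {    /* models the overflow trap */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    __sync_fetch_and_add(&v->counter, 1);
}

static void atomic_inc_unchecked_model(atomic_unchecked_model_t *v)
{
    __sync_fetch_and_add(&v->counter, 1);    /* wrapping is acceptable */
}

int main(void)
{
    atomic_unchecked_model_t errs = { INT_MAX };
    atomic_inc_unchecked_model(&errs);       /* wraps, no trap */
    printf("stat counter wrapped to %d\n", errs.counter);

    atomic_model_t ref = { INT_MAX };
    atomic_inc_model(&ref);                  /* aborts: likely a bug */
    return 0;
}
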
25197diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25198index 28d28f5..e6cc9ae 100644
25199--- a/arch/x86/kernel/irq_32.c
25200+++ b/arch/x86/kernel/irq_32.c
25201@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25202
25203 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25204
25205+extern void gr_handle_kernel_exploit(void);
25206+
25207 int sysctl_panic_on_stackoverflow __read_mostly;
25208
25209 /* Debugging check for stack overflow: is there less than 1KB free? */
25210@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25211 __asm__ __volatile__("andl %%esp,%0" :
25212 "=r" (sp) : "0" (THREAD_SIZE - 1));
25213
25214- return sp < (sizeof(struct thread_info) + STACK_WARN);
25215+ return sp < STACK_WARN;
25216 }
25217
25218 static void print_stack_overflow(void)
25219 {
25220 printk(KERN_WARNING "low stack detected by irq handler\n");
25221 dump_stack();
25222+ gr_handle_kernel_exploit();
25223 if (sysctl_panic_on_stackoverflow)
25224 panic("low stack detected by irq handler - check messages\n");
25225 }
25226@@ -77,10 +80,9 @@ static inline void *current_stack(void)
25227 static inline int
25228 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25229 {
25230- struct irq_stack *curstk, *irqstk;
25231+ struct irq_stack *irqstk;
25232 u32 *isp, *prev_esp, arg1, arg2;
25233
25234- curstk = (struct irq_stack *) current_stack();
25235 irqstk = __this_cpu_read(hardirq_stack);
25236
25237 /*
25238@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25239 * handler) we can't do that and just have to keep using the
25240 * current stack (which is the irq stack already after all)
25241 */
25242- if (unlikely(curstk == irqstk))
25243+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25244 return 0;
25245
25246- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25247+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25248
25249 /* Save the next esp at the bottom of the stack */
25250 prev_esp = (u32 *)irqstk;
25251 *prev_esp = current_stack_pointer();
25252
25253+#ifdef CONFIG_PAX_MEMORY_UDEREF
25254+ __set_fs(MAKE_MM_SEG(0));
25255+#endif
25256+
25257 if (unlikely(overflow))
25258 call_on_stack(print_stack_overflow, isp);
25259
25260@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25261 : "0" (irq), "1" (desc), "2" (isp),
25262 "D" (desc->handle_irq)
25263 : "memory", "cc", "ecx");
25264+
25265+#ifdef CONFIG_PAX_MEMORY_UDEREF
25266+ __set_fs(current_thread_info()->addr_limit);
25267+#endif
25268+
25269 return 1;
25270 }
25271
25272@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25273 */
25274 void irq_ctx_init(int cpu)
25275 {
25276- struct irq_stack *irqstk;
25277-
25278 if (per_cpu(hardirq_stack, cpu))
25279 return;
25280
25281- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25282- THREADINFO_GFP,
25283- THREAD_SIZE_ORDER));
25284- per_cpu(hardirq_stack, cpu) = irqstk;
25285-
25286- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25287- THREADINFO_GFP,
25288- THREAD_SIZE_ORDER));
25289- per_cpu(softirq_stack, cpu) = irqstk;
25290-
25291- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25292- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25293+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25294+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25295 }
25296
25297 void do_softirq_own_stack(void)
25298 {
25299- struct thread_info *curstk;
25300 struct irq_stack *irqstk;
25301 u32 *isp, *prev_esp;
25302
25303- curstk = current_stack();
25304 irqstk = __this_cpu_read(softirq_stack);
25305
25306 /* build the stack frame on the softirq stack */
25307@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25308 prev_esp = (u32 *)irqstk;
25309 *prev_esp = current_stack_pointer();
25310
25311+#ifdef CONFIG_PAX_MEMORY_UDEREF
25312+ __set_fs(MAKE_MM_SEG(0));
25313+#endif
25314+
25315 call_on_stack(__do_softirq, isp);
25316+
25317+#ifdef CONFIG_PAX_MEMORY_UDEREF
25318+ __set_fs(current_thread_info()->addr_limit);
25319+#endif
25320+
25321 }
25322
25323 bool handle_irq(unsigned irq, struct pt_regs *regs)
25324@@ -165,7 +171,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25325 if (unlikely(!desc))
25326 return false;
25327
25328- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25329+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25330 if (unlikely(overflow))
25331 print_stack_overflow();
25332 desc->handle_irq(irq, desc);
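
The execute_on_irq_stack() change above replaces an equality test between derived thread_info pointers with a distance test: if the live stack pointer already falls within THREAD_SIZE of the IRQ stack base, the CPU is already on the IRQ stack and must not switch again. A sketch of that containment test using the unsigned-wrap idiom (the addresses and size constant are illustrative):

/*
 * Sketch of the "already on this stack?" test: check whether the
 * live stack pointer lies inside the IRQ stack's address range.
 * Unsigned subtraction makes sp < base fail the test as well.
 */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE_MODEL 8192UL    /* illustrative */

static int on_stack(uintptr_t sp, uintptr_t base)
{
    return sp - base < THREAD_SIZE_MODEL;
}

int main(void)
{
    uintptr_t irq_stack = 0x100000;

    printf("%d\n", on_stack(irq_stack + 128, irq_stack));               /* 1 */
    printf("%d\n", on_stack(irq_stack + THREAD_SIZE_MODEL, irq_stack)); /* 0 */
    printf("%d\n", on_stack(irq_stack - 64, irq_stack));                /* 0 */
    return 0;
}
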
25333diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25334index e4b503d..824fce8 100644
25335--- a/arch/x86/kernel/irq_64.c
25336+++ b/arch/x86/kernel/irq_64.c
25337@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25338 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25339 EXPORT_PER_CPU_SYMBOL(irq_regs);
25340
25341+extern void gr_handle_kernel_exploit(void);
25342+
25343 int sysctl_panic_on_stackoverflow;
25344
25345 /*
25346@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25347 u64 estack_top, estack_bottom;
25348 u64 curbase = (u64)task_stack_page(current);
25349
25350- if (user_mode_vm(regs))
25351+ if (user_mode(regs))
25352 return;
25353
25354 if (regs->sp >= curbase + sizeof(struct thread_info) +
25355@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25356 irq_stack_top, irq_stack_bottom,
25357 estack_top, estack_bottom);
25358
25359+ gr_handle_kernel_exploit();
25360+
25361 if (sysctl_panic_on_stackoverflow)
25362 panic("low stack detected by irq handler - check messages\n");
25363 #endif
25364diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25365index 26d5a55..a01160a 100644
25366--- a/arch/x86/kernel/jump_label.c
25367+++ b/arch/x86/kernel/jump_label.c
25368@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25369 * Jump label is enabled for the first time.
25370 * So we expect a default_nop...
25371 */
25372- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25373+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25374 != 0))
25375 bug_at((void *)entry->code, __LINE__);
25376 } else {
25377@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25378 * ...otherwise expect an ideal_nop. Otherwise
25379 * something went horribly wrong.
25380 */
25381- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25382+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25383 != 0))
25384 bug_at((void *)entry->code, __LINE__);
25385 }
25386@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25387 * are converting the default nop to the ideal nop.
25388 */
25389 if (init) {
25390- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25391+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25392 bug_at((void *)entry->code, __LINE__);
25393 } else {
25394 code.jump = 0xe9;
25395 code.offset = entry->target -
25396 (entry->code + JUMP_LABEL_NOP_SIZE);
25397- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25398+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25399 bug_at((void *)entry->code, __LINE__);
25400 }
25401 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
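
ktla_ktva()/ktva_ktla() recur throughout this patch: under i386 KERNEXEC the kernel text has two mappings a constant offset apart, one executable and one readable, and code that inspects or patches instruction bytes (jump labels here, kgdb and kprobes below) must translate addresses between the two. A sketch of such a constant-offset alias pair; the offset value is made up, only the round-trip property matters:

/*
 * Sketch of a constant-offset address alias, the shape of the
 * ktla_ktva()/ktva_ktla() pair.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIAS_OFFSET_MODEL 0x40000000UL    /* illustrative constant */

static uintptr_t ktla_ktva_model(uintptr_t la) { return la + ALIAS_OFFSET_MODEL; }
static uintptr_t ktva_ktla_model(uintptr_t va) { return va - ALIAS_OFFSET_MODEL; }

int main(void)
{
    uintptr_t code_site = 0x81000000UL;    /* pretend patch site */
    uintptr_t readable  = ktla_ktva_model(code_site);

    /* reads of instruction bytes use the alias; translating back
     * must recover the original linear address */
    assert(ktva_ktla_model(readable) == code_site);
    printf("la %#lx <-> alias %#lx\n",
           (unsigned long)code_site, (unsigned long)readable);
    return 0;
}
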
25402diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25403index 25ecd56..e12482f 100644
25404--- a/arch/x86/kernel/kgdb.c
25405+++ b/arch/x86/kernel/kgdb.c
25406@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25407 #ifdef CONFIG_X86_32
25408 switch (regno) {
25409 case GDB_SS:
25410- if (!user_mode_vm(regs))
25411+ if (!user_mode(regs))
25412 *(unsigned long *)mem = __KERNEL_DS;
25413 break;
25414 case GDB_SP:
25415- if (!user_mode_vm(regs))
25416+ if (!user_mode(regs))
25417 *(unsigned long *)mem = kernel_stack_pointer(regs);
25418 break;
25419 case GDB_GS:
25420@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25421 bp->attr.bp_addr = breakinfo[breakno].addr;
25422 bp->attr.bp_len = breakinfo[breakno].len;
25423 bp->attr.bp_type = breakinfo[breakno].type;
25424- info->address = breakinfo[breakno].addr;
25425+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25426+ info->address = ktla_ktva(breakinfo[breakno].addr);
25427+ else
25428+ info->address = breakinfo[breakno].addr;
25429 info->len = breakinfo[breakno].len;
25430 info->type = breakinfo[breakno].type;
25431 val = arch_install_hw_breakpoint(bp);
25432@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25433 case 'k':
25434 /* clear the trace bit */
25435 linux_regs->flags &= ~X86_EFLAGS_TF;
25436- atomic_set(&kgdb_cpu_doing_single_step, -1);
25437+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25438
25439 /* set the trace bit if we're stepping */
25440 if (remcomInBuffer[0] == 's') {
25441 linux_regs->flags |= X86_EFLAGS_TF;
25442- atomic_set(&kgdb_cpu_doing_single_step,
25443+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25444 raw_smp_processor_id());
25445 }
25446
25447@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25448
25449 switch (cmd) {
25450 case DIE_DEBUG:
25451- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25452+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25453 if (user_mode(regs))
25454 return single_step_cont(regs, args);
25455 break;
25456@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25457 #endif /* CONFIG_DEBUG_RODATA */
25458
25459 bpt->type = BP_BREAKPOINT;
25460- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25461+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25462 BREAK_INSTR_SIZE);
25463 if (err)
25464 return err;
25465- err = probe_kernel_write((char *)bpt->bpt_addr,
25466+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25467 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25468 #ifdef CONFIG_DEBUG_RODATA
25469 if (!err)
25470@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25471 return -EBUSY;
25472 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25473 BREAK_INSTR_SIZE);
25474- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25475+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25476 if (err)
25477 return err;
25478 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25479@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25480 if (mutex_is_locked(&text_mutex))
25481 goto knl_write;
25482 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25483- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25484+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25485 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25486 goto knl_write;
25487 return err;
25488 knl_write:
25489 #endif /* CONFIG_DEBUG_RODATA */
25490- return probe_kernel_write((char *)bpt->bpt_addr,
25491+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25492 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25493 }
25494
25495diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25496index 4e3d5a9..03fffd8 100644
25497--- a/arch/x86/kernel/kprobes/core.c
25498+++ b/arch/x86/kernel/kprobes/core.c
25499@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25500 s32 raddr;
25501 } __packed *insn;
25502
25503- insn = (struct __arch_relative_insn *)from;
25504+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25505+
25506+ pax_open_kernel();
25507 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25508 insn->op = op;
25509+ pax_close_kernel();
25510 }
25511
25512 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25513@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25514 kprobe_opcode_t opcode;
25515 kprobe_opcode_t *orig_opcodes = opcodes;
25516
25517- if (search_exception_tables((unsigned long)opcodes))
25518+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25519 return 0; /* Page fault may occur on this address. */
25520
25521 retry:
25522@@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25523 * Fortunately, we know that the original code is the ideal 5-byte
25524 * long NOP.
25525 */
25526- memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25527+ memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25528 if (faddr)
25529 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25530 else
25531 buf[0] = kp->opcode;
25532- return (unsigned long)buf;
25533+ return ktva_ktla((unsigned long)buf);
25534 }
25535
25536 /*
25537@@ -364,7 +367,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25538 /* Another subsystem puts a breakpoint, failed to recover */
25539 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25540 return 0;
25541+ pax_open_kernel();
25542 memcpy(dest, insn.kaddr, insn.length);
25543+ pax_close_kernel();
25544
25545 #ifdef CONFIG_X86_64
25546 if (insn_rip_relative(&insn)) {
25547@@ -391,7 +396,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25548 return 0;
25549 }
25550 disp = (u8 *) dest + insn_offset_displacement(&insn);
25551+ pax_open_kernel();
25552 *(s32 *) disp = (s32) newdisp;
25553+ pax_close_kernel();
25554 }
25555 #endif
25556 return insn.length;
25557@@ -533,7 +540,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25558 * nor set current_kprobe, because it doesn't use single
25559 * stepping.
25560 */
25561- regs->ip = (unsigned long)p->ainsn.insn;
25562+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25563 preempt_enable_no_resched();
25564 return;
25565 }
25566@@ -550,9 +557,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25567 regs->flags &= ~X86_EFLAGS_IF;
25568 /* single step inline if the instruction is an int3 */
25569 if (p->opcode == BREAKPOINT_INSTRUCTION)
25570- regs->ip = (unsigned long)p->addr;
25571+ regs->ip = ktla_ktva((unsigned long)p->addr);
25572 else
25573- regs->ip = (unsigned long)p->ainsn.insn;
25574+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25575 }
25576 NOKPROBE_SYMBOL(setup_singlestep);
25577
25578@@ -602,7 +609,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25579 struct kprobe *p;
25580 struct kprobe_ctlblk *kcb;
25581
25582- if (user_mode_vm(regs))
25583+ if (user_mode(regs))
25584 return 0;
25585
25586 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25587@@ -637,7 +644,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25588 setup_singlestep(p, regs, kcb, 0);
25589 return 1;
25590 }
25591- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25592+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25593 /*
25594 * The breakpoint instruction was removed right
25595 * after we hit it. Another cpu has removed
25596@@ -684,6 +691,9 @@ static void __used kretprobe_trampoline_holder(void)
25597 " movq %rax, 152(%rsp)\n"
25598 RESTORE_REGS_STRING
25599 " popfq\n"
25600+#ifdef KERNEXEC_PLUGIN
25601+ " btsq $63,(%rsp)\n"
25602+#endif
25603 #else
25604 " pushf\n"
25605 SAVE_REGS_STRING
25606@@ -824,7 +834,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25607 struct kprobe_ctlblk *kcb)
25608 {
25609 unsigned long *tos = stack_addr(regs);
25610- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25611+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25612 unsigned long orig_ip = (unsigned long)p->addr;
25613 kprobe_opcode_t *insn = p->ainsn.insn;
25614
25615@@ -1007,7 +1017,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25616 struct die_args *args = data;
25617 int ret = NOTIFY_DONE;
25618
25619- if (args->regs && user_mode_vm(args->regs))
25620+ if (args->regs && user_mode(args->regs))
25621 return ret;
25622
25623 if (val == DIE_GPF) {
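
__synthesize_relative_insn(), patched at the top of this file, encodes a 5-byte x86 jmp/call whose rel32 displacement is the target minus the address of the next instruction (from + 5). A standalone sketch of that displacement arithmetic (plain C, arbitrary addresses):

/*
 * Sketch of rel32 synthesis: a 5-byte x86 jmp/call encodes
 * (target - (insn_addr + 5)) as a signed 32-bit displacement.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void synthesize_rel_insn(uint8_t buf[5], uint8_t op,
                                uint64_t from, uint64_t to)
{
    int32_t raddr = (int32_t)((int64_t)to - ((int64_t)from + 5));

    buf[0] = op;    /* 0xe9 = jmp, 0xe8 = call */
    memcpy(&buf[1], &raddr, sizeof(raddr));
}

int main(void)
{
    uint8_t insn[5];
    uint64_t from = 0x1000, to = 0x1234;
    int32_t disp;

    synthesize_rel_insn(insn, 0xe9, from, to);
    printf("jmp: %02x %02x %02x %02x %02x\n",
           insn[0], insn[1], insn[2], insn[3], insn[4]);

    /* decode check: address after the insn plus disp hits the target */
    memcpy(&disp, &insn[1], sizeof(disp));
    printf("decoded target: %#llx\n",
           (unsigned long long)(from + 5 + (int64_t)disp));
    return 0;
}
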
25624diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25625index 7b3b9d1..e2478b91 100644
25626--- a/arch/x86/kernel/kprobes/opt.c
25627+++ b/arch/x86/kernel/kprobes/opt.c
25628@@ -79,6 +79,7 @@ found:
25629 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25630 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25631 {
25632+ pax_open_kernel();
25633 #ifdef CONFIG_X86_64
25634 *addr++ = 0x48;
25635 *addr++ = 0xbf;
25636@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25637 *addr++ = 0xb8;
25638 #endif
25639 *(unsigned long *)addr = val;
25640+ pax_close_kernel();
25641 }
25642
25643 asm (
25644@@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25645 * Verify if the address gap is in 2GB range, because this uses
25646 * a relative jump.
25647 */
25648- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25649+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25650 if (abs(rel) > 0x7fffffff) {
25651 __arch_remove_optimized_kprobe(op, 0);
25652 return -ERANGE;
25653@@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25654 op->optinsn.size = ret;
25655
25656 /* Copy arch-dep-instance from template */
25657- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25658+ pax_open_kernel();
25659+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25660+ pax_close_kernel();
25661
25662 /* Set probe information */
25663 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25664
25665 /* Set probe function call */
25666- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25667+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25668
25669 /* Set returning jmp instruction at the tail of out-of-line buffer */
25670- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25671+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25672 (u8 *)op->kp.addr + op->optinsn.size);
25673
25674 flush_icache_range((unsigned long) buf,
25675@@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25676 WARN_ON(kprobe_disabled(&op->kp));
25677
25678 /* Backup instructions which will be replaced by jump address */
25679- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25680+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25681 RELATIVE_ADDR_SIZE);
25682
25683 insn_buf[0] = RELATIVEJUMP_OPCODE;
25684@@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25685 /* This kprobe is really able to run optimized path. */
25686 op = container_of(p, struct optimized_kprobe, kp);
25687 /* Detour through copied instructions */
25688- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25689+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25690 if (!reenter)
25691 reset_current_kprobe();
25692 preempt_enable_no_resched();
25693diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25694index c2bedae..25e7ab60 100644
25695--- a/arch/x86/kernel/ksysfs.c
25696+++ b/arch/x86/kernel/ksysfs.c
25697@@ -184,7 +184,7 @@ out:
25698
25699 static struct kobj_attribute type_attr = __ATTR_RO(type);
25700
25701-static struct bin_attribute data_attr = {
25702+static bin_attribute_no_const data_attr __read_only = {
25703 .attr = {
25704 .name = "data",
25705 .mode = S_IRUGO,
25706diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25707index c37886d..d851d32 100644
25708--- a/arch/x86/kernel/ldt.c
25709+++ b/arch/x86/kernel/ldt.c
25710@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25711 if (reload) {
25712 #ifdef CONFIG_SMP
25713 preempt_disable();
25714- load_LDT(pc);
25715+ load_LDT_nolock(pc);
25716 if (!cpumask_equal(mm_cpumask(current->mm),
25717 cpumask_of(smp_processor_id())))
25718 smp_call_function(flush_ldt, current->mm, 1);
25719 preempt_enable();
25720 #else
25721- load_LDT(pc);
25722+ load_LDT_nolock(pc);
25723 #endif
25724 }
25725 if (oldsize) {
25726@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25727 return err;
25728
25729 for (i = 0; i < old->size; i++)
25730- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25731+ write_ldt_entry(new->ldt, i, old->ldt + i);
25732 return 0;
25733 }
25734
25735@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25736 retval = copy_ldt(&mm->context, &old_mm->context);
25737 mutex_unlock(&old_mm->context.lock);
25738 }
25739+
25740+ if (tsk == current) {
25741+ mm->context.vdso = 0;
25742+
25743+#ifdef CONFIG_X86_32
25744+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25745+ mm->context.user_cs_base = 0UL;
25746+ mm->context.user_cs_limit = ~0UL;
25747+
25748+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25749+ cpus_clear(mm->context.cpu_user_cs_mask);
25750+#endif
25751+
25752+#endif
25753+#endif
25754+
25755+ }
25756+
25757 return retval;
25758 }
25759
25760@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25761 }
25762 }
25763
25764+#ifdef CONFIG_PAX_SEGMEXEC
25765+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25766+ error = -EINVAL;
25767+ goto out_unlock;
25768+ }
25769+#endif
25770+
25771 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25772 error = -EINVAL;
25773 goto out_unlock;
25774diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
25775index ff3c3101d..d7c0cd8 100644
25776--- a/arch/x86/kernel/livepatch.c
25777+++ b/arch/x86/kernel/livepatch.c
25778@@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25779 int ret, numpages, size = 4;
25780 bool readonly;
25781 unsigned long val;
25782- unsigned long core = (unsigned long)mod->module_core;
25783- unsigned long core_ro_size = mod->core_ro_size;
25784- unsigned long core_size = mod->core_size;
25785+ unsigned long core_rx = (unsigned long)mod->module_core_rx;
25786+ unsigned long core_rw = (unsigned long)mod->module_core_rw;
25787+ unsigned long core_size_rx = mod->core_size_rx;
25788+ unsigned long core_size_rw = mod->core_size_rw;
25789
25790 switch (type) {
25791 case R_X86_64_NONE:
25792@@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25793 return -EINVAL;
25794 }
25795
25796- if (loc < core || loc >= core + core_size)
25797+ if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
25798+ (loc < core_rw || loc >= core_rw + core_size_rw))
25799 /* loc does not point to any symbol inside the module */
25800 return -EINVAL;
25801
25802- if (loc < core + core_ro_size)
25803+ if (loc < core_rx + core_size_rx)
25804 readonly = true;
25805 else
25806 readonly = false;
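
The livepatch hunk adapts to PaX's split module layout: one RX region for code and rodata, one RW region for data. The single containment test against [core, core+size) therefore becomes two range tests, and "read-only" now means "inside the RX half". A sketch of the split bounds check; the struct below is a stand-in, not the kernel's struct module:

/*
 * Sketch of the RX/RW split bounds check: an address is valid if it
 * falls in either region, and write-protected iff it falls in the RX
 * one.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mod_model {
    uintptr_t core_rx, size_rx;    /* code + rodata, mapped RX */
    uintptr_t core_rw, size_rw;    /* data, mapped RW */
};

static bool loc_valid(const struct mod_model *m, uintptr_t loc, bool *readonly)
{
    if (loc >= m->core_rx && loc < m->core_rx + m->size_rx) {
        *readonly = true;
        return true;
    }
    if (loc >= m->core_rw && loc < m->core_rw + m->size_rw) {
        *readonly = false;
        return true;
    }
    return false;    /* loc does not point inside the module at all */
}

int main(void)
{
    struct mod_model m = { 0x10000, 0x4000, 0x20000, 0x2000 };
    bool ro;

    printf("rx hit: %d\n", loc_valid(&m, 0x10010, &ro) && ro);
    printf("rw hit: %d\n", loc_valid(&m, 0x20010, &ro) && !ro);
    printf("miss:   %d\n", !loc_valid(&m, 0x30000, &ro));
    return 0;
}
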
25807diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25808index 469b23d..5449cfe 100644
25809--- a/arch/x86/kernel/machine_kexec_32.c
25810+++ b/arch/x86/kernel/machine_kexec_32.c
25811@@ -26,7 +26,7 @@
25812 #include <asm/cacheflush.h>
25813 #include <asm/debugreg.h>
25814
25815-static void set_idt(void *newidt, __u16 limit)
25816+static void set_idt(struct desc_struct *newidt, __u16 limit)
25817 {
25818 struct desc_ptr curidt;
25819
25820@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25821 }
25822
25823
25824-static void set_gdt(void *newgdt, __u16 limit)
25825+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25826 {
25827 struct desc_ptr curgdt;
25828
25829@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25830 }
25831
25832 control_page = page_address(image->control_code_page);
25833- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25834+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25835
25836 relocate_kernel_ptr = control_page;
25837 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25838diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25839index 94ea120..4154cea 100644
25840--- a/arch/x86/kernel/mcount_64.S
25841+++ b/arch/x86/kernel/mcount_64.S
25842@@ -7,7 +7,7 @@
25843 #include <linux/linkage.h>
25844 #include <asm/ptrace.h>
25845 #include <asm/ftrace.h>
25846-
25847+#include <asm/alternative-asm.h>
25848
25849 .code64
25850 .section .entry.text, "ax"
25851@@ -148,8 +148,9 @@
25852 #ifdef CONFIG_DYNAMIC_FTRACE
25853
25854 ENTRY(function_hook)
25855+ pax_force_retaddr
25856 retq
25857-END(function_hook)
25858+ENDPROC(function_hook)
25859
25860 ENTRY(ftrace_caller)
25861 /* save_mcount_regs fills in first two parameters */
25862@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25863 #endif
25864
25865 GLOBAL(ftrace_stub)
25866+ pax_force_retaddr
25867 retq
25868-END(ftrace_caller)
25869+ENDPROC(ftrace_caller)
25870
25871 ENTRY(ftrace_regs_caller)
25872 /* Save the current flags before any operations that can change them */
25873@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25874
25875 jmp ftrace_return
25876
25877-END(ftrace_regs_caller)
25878+ENDPROC(ftrace_regs_caller)
25879
25880
25881 #else /* ! CONFIG_DYNAMIC_FTRACE */
25882@@ -272,18 +274,20 @@ fgraph_trace:
25883 #endif
25884
25885 GLOBAL(ftrace_stub)
25886+ pax_force_retaddr
25887 retq
25888
25889 trace:
25890 /* save_mcount_regs fills in first two parameters */
25891 save_mcount_regs
25892
25893+ pax_force_fptr ftrace_trace_function
25894 call *ftrace_trace_function
25895
25896 restore_mcount_regs
25897
25898 jmp fgraph_trace
25899-END(function_hook)
25900+ENDPROC(function_hook)
25901 #endif /* CONFIG_DYNAMIC_FTRACE */
25902 #endif /* CONFIG_FUNCTION_TRACER */
25903
25904@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25905
25906 restore_mcount_regs
25907
25908+ pax_force_retaddr
25909 retq
25910-END(ftrace_graph_caller)
25911+ENDPROC(ftrace_graph_caller)
25912
25913 GLOBAL(return_to_handler)
25914 subq $24, %rsp
25915@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25916 movq 8(%rsp), %rdx
25917 movq (%rsp), %rax
25918 addq $24, %rsp
25919+ pax_force_fptr %rdi
25920 jmp *%rdi
25921+ENDPROC(return_to_handler)
25922 #endif
25923diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25924index d1ac80b..f593701 100644
25925--- a/arch/x86/kernel/module.c
25926+++ b/arch/x86/kernel/module.c
25927@@ -82,17 +82,17 @@ static unsigned long int get_module_load_offset(void)
25928 }
25929 #endif
25930
25931-void *module_alloc(unsigned long size)
25932+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25933 {
25934 void *p;
25935
25936- if (PAGE_ALIGN(size) > MODULES_LEN)
25937+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25938 return NULL;
25939
25940 p = __vmalloc_node_range(size, MODULE_ALIGN,
25941 MODULES_VADDR + get_module_load_offset(),
25942- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25943- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
25944+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25945+ prot, 0, NUMA_NO_NODE,
25946 __builtin_return_address(0));
25947 if (p && (kasan_module_alloc(p, size) < 0)) {
25948 vfree(p);
25949@@ -102,6 +102,51 @@ void *module_alloc(unsigned long size)
25950 return p;
25951 }
25952
25953+void *module_alloc(unsigned long size)
25954+{
25955+
25956+#ifdef CONFIG_PAX_KERNEXEC
25957+ return __module_alloc(size, PAGE_KERNEL);
25958+#else
25959+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25960+#endif
25961+
25962+}
25963+
25964+#ifdef CONFIG_PAX_KERNEXEC
25965+#ifdef CONFIG_X86_32
25966+void *module_alloc_exec(unsigned long size)
25967+{
25968+ struct vm_struct *area;
25969+
25970+ if (size == 0)
25971+ return NULL;
25972+
25973+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25974+ return area ? area->addr : NULL;
25975+}
25976+EXPORT_SYMBOL(module_alloc_exec);
25977+
25978+void module_memfree_exec(void *module_region)
25979+{
25980+ vunmap(module_region);
25981+}
25982+EXPORT_SYMBOL(module_memfree_exec);
25983+#else
25984+void module_memfree_exec(void *module_region)
25985+{
25986+ module_memfree(module_region);
25987+}
25988+EXPORT_SYMBOL(module_memfree_exec);
25989+
25990+void *module_alloc_exec(unsigned long size)
25991+{
25992+ return __module_alloc(size, PAGE_KERNEL_RX);
25993+}
25994+EXPORT_SYMBOL(module_alloc_exec);
25995+#endif
25996+#endif
25997+
25998 #ifdef CONFIG_X86_32
25999 int apply_relocate(Elf32_Shdr *sechdrs,
26000 const char *strtab,
26001@@ -112,14 +157,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26002 unsigned int i;
26003 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26004 Elf32_Sym *sym;
26005- uint32_t *location;
26006+ uint32_t *plocation, location;
26007
26008 DEBUGP("Applying relocate section %u to %u\n",
26009 relsec, sechdrs[relsec].sh_info);
26010 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26011 /* This is where to make the change */
26012- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26013- + rel[i].r_offset;
26014+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26015+ location = (uint32_t)plocation;
26016+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26017+ plocation = ktla_ktva((void *)plocation);
26018 /* This is the symbol it is referring to. Note that all
26019 undefined symbols have been resolved. */
26020 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26021@@ -128,11 +175,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26022 switch (ELF32_R_TYPE(rel[i].r_info)) {
26023 case R_386_32:
26024 /* We add the value into the location given */
26025- *location += sym->st_value;
26026+ pax_open_kernel();
26027+ *plocation += sym->st_value;
26028+ pax_close_kernel();
26029 break;
26030 case R_386_PC32:
26031 /* Add the value, subtract its position */
26032- *location += sym->st_value - (uint32_t)location;
26033+ pax_open_kernel();
26034+ *plocation += sym->st_value - location;
26035+ pax_close_kernel();
26036 break;
26037 default:
26038 pr_err("%s: Unknown relocation: %u\n",
26039@@ -177,21 +228,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26040 case R_X86_64_NONE:
26041 break;
26042 case R_X86_64_64:
26043+ pax_open_kernel();
26044 *(u64 *)loc = val;
26045+ pax_close_kernel();
26046 break;
26047 case R_X86_64_32:
26048+ pax_open_kernel();
26049 *(u32 *)loc = val;
26050+ pax_close_kernel();
26051 if (val != *(u32 *)loc)
26052 goto overflow;
26053 break;
26054 case R_X86_64_32S:
26055+ pax_open_kernel();
26056 *(s32 *)loc = val;
26057+ pax_close_kernel();
26058 if ((s64)val != *(s32 *)loc)
26059 goto overflow;
26060 break;
26061 case R_X86_64_PC32:
26062 val -= (u64)loc;
26063+ pax_open_kernel();
26064 *(u32 *)loc = val;
26065+ pax_close_kernel();
26066+
26067 #if 0
26068 if ((s64)val != *(s32 *)loc)
26069 goto overflow;
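
The module.c rework funnels every allocation through one helper parameterized by page protection: under CONFIG_PAX_KERNEXEC, module_alloc() hands back non-executable memory, and executable mappings come only from the separate module_alloc_exec(), enforcing W^X for module loads. A userspace analogue using mmap in place of vmalloc:

/*
 * Userspace analogue of the module_alloc() split: module memory is
 * non-executable by default; executable mappings come only from a
 * dedicated helper.
 */
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static void *alloc_prot(size_t size, int prot)
{
    void *p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    return p == MAP_FAILED ? NULL : p;
}

static void *module_alloc_model(size_t size)
{
    /* default: writable, never executable */
    return alloc_prot(size, PROT_READ | PROT_WRITE);
}

static void *module_alloc_exec_model(size_t size)
{
    /* code: executable, never writable */
    return alloc_prot(size, PROT_READ | PROT_EXEC);
}

int main(void)
{
    void *data = module_alloc_model(4096);
    void *code = module_alloc_exec_model(4096);

    if (!data || !code)
        return 1;
    printf("data at %p (rw-), code at %p (r-x)\n", data, code);
    munmap(data, 4096);
    munmap(code, 4096);
    return 0;
}

How code ever lands in an r-x mapping is the other half of the patch: such writes are bracketed by pax_open_kernel()/pax_close_kernel(), which momentarily lift the protection.
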
26070diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26071index 113e707..0a690e1 100644
26072--- a/arch/x86/kernel/msr.c
26073+++ b/arch/x86/kernel/msr.c
26074@@ -39,6 +39,7 @@
26075 #include <linux/notifier.h>
26076 #include <linux/uaccess.h>
26077 #include <linux/gfp.h>
26078+#include <linux/grsecurity.h>
26079
26080 #include <asm/processor.h>
26081 #include <asm/msr.h>
26082@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26083 int err = 0;
26084 ssize_t bytes = 0;
26085
26086+#ifdef CONFIG_GRKERNSEC_KMEM
26087+ gr_handle_msr_write();
26088+ return -EPERM;
26089+#endif
26090+
26091 if (count % 8)
26092 return -EINVAL; /* Invalid chunk size */
26093
26094@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26095 err = -EBADF;
26096 break;
26097 }
26098+#ifdef CONFIG_GRKERNSEC_KMEM
26099+ gr_handle_msr_write();
26100+ return -EPERM;
26101+#endif
26102 if (copy_from_user(&regs, uregs, sizeof regs)) {
26103 err = -EFAULT;
26104 break;
26105@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26106 return notifier_from_errno(err);
26107 }
26108
26109-static struct notifier_block __refdata msr_class_cpu_notifier = {
26110+static struct notifier_block msr_class_cpu_notifier = {
26111 .notifier_call = msr_class_cpu_callback,
26112 };
26113
26114diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26115index c3e985d..110a36a 100644
26116--- a/arch/x86/kernel/nmi.c
26117+++ b/arch/x86/kernel/nmi.c
26118@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26119
26120 static void nmi_max_handler(struct irq_work *w)
26121 {
26122- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26123+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26124 int remainder_ns, decimal_msecs;
26125- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26126+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26127
26128 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26129 decimal_msecs = remainder_ns / 1000;
26130
26131 printk_ratelimited(KERN_INFO
26132 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26133- a->handler, whole_msecs, decimal_msecs);
26134+ n->action->handler, whole_msecs, decimal_msecs);
26135 }
26136
26137 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26138@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26139 delta = sched_clock() - delta;
26140 trace_nmi_handler(a->handler, (int)delta, thishandled);
26141
26142- if (delta < nmi_longest_ns || delta < a->max_duration)
26143+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26144 continue;
26145
26146- a->max_duration = delta;
26147- irq_work_queue(&a->irq_work);
26148+ a->work->max_duration = delta;
26149+ irq_work_queue(&a->work->irq_work);
26150 }
26151
26152 rcu_read_unlock();
26153@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26154 }
26155 NOKPROBE_SYMBOL(nmi_handle);
26156
26157-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26158+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26159 {
26160 struct nmi_desc *desc = nmi_to_desc(type);
26161 unsigned long flags;
26162@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26163 if (!action->handler)
26164 return -EINVAL;
26165
26166- init_irq_work(&action->irq_work, nmi_max_handler);
26167+ action->work->action = action;
26168+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26169
26170 spin_lock_irqsave(&desc->lock, flags);
26171
26172@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26173 * event confuses some handlers (kdump uses this flag)
26174 */
26175 if (action->flags & NMI_FLAG_FIRST)
26176- list_add_rcu(&action->list, &desc->head);
26177+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26178 else
26179- list_add_tail_rcu(&action->list, &desc->head);
26180+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26181
26182 spin_unlock_irqrestore(&desc->lock, flags);
26183 return 0;
26184@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26185 if (!strcmp(n->name, name)) {
26186 WARN(in_nmi(),
26187 "Trying to free NMI (%s) from NMI context!\n", n->name);
26188- list_del_rcu(&n->list);
26189+ pax_list_del_rcu((struct list_head *)&n->list);
26190 break;
26191 }
26192 }
26193@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26194 dotraplinkage notrace void
26195 do_nmi(struct pt_regs *regs, long error_code)
26196 {
26197+
26198+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26199+ if (!user_mode(regs)) {
26200+ unsigned long cs = regs->cs & 0xFFFF;
26201+ unsigned long ip = ktva_ktla(regs->ip);
26202+
26203+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26204+ regs->ip = ip;
26205+ }
26206+#endif
26207+
26208 nmi_nesting_preprocess(regs);
26209
26210 nmi_enter();
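
The nmi.c changes pull the mutable fields (max_duration and the irq_work) out of struct nmiaction into a separate nmiwork that points back at its action; with the bookkeeping relocated, the action descriptor itself can be const and live in read-only memory. A sketch of that "const descriptor + writable side struct" pattern; the names loosely mirror the patch, the layout is illustrative:

/*
 * Sketch of a const handler entry whose per-entry statistics live in
 * a separate writable struct that points back at it.
 */
#include <stdint.h>
#include <stdio.h>

struct action;

struct work {
    uint64_t max_duration;          /* mutable statistic */
    const struct action *action;    /* back-pointer for reporting */
};

struct action {
    int (*handler)(void);           /* read-only in the kernel case */
    const char *name;
    struct work *work;              /* the one writable hook */
};

static int demo_handler(void) { return 1; }

static struct work demo_work;
static const struct action demo_action = {
    .handler = demo_handler,
    .name    = "demo",
    .work    = &demo_work,
};

int main(void)
{
    demo_action.work->action = &demo_action;    /* registration step */
    demo_action.work->max_duration = 42;        /* stats stay mutable */
    printf("%s: max %llu\n", demo_action.work->action->name,
           (unsigned long long)demo_action.work->max_duration);
    return 0;
}
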
26211diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26212index 6d9582e..f746287 100644
26213--- a/arch/x86/kernel/nmi_selftest.c
26214+++ b/arch/x86/kernel/nmi_selftest.c
26215@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26216 {
26217 /* trap all the unknown NMIs we may generate */
26218 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26219- __initdata);
26220+ __initconst);
26221 }
26222
26223 static void __init cleanup_nmi_testsuite(void)
26224@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26225 unsigned long timeout;
26226
26227 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26228- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26229+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26230 nmi_fail = FAILURE;
26231 return;
26232 }
26233diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26234index bbb6c73..24a58ef 100644
26235--- a/arch/x86/kernel/paravirt-spinlocks.c
26236+++ b/arch/x86/kernel/paravirt-spinlocks.c
26237@@ -8,7 +8,7 @@
26238
26239 #include <asm/paravirt.h>
26240
26241-struct pv_lock_ops pv_lock_ops = {
26242+struct pv_lock_ops pv_lock_ops __read_only = {
26243 #ifdef CONFIG_SMP
26244 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26245 .unlock_kick = paravirt_nop,
26246diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26247index 548d25f..f8fb99c 100644
26248--- a/arch/x86/kernel/paravirt.c
26249+++ b/arch/x86/kernel/paravirt.c
26250@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26251 {
26252 return x;
26253 }
26254+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26255+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26256+#endif
26257
26258 void __init default_banner(void)
26259 {
26260@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26261
26262 if (opfunc == NULL)
26263 /* If there's no function, patch it with a ud2a (BUG) */
26264- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26265- else if (opfunc == _paravirt_nop)
26266+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26267+ else if (opfunc == (void *)_paravirt_nop)
26268 /* If the operation is a nop, then nop the callsite */
26269 ret = paravirt_patch_nop();
26270
26271 /* identity functions just return their single argument */
26272- else if (opfunc == _paravirt_ident_32)
26273+ else if (opfunc == (void *)_paravirt_ident_32)
26274 ret = paravirt_patch_ident_32(insnbuf, len);
26275- else if (opfunc == _paravirt_ident_64)
26276+ else if (opfunc == (void *)_paravirt_ident_64)
26277 ret = paravirt_patch_ident_64(insnbuf, len);
26278+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26279+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26280+ ret = paravirt_patch_ident_64(insnbuf, len);
26281+#endif
26282
26283 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26284 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26285@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26286 if (insn_len > len || start == NULL)
26287 insn_len = len;
26288 else
26289- memcpy(insnbuf, start, insn_len);
26290+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26291
26292 return insn_len;
26293 }
26294@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26295 return this_cpu_read(paravirt_lazy_mode);
26296 }
26297
26298-struct pv_info pv_info = {
26299+struct pv_info pv_info __read_only = {
26300 .name = "bare hardware",
26301 .paravirt_enabled = 0,
26302 .kernel_rpl = 0,
26303@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26304 #endif
26305 };
26306
26307-struct pv_init_ops pv_init_ops = {
26308+struct pv_init_ops pv_init_ops __read_only = {
26309 .patch = native_patch,
26310 };
26311
26312-struct pv_time_ops pv_time_ops = {
26313+struct pv_time_ops pv_time_ops __read_only = {
26314 .sched_clock = native_sched_clock,
26315 .steal_clock = native_steal_clock,
26316 };
26317
26318-__visible struct pv_irq_ops pv_irq_ops = {
26319+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26320 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26321 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26322 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26323@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26324 #endif
26325 };
26326
26327-__visible struct pv_cpu_ops pv_cpu_ops = {
26328+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26329 .cpuid = native_cpuid,
26330 .get_debugreg = native_get_debugreg,
26331 .set_debugreg = native_set_debugreg,
26332@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26333 NOKPROBE_SYMBOL(native_set_debugreg);
26334 NOKPROBE_SYMBOL(native_load_idt);
26335
26336-struct pv_apic_ops pv_apic_ops = {
26337+struct pv_apic_ops pv_apic_ops __read_only = {
26338 #ifdef CONFIG_X86_LOCAL_APIC
26339 .startup_ipi_hook = paravirt_nop,
26340 #endif
26341 };
26342
26343-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26344+#ifdef CONFIG_X86_32
26345+#ifdef CONFIG_X86_PAE
26346+/* 64-bit pagetable entries */
26347+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26348+#else
26349 /* 32-bit pagetable entries */
26350 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26351+#endif
26352 #else
26353 /* 64-bit pagetable entries */
26354 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26355 #endif
26356
26357-struct pv_mmu_ops pv_mmu_ops = {
26358+struct pv_mmu_ops pv_mmu_ops __read_only = {
26359
26360 .read_cr2 = native_read_cr2,
26361 .write_cr2 = native_write_cr2,
26362@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26363 .make_pud = PTE_IDENT,
26364
26365 .set_pgd = native_set_pgd,
26366+ .set_pgd_batched = native_set_pgd_batched,
26367 #endif
26368 #endif /* PAGETABLE_LEVELS >= 3 */
26369
26370@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26371 },
26372
26373 .set_fixmap = native_set_fixmap,
26374+
26375+#ifdef CONFIG_PAX_KERNEXEC
26376+ .pax_open_kernel = native_pax_open_kernel,
26377+ .pax_close_kernel = native_pax_close_kernel,
26378+#endif
26379+
26380 };
26381
26382 EXPORT_SYMBOL_GPL(pv_time_ops);
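
Marking the pv_*_ops tables __read_only write-protects these function-pointer arrays after boot, removing a classic kernel-exploitation target; the few legitimate runtime updates (compare the i8259 hunk earlier) go through pax_open_kernel()/pax_close_kernel(). A userspace sketch of sealing an ops table, with mprotect standing in for the boot-time protection:

/*
 * Sketch: fill a function-pointer table at init time, then seal the
 * page it lives on.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { long (*read_cr2)(void); };

static long native_read_cr2_model(void) { return 0xdead; }

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    struct ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (ops == MAP_FAILED)
        return 1;
    ops->read_cr2 = native_read_cr2_model;    /* init-time fill */

    mprotect(ops, pagesz, PROT_READ);         /* seal: the __read_only effect */
    printf("read_cr2() -> %#lx\n", (unsigned long)ops->read_cr2());

    /*
     * A later store to ops->read_cr2 would now fault; a legitimate
     * update would first restore PROT_WRITE, the role played by
     * pax_open_kernel() in the patch.
     */
    munmap(ops, pagesz);
    return 0;
}
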
26383diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26384index a1da673..b6f5831 100644
26385--- a/arch/x86/kernel/paravirt_patch_64.c
26386+++ b/arch/x86/kernel/paravirt_patch_64.c
26387@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26388 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26389 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26390 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26391+
26392+#ifndef CONFIG_PAX_MEMORY_UDEREF
26393 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26394+#endif
26395+
26396 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26397 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26398
26399@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26400 PATCH_SITE(pv_mmu_ops, read_cr3);
26401 PATCH_SITE(pv_mmu_ops, write_cr3);
26402 PATCH_SITE(pv_cpu_ops, clts);
26403+
26404+#ifndef CONFIG_PAX_MEMORY_UDEREF
26405 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26406+#endif
26407+
26408 PATCH_SITE(pv_cpu_ops, wbinvd);
26409
26410 patch_site:
26411diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26412index 0497f71..7186c0d 100644
26413--- a/arch/x86/kernel/pci-calgary_64.c
26414+++ b/arch/x86/kernel/pci-calgary_64.c
26415@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26416 tce_space = be64_to_cpu(readq(target));
26417 tce_space = tce_space & TAR_SW_BITS;
26418
26419- tce_space = tce_space & (~specified_table_size);
26420+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26421 info->tce_space = (u64 *)__va(tce_space);
26422 }
26423 }
26424diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26425index 35ccf75..7a15747 100644
26426--- a/arch/x86/kernel/pci-iommu_table.c
26427+++ b/arch/x86/kernel/pci-iommu_table.c
26428@@ -2,7 +2,7 @@
26429 #include <asm/iommu_table.h>
26430 #include <linux/string.h>
26431 #include <linux/kallsyms.h>
26432-
26433+#include <linux/sched.h>
26434
26435 #define DEBUG 1
26436
26437diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26438index 77dd0ad..9ec4723 100644
26439--- a/arch/x86/kernel/pci-swiotlb.c
26440+++ b/arch/x86/kernel/pci-swiotlb.c
26441@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26442 struct dma_attrs *attrs)
26443 {
26444 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26445- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26446+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26447 else
26448 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26449 }
26450diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26451index 046e2d6..2cc8ad2 100644
26452--- a/arch/x86/kernel/process.c
26453+++ b/arch/x86/kernel/process.c
26454@@ -37,7 +37,8 @@
26455 * section. Since TSS's are completely CPU-local, we want them
26456 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26457 */
26458-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26459+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26460+EXPORT_SYMBOL(init_tss);
26461
26462 #ifdef CONFIG_X86_64
26463 static DEFINE_PER_CPU(unsigned char, is_idle);
26464@@ -95,7 +96,7 @@ void arch_task_cache_init(void)
26465 task_xstate_cachep =
26466 kmem_cache_create("task_xstate", xstate_size,
26467 __alignof__(union thread_xstate),
26468- SLAB_PANIC | SLAB_NOTRACK, NULL);
26469+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26470 setup_xstate_comp();
26471 }
26472
26473@@ -109,7 +110,7 @@ void exit_thread(void)
26474 unsigned long *bp = t->io_bitmap_ptr;
26475
26476 if (bp) {
26477- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26478+ struct tss_struct *tss = init_tss + get_cpu();
26479
26480 t->io_bitmap_ptr = NULL;
26481 clear_thread_flag(TIF_IO_BITMAP);
26482@@ -129,6 +130,9 @@ void flush_thread(void)
26483 {
26484 struct task_struct *tsk = current;
26485
26486+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26487+ loadsegment(gs, 0);
26488+#endif
26489 flush_ptrace_hw_breakpoint(tsk);
26490 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26491 drop_init_fpu(tsk);
26492@@ -275,7 +279,7 @@ static void __exit_idle(void)
26493 void exit_idle(void)
26494 {
26495 /* idle loop has pid 0 */
26496- if (current->pid)
26497+ if (task_pid_nr(current))
26498 return;
26499 __exit_idle();
26500 }
26501@@ -328,7 +332,7 @@ bool xen_set_default_idle(void)
26502 return ret;
26503 }
26504 #endif
26505-void stop_this_cpu(void *dummy)
26506+__noreturn void stop_this_cpu(void *dummy)
26507 {
26508 local_irq_disable();
26509 /*
26510@@ -457,16 +461,37 @@ static int __init idle_setup(char *str)
26511 }
26512 early_param("idle", idle_setup);
26513
26514-unsigned long arch_align_stack(unsigned long sp)
26515+#ifdef CONFIG_PAX_RANDKSTACK
26516+void pax_randomize_kstack(struct pt_regs *regs)
26517 {
26518- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26519- sp -= get_random_int() % 8192;
26520- return sp & ~0xf;
26521-}
26522+ struct thread_struct *thread = &current->thread;
26523+ unsigned long time;
26524
26525-unsigned long arch_randomize_brk(struct mm_struct *mm)
26526-{
26527- unsigned long range_end = mm->brk + 0x02000000;
26528- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26529-}
26530+ if (!randomize_va_space)
26531+ return;
26532+
26533+ if (v8086_mode(regs))
26534+ return;
26535
26536+ rdtscl(time);
26537+
26538+ /* P4 seems to return a 0 LSB, ignore it */
26539+#ifdef CONFIG_MPENTIUM4
26540+ time &= 0x3EUL;
26541+ time <<= 2;
26542+#elif defined(CONFIG_X86_64)
26543+ time &= 0xFUL;
26544+ time <<= 4;
26545+#else
26546+ time &= 0x1FUL;
26547+ time <<= 3;
26548+#endif
26549+
26550+ thread->sp0 ^= time;
26551+ load_sp0(init_tss + smp_processor_id(), thread);
26552+
26553+#ifdef CONFIG_X86_64
26554+ this_cpu_write(kernel_stack, thread->sp0);
26555+#endif
26556+}
26557+#endif
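
pax_randomize_kstack() above perturbs the kernel stack top (sp0) with a few low timestamp-counter bits, masked and shifted so the result stays 16-byte aligned (in the x86_64 branch: 4 bits shifted left by 4, giving offsets 0 to 240 in 16-byte steps). A sketch of that mask-and-shift, with clock_gettime() standing in for rdtscl():

/*
 * Sketch of the offset derivation: a few low timer bits, shifted so
 * the offset stays 16-byte aligned, XORed into the stack top.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t randomize_sp0(uint64_t sp0)
{
    struct timespec ts;
    uint64_t t;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    t = (uint64_t)ts.tv_nsec;
    t &= 0xFUL;    /* keep 4 bits of entropy (the x86_64 case) */
    t <<= 4;       /* offsets 0..240 in 16-byte steps */

    return sp0 ^ t;
}

int main(void)
{
    uint64_t sp0 = 0xffff880000010000ULL;    /* illustrative stack top */

    for (int i = 0; i < 4; i++)
        printf("sp0 candidate: %#llx\n",
               (unsigned long long)randomize_sp0(sp0));
    return 0;
}
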
26558diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26559index 603c4f9..3a105d7 100644
26560--- a/arch/x86/kernel/process_32.c
26561+++ b/arch/x86/kernel/process_32.c
26562@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26563 unsigned long thread_saved_pc(struct task_struct *tsk)
26564 {
26565 return ((unsigned long *)tsk->thread.sp)[3];
26566+//XXX return tsk->thread.eip;
26567 }
26568
26569 void __show_regs(struct pt_regs *regs, int all)
26570@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26571 unsigned long sp;
26572 unsigned short ss, gs;
26573
26574- if (user_mode_vm(regs)) {
26575+ if (user_mode(regs)) {
26576 sp = regs->sp;
26577 ss = regs->ss & 0xffff;
26578- gs = get_user_gs(regs);
26579 } else {
26580 sp = kernel_stack_pointer(regs);
26581 savesegment(ss, ss);
26582- savesegment(gs, gs);
26583 }
26584+ gs = get_user_gs(regs);
26585
26586 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26587 (u16)regs->cs, regs->ip, regs->flags,
26588- smp_processor_id());
26589+ raw_smp_processor_id());
26590 print_symbol("EIP is at %s\n", regs->ip);
26591
26592 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26593@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26594 int copy_thread(unsigned long clone_flags, unsigned long sp,
26595 unsigned long arg, struct task_struct *p)
26596 {
26597- struct pt_regs *childregs = task_pt_regs(p);
26598+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26599 struct task_struct *tsk;
26600 int err;
26601
26602 p->thread.sp = (unsigned long) childregs;
26603 p->thread.sp0 = (unsigned long) (childregs+1);
26604+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26605 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26606
26607 if (unlikely(p->flags & PF_KTHREAD)) {
26608 /* kernel thread */
26609 memset(childregs, 0, sizeof(struct pt_regs));
26610 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26611- task_user_gs(p) = __KERNEL_STACK_CANARY;
26612- childregs->ds = __USER_DS;
26613- childregs->es = __USER_DS;
26614+ savesegment(gs, childregs->gs);
26615+ childregs->ds = __KERNEL_DS;
26616+ childregs->es = __KERNEL_DS;
26617 childregs->fs = __KERNEL_PERCPU;
26618 childregs->bx = sp; /* function */
26619 childregs->bp = arg;
26620@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26621 struct thread_struct *prev = &prev_p->thread,
26622 *next = &next_p->thread;
26623 int cpu = smp_processor_id();
26624- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26625+ struct tss_struct *tss = init_tss + cpu;
26626 fpu_switch_t fpu;
26627
26628 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26629@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26630 */
26631 lazy_save_gs(prev->gs);
26632
26633+#ifdef CONFIG_PAX_MEMORY_UDEREF
26634+ __set_fs(task_thread_info(next_p)->addr_limit);
26635+#endif
26636+
26637 /*
26638 * Load the per-thread Thread-Local Storage descriptor.
26639 */
26640@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26641 */
26642 arch_end_context_switch(next_p);
26643
26644- this_cpu_write(kernel_stack,
26645- (unsigned long)task_stack_page(next_p) +
26646- THREAD_SIZE - KERNEL_STACK_OFFSET);
26647+ this_cpu_write(current_task, next_p);
26648+ this_cpu_write(current_tinfo, &next_p->tinfo);
26649+ this_cpu_write(kernel_stack, next->sp0);
26650
26651 /*
26652 * Restore %gs if needed (which is common)
26653@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26654
26655 switch_fpu_finish(next_p, fpu);
26656
26657- this_cpu_write(current_task, next_p);
26658-
26659 return prev_p;
26660 }
26661
26662@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26663 } while (count++ < 16);
26664 return 0;
26665 }
26666-
26667diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26668index 67fcc43..0d2c630 100644
26669--- a/arch/x86/kernel/process_64.c
26670+++ b/arch/x86/kernel/process_64.c
26671@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26672 struct pt_regs *childregs;
26673 struct task_struct *me = current;
26674
26675- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26676+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26677 childregs = task_pt_regs(p);
26678 p->thread.sp = (unsigned long) childregs;
26679 p->thread.usersp = me->thread.usersp;
26680+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26681 set_tsk_thread_flag(p, TIF_FORK);
26682 p->thread.io_bitmap_ptr = NULL;
26683
26684@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26685 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26686 savesegment(es, p->thread.es);
26687 savesegment(ds, p->thread.ds);
26688+ savesegment(ss, p->thread.ss);
26689+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26690 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26691
26692 if (unlikely(p->flags & PF_KTHREAD)) {
26693@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26694 struct thread_struct *prev = &prev_p->thread;
26695 struct thread_struct *next = &next_p->thread;
26696 int cpu = smp_processor_id();
26697- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26698+ struct tss_struct *tss = init_tss + cpu;
26699 unsigned fsindex, gsindex;
26700 fpu_switch_t fpu;
26701
26702@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26703 if (unlikely(next->ds | prev->ds))
26704 loadsegment(ds, next->ds);
26705
26706+ savesegment(ss, prev->ss);
26707+ if (unlikely(next->ss != prev->ss))
26708+ loadsegment(ss, next->ss);
26709+
26710 /*
26711 * Switch FS and GS.
26712 *
26713@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26714 prev->usersp = this_cpu_read(old_rsp);
26715 this_cpu_write(old_rsp, next->usersp);
26716 this_cpu_write(current_task, next_p);
26717+ this_cpu_write(current_tinfo, &next_p->tinfo);
26718
26719 /*
26720 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26721@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26722 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26723 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26724
26725- this_cpu_write(kernel_stack,
26726- (unsigned long)task_stack_page(next_p) +
26727- THREAD_SIZE - KERNEL_STACK_OFFSET);
26728+ this_cpu_write(kernel_stack, next->sp0);
26729
26730 /*
26731 * Now maybe reload the debug registers and handle I/O bitmaps
26732@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26733 if (!p || p == current || p->state == TASK_RUNNING)
26734 return 0;
26735 stack = (unsigned long)task_stack_page(p);
26736- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26737+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26738 return 0;
26739 fp = *(u64 *)(p->thread.sp);
26740 do {
26741- if (fp < (unsigned long)stack ||
26742- fp >= (unsigned long)stack+THREAD_SIZE)
26743+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26744 return 0;
26745 ip = *(u64 *)(fp+8);
26746 if (!in_sched_functions(ip))
26747diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26748index e510618..5165ac0 100644
26749--- a/arch/x86/kernel/ptrace.c
26750+++ b/arch/x86/kernel/ptrace.c
26751@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26752 unsigned long sp = (unsigned long)&regs->sp;
26753 u32 *prev_esp;
26754
26755- if (context == (sp & ~(THREAD_SIZE - 1)))
26756+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26757 return sp;
26758
26759- prev_esp = (u32 *)(context);
26760+ prev_esp = *(u32 **)(context);
26761 if (prev_esp)
26762 return (unsigned long)prev_esp;
26763
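
The rewritten kernel_stack_pointer() leans on the classic power-of-two trick: any address inside a task's stack can be rounded down to the stack base by masking with ~(THREAD_SIZE - 1). The +8 bias and the extra dereference track where this patch keeps the previous esp (grsecurity reserves a few words at the stack top, cf. the -16 adjustments elsewhere in this patch). A stand-alone sketch of the masking trick, with THREAD_SIZE assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL      /* assumed; must be a power of two */

    /* Round any in-stack address down to the stack base. */
    static uintptr_t stack_base(uintptr_t sp)
    {
        return sp & ~(THREAD_SIZE - 1);
    }

    int main(void)
    {
        uintptr_t sp = 0xc2001a30UL;   /* arbitrary address inside a stack */
        printf("stack containing %#lx starts at %#lx\n",
               (unsigned long)sp, (unsigned long)stack_base(sp));
        return 0;
    }
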
26764@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26765 if (child->thread.gs != value)
26766 return do_arch_prctl(child, ARCH_SET_GS, value);
26767 return 0;
26768+
26769+ case offsetof(struct user_regs_struct,ip):
26770+ /*
26771+ * Protect against any attempt to set ip to an
26772+ * impossible address. There are dragons lurking if the
26773+ * address is noncanonical. (This explicitly allows
26774+ * setting ip to TASK_SIZE_MAX, because user code can do
26775+ * that all by itself by running off the end of its
26776+ * address space.)
26777+ */
26778+ if (value > TASK_SIZE_MAX)
26779+ return -EIO;
26780+ break;
26781+
26782 #endif
26783 }
26784
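
The new putreg() case rejects any attempt by a tracer to plant an instruction pointer above the user address-space limit, since a later sysret to a non-canonical RIP faults with kernel state still live. A user-space rendition of the guard (TASK_SIZE_MAX value assumed for x86-64):

    #include <errno.h>
    #include <stdint.h>

    #define TASK_SIZE_MAX 0x00007ffffffff000ULL   /* assumed x86-64 limit */

    /* Mirror of the added check: refuse to let a tracer set a traced
     * task's ip past the user address-space limit. */
    static int validate_traced_ip(uint64_t ip)
    {
        if (ip > TASK_SIZE_MAX)
            return -EIO;    /* non-canonical RIP would fault in ring 0 */
        return 0;
    }
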
26785@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26786 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26787 {
26788 int i;
26789- int dr7 = 0;
26790+ unsigned long dr7 = 0;
26791 struct arch_hw_breakpoint *info;
26792
26793 for (i = 0; i < HBP_NUM; i++) {
26794@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26795 unsigned long addr, unsigned long data)
26796 {
26797 int ret;
26798- unsigned long __user *datap = (unsigned long __user *)data;
26799+ unsigned long __user *datap = (__force unsigned long __user *)data;
26800
26801 switch (request) {
26802 /* read the word at location addr in the USER area. */
26803@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26804 if ((int) addr < 0)
26805 return -EIO;
26806 ret = do_get_thread_area(child, addr,
26807- (struct user_desc __user *)data);
26808+ (__force struct user_desc __user *) data);
26809 break;
26810
26811 case PTRACE_SET_THREAD_AREA:
26812 if ((int) addr < 0)
26813 return -EIO;
26814 ret = do_set_thread_area(child, addr,
26815- (struct user_desc __user *)data, 0);
26816+ (__force struct user_desc __user *) data, 0);
26817 break;
26818 #endif
26819
26820@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26821
26822 #ifdef CONFIG_X86_64
26823
26824-static struct user_regset x86_64_regsets[] __read_mostly = {
26825+static user_regset_no_const x86_64_regsets[] __read_only = {
26826 [REGSET_GENERAL] = {
26827 .core_note_type = NT_PRSTATUS,
26828 .n = sizeof(struct user_regs_struct) / sizeof(long),
26829@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26830 #endif /* CONFIG_X86_64 */
26831
26832 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26833-static struct user_regset x86_32_regsets[] __read_mostly = {
26834+static user_regset_no_const x86_32_regsets[] __read_only = {
26835 [REGSET_GENERAL] = {
26836 .core_note_type = NT_PRSTATUS,
26837 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26838@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26839 */
26840 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26841
26842-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26843+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26844 {
26845 #ifdef CONFIG_X86_64
26846 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26847@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26848 memset(info, 0, sizeof(*info));
26849 info->si_signo = SIGTRAP;
26850 info->si_code = si_code;
26851- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26852+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26853 }
26854
26855 void user_single_step_siginfo(struct task_struct *tsk,
26856@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26857 }
26858 }
26859
26860+#ifdef CONFIG_GRKERNSEC_SETXID
26861+extern void gr_delayed_cred_worker(void);
26862+#endif
26863+
26864 /*
26865 * We can return 0 to resume the syscall or anything else to go to phase
26866 * 2. If we resume the syscall, we need to put something appropriate in
26867@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26868
26869 BUG_ON(regs != task_pt_regs(current));
26870
26871+#ifdef CONFIG_GRKERNSEC_SETXID
26872+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26873+ gr_delayed_cred_worker();
26874+#endif
26875+
26876 /*
26877 * If we stepped into a sysenter/syscall insn, it trapped in
26878 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26879@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26880 */
26881 user_exit();
26882
26883+#ifdef CONFIG_GRKERNSEC_SETXID
26884+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26885+ gr_delayed_cred_worker();
26886+#endif
26887+
26888 audit_syscall_exit(regs);
26889
26890 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26891diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26892index 2f355d2..e75ed0a 100644
26893--- a/arch/x86/kernel/pvclock.c
26894+++ b/arch/x86/kernel/pvclock.c
26895@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26896 reset_hung_task_detector();
26897 }
26898
26899-static atomic64_t last_value = ATOMIC64_INIT(0);
26900+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26901
26902 void pvclock_resume(void)
26903 {
26904- atomic64_set(&last_value, 0);
26905+ atomic64_set_unchecked(&last_value, 0);
26906 }
26907
26908 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26909@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26910 * updating at the same time, and one of them could be slightly behind,
26911 * making the assumption that last_value always goes forward fail to hold.
26912 */
26913- last = atomic64_read(&last_value);
26914+ last = atomic64_read_unchecked(&last_value);
26915 do {
26916 if (ret < last)
26917 return last;
26918- last = atomic64_cmpxchg(&last_value, last, ret);
26919+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26920 } while (unlikely(last != ret));
26921
26922 return ret;
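
The cmpxchg loop above serializes concurrent readers so the returned clock never steps backwards even when vCPUs disagree slightly; the patch merely switches it to PaX's _unchecked atomics, which opt this counter out of REFCOUNT overflow instrumentation. The same monotonic-read pattern, sketched with C11 atomics in user space:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t last_value;   /* analogue of pvclock's last_value */

    /* Return a value that never decreases across callers: publish our
     * sample unless someone already published a later one. */
    static uint64_t monotonic_read(uint64_t sample)
    {
        uint64_t last = atomic_load(&last_value);
        do {
            if (sample < last)
                return last;        /* another CPU already went further */
        } while (!atomic_compare_exchange_weak(&last_value, &last, sample));
        return sample;
    }
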
26923diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26924index 86db4bc..a50a54a 100644
26925--- a/arch/x86/kernel/reboot.c
26926+++ b/arch/x86/kernel/reboot.c
26927@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26928
26929 void __noreturn machine_real_restart(unsigned int type)
26930 {
26931+
26932+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26933+ struct desc_struct *gdt;
26934+#endif
26935+
26936 local_irq_disable();
26937
26938 /*
26939@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26940
26941 /* Jump to the identity-mapped low memory code */
26942 #ifdef CONFIG_X86_32
26943- asm volatile("jmpl *%0" : :
26944+
26945+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26946+ gdt = get_cpu_gdt_table(smp_processor_id());
26947+ pax_open_kernel();
26948+#ifdef CONFIG_PAX_MEMORY_UDEREF
26949+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26950+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26951+ loadsegment(ds, __KERNEL_DS);
26952+ loadsegment(es, __KERNEL_DS);
26953+ loadsegment(ss, __KERNEL_DS);
26954+#endif
26955+#ifdef CONFIG_PAX_KERNEXEC
26956+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26957+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26958+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26959+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26960+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26961+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26962+#endif
26963+ pax_close_kernel();
26964+#endif
26965+
26966+ asm volatile("ljmpl *%0" : :
26967 "rm" (real_mode_header->machine_real_restart_asm),
26968 "a" (type));
26969 #else
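
Before jumping to the identity-mapped real-mode trampoline, the added code widens the kernel code/data descriptors that KERNEXEC/UDEREF had narrowed: base 0 and a 20-bit limit of 0xFFFFF with the granularity bit set cover the full 4GiB again. A user-space replica of the 8-byte descriptor being poked (field order matches the kernel's desc_struct bitfields):

    #include <stdint.h>

    struct seg_desc {
        uint16_t limit0;
        uint16_t base0;
        uint8_t  base1;
        uint8_t  type : 4, s : 1, dpl : 2, p : 1;
        uint8_t  limit : 4, avl : 1, l : 1, d : 1, g : 1;
        uint8_t  base2;
    } __attribute__((packed));

    /* Widen a descriptor back to flat 0..4GiB: base 0, 20-bit limit
     * 0xFFFFF, 4KiB granularity. */
    static void make_flat(struct seg_desc *d)
    {
        d->base0 = 0; d->base1 = 0; d->base2 = 0;
        d->limit0 = 0xffff;
        d->limit  = 0xf;
        d->g      = 1;
    }
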
26970@@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
26971 /*
26972 * This is a single dmi_table handling all reboot quirks.
26973 */
26974-static struct dmi_system_id __initdata reboot_dmi_table[] = {
26975+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
26976
26977 /* Acer */
26978 { /* Handle reboot issue on Acer Aspire one */
26979@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26980 * This means that this function can never return, it can misbehave
26981 * by not rebooting properly and hanging.
26982 */
26983-static void native_machine_emergency_restart(void)
26984+static void __noreturn native_machine_emergency_restart(void)
26985 {
26986 int i;
26987 int attempt = 0;
26988@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
26989 #endif
26990 }
26991
26992-static void __machine_emergency_restart(int emergency)
26993+static void __noreturn __machine_emergency_restart(int emergency)
26994 {
26995 reboot_emergency = emergency;
26996 machine_ops.emergency_restart();
26997 }
26998
26999-static void native_machine_restart(char *__unused)
27000+static void __noreturn native_machine_restart(char *__unused)
27001 {
27002 pr_notice("machine restart\n");
27003
27004@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27005 __machine_emergency_restart(0);
27006 }
27007
27008-static void native_machine_halt(void)
27009+static void __noreturn native_machine_halt(void)
27010 {
27011 /* Stop other cpus and apics */
27012 machine_shutdown();
27013@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27014 stop_this_cpu(NULL);
27015 }
27016
27017-static void native_machine_power_off(void)
27018+static void __noreturn native_machine_power_off(void)
27019 {
27020 if (pm_power_off) {
27021 if (!reboot_force)
27022@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27023 }
27024 /* A fallback in case there is no PM info available */
27025 tboot_shutdown(TB_SHUTDOWN_HALT);
27026+ unreachable();
27027 }
27028
27029-struct machine_ops machine_ops = {
27030+struct machine_ops machine_ops __read_only = {
27031 .power_off = native_machine_power_off,
27032 .shutdown = native_machine_shutdown,
27033 .emergency_restart = native_machine_emergency_restart,
27034diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27035index c8e41e9..64049ef 100644
27036--- a/arch/x86/kernel/reboot_fixups_32.c
27037+++ b/arch/x86/kernel/reboot_fixups_32.c
27038@@ -57,7 +57,7 @@ struct device_fixup {
27039 unsigned int vendor;
27040 unsigned int device;
27041 void (*reboot_fixup)(struct pci_dev *);
27042-};
27043+} __do_const;
27044
27045 /*
27046 * PCI ids solely used for fixups_table go here
27047diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27048index 3fd2c69..a444264 100644
27049--- a/arch/x86/kernel/relocate_kernel_64.S
27050+++ b/arch/x86/kernel/relocate_kernel_64.S
27051@@ -96,8 +96,7 @@ relocate_kernel:
27052
27053 /* jump to identity mapped page */
27054 addq $(identity_mapped - relocate_kernel), %r8
27055- pushq %r8
27056- ret
27057+ jmp *%r8
27058
27059 identity_mapped:
27060 /* set return address to 0 if not preserving context */
27061diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27062index 0a2421c..11f3f36 100644
27063--- a/arch/x86/kernel/setup.c
27064+++ b/arch/x86/kernel/setup.c
27065@@ -111,6 +111,7 @@
27066 #include <asm/mce.h>
27067 #include <asm/alternative.h>
27068 #include <asm/prom.h>
27069+#include <asm/boot.h>
27070
27071 /*
27072 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27073@@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27074 #endif
27075
27076
27077-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27078-__visible unsigned long mmu_cr4_features;
27079+#ifdef CONFIG_X86_64
27080+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27081+#elif defined(CONFIG_X86_PAE)
27082+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27083 #else
27084-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27085+__visible unsigned long mmu_cr4_features __read_only;
27086 #endif
27087
27088 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27089@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27090 * area (640->1Mb) as ram even though it is not.
27091 * take them out.
27092 */
27093- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27094+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27095
27096 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27097 }
27098@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27099 /* called before trim_bios_range() to spare extra sanitize */
27100 static void __init e820_add_kernel_range(void)
27101 {
27102- u64 start = __pa_symbol(_text);
27103+ u64 start = __pa_symbol(ktla_ktva(_text));
27104 u64 size = __pa_symbol(_end) - start;
27105
27106 /*
27107@@ -855,8 +858,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27108
27109 void __init setup_arch(char **cmdline_p)
27110 {
27111+#ifdef CONFIG_X86_32
27112+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27113+#else
27114 memblock_reserve(__pa_symbol(_text),
27115 (unsigned long)__bss_stop - (unsigned long)_text);
27116+#endif
27117
27118 early_reserve_initrd();
27119
27120@@ -954,16 +961,16 @@ void __init setup_arch(char **cmdline_p)
27121
27122 if (!boot_params.hdr.root_flags)
27123 root_mountflags &= ~MS_RDONLY;
27124- init_mm.start_code = (unsigned long) _text;
27125- init_mm.end_code = (unsigned long) _etext;
27126+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27127+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27128 init_mm.end_data = (unsigned long) _edata;
27129 init_mm.brk = _brk_end;
27130
27131 mpx_mm_init(&init_mm);
27132
27133- code_resource.start = __pa_symbol(_text);
27134- code_resource.end = __pa_symbol(_etext)-1;
27135- data_resource.start = __pa_symbol(_etext);
27136+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27137+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27138+ data_resource.start = __pa_symbol(_sdata);
27139 data_resource.end = __pa_symbol(_edata)-1;
27140 bss_resource.start = __pa_symbol(__bss_start);
27141 bss_resource.end = __pa_symbol(__bss_stop)-1;
27142diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27143index e4fcb87..9c06c55 100644
27144--- a/arch/x86/kernel/setup_percpu.c
27145+++ b/arch/x86/kernel/setup_percpu.c
27146@@ -21,19 +21,17 @@
27147 #include <asm/cpu.h>
27148 #include <asm/stackprotector.h>
27149
27150-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27151+#ifdef CONFIG_SMP
27152+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27153 EXPORT_PER_CPU_SYMBOL(cpu_number);
27154+#endif
27155
27156-#ifdef CONFIG_X86_64
27157 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27158-#else
27159-#define BOOT_PERCPU_OFFSET 0
27160-#endif
27161
27162 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27163 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27164
27165-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27166+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27167 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27168 };
27169 EXPORT_SYMBOL(__per_cpu_offset);
27170@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27171 {
27172 #ifdef CONFIG_NEED_MULTIPLE_NODES
27173 pg_data_t *last = NULL;
27174- unsigned int cpu;
27175+ int cpu;
27176
27177 for_each_possible_cpu(cpu) {
27178 int node = early_cpu_to_node(cpu);
27179@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27180 {
27181 #ifdef CONFIG_X86_32
27182 struct desc_struct gdt;
27183+ unsigned long base = per_cpu_offset(cpu);
27184
27185- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27186- 0x2 | DESCTYPE_S, 0x8);
27187- gdt.s = 1;
27188+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27189+ 0x83 | DESCTYPE_S, 0xC);
27190 write_gdt_entry(get_cpu_gdt_table(cpu),
27191 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27192 #endif
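
The repacked per-cpu segment descriptor no longer spans all 4GiB; its page-granular limit ends at VMALLOC_END, so a wild offset applied to the per-cpu base faults instead of silently reaching another region. The limit arithmetic, extracted (VMALLOC_END value assumed, illustrative only):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define VMALLOC_END 0xff800000UL   /* assumed i386 value, illustrative */

    /* Page-granular limit covering [base, VMALLOC_END): the descriptor
     * limit counts 4KiB units minus one. */
    static unsigned long percpu_seg_limit(unsigned long base)
    {
        return (VMALLOC_END - base - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        printf("limit for base 0xc2000000: %#lx pages\n",
               percpu_seg_limit(0xc2000000UL));
        return 0;
    }
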
27193@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27194 /* alrighty, percpu areas up and running */
27195 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27196 for_each_possible_cpu(cpu) {
27197+#ifdef CONFIG_CC_STACKPROTECTOR
27198+#ifdef CONFIG_X86_32
27199+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27200+#endif
27201+#endif
27202 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27203 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27204 per_cpu(cpu_number, cpu) = cpu;
27205@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27206 */
27207 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27208 #endif
27209+#ifdef CONFIG_CC_STACKPROTECTOR
27210+#ifdef CONFIG_X86_32
27211+ if (!cpu)
27212+ per_cpu(stack_canary.canary, cpu) = canary;
27213+#endif
27214+#endif
27215 /*
27216 * Up to this point, the boot CPU has been using .init.data
27217 * area. Reload any changed state for the boot CPU.
27218diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27219index e504246..ba10432 100644
27220--- a/arch/x86/kernel/signal.c
27221+++ b/arch/x86/kernel/signal.c
27222@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27223 * Align the stack pointer according to the i386 ABI,
27224 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27225 */
27226- sp = ((sp + 4) & -16ul) - 4;
27227+ sp = ((sp - 12) & -16ul) - 4;
27228 #else /* !CONFIG_X86_32 */
27229 sp = round_down(sp, 16) - 8;
27230 #endif
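
Both the old and new expressions satisfy the i386 ABI rule quoted in the comment, that ((sp + 4) & 15) == 0 on function entry; the patched form additionally guarantees at least 16 bytes of clearance below the caller's sp. A quick self-check of both invariants:

    #include <assert.h>
    #include <stdio.h>

    static unsigned long align_sigframe_patched(unsigned long sp)
    {
        return ((sp - 12) & -16ul) - 4;
    }

    int main(void)
    {
        for (unsigned long sp = 0xbf000000UL; sp < 0xbf000020UL; sp++) {
            unsigned long a = align_sigframe_patched(sp);
            assert(((a + 4) & 15) == 0);   /* i386 ABI entry invariant */
            assert(sp - a >= 16);          /* clearance below caller's sp */
        }
        puts("alignment invariants hold");
        return 0;
    }
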
27231@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27232 }
27233
27234 if (current->mm->context.vdso)
27235- restorer = current->mm->context.vdso +
27236- selected_vdso32->sym___kernel_sigreturn;
27237+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27238 else
27239- restorer = &frame->retcode;
27240+ restorer = (void __user *)&frame->retcode;
27241 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27242 restorer = ksig->ka.sa.sa_restorer;
27243
27244@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27245 * reasons and because gdb uses it as a signature to notice
27246 * signal handler stack frames.
27247 */
27248- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27249+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27250
27251 if (err)
27252 return -EFAULT;
27253@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27254 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27255
27256 /* Set up to return from userspace. */
27257- restorer = current->mm->context.vdso +
27258- selected_vdso32->sym___kernel_rt_sigreturn;
27259+ if (current->mm->context.vdso)
27260+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27261+ else
27262+ restorer = (void __user *)&frame->retcode;
27263 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27264 restorer = ksig->ka.sa.sa_restorer;
27265 put_user_ex(restorer, &frame->pretcode);
27266@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27267 * reasons and because gdb uses it as a signature to notice
27268 * signal handler stack frames.
27269 */
27270- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27271+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27272 } put_user_catch(err);
27273
27274 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27275@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27276 {
27277 int usig = signr_convert(ksig->sig);
27278 sigset_t *set = sigmask_to_save();
27279- compat_sigset_t *cset = (compat_sigset_t *) set;
27280+ sigset_t sigcopy;
27281+ compat_sigset_t *cset;
27282+
27283+ sigcopy = *set;
27284+
27285+ cset = (compat_sigset_t *) &sigcopy;
27286
27287 /* Set up the stack frame */
27288 if (is_ia32_frame()) {
27289@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27290 } else if (is_x32_frame()) {
27291 return x32_setup_rt_frame(ksig, cset, regs);
27292 } else {
27293- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27294+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27295 }
27296 }
27297
27298diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27299index be8e1bd..a3d93fa 100644
27300--- a/arch/x86/kernel/smp.c
27301+++ b/arch/x86/kernel/smp.c
27302@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27303
27304 __setup("nonmi_ipi", nonmi_ipi_setup);
27305
27306-struct smp_ops smp_ops = {
27307+struct smp_ops smp_ops __read_only = {
27308 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27309 .smp_prepare_cpus = native_smp_prepare_cpus,
27310 .smp_cpus_done = native_smp_cpus_done,
27311diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27312index febc6aa..37d8edf 100644
27313--- a/arch/x86/kernel/smpboot.c
27314+++ b/arch/x86/kernel/smpboot.c
27315@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27316
27317 enable_start_cpu0 = 0;
27318
27319-#ifdef CONFIG_X86_32
27320+ /* otherwise gcc will move smp_processor_id() up before cpu_init() */
27321+ barrier();
27322+
27323 /* switch away from the initial page table */
27324+#ifdef CONFIG_PAX_PER_CPU_PGD
27325+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27326+#else
27327 load_cr3(swapper_pg_dir);
27328+#endif
27329 __flush_tlb_all();
27330-#endif
27331
27332- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27333- barrier();
27334 /*
27335 * Check TSC synchronization with the BP:
27336 */
27337@@ -800,8 +803,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27338 alternatives_enable_smp();
27339
27340 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27341- (THREAD_SIZE + task_stack_page(idle))) - 1);
27342+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27343 per_cpu(current_task, cpu) = idle;
27344+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27345
27346 #ifdef CONFIG_X86_32
27347 /* Stack for startup_32 can be just as for start_secondary onwards */
27348@@ -810,10 +814,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27349 clear_tsk_thread_flag(idle, TIF_FORK);
27350 initial_gs = per_cpu_offset(cpu);
27351 #endif
27352- per_cpu(kernel_stack, cpu) =
27353- (unsigned long)task_stack_page(idle) -
27354- KERNEL_STACK_OFFSET + THREAD_SIZE;
27355+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27356+ pax_open_kernel();
27357 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27358+ pax_close_kernel();
27359 initial_code = (unsigned long)start_secondary;
27360 stack_start = idle->thread.sp;
27361
27362@@ -953,6 +957,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27363 /* the FPU context is blank, nobody can own it */
27364 __cpu_disable_lazy_restore(cpu);
27365
27366+#ifdef CONFIG_PAX_PER_CPU_PGD
27367+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27368+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27369+ KERNEL_PGD_PTRS);
27370+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27371+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27372+ KERNEL_PGD_PTRS);
27373+#endif
27374+
27375 err = do_boot_cpu(apicid, cpu, tidle);
27376 if (err) {
27377 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27378diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27379index 9b4d51d..5d28b58 100644
27380--- a/arch/x86/kernel/step.c
27381+++ b/arch/x86/kernel/step.c
27382@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27383 struct desc_struct *desc;
27384 unsigned long base;
27385
27386- seg &= ~7UL;
27387+ seg >>= 3;
27388
27389 mutex_lock(&child->mm->context.lock);
27390- if (unlikely((seg >> 3) >= child->mm->context.size))
27391+ if (unlikely(seg >= child->mm->context.size))
27392 addr = -1L; /* bogus selector, access would fault */
27393 else {
27394 desc = child->mm->context.ldt + seg;
27395@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27396 addr += base;
27397 }
27398 mutex_unlock(&child->mm->context.lock);
27399- }
27400+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27401+ addr = ktla_ktva(addr);
27402
27403 return addr;
27404 }
27405@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27406 unsigned char opcode[15];
27407 unsigned long addr = convert_ip_to_linear(child, regs);
27408
27409+ if (addr == -EINVAL)
27410+ return 0;
27411+
27412 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27413 for (i = 0; i < copied; i++) {
27414 switch (opcode[i]) {
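
convert_ip_to_linear() treats seg as a selector: bits 15..3 hold the descriptor index, bit 2 picks GDT vs LDT, bits 1..0 the RPL, so 'seg >>= 3' replaces the older mask-then-shift pair, and the new tail maps __KERNEL_CS/__KERNEXEC_KERNEL_CS addresses through ktla_ktva(). Selector decoding in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t sel = 0x0073;    /* i386 __USER_CS: index 14, GDT, RPL 3 */
        printf("index=%u table=%s rpl=%u\n",
               (unsigned)(sel >> 3),
               (sel & 4) ? "LDT" : "GDT",
               (unsigned)(sel & 3));
        return 0;
    }
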
27415diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27416new file mode 100644
27417index 0000000..5877189
27418--- /dev/null
27419+++ b/arch/x86/kernel/sys_i386_32.c
27420@@ -0,0 +1,189 @@
27421+/*
27422+ * This file contains various random system calls that
27423+ * have a non-standard calling sequence on the Linux/i386
27424+ * platform.
27425+ */
27426+
27427+#include <linux/errno.h>
27428+#include <linux/sched.h>
27429+#include <linux/mm.h>
27430+#include <linux/fs.h>
27431+#include <linux/smp.h>
27432+#include <linux/sem.h>
27433+#include <linux/msg.h>
27434+#include <linux/shm.h>
27435+#include <linux/stat.h>
27436+#include <linux/syscalls.h>
27437+#include <linux/mman.h>
27438+#include <linux/file.h>
27439+#include <linux/utsname.h>
27440+#include <linux/ipc.h>
27441+#include <linux/elf.h>
27442+
27443+#include <linux/uaccess.h>
27444+#include <linux/unistd.h>
27445+
27446+#include <asm/syscalls.h>
27447+
27448+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27449+{
27450+ unsigned long pax_task_size = TASK_SIZE;
27451+
27452+#ifdef CONFIG_PAX_SEGMEXEC
27453+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27454+ pax_task_size = SEGMEXEC_TASK_SIZE;
27455+#endif
27456+
27457+ if (flags & MAP_FIXED)
27458+ if (len > pax_task_size || addr > pax_task_size - len)
27459+ return -EINVAL;
27460+
27461+ return 0;
27462+}
27463+
27464+/*
27465+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27466+ */
27467+static unsigned long get_align_mask(void)
27468+{
27469+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27470+ return 0;
27471+
27472+ if (!(current->flags & PF_RANDOMIZE))
27473+ return 0;
27474+
27475+ return va_align.mask;
27476+}
27477+
27478+unsigned long
27479+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27480+ unsigned long len, unsigned long pgoff, unsigned long flags)
27481+{
27482+ struct mm_struct *mm = current->mm;
27483+ struct vm_area_struct *vma;
27484+ unsigned long pax_task_size = TASK_SIZE;
27485+ struct vm_unmapped_area_info info;
27486+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27487+
27488+#ifdef CONFIG_PAX_SEGMEXEC
27489+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27490+ pax_task_size = SEGMEXEC_TASK_SIZE;
27491+#endif
27492+
27493+ pax_task_size -= PAGE_SIZE;
27494+
27495+ if (len > pax_task_size)
27496+ return -ENOMEM;
27497+
27498+ if (flags & MAP_FIXED)
27499+ return addr;
27500+
27501+#ifdef CONFIG_PAX_RANDMMAP
27502+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27503+#endif
27504+
27505+ if (addr) {
27506+ addr = PAGE_ALIGN(addr);
27507+ if (pax_task_size - len >= addr) {
27508+ vma = find_vma(mm, addr);
27509+ if (check_heap_stack_gap(vma, addr, len, offset))
27510+ return addr;
27511+ }
27512+ }
27513+
27514+ info.flags = 0;
27515+ info.length = len;
27516+ info.align_mask = filp ? get_align_mask() : 0;
27517+ info.align_offset = pgoff << PAGE_SHIFT;
27518+ info.threadstack_offset = offset;
27519+
27520+#ifdef CONFIG_PAX_PAGEEXEC
27521+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27522+ info.low_limit = 0x00110000UL;
27523+ info.high_limit = mm->start_code;
27524+
27525+#ifdef CONFIG_PAX_RANDMMAP
27526+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27527+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27528+#endif
27529+
27530+ if (info.low_limit < info.high_limit) {
27531+ addr = vm_unmapped_area(&info);
27532+ if (!IS_ERR_VALUE(addr))
27533+ return addr;
27534+ }
27535+ } else
27536+#endif
27537+
27538+ info.low_limit = mm->mmap_base;
27539+ info.high_limit = pax_task_size;
27540+
27541+ return vm_unmapped_area(&info);
27542+}
27543+
27544+unsigned long
27545+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27546+ const unsigned long len, const unsigned long pgoff,
27547+ const unsigned long flags)
27548+{
27549+ struct vm_area_struct *vma;
27550+ struct mm_struct *mm = current->mm;
27551+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27552+ struct vm_unmapped_area_info info;
27553+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27554+
27555+#ifdef CONFIG_PAX_SEGMEXEC
27556+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27557+ pax_task_size = SEGMEXEC_TASK_SIZE;
27558+#endif
27559+
27560+ pax_task_size -= PAGE_SIZE;
27561+
27562+ /* requested length too big for entire address space */
27563+ if (len > pax_task_size)
27564+ return -ENOMEM;
27565+
27566+ if (flags & MAP_FIXED)
27567+ return addr;
27568+
27569+#ifdef CONFIG_PAX_PAGEEXEC
27570+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27571+ goto bottomup;
27572+#endif
27573+
27574+#ifdef CONFIG_PAX_RANDMMAP
27575+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27576+#endif
27577+
27578+ /* requesting a specific address */
27579+ if (addr) {
27580+ addr = PAGE_ALIGN(addr);
27581+ if (pax_task_size - len >= addr) {
27582+ vma = find_vma(mm, addr);
27583+ if (check_heap_stack_gap(vma, addr, len, offset))
27584+ return addr;
27585+ }
27586+ }
27587+
27588+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27589+ info.length = len;
27590+ info.low_limit = PAGE_SIZE;
27591+ info.high_limit = mm->mmap_base;
27592+ info.align_mask = filp ? get_align_mask() : 0;
27593+ info.align_offset = pgoff << PAGE_SHIFT;
27594+ info.threadstack_offset = offset;
27595+
27596+ addr = vm_unmapped_area(&info);
27597+ if (!(addr & ~PAGE_MASK))
27598+ return addr;
27599+ VM_BUG_ON(addr != -ENOMEM);
27600+
27601+bottomup:
27602+ /*
27603+ * A failed mmap() very likely causes application failure,
27604+ * so fall back to the bottom-up function here. This scenario
27605+ * can happen with large stack limits and large mmap()
27606+ * allocations.
27607+ */
27608+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27609+}
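
i386_mmap_check() validates MAP_FIXED requests against a task size that SEGMEXEC may have halved. Note the overflow-safe form: once len <= task_size is known, comparing addr against task_size - len cannot wrap, whereas the naive addr + len > task_size can. The same guard in isolation:

    #include <stdbool.h>

    /* Equivalent of the i386_mmap_check() test above: both the length
     * and the end address must stay inside [0, task_size], without
     * ever computing the wrappable sum addr + len. */
    static bool fixed_mapping_ok(unsigned long addr, unsigned long len,
                                 unsigned long task_size)
    {
        return len <= task_size && addr <= task_size - len;
    }
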
27610diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27611index 30277e2..5664a29 100644
27612--- a/arch/x86/kernel/sys_x86_64.c
27613+++ b/arch/x86/kernel/sys_x86_64.c
27614@@ -81,8 +81,8 @@ out:
27615 return error;
27616 }
27617
27618-static void find_start_end(unsigned long flags, unsigned long *begin,
27619- unsigned long *end)
27620+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27621+ unsigned long *begin, unsigned long *end)
27622 {
27623 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27624 unsigned long new_begin;
27625@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27626 *begin = new_begin;
27627 }
27628 } else {
27629- *begin = current->mm->mmap_legacy_base;
27630+ *begin = mm->mmap_legacy_base;
27631 *end = TASK_SIZE;
27632 }
27633 }
27634@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27635 struct vm_area_struct *vma;
27636 struct vm_unmapped_area_info info;
27637 unsigned long begin, end;
27638+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27639
27640 if (flags & MAP_FIXED)
27641 return addr;
27642
27643- find_start_end(flags, &begin, &end);
27644+ find_start_end(mm, flags, &begin, &end);
27645
27646 if (len > end)
27647 return -ENOMEM;
27648
27649+#ifdef CONFIG_PAX_RANDMMAP
27650+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27651+#endif
27652+
27653 if (addr) {
27654 addr = PAGE_ALIGN(addr);
27655 vma = find_vma(mm, addr);
27656- if (end - len >= addr &&
27657- (!vma || addr + len <= vma->vm_start))
27658+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27659 return addr;
27660 }
27661
27662@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27663 info.high_limit = end;
27664 info.align_mask = filp ? get_align_mask() : 0;
27665 info.align_offset = pgoff << PAGE_SHIFT;
27666+ info.threadstack_offset = offset;
27667 return vm_unmapped_area(&info);
27668 }
27669
27670@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27671 struct mm_struct *mm = current->mm;
27672 unsigned long addr = addr0;
27673 struct vm_unmapped_area_info info;
27674+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27675
27676 /* requested length too big for entire address space */
27677 if (len > TASK_SIZE)
27678@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27679 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27680 goto bottomup;
27681
27682+#ifdef CONFIG_PAX_RANDMMAP
27683+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27684+#endif
27685+
27686 /* requesting a specific address */
27687 if (addr) {
27688 addr = PAGE_ALIGN(addr);
27689 vma = find_vma(mm, addr);
27690- if (TASK_SIZE - len >= addr &&
27691- (!vma || addr + len <= vma->vm_start))
27692+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27693 return addr;
27694 }
27695
27696@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27697 info.high_limit = mm->mmap_base;
27698 info.align_mask = filp ? get_align_mask() : 0;
27699 info.align_offset = pgoff << PAGE_SHIFT;
27700+ info.threadstack_offset = offset;
27701 addr = vm_unmapped_area(&info);
27702 if (!(addr & ~PAGE_MASK))
27703 return addr;
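
Both allocators above replace the bare '!vma || addr + len <= vma->vm_start' test with check_heap_stack_gap(), which also enforces a gap below stack VMAs and the per-thread randomized offset. A minimal sketch of the core idea, with those extras assumed away and gap_ok() a hypothetical name:

    #include <stdbool.h>

    struct vma { unsigned long vm_start, vm_end; };

    /* Accept [addr, addr+len) only if it ends at least 'gap' bytes
     * short of the next mapping (or there is no next mapping). */
    static bool gap_ok(const struct vma *next, unsigned long addr,
                       unsigned long len, unsigned long gap)
    {
        if (!next)
            return true;
        return addr + len + gap <= next->vm_start;
    }
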
27704diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27705index 91a4496..bb87552 100644
27706--- a/arch/x86/kernel/tboot.c
27707+++ b/arch/x86/kernel/tboot.c
27708@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27709
27710 void tboot_shutdown(u32 shutdown_type)
27711 {
27712- void (*shutdown)(void);
27713+ void (* __noreturn shutdown)(void);
27714
27715 if (!tboot_enabled())
27716 return;
27717@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27718
27719 switch_to_tboot_pt();
27720
27721- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27722+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27723 shutdown();
27724
27725 /* should not reach here */
27726@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27727 return -ENODEV;
27728 }
27729
27730-static atomic_t ap_wfs_count;
27731+static atomic_unchecked_t ap_wfs_count;
27732
27733 static int tboot_wait_for_aps(int num_aps)
27734 {
27735@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27736 {
27737 switch (action) {
27738 case CPU_DYING:
27739- atomic_inc(&ap_wfs_count);
27740+ atomic_inc_unchecked(&ap_wfs_count);
27741 if (num_online_cpus() == 1)
27742- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27743+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27744 return NOTIFY_BAD;
27745 break;
27746 }
27747@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27748
27749 tboot_create_trampoline();
27750
27751- atomic_set(&ap_wfs_count, 0);
27752+ atomic_set_unchecked(&ap_wfs_count, 0);
27753 register_hotcpu_notifier(&tboot_cpu_notifier);
27754
27755 #ifdef CONFIG_DEBUG_FS
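
ap_wfs_count only tallies CPUs entering wait-for-SIPI, so the patch moves it to the _unchecked atomics that PAX_REFCOUNT leaves uninstrumented; a wrap here would be harmless, not a refcount bug. The same intent in C11, where unsigned wraparound is defined:

    #include <stdatomic.h>

    /* Wrap-tolerant tally; unsigned overflow is well defined, which is
     * the contract atomic_unchecked_t expresses in the kernel. */
    static _Atomic unsigned int ap_wfs_count;

    static void cpu_dying(void)
    {
        atomic_fetch_add(&ap_wfs_count, 1);
    }
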
27756diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27757index 25adc0e..1df4349 100644
27758--- a/arch/x86/kernel/time.c
27759+++ b/arch/x86/kernel/time.c
27760@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27761 {
27762 unsigned long pc = instruction_pointer(regs);
27763
27764- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27765+ if (!user_mode(regs) && in_lock_functions(pc)) {
27766 #ifdef CONFIG_FRAME_POINTER
27767- return *(unsigned long *)(regs->bp + sizeof(long));
27768+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27769 #else
27770 unsigned long *sp =
27771 (unsigned long *)kernel_stack_pointer(regs);
27772@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27773 * or above a saved flags. Eflags has bits 22-31 zero,
27774 * kernel addresses don't.
27775 */
27776+
27777+#ifdef CONFIG_PAX_KERNEXEC
27778+ return ktla_ktva(sp[0]);
27779+#else
27780 if (sp[0] >> 22)
27781 return sp[0];
27782 if (sp[1] >> 22)
27783 return sp[1];
27784 #endif
27785+
27786+#endif
27787 }
27788 return pc;
27789 }
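
The heuristic in the surrounding comment works because a saved EFLAGS always has bits 22..31 clear while i386 kernel text addresses do not; the new PAX_KERNEXEC branch skips the guessing and translates sp[0] through ktla_ktva(), since KERNEXEC shifts kernel text and saved return addresses need the same translation. The discriminator on its own:

    #include <stdio.h>

    /* A saved EFLAGS has bits 22..31 clear; i386 kernel text does not. */
    static int looks_like_kernel_text(unsigned long val)
    {
        return (val >> 22) != 0;
    }

    int main(void)
    {
        printf("%d\n", looks_like_kernel_text(0x00000246UL)); /* EFLAGS: 0 */
        printf("%d\n", looks_like_kernel_text(0xc10b2340UL)); /* text:   1 */
        return 0;
    }
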
27790diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27791index 7fc5e84..c6e445a 100644
27792--- a/arch/x86/kernel/tls.c
27793+++ b/arch/x86/kernel/tls.c
27794@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27795 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27796 return -EINVAL;
27797
27798+#ifdef CONFIG_PAX_SEGMEXEC
27799+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27800+ return -EINVAL;
27801+#endif
27802+
27803 set_tls_desc(p, idx, &info, 1);
27804
27805 return 0;
27806@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27807
27808 if (kbuf)
27809 info = kbuf;
27810- else if (__copy_from_user(infobuf, ubuf, count))
27811+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27812 return -EFAULT;
27813 else
27814 info = infobuf;
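
regset_tls_set() previously trusted count to bound the __copy_from_user() into a fixed on-stack infobuf; the added 'count > sizeof infobuf' test closes that. The pattern, generically (read_into() is a hypothetical name):

    #include <errno.h>
    #include <string.h>

    /* Never let a caller-supplied length drive a copy into a
     * fixed-size buffer. */
    static int read_into(char *dst, size_t dstsz, const char *src,
                         size_t count)
    {
        if (count > dstsz)
            return -EFAULT;
        memcpy(dst, src, count);
        return 0;
    }
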
27815diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27816index 1c113db..287b42e 100644
27817--- a/arch/x86/kernel/tracepoint.c
27818+++ b/arch/x86/kernel/tracepoint.c
27819@@ -9,11 +9,11 @@
27820 #include <linux/atomic.h>
27821
27822 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27823-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27824+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27825 (unsigned long) trace_idt_table };
27826
27827 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27828-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27829+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27830
27831 static int trace_irq_vector_refcount;
27832 static DEFINE_MUTEX(irq_vector_mutex);
27833diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27834index 4ff5d16..736e3e1 100644
27835--- a/arch/x86/kernel/traps.c
27836+++ b/arch/x86/kernel/traps.c
27837@@ -68,7 +68,7 @@
27838 #include <asm/proto.h>
27839
27840 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27841-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27842+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27843 #else
27844 #include <asm/processor-flags.h>
27845 #include <asm/setup.h>
27846@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27847 #endif
27848
27849 /* Must be page-aligned because the real IDT is used in a fixmap. */
27850-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27851+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27852
27853 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27854 EXPORT_SYMBOL_GPL(used_vectors);
27855@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
27856 {
27857 enum ctx_state prev_state;
27858
27859- if (user_mode_vm(regs)) {
27860+ if (user_mode(regs)) {
27861 /* Other than that, we're just an exception. */
27862 prev_state = exception_enter();
27863 } else {
27864@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27865 /* Must be before exception_exit. */
27866 preempt_count_sub(HARDIRQ_OFFSET);
27867
27868- if (user_mode_vm(regs))
27869+ if (user_mode(regs))
27870 return exception_exit(prev_state);
27871 else
27872 rcu_nmi_exit();
27873@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27874 *
27875 * IST exception handlers normally cannot schedule. As a special
27876 * exception, if the exception interrupted userspace code (i.e.
27877- * user_mode_vm(regs) would return true) and the exception was not
27878+ * user_mode(regs) would return true) and the exception was not
27879 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
27880 * begins a non-atomic section within an ist_enter()/ist_exit() region.
27881 * Callers are responsible for enabling interrupts themselves inside
27882@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27883 */
27884 void ist_begin_non_atomic(struct pt_regs *regs)
27885 {
27886- BUG_ON(!user_mode_vm(regs));
27887+ BUG_ON(!user_mode(regs));
27888
27889 /*
27890 * Sanity check: we need to be on the normal thread stack. This
27891@@ -191,11 +191,11 @@ void ist_end_non_atomic(void)
27892 }
27893
27894 static nokprobe_inline int
27895-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27896+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27897 struct pt_regs *regs, long error_code)
27898 {
27899 #ifdef CONFIG_X86_32
27900- if (regs->flags & X86_VM_MASK) {
27901+ if (v8086_mode(regs)) {
27902 /*
27903 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27904 * On nmi (interrupt 2), do_trap should not be called.
27905@@ -208,12 +208,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27906 return -1;
27907 }
27908 #endif
27909- if (!user_mode(regs)) {
27910+ if (!user_mode_novm(regs)) {
27911 if (!fixup_exception(regs)) {
27912 tsk->thread.error_code = error_code;
27913 tsk->thread.trap_nr = trapnr;
27914+
27915+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27916+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27917+ str = "PAX: suspicious stack segment fault";
27918+#endif
27919+
27920 die(str, regs, error_code);
27921 }
27922+
27923+#ifdef CONFIG_PAX_REFCOUNT
27924+ if (trapnr == X86_TRAP_OF)
27925+ pax_report_refcount_overflow(regs);
27926+#endif
27927+
27928 return 0;
27929 }
27930
27931@@ -252,7 +264,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27932 }
27933
27934 static void
27935-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27936+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27937 long error_code, siginfo_t *info)
27938 {
27939 struct task_struct *tsk = current;
27940@@ -276,7 +288,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27941 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27942 printk_ratelimit()) {
27943 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27944- tsk->comm, tsk->pid, str,
27945+ tsk->comm, task_pid_nr(tsk), str,
27946 regs->ip, regs->sp, error_code);
27947 print_vma_addr(" in ", regs->ip);
27948 pr_cont("\n");
27949@@ -358,6 +370,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27950 tsk->thread.error_code = error_code;
27951 tsk->thread.trap_nr = X86_TRAP_DF;
27952
27953+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27954+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27955+ die("grsec: kernel stack overflow detected", regs, error_code);
27956+#endif
27957+
27958 #ifdef CONFIG_DOUBLEFAULT
27959 df_debug(regs, error_code);
27960 #endif
27961@@ -384,7 +401,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27962 goto exit;
27963 conditional_sti(regs);
27964
27965- if (!user_mode_vm(regs))
27966+ if (!user_mode(regs))
27967 die("bounds", regs, error_code);
27968
27969 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
27970@@ -463,7 +480,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27971 conditional_sti(regs);
27972
27973 #ifdef CONFIG_X86_32
27974- if (regs->flags & X86_VM_MASK) {
27975+ if (v8086_mode(regs)) {
27976 local_irq_enable();
27977 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27978 goto exit;
27979@@ -471,18 +488,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27980 #endif
27981
27982 tsk = current;
27983- if (!user_mode(regs)) {
27984+ if (!user_mode_novm(regs)) {
27985 if (fixup_exception(regs))
27986 goto exit;
27987
27988 tsk->thread.error_code = error_code;
27989 tsk->thread.trap_nr = X86_TRAP_GP;
27990 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
27991- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
27992+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
27993+
27994+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27995+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
27996+ die("PAX: suspicious general protection fault", regs, error_code);
27997+ else
27998+#endif
27999+
28000 die("general protection fault", regs, error_code);
28001+ }
28002 goto exit;
28003 }
28004
28005+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28006+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28007+ struct mm_struct *mm = tsk->mm;
28008+ unsigned long limit;
28009+
28010+ down_write(&mm->mmap_sem);
28011+ limit = mm->context.user_cs_limit;
28012+ if (limit < TASK_SIZE) {
28013+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28014+ up_write(&mm->mmap_sem);
28015+ return;
28016+ }
28017+ up_write(&mm->mmap_sem);
28018+ }
28019+#endif
28020+
28021 tsk->thread.error_code = error_code;
28022 tsk->thread.trap_nr = X86_TRAP_GP;
28023
28024@@ -581,13 +622,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28025 container_of(task_pt_regs(current),
28026 struct bad_iret_stack, regs);
28027
28028+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28029+ new_stack = s;
28030+
28031 /* Copy the IRET target to the new stack. */
28032 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28033
28034 /* Copy the remainder of the stack from the current stack. */
28035 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28036
28037- BUG_ON(!user_mode_vm(&new_stack->regs));
28038+ BUG_ON(!user_mode(&new_stack->regs));
28039 return new_stack;
28040 }
28041 NOKPROBE_SYMBOL(fixup_bad_iret);
28042@@ -637,7 +681,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28043 * then it's very likely the result of an icebp/int01 trap.
28044 * User wants a sigtrap for that.
28045 */
28046- if (!dr6 && user_mode_vm(regs))
28047+ if (!dr6 && user_mode(regs))
28048 user_icebp = 1;
28049
28050 /* Catch kmemcheck conditions first of all! */
28051@@ -673,7 +717,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28052 /* It's safe to allow irq's after DR6 has been saved */
28053 preempt_conditional_sti(regs);
28054
28055- if (regs->flags & X86_VM_MASK) {
28056+ if (v8086_mode(regs)) {
28057 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28058 X86_TRAP_DB);
28059 preempt_conditional_cli(regs);
28060@@ -688,7 +732,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28061 * We already checked v86 mode above, so we can check for kernel mode
28062 * by just checking the CPL of CS.
28063 */
28064- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28065+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28066 tsk->thread.debugreg6 &= ~DR_STEP;
28067 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28068 regs->flags &= ~X86_EFLAGS_TF;
28069@@ -721,7 +765,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28070 return;
28071 conditional_sti(regs);
28072
28073- if (!user_mode_vm(regs))
28074+ if (!user_mode(regs))
28075 {
28076 if (!fixup_exception(regs)) {
28077 task->thread.error_code = error_code;
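
In Linux, tsk->stack is the lowest address of the stack allocation, so if a double fault arrives with %sp at most one page below it, the stack almost certainly overran into the guard area; for any in-stack %sp the unsigned subtraction wraps to a huge value and the test stays false. The GRKERNSEC_KSTACKOVERFLOW heuristic on its own:

    #include <stdbool.h>

    #define PAGE_SIZE 4096UL

    /* stack_lowest is the lowest address of the stack allocation; the
     * test only fires once %sp has descended past it. */
    static bool looks_like_stack_overflow(unsigned long stack_lowest,
                                          unsigned long sp)
    {
        return stack_lowest - sp <= PAGE_SIZE;
    }
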
28078diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28079index 5054497..139f8f8 100644
28080--- a/arch/x86/kernel/tsc.c
28081+++ b/arch/x86/kernel/tsc.c
28082@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28083 */
28084 smp_wmb();
28085
28086- ACCESS_ONCE(c2n->head) = data;
28087+ ACCESS_ONCE_RW(c2n->head) = data;
28088 }
28089
28090 /*
28091diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28092index 81f8adb0..fff670e 100644
28093--- a/arch/x86/kernel/uprobes.c
28094+++ b/arch/x86/kernel/uprobes.c
28095@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28096 int ret = NOTIFY_DONE;
28097
28098 /* We are only interested in userspace traps */
28099- if (regs && !user_mode_vm(regs))
28100+ if (regs && !user_mode(regs))
28101 return NOTIFY_DONE;
28102
28103 switch (val) {
28104@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28105
28106 if (nleft != rasize) {
28107 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28108- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28109+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28110
28111 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28112 }
28113diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28114index b9242ba..50c5edd 100644
28115--- a/arch/x86/kernel/verify_cpu.S
28116+++ b/arch/x86/kernel/verify_cpu.S
28117@@ -20,6 +20,7 @@
28118 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28119 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28120 * arch/x86/kernel/head_32.S: processor startup
28121+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28122 *
28123 * verify_cpu, returns the status of longmode and SSE in register %eax.
28124 * 0: Success 1: Failure
28125diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28126index e8edcf5..27f9344 100644
28127--- a/arch/x86/kernel/vm86_32.c
28128+++ b/arch/x86/kernel/vm86_32.c
28129@@ -44,6 +44,7 @@
28130 #include <linux/ptrace.h>
28131 #include <linux/audit.h>
28132 #include <linux/stddef.h>
28133+#include <linux/grsecurity.h>
28134
28135 #include <asm/uaccess.h>
28136 #include <asm/io.h>
28137@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28138 do_exit(SIGSEGV);
28139 }
28140
28141- tss = &per_cpu(init_tss, get_cpu());
28142+ tss = init_tss + get_cpu();
28143 current->thread.sp0 = current->thread.saved_sp0;
28144 current->thread.sysenter_cs = __KERNEL_CS;
28145 load_sp0(tss, &current->thread);
28146@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28147
28148 if (tsk->thread.saved_sp0)
28149 return -EPERM;
28150+
28151+#ifdef CONFIG_GRKERNSEC_VM86
28152+ if (!capable(CAP_SYS_RAWIO)) {
28153+ gr_handle_vm86();
28154+ return -EPERM;
28155+ }
28156+#endif
28157+
28158 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28159 offsetof(struct kernel_vm86_struct, vm86plus) -
28160 sizeof(info.regs));
28161@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28162 int tmp;
28163 struct vm86plus_struct __user *v86;
28164
28165+#ifdef CONFIG_GRKERNSEC_VM86
28166+ if (!capable(CAP_SYS_RAWIO)) {
28167+ gr_handle_vm86();
28168+ return -EPERM;
28169+ }
28170+#endif
28171+
28172 tsk = current;
28173 switch (cmd) {
28174 case VM86_REQUEST_IRQ:
28175@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28176 tsk->thread.saved_fs = info->regs32->fs;
28177 tsk->thread.saved_gs = get_user_gs(info->regs32);
28178
28179- tss = &per_cpu(init_tss, get_cpu());
28180+ tss = init_tss + get_cpu();
28181 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28182 if (cpu_has_sep)
28183 tsk->thread.sysenter_cs = 0;
28184@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28185 goto cannot_handle;
28186 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28187 goto cannot_handle;
28188- intr_ptr = (unsigned long __user *) (i << 2);
28189+ intr_ptr = (__force unsigned long __user *) (i << 2);
28190 if (get_user(segoffs, intr_ptr))
28191 goto cannot_handle;
28192 if ((segoffs >> 16) == BIOSSEG)
28193diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28194index 00bf300..129df8e 100644
28195--- a/arch/x86/kernel/vmlinux.lds.S
28196+++ b/arch/x86/kernel/vmlinux.lds.S
28197@@ -26,6 +26,13 @@
28198 #include <asm/page_types.h>
28199 #include <asm/cache.h>
28200 #include <asm/boot.h>
28201+#include <asm/segment.h>
28202+
28203+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28204+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28205+#else
28206+#define __KERNEL_TEXT_OFFSET 0
28207+#endif
28208
28209 #undef i386 /* in case the preprocessor is a 32bit one */
28210
28211@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28212
28213 PHDRS {
28214 text PT_LOAD FLAGS(5); /* R_E */
28215+#ifdef CONFIG_X86_32
28216+ module PT_LOAD FLAGS(5); /* R_E */
28217+#endif
28218+#ifdef CONFIG_XEN
28219+ rodata PT_LOAD FLAGS(5); /* R_E */
28220+#else
28221+ rodata PT_LOAD FLAGS(4); /* R__ */
28222+#endif
28223 data PT_LOAD FLAGS(6); /* RW_ */
28224-#ifdef CONFIG_X86_64
28225+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28226 #ifdef CONFIG_SMP
28227 percpu PT_LOAD FLAGS(6); /* RW_ */
28228 #endif
28229+ text.init PT_LOAD FLAGS(5); /* R_E */
28230+ text.exit PT_LOAD FLAGS(5); /* R_E */
28231 init PT_LOAD FLAGS(7); /* RWE */
28232-#endif
28233 note PT_NOTE FLAGS(0); /* ___ */
28234 }
28235
28236 SECTIONS
28237 {
28238 #ifdef CONFIG_X86_32
28239- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28240- phys_startup_32 = startup_32 - LOAD_OFFSET;
28241+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28242 #else
28243- . = __START_KERNEL;
28244- phys_startup_64 = startup_64 - LOAD_OFFSET;
28245+ . = __START_KERNEL;
28246 #endif
28247
28248 /* Text and read-only data */
28249- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28250- _text = .;
28251+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28252 /* bootstrapping code */
28253+#ifdef CONFIG_X86_32
28254+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28255+#else
28256+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28257+#endif
28258+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28259+ _text = .;
28260 HEAD_TEXT
28261 . = ALIGN(8);
28262 _stext = .;
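
The FLAGS() values in the PHDRS block are ELF p_flags permission bits (PF_X=1, PF_W=2, PF_R=4), so the reshuffle above is what actually separates W from X: rodata moves out of the catch-all segments into R__ (or R_E under Xen), and only the discardable init segment stays RWE. Decoding them:

    #include <stdio.h>

    enum { PF_X = 1, PF_W = 2, PF_R = 4 };   /* ELF p_flags bits */

    static void show(unsigned f)
    {
        printf("FLAGS(%u) = %c%c%c\n", f,
               (f & PF_R) ? 'R' : '_',
               (f & PF_W) ? 'W' : '_',
               (f & PF_X) ? 'E' : '_');
    }

    int main(void)
    {
        show(5);   /* text, module, text.init, text.exit */
        show(4);   /* rodata (non-Xen) */
        show(6);   /* data, init.begin, percpu */
        show(7);   /* init */
        return 0;
    }
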
28263@@ -104,13 +124,47 @@ SECTIONS
28264 IRQENTRY_TEXT
28265 *(.fixup)
28266 *(.gnu.warning)
28267- /* End of text section */
28268- _etext = .;
28269 } :text = 0x9090
28270
28271- NOTES :text :note
28272+ . += __KERNEL_TEXT_OFFSET;
28273
28274- EXCEPTION_TABLE(16) :text = 0x9090
28275+#ifdef CONFIG_X86_32
28276+ . = ALIGN(PAGE_SIZE);
28277+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28278+
28279+#ifdef CONFIG_PAX_KERNEXEC
28280+ MODULES_EXEC_VADDR = .;
28281+ BYTE(0)
28282+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28283+ . = ALIGN(HPAGE_SIZE) - 1;
28284+ MODULES_EXEC_END = .;
28285+#endif
28286+
28287+ } :module
28288+#endif
28289+
28290+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28291+ /* End of text section */
28292+ BYTE(0)
28293+ _etext = . - __KERNEL_TEXT_OFFSET;
28294+ }
28295+
28296+#ifdef CONFIG_X86_32
28297+ . = ALIGN(PAGE_SIZE);
28298+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28299+ . = ALIGN(PAGE_SIZE);
28300+ *(.empty_zero_page)
28301+ *(.initial_pg_fixmap)
28302+ *(.initial_pg_pmd)
28303+ *(.initial_page_table)
28304+ *(.swapper_pg_dir)
28305+ } :rodata
28306+#endif
28307+
28308+ . = ALIGN(PAGE_SIZE);
28309+ NOTES :rodata :note
28310+
28311+ EXCEPTION_TABLE(16) :rodata
28312
28313 #if defined(CONFIG_DEBUG_RODATA)
28314 /* .text should occupy whole number of pages */
28315@@ -122,16 +176,20 @@ SECTIONS
28316
28317 /* Data */
28318 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28319+
28320+#ifdef CONFIG_PAX_KERNEXEC
28321+ . = ALIGN(HPAGE_SIZE);
28322+#else
28323+ . = ALIGN(PAGE_SIZE);
28324+#endif
28325+
28326 /* Start of data section */
28327 _sdata = .;
28328
28329 /* init_task */
28330 INIT_TASK_DATA(THREAD_SIZE)
28331
28332-#ifdef CONFIG_X86_32
28333- /* 32 bit has nosave before _edata */
28334 NOSAVE_DATA
28335-#endif
28336
28337 PAGE_ALIGNED_DATA(PAGE_SIZE)
28338
28339@@ -174,12 +232,19 @@ SECTIONS
28340 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28341
28342 /* Init code and data - will be freed after init */
28343- . = ALIGN(PAGE_SIZE);
28344 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28345+ BYTE(0)
28346+
28347+#ifdef CONFIG_PAX_KERNEXEC
28348+ . = ALIGN(HPAGE_SIZE);
28349+#else
28350+ . = ALIGN(PAGE_SIZE);
28351+#endif
28352+
28353 __init_begin = .; /* paired with __init_end */
28354- }
28355+ } :init.begin
28356
28357-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28358+#ifdef CONFIG_SMP
28359 /*
28360 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28361 * output PHDR, so the next output section - .init.text - should
28362@@ -190,12 +255,27 @@ SECTIONS
28363 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28364 #endif
28365
28366- INIT_TEXT_SECTION(PAGE_SIZE)
28367-#ifdef CONFIG_X86_64
28368- :init
28369-#endif
28370+ . = ALIGN(PAGE_SIZE);
28371+ init_begin = .;
28372+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28373+ VMLINUX_SYMBOL(_sinittext) = .;
28374+ INIT_TEXT
28375+ . = ALIGN(PAGE_SIZE);
28376+ } :text.init
28377
28378- INIT_DATA_SECTION(16)
28379+ /*
28380+ * .exit.text is discarded at runtime, not link time, to deal with
28381+ * references from .altinstructions and .eh_frame
28382+ */
28383+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28384+ EXIT_TEXT
28385+ VMLINUX_SYMBOL(_einittext) = .;
28386+ . = ALIGN(16);
28387+ } :text.exit
28388+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28389+
28390+ . = ALIGN(PAGE_SIZE);
28391+ INIT_DATA_SECTION(16) :init
28392
28393 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28394 __x86_cpu_dev_start = .;
28395@@ -266,19 +346,12 @@ SECTIONS
28396 }
28397
28398 . = ALIGN(8);
28399- /*
28400- * .exit.text is discard at runtime, not link time, to deal with
28401- * references from .altinstructions and .eh_frame
28402- */
28403- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28404- EXIT_TEXT
28405- }
28406
28407 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28408 EXIT_DATA
28409 }
28410
28411-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28412+#ifndef CONFIG_SMP
28413 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28414 #endif
28415
28416@@ -297,16 +370,10 @@ SECTIONS
28417 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28418 __smp_locks = .;
28419 *(.smp_locks)
28420- . = ALIGN(PAGE_SIZE);
28421 __smp_locks_end = .;
28422+ . = ALIGN(PAGE_SIZE);
28423 }
28424
28425-#ifdef CONFIG_X86_64
28426- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28427- NOSAVE_DATA
28428- }
28429-#endif
28430-
28431 /* BSS */
28432 . = ALIGN(PAGE_SIZE);
28433 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28434@@ -322,6 +389,7 @@ SECTIONS
28435 __brk_base = .;
28436 . += 64 * 1024; /* 64k alignment slop space */
28437 *(.brk_reservation) /* areas brk users have reserved */
28438+ . = ALIGN(HPAGE_SIZE);
28439 __brk_limit = .;
28440 }
28441
28442@@ -348,13 +416,12 @@ SECTIONS
28443 * for the boot processor.
28444 */
28445 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28446-INIT_PER_CPU(gdt_page);
28447 INIT_PER_CPU(irq_stack_union);
28448
28449 /*
28450 * Build-time check on the image size:
28451 */
28452-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28453+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28454 "kernel image bigger than KERNEL_IMAGE_SIZE");
28455
28456 #ifdef CONFIG_SMP
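
The linker-script changes implement the 32-bit KERNEXEC layout: .text is emitted at a VMA lowered by __KERNEL_TEXT_OFFSET while its load address stays put, so the executable mapping of the kernel and its writable linear alias sit a constant distance apart, and a hugepage-aligned window (MODULES_EXEC_VADDR..MODULES_EXEC_END) is reserved for module code. Helpers convert between the two views by adding or subtracting that constant; a simplified model with an illustrative offset, not the exact PaX definitions:

#include <stdint.h>

#define KERNEL_TEXT_OFFSET 0x1000000UL  /* illustrative, cf. ____LOAD_PHYSICAL_ADDR */

/* writable linear alias -> executable text address (model of ktla_ktva) */
static inline uintptr_t text_va(uintptr_t linear)
{
        return linear + KERNEL_TEXT_OFFSET;
}

/* executable text address -> writable linear alias (model of ktva_ktla),
 * used when code must be patched through the writable mapping */
static inline uintptr_t linear_va(uintptr_t text)
{
        return text - KERNEL_TEXT_OFFSET;
}
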
28457diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28458index 2dcc6ff..082dc7a 100644
28459--- a/arch/x86/kernel/vsyscall_64.c
28460+++ b/arch/x86/kernel/vsyscall_64.c
28461@@ -38,15 +38,13 @@
28462 #define CREATE_TRACE_POINTS
28463 #include "vsyscall_trace.h"
28464
28465-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28466+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28467
28468 static int __init vsyscall_setup(char *str)
28469 {
28470 if (str) {
28471 if (!strcmp("emulate", str))
28472 vsyscall_mode = EMULATE;
28473- else if (!strcmp("native", str))
28474- vsyscall_mode = NATIVE;
28475 else if (!strcmp("none", str))
28476 vsyscall_mode = NONE;
28477 else
28478@@ -264,8 +262,7 @@ do_ret:
28479 return true;
28480
28481 sigsegv:
28482- force_sig(SIGSEGV, current);
28483- return true;
28484+ do_group_exit(SIGKILL);
28485 }
28486
28487 /*
28488@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28489 static struct vm_area_struct gate_vma = {
28490 .vm_start = VSYSCALL_ADDR,
28491 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28492- .vm_page_prot = PAGE_READONLY_EXEC,
28493- .vm_flags = VM_READ | VM_EXEC,
28494+ .vm_page_prot = PAGE_READONLY,
28495+ .vm_flags = VM_READ,
28496 .vm_ops = &gate_vma_ops,
28497 };
28498
28499@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28500 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28501
28502 if (vsyscall_mode != NONE)
28503- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28504- vsyscall_mode == NATIVE
28505- ? PAGE_KERNEL_VSYSCALL
28506- : PAGE_KERNEL_VVAR);
28507+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28508
28509 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28510 (unsigned long)VSYSCALL_ADDR);
28511diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28512index 37d8fa4..66e319a 100644
28513--- a/arch/x86/kernel/x8664_ksyms_64.c
28514+++ b/arch/x86/kernel/x8664_ksyms_64.c
28515@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28516 EXPORT_SYMBOL(copy_user_generic_unrolled);
28517 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28518 EXPORT_SYMBOL(__copy_user_nocache);
28519-EXPORT_SYMBOL(_copy_from_user);
28520-EXPORT_SYMBOL(_copy_to_user);
28521
28522 EXPORT_SYMBOL(copy_page);
28523 EXPORT_SYMBOL(clear_page);
28524@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28525 EXPORT_SYMBOL(___preempt_schedule_context);
28526 #endif
28527 #endif
28528+
28529+#ifdef CONFIG_PAX_PER_CPU_PGD
28530+EXPORT_SYMBOL(cpu_pgd);
28531+#endif
28532diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28533index 234b072..b7ab191 100644
28534--- a/arch/x86/kernel/x86_init.c
28535+++ b/arch/x86/kernel/x86_init.c
28536@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28537 static void default_nmi_init(void) { };
28538 static int default_i8042_detect(void) { return 1; };
28539
28540-struct x86_platform_ops x86_platform = {
28541+struct x86_platform_ops x86_platform __read_only = {
28542 .calibrate_tsc = native_calibrate_tsc,
28543 .get_wallclock = mach_get_cmos_time,
28544 .set_wallclock = mach_set_rtc_mmss,
28545@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28546 EXPORT_SYMBOL_GPL(x86_platform);
28547
28548 #if defined(CONFIG_PCI_MSI)
28549-struct x86_msi_ops x86_msi = {
28550+struct x86_msi_ops x86_msi __read_only = {
28551 .setup_msi_irqs = native_setup_msi_irqs,
28552 .compose_msi_msg = native_compose_msi_msg,
28553 .teardown_msi_irq = native_teardown_msi_irq,
28554@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28555 }
28556 #endif
28557
28558-struct x86_io_apic_ops x86_io_apic_ops = {
28559+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28560 .init = native_io_apic_init_mappings,
28561 .read = native_io_apic_read,
28562 .write = native_io_apic_write,
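
All three hunks apply the same hardening: tables of function pointers that are only written during setup are tagged __read_only, so they land in a page that is write-protected once init finishes and stop being useful arbitrary-write targets. A sketch of the annotation with a hypothetical section name; the real patch wires the section into the linker script and protects it after boot:

#define __read_only __attribute__((__section__(".data..read_only")))

struct platform_ops {
        void (*restart)(void);
};

static void native_restart(void)
{
        /* hardware-specific reset would go here */
}

/* initialized at build time, never legitimately written after init */
struct platform_ops example_platform __read_only = {
        .restart = native_restart,
};
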
28563diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28564index cdc6cf9..e04f495 100644
28565--- a/arch/x86/kernel/xsave.c
28566+++ b/arch/x86/kernel/xsave.c
28567@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28568
28569 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28570 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28571- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28572+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28573
28574 if (!use_xsave())
28575 return err;
28576
28577- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28578+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28579
28580 /*
28581 * Read the xstate_bv which we copied (directly from the cpu or
28582 * from the state in task struct) to the user buffers.
28583 */
28584- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28585+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28586
28587 /*
28588 * For legacy compatible, we always set FP/SSE bits in the bit
28589@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28590 */
28591 xstate_bv |= XSTATE_FPSSE;
28592
28593- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28594+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28595
28596 return err;
28597 }
28598@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28599 {
28600 int err;
28601
28602+ buf = (struct xsave_struct __user *)____m(buf);
28603 if (use_xsave())
28604 err = xsave_user(buf);
28605 else if (use_fxsr())
28606@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28607 */
28608 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28609 {
28610+ buf = (void __user *)____m(buf);
28611 if (use_xsave()) {
28612 if ((unsigned long)buf % 64 || fx_only) {
28613 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28614diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28615index 8a80737..bac4961 100644
28616--- a/arch/x86/kvm/cpuid.c
28617+++ b/arch/x86/kvm/cpuid.c
28618@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28619 struct kvm_cpuid2 *cpuid,
28620 struct kvm_cpuid_entry2 __user *entries)
28621 {
28622- int r;
28623+ int r, i;
28624
28625 r = -E2BIG;
28626 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28627 goto out;
28628 r = -EFAULT;
28629- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28630- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28631+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28632 goto out;
28633+ for (i = 0; i < cpuid->nent; ++i) {
28634+ struct kvm_cpuid_entry2 cpuid_entry;
28635+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28636+ goto out;
28637+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28638+ }
28639 vcpu->arch.cpuid_nent = cpuid->nent;
28640 kvm_apic_set_version(vcpu);
28641 kvm_x86_ops->cpuid_update(vcpu);
28642@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28643 struct kvm_cpuid2 *cpuid,
28644 struct kvm_cpuid_entry2 __user *entries)
28645 {
28646- int r;
28647+ int r, i;
28648
28649 r = -E2BIG;
28650 if (cpuid->nent < vcpu->arch.cpuid_nent)
28651 goto out;
28652 r = -EFAULT;
28653- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28654- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28655+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28656 goto out;
28657+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28658+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28659+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28660+ goto out;
28661+ }
28662 return 0;
28663
28664 out:
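
Both ioctl paths above switch from one bulk copy, sized by a user-supplied count, to a single access_ok() check followed by a per-entry copy through a stack temporary, so the fixed kernel array can never be over-read or over-written even if the count is inconsistent. A generic sketch of the pattern with hypothetical types (memcpy stands in for __copy_from_user):

#include <stddef.h>
#include <string.h>

struct entry { unsigned int function, index; };

static int copy_entries(struct entry *dst, size_t dst_cap,
                        const struct entry *src, size_t nent)
{
        size_t i;

        if (nent > dst_cap)             /* reject before touching any memory */
                return -1;
        for (i = 0; i < nent; i++) {
                struct entry tmp;

                memcpy(&tmp, &src[i], sizeof(tmp));     /* per-entry user copy */
                dst[i] = tmp;
        }
        return 0;
}
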
28665diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28666index 106c015..2db7161 100644
28667--- a/arch/x86/kvm/emulate.c
28668+++ b/arch/x86/kvm/emulate.c
28669@@ -3572,7 +3572,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28670 int cr = ctxt->modrm_reg;
28671 u64 efer = 0;
28672
28673- static u64 cr_reserved_bits[] = {
28674+ static const u64 cr_reserved_bits[] = {
28675 0xffffffff00000000ULL,
28676 0, 0, 0, /* CR3 checked later */
28677 CR4_RESERVED_BITS,
28678diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28679index 4ee827d..a14eff9 100644
28680--- a/arch/x86/kvm/lapic.c
28681+++ b/arch/x86/kvm/lapic.c
28682@@ -56,7 +56,7 @@
28683 #define APIC_BUS_CYCLE_NS 1
28684
28685 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28686-#define apic_debug(fmt, arg...)
28687+#define apic_debug(fmt, arg...) do {} while (0)
28688
28689 #define APIC_LVT_NUM 6
28690 /* 14 is the version for Xeon and Pentium 8.4.8*/
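
The apic_debug() change is pure macro hygiene: an empty expansion leaves a bare ';' behind and trips -Wempty-body in constructs like "if (x) apic_debug(...);", while do {} while (0) is a real statement that still demands its terminating semicolon. Standalone illustration:

#define apic_debug_old(fmt, arg...)
#define apic_debug_new(fmt, arg...) do {} while (0)

int main(void)
{
        int apic_enabled = 1;

        if (apic_enabled)
                apic_debug_old("enabled\n");    /* expands to an empty statement */

        if (apic_enabled)
                apic_debug_new("enabled\n");    /* expands to a proper statement */
        return 0;
}
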
28691diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28692index fd49c86..77e1aa0 100644
28693--- a/arch/x86/kvm/paging_tmpl.h
28694+++ b/arch/x86/kvm/paging_tmpl.h
28695@@ -343,7 +343,7 @@ retry_walk:
28696 if (unlikely(kvm_is_error_hva(host_addr)))
28697 goto error;
28698
28699- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28700+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28701 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28702 goto error;
28703 walker->ptep_user[walker->level - 1] = ptep_user;
28704diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28705index cc618c8..3f72f76 100644
28706--- a/arch/x86/kvm/svm.c
28707+++ b/arch/x86/kvm/svm.c
28708@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28709 int cpu = raw_smp_processor_id();
28710
28711 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28712+
28713+ pax_open_kernel();
28714 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28715+ pax_close_kernel();
28716+
28717 load_TR_desc();
28718 }
28719
28720@@ -3964,6 +3968,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28721 #endif
28722 #endif
28723
28724+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28725+ __set_fs(current_thread_info()->addr_limit);
28726+#endif
28727+
28728 reload_tss(vcpu);
28729
28730 local_irq_disable();
28731diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28732index ae4f6d3..7f5f59b 100644
28733--- a/arch/x86/kvm/vmx.c
28734+++ b/arch/x86/kvm/vmx.c
28735@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28736 #endif
28737 }
28738
28739-static void vmcs_clear_bits(unsigned long field, u32 mask)
28740+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28741 {
28742 vmcs_writel(field, vmcs_readl(field) & ~mask);
28743 }
28744
28745-static void vmcs_set_bits(unsigned long field, u32 mask)
28746+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28747 {
28748 vmcs_writel(field, vmcs_readl(field) | mask);
28749 }
28750@@ -1705,7 +1705,11 @@ static void reload_tss(void)
28751 struct desc_struct *descs;
28752
28753 descs = (void *)gdt->address;
28754+
28755+ pax_open_kernel();
28756 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28757+ pax_close_kernel();
28758+
28759 load_TR_desc();
28760 }
28761
28762@@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28763 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28764 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28765
28766+#ifdef CONFIG_PAX_PER_CPU_PGD
28767+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28768+#endif
28769+
28770 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28771 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28772 vmx->loaded_vmcs->cpu = cpu;
28773@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28774 * reads and returns guest's timestamp counter "register"
28775 * guest_tsc = host_tsc + tsc_offset -- 21.3
28776 */
28777-static u64 guest_read_tsc(void)
28778+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28779 {
28780 u64 host_tsc, tsc_offset;
28781
28782@@ -4458,7 +4466,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28783 unsigned long cr4;
28784
28785 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28786+
28787+#ifndef CONFIG_PAX_PER_CPU_PGD
28788 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28789+#endif
28790
28791 /* Save the most likely value for this task's CR4 in the VMCS. */
28792 cr4 = cr4_read_shadow();
28793@@ -4485,7 +4496,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28794 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28795 vmx->host_idt_base = dt.address;
28796
28797- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28798+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28799
28800 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28801 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28802@@ -6096,11 +6107,17 @@ static __init int hardware_setup(void)
28803 * page upon invalidation. No need to do anything if not
28804 * using the APIC_ACCESS_ADDR VMCS field.
28805 */
28806- if (!flexpriority_enabled)
28807- kvm_x86_ops->set_apic_access_page_addr = NULL;
28808+ if (!flexpriority_enabled) {
28809+ pax_open_kernel();
28810+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28811+ pax_close_kernel();
28812+ }
28813
28814- if (!cpu_has_vmx_tpr_shadow())
28815- kvm_x86_ops->update_cr8_intercept = NULL;
28816+ if (!cpu_has_vmx_tpr_shadow()) {
28817+ pax_open_kernel();
28818+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28819+ pax_close_kernel();
28820+ }
28821
28822 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28823 kvm_disable_largepages();
28824@@ -6111,14 +6128,16 @@ static __init int hardware_setup(void)
28825 if (!cpu_has_vmx_apicv())
28826 enable_apicv = 0;
28827
28828+ pax_open_kernel();
28829 if (enable_apicv)
28830- kvm_x86_ops->update_cr8_intercept = NULL;
28831+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28832 else {
28833- kvm_x86_ops->hwapic_irr_update = NULL;
28834- kvm_x86_ops->hwapic_isr_update = NULL;
28835- kvm_x86_ops->deliver_posted_interrupt = NULL;
28836- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28837+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28838+ *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
28839+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28840+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28841 }
28842+ pax_close_kernel();
28843
28844 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
28845 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
28846@@ -6171,10 +6190,12 @@ static __init int hardware_setup(void)
28847 enable_pml = 0;
28848
28849 if (!enable_pml) {
28850- kvm_x86_ops->slot_enable_log_dirty = NULL;
28851- kvm_x86_ops->slot_disable_log_dirty = NULL;
28852- kvm_x86_ops->flush_log_dirty = NULL;
28853- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28854+ pax_open_kernel();
28855+ *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
28856+ *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
28857+ *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
28858+ *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28859+ pax_close_kernel();
28860 }
28861
28862 return alloc_kvm_area();
28863@@ -8219,6 +8240,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28864 "jmp 2f \n\t"
28865 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28866 "2: "
28867+
28868+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28869+ "ljmp %[cs],$3f\n\t"
28870+ "3: "
28871+#endif
28872+
28873 /* Save guest registers, load host registers, keep flags */
28874 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28875 "pop %0 \n\t"
28876@@ -8271,6 +8298,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28877 #endif
28878 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28879 [wordsize]"i"(sizeof(ulong))
28880+
28881+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28882+ ,[cs]"i"(__KERNEL_CS)
28883+#endif
28884+
28885 : "cc", "memory"
28886 #ifdef CONFIG_X86_64
28887 , "rax", "rbx", "rdi", "rsi"
28888@@ -8284,7 +8316,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28889 if (debugctlmsr)
28890 update_debugctlmsr(debugctlmsr);
28891
28892-#ifndef CONFIG_X86_64
28893+#ifdef CONFIG_X86_32
28894 /*
28895 * The sysexit path does not restore ds/es, so we must set them to
28896 * a reasonable value ourselves.
28897@@ -8293,8 +8325,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28898 * may be executed in interrupt context, which saves and restore segments
28899 * around it, nullifying its effect.
28900 */
28901- loadsegment(ds, __USER_DS);
28902- loadsegment(es, __USER_DS);
28903+ loadsegment(ds, __KERNEL_DS);
28904+ loadsegment(es, __KERNEL_DS);
28905+ loadsegment(ss, __KERNEL_DS);
28906+
28907+#ifdef CONFIG_PAX_KERNEXEC
28908+ loadsegment(fs, __KERNEL_PERCPU);
28909+#endif
28910+
28911+#ifdef CONFIG_PAX_MEMORY_UDEREF
28912+ __set_fs(current_thread_info()->addr_limit);
28913+#endif
28914+
28915 #endif
28916
28917 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
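
The vmx.c hunks follow one pattern throughout: under the PaX constify plugin, ops tables such as kvm_x86_ops have const-qualified members and live in read-only memory, so the rare legitimate writes bracket themselves with pax_open_kernel()/pax_close_kernel() (which briefly lift write protection, e.g. via CR0.WP) and go through a *(void **)& cast to get past the const typing. A minimal model with stubbed helpers; in the kernel the object really sits in a write-protected page:

static inline void pax_open_kernel(void)
{
        /* real version: disable preemption and clear CR0.WP */
}

static inline void pax_close_kernel(void)
{
        /* real version: restore CR0.WP */
}

struct kvm_ops_model { void (*update_cr8_intercept)(void); };

static struct kvm_ops_model ops_model;

static void drop_unsupported_hook(void)
{
        pax_open_kernel();
        *(void **)&ops_model.update_cr8_intercept = NULL;  /* cf. the hunks above */
        pax_close_kernel();
}
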
28918diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28919index 32bf19e..c8de1b5 100644
28920--- a/arch/x86/kvm/x86.c
28921+++ b/arch/x86/kvm/x86.c
28922@@ -1897,8 +1897,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28923 {
28924 struct kvm *kvm = vcpu->kvm;
28925 int lm = is_long_mode(vcpu);
28926- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28927- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28928+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28929+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28930 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28931 : kvm->arch.xen_hvm_config.blob_size_32;
28932 u32 page_num = data & ~PAGE_MASK;
28933@@ -2835,6 +2835,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28934 if (n < msr_list.nmsrs)
28935 goto out;
28936 r = -EFAULT;
28937+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28938+ goto out;
28939 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28940 num_msrs_to_save * sizeof(u32)))
28941 goto out;
28942@@ -5739,7 +5741,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28943 };
28944 #endif
28945
28946-int kvm_arch_init(void *opaque)
28947+int kvm_arch_init(const void *opaque)
28948 {
28949 int r;
28950 struct kvm_x86_ops *ops = opaque;
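
The two-line addition to kvm_arch_dev_ioctl() is a defensive bounds re-check: the element count is validated against the backing array's capacity immediately before the copy-out, so a stale or corrupted num_msrs_to_save can never read past msrs_to_save. Generic sketch of the idiom (names hypothetical):

#include <stddef.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int msrs_to_save_model[64];
static size_t num_msrs_model;

static int export_msr_list(unsigned int *out, size_t out_cap)
{
        if (num_msrs_model > ARRAY_SIZE(msrs_to_save_model) ||
            num_msrs_model > out_cap)
                return -1;              /* refuse rather than over-read */
        memcpy(out, msrs_to_save_model, num_msrs_model * sizeof(out[0]));
        return 0;
}
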
28951diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28952index ac4453d..1f43bf3 100644
28953--- a/arch/x86/lguest/boot.c
28954+++ b/arch/x86/lguest/boot.c
28955@@ -1340,9 +1340,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28956 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28957 * Launcher to reboot us.
28958 */
28959-static void lguest_restart(char *reason)
28960+static __noreturn void lguest_restart(char *reason)
28961 {
28962 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28963+ BUG();
28964 }
28965
28966 /*G:050
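
Marking lguest_restart() __noreturn lets the compiler prune dead code after calls to it, but falling off the end of a noreturn function is undefined behaviour, so the patch backstops the hypercall with BUG(). A generic model (abort() stands in for BUG()):

#include <stdlib.h>

__attribute__((noreturn)) static void restart_model(const char *reason)
{
        (void)reason;
        /* the shutdown hypercall would go here and is expected not to return */
        abort();        /* stands in for BUG(): trap if it ever does */
}
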
28967diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28968index 00933d5..3a64af9 100644
28969--- a/arch/x86/lib/atomic64_386_32.S
28970+++ b/arch/x86/lib/atomic64_386_32.S
28971@@ -48,6 +48,10 @@ BEGIN(read)
28972 movl (v), %eax
28973 movl 4(v), %edx
28974 RET_ENDP
28975+BEGIN(read_unchecked)
28976+ movl (v), %eax
28977+ movl 4(v), %edx
28978+RET_ENDP
28979 #undef v
28980
28981 #define v %esi
28982@@ -55,6 +59,10 @@ BEGIN(set)
28983 movl %ebx, (v)
28984 movl %ecx, 4(v)
28985 RET_ENDP
28986+BEGIN(set_unchecked)
28987+ movl %ebx, (v)
28988+ movl %ecx, 4(v)
28989+RET_ENDP
28990 #undef v
28991
28992 #define v %esi
28993@@ -70,6 +78,20 @@ RET_ENDP
28994 BEGIN(add)
28995 addl %eax, (v)
28996 adcl %edx, 4(v)
28997+
28998+#ifdef CONFIG_PAX_REFCOUNT
28999+ jno 0f
29000+ subl %eax, (v)
29001+ sbbl %edx, 4(v)
29002+ int $4
29003+0:
29004+ _ASM_EXTABLE(0b, 0b)
29005+#endif
29006+
29007+RET_ENDP
29008+BEGIN(add_unchecked)
29009+ addl %eax, (v)
29010+ adcl %edx, 4(v)
29011 RET_ENDP
29012 #undef v
29013
29014@@ -77,6 +99,24 @@ RET_ENDP
29015 BEGIN(add_return)
29016 addl (v), %eax
29017 adcl 4(v), %edx
29018+
29019+#ifdef CONFIG_PAX_REFCOUNT
29020+ into
29021+1234:
29022+ _ASM_EXTABLE(1234b, 2f)
29023+#endif
29024+
29025+ movl %eax, (v)
29026+ movl %edx, 4(v)
29027+
29028+#ifdef CONFIG_PAX_REFCOUNT
29029+2:
29030+#endif
29031+
29032+RET_ENDP
29033+BEGIN(add_return_unchecked)
29034+ addl (v), %eax
29035+ adcl 4(v), %edx
29036 movl %eax, (v)
29037 movl %edx, 4(v)
29038 RET_ENDP
29039@@ -86,6 +126,20 @@ RET_ENDP
29040 BEGIN(sub)
29041 subl %eax, (v)
29042 sbbl %edx, 4(v)
29043+
29044+#ifdef CONFIG_PAX_REFCOUNT
29045+ jno 0f
29046+ addl %eax, (v)
29047+ adcl %edx, 4(v)
29048+ int $4
29049+0:
29050+ _ASM_EXTABLE(0b, 0b)
29051+#endif
29052+
29053+RET_ENDP
29054+BEGIN(sub_unchecked)
29055+ subl %eax, (v)
29056+ sbbl %edx, 4(v)
29057 RET_ENDP
29058 #undef v
29059
29060@@ -96,6 +150,27 @@ BEGIN(sub_return)
29061 sbbl $0, %edx
29062 addl (v), %eax
29063 adcl 4(v), %edx
29064+
29065+#ifdef CONFIG_PAX_REFCOUNT
29066+ into
29067+1234:
29068+ _ASM_EXTABLE(1234b, 2f)
29069+#endif
29070+
29071+ movl %eax, (v)
29072+ movl %edx, 4(v)
29073+
29074+#ifdef CONFIG_PAX_REFCOUNT
29075+2:
29076+#endif
29077+
29078+RET_ENDP
29079+BEGIN(sub_return_unchecked)
29080+ negl %edx
29081+ negl %eax
29082+ sbbl $0, %edx
29083+ addl (v), %eax
29084+ adcl 4(v), %edx
29085 movl %eax, (v)
29086 movl %edx, 4(v)
29087 RET_ENDP
29088@@ -105,6 +180,20 @@ RET_ENDP
29089 BEGIN(inc)
29090 addl $1, (v)
29091 adcl $0, 4(v)
29092+
29093+#ifdef CONFIG_PAX_REFCOUNT
29094+ jno 0f
29095+ subl $1, (v)
29096+ sbbl $0, 4(v)
29097+ int $4
29098+0:
29099+ _ASM_EXTABLE(0b, 0b)
29100+#endif
29101+
29102+RET_ENDP
29103+BEGIN(inc_unchecked)
29104+ addl $1, (v)
29105+ adcl $0, 4(v)
29106 RET_ENDP
29107 #undef v
29108
29109@@ -114,6 +203,26 @@ BEGIN(inc_return)
29110 movl 4(v), %edx
29111 addl $1, %eax
29112 adcl $0, %edx
29113+
29114+#ifdef CONFIG_PAX_REFCOUNT
29115+ into
29116+1234:
29117+ _ASM_EXTABLE(1234b, 2f)
29118+#endif
29119+
29120+ movl %eax, (v)
29121+ movl %edx, 4(v)
29122+
29123+#ifdef CONFIG_PAX_REFCOUNT
29124+2:
29125+#endif
29126+
29127+RET_ENDP
29128+BEGIN(inc_return_unchecked)
29129+ movl (v), %eax
29130+ movl 4(v), %edx
29131+ addl $1, %eax
29132+ adcl $0, %edx
29133 movl %eax, (v)
29134 movl %edx, 4(v)
29135 RET_ENDP
29136@@ -123,6 +232,20 @@ RET_ENDP
29137 BEGIN(dec)
29138 subl $1, (v)
29139 sbbl $0, 4(v)
29140+
29141+#ifdef CONFIG_PAX_REFCOUNT
29142+ jno 0f
29143+ addl $1, (v)
29144+ adcl $0, 4(v)
29145+ int $4
29146+0:
29147+ _ASM_EXTABLE(0b, 0b)
29148+#endif
29149+
29150+RET_ENDP
29151+BEGIN(dec_unchecked)
29152+ subl $1, (v)
29153+ sbbl $0, 4(v)
29154 RET_ENDP
29155 #undef v
29156
29157@@ -132,6 +255,26 @@ BEGIN(dec_return)
29158 movl 4(v), %edx
29159 subl $1, %eax
29160 sbbl $0, %edx
29161+
29162+#ifdef CONFIG_PAX_REFCOUNT
29163+ into
29164+1234:
29165+ _ASM_EXTABLE(1234b, 2f)
29166+#endif
29167+
29168+ movl %eax, (v)
29169+ movl %edx, 4(v)
29170+
29171+#ifdef CONFIG_PAX_REFCOUNT
29172+2:
29173+#endif
29174+
29175+RET_ENDP
29176+BEGIN(dec_return_unchecked)
29177+ movl (v), %eax
29178+ movl 4(v), %edx
29179+ subl $1, %eax
29180+ sbbl $0, %edx
29181 movl %eax, (v)
29182 movl %edx, 4(v)
29183 RET_ENDP
29184@@ -143,6 +286,13 @@ BEGIN(add_unless)
29185 adcl %edx, %edi
29186 addl (v), %eax
29187 adcl 4(v), %edx
29188+
29189+#ifdef CONFIG_PAX_REFCOUNT
29190+ into
29191+1234:
29192+ _ASM_EXTABLE(1234b, 2f)
29193+#endif
29194+
29195 cmpl %eax, %ecx
29196 je 3f
29197 1:
29198@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29199 1:
29200 addl $1, %eax
29201 adcl $0, %edx
29202+
29203+#ifdef CONFIG_PAX_REFCOUNT
29204+ into
29205+1234:
29206+ _ASM_EXTABLE(1234b, 2f)
29207+#endif
29208+
29209 movl %eax, (v)
29210 movl %edx, 4(v)
29211 movl $1, %eax
29212@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29213 movl 4(v), %edx
29214 subl $1, %eax
29215 sbbl $0, %edx
29216+
29217+#ifdef CONFIG_PAX_REFCOUNT
29218+ into
29219+1234:
29220+ _ASM_EXTABLE(1234b, 1f)
29221+#endif
29222+
29223 js 1f
29224 movl %eax, (v)
29225 movl %edx, 4(v)
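
All of the PAX_REFCOUNT instrumentation above has one shape: perform the 64-bit add/sub/inc/dec, and if the signed result overflowed (the jno / into paths), undo the operation and raise #OF (int $4), whose handler reports the overflow and kills the offending task; the *_unchecked variants keep plain wrap-around semantics for counters where that is intended. A C model of the checked path (the assembly performs the operation first and backs it out before trapping, but the net effect is the same):

#include <stdint.h>

static void report_refcount_overflow(void)
{
        /* the kernel's #OF handler logs the event and kills the task */
}

static void atomic64_add_checked(int64_t *v, int64_t i)
{
        int64_t res;

        if (__builtin_add_overflow(*v, i, &res)) {
                report_refcount_overflow();     /* counter keeps its old value */
                return;
        }
        *v = res;
}
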
29226diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29227index f5cc9eb..51fa319 100644
29228--- a/arch/x86/lib/atomic64_cx8_32.S
29229+++ b/arch/x86/lib/atomic64_cx8_32.S
29230@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29231 CFI_STARTPROC
29232
29233 read64 %ecx
29234+ pax_force_retaddr
29235 ret
29236 CFI_ENDPROC
29237 ENDPROC(atomic64_read_cx8)
29238
29239+ENTRY(atomic64_read_unchecked_cx8)
29240+ CFI_STARTPROC
29241+
29242+ read64 %ecx
29243+ pax_force_retaddr
29244+ ret
29245+ CFI_ENDPROC
29246+ENDPROC(atomic64_read_unchecked_cx8)
29247+
29248 ENTRY(atomic64_set_cx8)
29249 CFI_STARTPROC
29250
29251@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29252 cmpxchg8b (%esi)
29253 jne 1b
29254
29255+ pax_force_retaddr
29256 ret
29257 CFI_ENDPROC
29258 ENDPROC(atomic64_set_cx8)
29259
29260+ENTRY(atomic64_set_unchecked_cx8)
29261+ CFI_STARTPROC
29262+
29263+1:
29264+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29265+ * are atomic on 586 and newer */
29266+ cmpxchg8b (%esi)
29267+ jne 1b
29268+
29269+ pax_force_retaddr
29270+ ret
29271+ CFI_ENDPROC
29272+ENDPROC(atomic64_set_unchecked_cx8)
29273+
29274 ENTRY(atomic64_xchg_cx8)
29275 CFI_STARTPROC
29276
29277@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29278 cmpxchg8b (%esi)
29279 jne 1b
29280
29281+ pax_force_retaddr
29282 ret
29283 CFI_ENDPROC
29284 ENDPROC(atomic64_xchg_cx8)
29285
29286-.macro addsub_return func ins insc
29287-ENTRY(atomic64_\func\()_return_cx8)
29288+.macro addsub_return func ins insc unchecked=""
29289+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29290 CFI_STARTPROC
29291 SAVE ebp
29292 SAVE ebx
29293@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29294 movl %edx, %ecx
29295 \ins\()l %esi, %ebx
29296 \insc\()l %edi, %ecx
29297+
29298+.ifb \unchecked
29299+#ifdef CONFIG_PAX_REFCOUNT
29300+ into
29301+2:
29302+ _ASM_EXTABLE(2b, 3f)
29303+#endif
29304+.endif
29305+
29306 LOCK_PREFIX
29307 cmpxchg8b (%ebp)
29308 jne 1b
29309-
29310-10:
29311 movl %ebx, %eax
29312 movl %ecx, %edx
29313+
29314+.ifb \unchecked
29315+#ifdef CONFIG_PAX_REFCOUNT
29316+3:
29317+#endif
29318+.endif
29319+
29320 RESTORE edi
29321 RESTORE esi
29322 RESTORE ebx
29323 RESTORE ebp
29324+ pax_force_retaddr
29325 ret
29326 CFI_ENDPROC
29327-ENDPROC(atomic64_\func\()_return_cx8)
29328+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29329 .endm
29330
29331 addsub_return add add adc
29332 addsub_return sub sub sbb
29333+addsub_return add add adc _unchecked
29334+addsub_return sub sub sbb _unchecked
29335
29336-.macro incdec_return func ins insc
29337-ENTRY(atomic64_\func\()_return_cx8)
29338+.macro incdec_return func ins insc unchecked=""
29339+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29340 CFI_STARTPROC
29341 SAVE ebx
29342
29343@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29344 movl %edx, %ecx
29345 \ins\()l $1, %ebx
29346 \insc\()l $0, %ecx
29347+
29348+.ifb \unchecked
29349+#ifdef CONFIG_PAX_REFCOUNT
29350+ into
29351+2:
29352+ _ASM_EXTABLE(2b, 3f)
29353+#endif
29354+.endif
29355+
29356 LOCK_PREFIX
29357 cmpxchg8b (%esi)
29358 jne 1b
29359
29360-10:
29361 movl %ebx, %eax
29362 movl %ecx, %edx
29363+
29364+.ifb \unchecked
29365+#ifdef CONFIG_PAX_REFCOUNT
29366+3:
29367+#endif
29368+.endif
29369+
29370 RESTORE ebx
29371+ pax_force_retaddr
29372 ret
29373 CFI_ENDPROC
29374-ENDPROC(atomic64_\func\()_return_cx8)
29375+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29376 .endm
29377
29378 incdec_return inc add adc
29379 incdec_return dec sub sbb
29380+incdec_return inc add adc _unchecked
29381+incdec_return dec sub sbb _unchecked
29382
29383 ENTRY(atomic64_dec_if_positive_cx8)
29384 CFI_STARTPROC
29385@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29386 movl %edx, %ecx
29387 subl $1, %ebx
29388 sbb $0, %ecx
29389+
29390+#ifdef CONFIG_PAX_REFCOUNT
29391+ into
29392+1234:
29393+ _ASM_EXTABLE(1234b, 2f)
29394+#endif
29395+
29396 js 2f
29397 LOCK_PREFIX
29398 cmpxchg8b (%esi)
29399@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29400 movl %ebx, %eax
29401 movl %ecx, %edx
29402 RESTORE ebx
29403+ pax_force_retaddr
29404 ret
29405 CFI_ENDPROC
29406 ENDPROC(atomic64_dec_if_positive_cx8)
29407@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29408 movl %edx, %ecx
29409 addl %ebp, %ebx
29410 adcl %edi, %ecx
29411+
29412+#ifdef CONFIG_PAX_REFCOUNT
29413+ into
29414+1234:
29415+ _ASM_EXTABLE(1234b, 3f)
29416+#endif
29417+
29418 LOCK_PREFIX
29419 cmpxchg8b (%esi)
29420 jne 1b
29421@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29422 CFI_ADJUST_CFA_OFFSET -8
29423 RESTORE ebx
29424 RESTORE ebp
29425+ pax_force_retaddr
29426 ret
29427 4:
29428 cmpl %edx, 4(%esp)
29429@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29430 xorl %ecx, %ecx
29431 addl $1, %ebx
29432 adcl %edx, %ecx
29433+
29434+#ifdef CONFIG_PAX_REFCOUNT
29435+ into
29436+1234:
29437+ _ASM_EXTABLE(1234b, 3f)
29438+#endif
29439+
29440 LOCK_PREFIX
29441 cmpxchg8b (%esi)
29442 jne 1b
29443@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29444 movl $1, %eax
29445 3:
29446 RESTORE ebx
29447+ pax_force_retaddr
29448 ret
29449 CFI_ENDPROC
29450 ENDPROC(atomic64_inc_not_zero_cx8)
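
The cmpxchg8b flavour gets the same treatment via assembler macro parameters: each macro is invoked once with the default empty unchecked parameter and once with _unchecked, stamping out two entry points per operation so the C side can expose paired atomic64_t / atomic64_unchecked_t APIs. A sketch of that type split in plain C (the real operations are the locked instruction sequences above):

typedef struct { long long counter; } atomic64_model_t;           /* overflow traps */
typedef struct { long long counter; } atomic64_unchecked_model_t; /* wraps freely */

static inline void atomic64_inc_model(atomic64_model_t *v)
{
        v->counter++;   /* real version: locked inc plus the into/#OF check */
}

static inline void atomic64_inc_unchecked_model(atomic64_unchecked_model_t *v)
{
        v->counter++;   /* real version: plain locked inc, no check */
}
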
29451diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29452index e78b8eee..7e173a8 100644
29453--- a/arch/x86/lib/checksum_32.S
29454+++ b/arch/x86/lib/checksum_32.S
29455@@ -29,7 +29,8 @@
29456 #include <asm/dwarf2.h>
29457 #include <asm/errno.h>
29458 #include <asm/asm.h>
29459-
29460+#include <asm/segment.h>
29461+
29462 /*
29463 * computes a partial checksum, e.g. for TCP/UDP fragments
29464 */
29465@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29466
29467 #define ARGBASE 16
29468 #define FP 12
29469-
29470-ENTRY(csum_partial_copy_generic)
29471+
29472+ENTRY(csum_partial_copy_generic_to_user)
29473 CFI_STARTPROC
29474+
29475+#ifdef CONFIG_PAX_MEMORY_UDEREF
29476+ pushl_cfi %gs
29477+ popl_cfi %es
29478+ jmp csum_partial_copy_generic
29479+#endif
29480+
29481+ENTRY(csum_partial_copy_generic_from_user)
29482+
29483+#ifdef CONFIG_PAX_MEMORY_UDEREF
29484+ pushl_cfi %gs
29485+ popl_cfi %ds
29486+#endif
29487+
29488+ENTRY(csum_partial_copy_generic)
29489 subl $4,%esp
29490 CFI_ADJUST_CFA_OFFSET 4
29491 pushl_cfi %edi
29492@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29493 jmp 4f
29494 SRC(1: movw (%esi), %bx )
29495 addl $2, %esi
29496-DST( movw %bx, (%edi) )
29497+DST( movw %bx, %es:(%edi) )
29498 addl $2, %edi
29499 addw %bx, %ax
29500 adcl $0, %eax
29501@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29502 SRC(1: movl (%esi), %ebx )
29503 SRC( movl 4(%esi), %edx )
29504 adcl %ebx, %eax
29505-DST( movl %ebx, (%edi) )
29506+DST( movl %ebx, %es:(%edi) )
29507 adcl %edx, %eax
29508-DST( movl %edx, 4(%edi) )
29509+DST( movl %edx, %es:4(%edi) )
29510
29511 SRC( movl 8(%esi), %ebx )
29512 SRC( movl 12(%esi), %edx )
29513 adcl %ebx, %eax
29514-DST( movl %ebx, 8(%edi) )
29515+DST( movl %ebx, %es:8(%edi) )
29516 adcl %edx, %eax
29517-DST( movl %edx, 12(%edi) )
29518+DST( movl %edx, %es:12(%edi) )
29519
29520 SRC( movl 16(%esi), %ebx )
29521 SRC( movl 20(%esi), %edx )
29522 adcl %ebx, %eax
29523-DST( movl %ebx, 16(%edi) )
29524+DST( movl %ebx, %es:16(%edi) )
29525 adcl %edx, %eax
29526-DST( movl %edx, 20(%edi) )
29527+DST( movl %edx, %es:20(%edi) )
29528
29529 SRC( movl 24(%esi), %ebx )
29530 SRC( movl 28(%esi), %edx )
29531 adcl %ebx, %eax
29532-DST( movl %ebx, 24(%edi) )
29533+DST( movl %ebx, %es:24(%edi) )
29534 adcl %edx, %eax
29535-DST( movl %edx, 28(%edi) )
29536+DST( movl %edx, %es:28(%edi) )
29537
29538 lea 32(%esi), %esi
29539 lea 32(%edi), %edi
29540@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29541 shrl $2, %edx # This clears CF
29542 SRC(3: movl (%esi), %ebx )
29543 adcl %ebx, %eax
29544-DST( movl %ebx, (%edi) )
29545+DST( movl %ebx, %es:(%edi) )
29546 lea 4(%esi), %esi
29547 lea 4(%edi), %edi
29548 dec %edx
29549@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29550 jb 5f
29551 SRC( movw (%esi), %cx )
29552 leal 2(%esi), %esi
29553-DST( movw %cx, (%edi) )
29554+DST( movw %cx, %es:(%edi) )
29555 leal 2(%edi), %edi
29556 je 6f
29557 shll $16,%ecx
29558 SRC(5: movb (%esi), %cl )
29559-DST( movb %cl, (%edi) )
29560+DST( movb %cl, %es:(%edi) )
29561 6: addl %ecx, %eax
29562 adcl $0, %eax
29563 7:
29564@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29565
29566 6001:
29567 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29568- movl $-EFAULT, (%ebx)
29569+ movl $-EFAULT, %ss:(%ebx)
29570
29571 # zero the complete destination - computing the rest
29572 # is too much work
29573@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29574
29575 6002:
29576 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29577- movl $-EFAULT,(%ebx)
29578+ movl $-EFAULT,%ss:(%ebx)
29579 jmp 5000b
29580
29581 .previous
29582
29583+ pushl_cfi %ss
29584+ popl_cfi %ds
29585+ pushl_cfi %ss
29586+ popl_cfi %es
29587 popl_cfi %ebx
29588 CFI_RESTORE ebx
29589 popl_cfi %esi
29590@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29591 popl_cfi %ecx # equivalent to addl $4,%esp
29592 ret
29593 CFI_ENDPROC
29594-ENDPROC(csum_partial_copy_generic)
29595+ENDPROC(csum_partial_copy_generic_to_user)
29596
29597 #else
29598
29599 /* Version for PentiumII/PPro */
29600
29601 #define ROUND1(x) \
29602+ nop; nop; nop; \
29603 SRC(movl x(%esi), %ebx ) ; \
29604 addl %ebx, %eax ; \
29605- DST(movl %ebx, x(%edi) ) ;
29606+ DST(movl %ebx, %es:x(%edi)) ;
29607
29608 #define ROUND(x) \
29609+ nop; nop; nop; \
29610 SRC(movl x(%esi), %ebx ) ; \
29611 adcl %ebx, %eax ; \
29612- DST(movl %ebx, x(%edi) ) ;
29613+ DST(movl %ebx, %es:x(%edi)) ;
29614
29615 #define ARGBASE 12
29616-
29617-ENTRY(csum_partial_copy_generic)
29618+
29619+ENTRY(csum_partial_copy_generic_to_user)
29620 CFI_STARTPROC
29621+
29622+#ifdef CONFIG_PAX_MEMORY_UDEREF
29623+ pushl_cfi %gs
29624+ popl_cfi %es
29625+ jmp csum_partial_copy_generic
29626+#endif
29627+
29628+ENTRY(csum_partial_copy_generic_from_user)
29629+
29630+#ifdef CONFIG_PAX_MEMORY_UDEREF
29631+ pushl_cfi %gs
29632+ popl_cfi %ds
29633+#endif
29634+
29635+ENTRY(csum_partial_copy_generic)
29636 pushl_cfi %ebx
29637 CFI_REL_OFFSET ebx, 0
29638 pushl_cfi %edi
29639@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29640 subl %ebx, %edi
29641 lea -1(%esi),%edx
29642 andl $-32,%edx
29643- lea 3f(%ebx,%ebx), %ebx
29644+ lea 3f(%ebx,%ebx,2), %ebx
29645 testl %esi, %esi
29646 jmp *%ebx
29647 1: addl $64,%esi
29648@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29649 jb 5f
29650 SRC( movw (%esi), %dx )
29651 leal 2(%esi), %esi
29652-DST( movw %dx, (%edi) )
29653+DST( movw %dx, %es:(%edi) )
29654 leal 2(%edi), %edi
29655 je 6f
29656 shll $16,%edx
29657 5:
29658 SRC( movb (%esi), %dl )
29659-DST( movb %dl, (%edi) )
29660+DST( movb %dl, %es:(%edi) )
29661 6: addl %edx, %eax
29662 adcl $0, %eax
29663 7:
29664 .section .fixup, "ax"
29665 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29666- movl $-EFAULT, (%ebx)
29667+ movl $-EFAULT, %ss:(%ebx)
29668 # zero the complete destination (computing the rest is too much work)
29669 movl ARGBASE+8(%esp),%edi # dst
29670 movl ARGBASE+12(%esp),%ecx # len
29671@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29672 rep; stosb
29673 jmp 7b
29674 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29675- movl $-EFAULT, (%ebx)
29676+ movl $-EFAULT, %ss:(%ebx)
29677 jmp 7b
29678 .previous
29679
29680+#ifdef CONFIG_PAX_MEMORY_UDEREF
29681+ pushl_cfi %ss
29682+ popl_cfi %ds
29683+ pushl_cfi %ss
29684+ popl_cfi %es
29685+#endif
29686+
29687 popl_cfi %esi
29688 CFI_RESTORE esi
29689 popl_cfi %edi
29690@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29691 CFI_RESTORE ebx
29692 ret
29693 CFI_ENDPROC
29694-ENDPROC(csum_partial_copy_generic)
29695+ENDPROC(csum_partial_copy_generic_to_user)
29696
29697 #undef ROUND
29698 #undef ROUND1
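
The checksum_32.S rewrite shows UDEREF's segment-based userland separation on i386: the generic copy routine gains _to_user/_from_user entry points that load the userland segment (%gs) into %es or %ds for the duration of the copy, every user-side memory operand gets an explicit %es: override, and the fixup path restores flat segments from %ss. A kernel dereference of a user pointer through the default segments then faults. A hypothetical kernel-style helper showing the override (illustrative only: it assumes %gs already holds the user segment, as it does under UDEREF, so it is not meaningful to run in an ordinary process):

static inline void put_user_byte_model(unsigned char val, unsigned char *uaddr)
{
        /* the %gs: override confines the store to the userland segment */
        asm volatile("movb %1, %%gs:%0" : "=m" (*uaddr) : "iq" (val));
}
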
29699diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29700index f2145cf..cea889d 100644
29701--- a/arch/x86/lib/clear_page_64.S
29702+++ b/arch/x86/lib/clear_page_64.S
29703@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29704 movl $4096/8,%ecx
29705 xorl %eax,%eax
29706 rep stosq
29707+ pax_force_retaddr
29708 ret
29709 CFI_ENDPROC
29710 ENDPROC(clear_page_c)
29711@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29712 movl $4096,%ecx
29713 xorl %eax,%eax
29714 rep stosb
29715+ pax_force_retaddr
29716 ret
29717 CFI_ENDPROC
29718 ENDPROC(clear_page_c_e)
29719@@ -43,6 +45,7 @@ ENTRY(clear_page)
29720 leaq 64(%rdi),%rdi
29721 jnz .Lloop
29722 nop
29723+ pax_force_retaddr
29724 ret
29725 CFI_ENDPROC
29726 .Lclear_page_end:
29727@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29728
29729 #include <asm/cpufeature.h>
29730
29731- .section .altinstr_replacement,"ax"
29732+ .section .altinstr_replacement,"a"
29733 1: .byte 0xeb /* jmp <disp8> */
29734 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29735 2: .byte 0xeb /* jmp <disp8> */
29736diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29737index 40a1725..5d12ac4 100644
29738--- a/arch/x86/lib/cmpxchg16b_emu.S
29739+++ b/arch/x86/lib/cmpxchg16b_emu.S
29740@@ -8,6 +8,7 @@
29741 #include <linux/linkage.h>
29742 #include <asm/dwarf2.h>
29743 #include <asm/percpu.h>
29744+#include <asm/alternative-asm.h>
29745
29746 .text
29747
29748@@ -46,12 +47,14 @@ CFI_STARTPROC
29749 CFI_REMEMBER_STATE
29750 popfq_cfi
29751 mov $1, %al
29752+ pax_force_retaddr
29753 ret
29754
29755 CFI_RESTORE_STATE
29756 .Lnot_same:
29757 popfq_cfi
29758 xor %al,%al
29759+ pax_force_retaddr
29760 ret
29761
29762 CFI_ENDPROC
29763diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29764index 176cca6..e0d658e 100644
29765--- a/arch/x86/lib/copy_page_64.S
29766+++ b/arch/x86/lib/copy_page_64.S
29767@@ -9,6 +9,7 @@ copy_page_rep:
29768 CFI_STARTPROC
29769 movl $4096/8, %ecx
29770 rep movsq
29771+ pax_force_retaddr
29772 ret
29773 CFI_ENDPROC
29774 ENDPROC(copy_page_rep)
29775@@ -24,8 +25,8 @@ ENTRY(copy_page)
29776 CFI_ADJUST_CFA_OFFSET 2*8
29777 movq %rbx, (%rsp)
29778 CFI_REL_OFFSET rbx, 0
29779- movq %r12, 1*8(%rsp)
29780- CFI_REL_OFFSET r12, 1*8
29781+ movq %r13, 1*8(%rsp)
29782+ CFI_REL_OFFSET r13, 1*8
29783
29784 movl $(4096/64)-5, %ecx
29785 .p2align 4
29786@@ -38,7 +39,7 @@ ENTRY(copy_page)
29787 movq 0x8*4(%rsi), %r9
29788 movq 0x8*5(%rsi), %r10
29789 movq 0x8*6(%rsi), %r11
29790- movq 0x8*7(%rsi), %r12
29791+ movq 0x8*7(%rsi), %r13
29792
29793 prefetcht0 5*64(%rsi)
29794
29795@@ -49,7 +50,7 @@ ENTRY(copy_page)
29796 movq %r9, 0x8*4(%rdi)
29797 movq %r10, 0x8*5(%rdi)
29798 movq %r11, 0x8*6(%rdi)
29799- movq %r12, 0x8*7(%rdi)
29800+ movq %r13, 0x8*7(%rdi)
29801
29802 leaq 64 (%rsi), %rsi
29803 leaq 64 (%rdi), %rdi
29804@@ -68,7 +69,7 @@ ENTRY(copy_page)
29805 movq 0x8*4(%rsi), %r9
29806 movq 0x8*5(%rsi), %r10
29807 movq 0x8*6(%rsi), %r11
29808- movq 0x8*7(%rsi), %r12
29809+ movq 0x8*7(%rsi), %r13
29810
29811 movq %rax, 0x8*0(%rdi)
29812 movq %rbx, 0x8*1(%rdi)
29813@@ -77,7 +78,7 @@ ENTRY(copy_page)
29814 movq %r9, 0x8*4(%rdi)
29815 movq %r10, 0x8*5(%rdi)
29816 movq %r11, 0x8*6(%rdi)
29817- movq %r12, 0x8*7(%rdi)
29818+ movq %r13, 0x8*7(%rdi)
29819
29820 leaq 64(%rdi), %rdi
29821 leaq 64(%rsi), %rsi
29822@@ -85,10 +86,11 @@ ENTRY(copy_page)
29823
29824 movq (%rsp), %rbx
29825 CFI_RESTORE rbx
29826- movq 1*8(%rsp), %r12
29827- CFI_RESTORE r12
29828+ movq 1*8(%rsp), %r13
29829+ CFI_RESTORE r13
29830 addq $2*8, %rsp
29831 CFI_ADJUST_CFA_OFFSET -2*8
29832+ pax_force_retaddr
29833 ret
29834 .Lcopy_page_end:
29835 CFI_ENDPROC
29836@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29837
29838 #include <asm/cpufeature.h>
29839
29840- .section .altinstr_replacement,"ax"
29841+ .section .altinstr_replacement,"a"
29842 1: .byte 0xeb /* jmp <disp8> */
29843 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29844 2:
29845diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29846index dee945d..a84067b 100644
29847--- a/arch/x86/lib/copy_user_64.S
29848+++ b/arch/x86/lib/copy_user_64.S
29849@@ -18,31 +18,7 @@
29850 #include <asm/alternative-asm.h>
29851 #include <asm/asm.h>
29852 #include <asm/smap.h>
29853-
29854-/*
29855- * By placing feature2 after feature1 in altinstructions section, we logically
29856- * implement:
29857- * If CPU has feature2, jmp to alt2 is used
29858- * else if CPU has feature1, jmp to alt1 is used
29859- * else jmp to orig is used.
29860- */
29861- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29862-0:
29863- .byte 0xe9 /* 32bit jump */
29864- .long \orig-1f /* by default jump to orig */
29865-1:
29866- .section .altinstr_replacement,"ax"
29867-2: .byte 0xe9 /* near jump with 32bit immediate */
29868- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29869-3: .byte 0xe9 /* near jump with 32bit immediate */
29870- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29871- .previous
29872-
29873- .section .altinstructions,"a"
29874- altinstruction_entry 0b,2b,\feature1,5,5
29875- altinstruction_entry 0b,3b,\feature2,5,5
29876- .previous
29877- .endm
29878+#include <asm/pgtable.h>
29879
29880 .macro ALIGN_DESTINATION
29881 #ifdef FIX_ALIGNMENT
29882@@ -70,52 +46,6 @@
29883 #endif
29884 .endm
29885
29886-/* Standard copy_to_user with segment limit checking */
29887-ENTRY(_copy_to_user)
29888- CFI_STARTPROC
29889- GET_THREAD_INFO(%rax)
29890- movq %rdi,%rcx
29891- addq %rdx,%rcx
29892- jc bad_to_user
29893- cmpq TI_addr_limit(%rax),%rcx
29894- ja bad_to_user
29895- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29896- copy_user_generic_unrolled,copy_user_generic_string, \
29897- copy_user_enhanced_fast_string
29898- CFI_ENDPROC
29899-ENDPROC(_copy_to_user)
29900-
29901-/* Standard copy_from_user with segment limit checking */
29902-ENTRY(_copy_from_user)
29903- CFI_STARTPROC
29904- GET_THREAD_INFO(%rax)
29905- movq %rsi,%rcx
29906- addq %rdx,%rcx
29907- jc bad_from_user
29908- cmpq TI_addr_limit(%rax),%rcx
29909- ja bad_from_user
29910- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29911- copy_user_generic_unrolled,copy_user_generic_string, \
29912- copy_user_enhanced_fast_string
29913- CFI_ENDPROC
29914-ENDPROC(_copy_from_user)
29915-
29916- .section .fixup,"ax"
29917- /* must zero dest */
29918-ENTRY(bad_from_user)
29919-bad_from_user:
29920- CFI_STARTPROC
29921- movl %edx,%ecx
29922- xorl %eax,%eax
29923- rep
29924- stosb
29925-bad_to_user:
29926- movl %edx,%eax
29927- ret
29928- CFI_ENDPROC
29929-ENDPROC(bad_from_user)
29930- .previous
29931-
29932 /*
29933 * copy_user_generic_unrolled - memory copy with exception handling.
29934 * This version is for CPUs like P4 that don't have efficient micro
29935@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29936 */
29937 ENTRY(copy_user_generic_unrolled)
29938 CFI_STARTPROC
29939+ ASM_PAX_OPEN_USERLAND
29940 ASM_STAC
29941 cmpl $8,%edx
29942 jb 20f /* less then 8 bytes, go to byte copy loop */
29943@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29944 jnz 21b
29945 23: xor %eax,%eax
29946 ASM_CLAC
29947+ ASM_PAX_CLOSE_USERLAND
29948+ pax_force_retaddr
29949 ret
29950
29951 .section .fixup,"ax"
29952@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29953 */
29954 ENTRY(copy_user_generic_string)
29955 CFI_STARTPROC
29956+ ASM_PAX_OPEN_USERLAND
29957 ASM_STAC
29958 cmpl $8,%edx
29959 jb 2f /* less than 8 bytes, go to byte copy loop */
29960@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29961 movsb
29962 xorl %eax,%eax
29963 ASM_CLAC
29964+ ASM_PAX_CLOSE_USERLAND
29965+ pax_force_retaddr
29966 ret
29967
29968 .section .fixup,"ax"
29969@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29970 */
29971 ENTRY(copy_user_enhanced_fast_string)
29972 CFI_STARTPROC
29973+ ASM_PAX_OPEN_USERLAND
29974 ASM_STAC
29975 movl %edx,%ecx
29976 1: rep
29977 movsb
29978 xorl %eax,%eax
29979 ASM_CLAC
29980+ ASM_PAX_CLOSE_USERLAND
29981+ pax_force_retaddr
29982 ret
29983
29984 .section .fixup,"ax"
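
The deleted _copy_to_user/_copy_from_user stubs performed the classic range check — wrap-around detection plus an addr_limit comparison — before tail-calling the optimized copy; under the patch that check moves into C wrappers, and the remaining assembly entry points only bracket the copy with ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND and stac/clac. A C model of the removed check (names hypothetical):

#include <stddef.h>
#include <stdint.h>

static int user_range_ok(uintptr_t ptr, size_t len, uintptr_t addr_limit)
{
        uintptr_t end;

        if (__builtin_add_overflow(ptr, len, &end))     /* the "jc bad_*" case */
                return 0;
        return end <= addr_limit;   /* the "cmp TI_addr_limit; ja bad_*" case */
}
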
29985diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29986index 6a4f43c..c70fb52 100644
29987--- a/arch/x86/lib/copy_user_nocache_64.S
29988+++ b/arch/x86/lib/copy_user_nocache_64.S
29989@@ -8,6 +8,7 @@
29990
29991 #include <linux/linkage.h>
29992 #include <asm/dwarf2.h>
29993+#include <asm/alternative-asm.h>
29994
29995 #define FIX_ALIGNMENT 1
29996
29997@@ -16,6 +17,7 @@
29998 #include <asm/thread_info.h>
29999 #include <asm/asm.h>
30000 #include <asm/smap.h>
30001+#include <asm/pgtable.h>
30002
30003 .macro ALIGN_DESTINATION
30004 #ifdef FIX_ALIGNMENT
30005@@ -49,6 +51,16 @@
30006 */
30007 ENTRY(__copy_user_nocache)
30008 CFI_STARTPROC
30009+
30010+#ifdef CONFIG_PAX_MEMORY_UDEREF
30011+ mov pax_user_shadow_base,%rcx
30012+ cmp %rcx,%rsi
30013+ jae 1f
30014+ add %rcx,%rsi
30015+1:
30016+#endif
30017+
30018+ ASM_PAX_OPEN_USERLAND
30019 ASM_STAC
30020 cmpl $8,%edx
30021 jb 20f /* less then 8 bytes, go to byte copy loop */
30022@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30023 jnz 21b
30024 23: xorl %eax,%eax
30025 ASM_CLAC
30026+ ASM_PAX_CLOSE_USERLAND
30027 sfence
30028+ pax_force_retaddr
30029 ret
30030
30031 .section .fixup,"ax"
30032diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30033index 2419d5f..fe52d0e 100644
30034--- a/arch/x86/lib/csum-copy_64.S
30035+++ b/arch/x86/lib/csum-copy_64.S
30036@@ -9,6 +9,7 @@
30037 #include <asm/dwarf2.h>
30038 #include <asm/errno.h>
30039 #include <asm/asm.h>
30040+#include <asm/alternative-asm.h>
30041
30042 /*
30043 * Checksum copy with exception handling.
30044@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30045 CFI_ADJUST_CFA_OFFSET 7*8
30046 movq %rbx, 2*8(%rsp)
30047 CFI_REL_OFFSET rbx, 2*8
30048- movq %r12, 3*8(%rsp)
30049- CFI_REL_OFFSET r12, 3*8
30050+ movq %r15, 3*8(%rsp)
30051+ CFI_REL_OFFSET r15, 3*8
30052 movq %r14, 4*8(%rsp)
30053 CFI_REL_OFFSET r14, 4*8
30054 movq %r13, 5*8(%rsp)
30055@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30056 movl %edx, %ecx
30057
30058 xorl %r9d, %r9d
30059- movq %rcx, %r12
30060+ movq %rcx, %r15
30061
30062- shrq $6, %r12
30063+ shrq $6, %r15
30064 jz .Lhandle_tail /* < 64 */
30065
30066 clc
30067
30068 /* main loop. clear in 64 byte blocks */
30069 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30070- /* r11: temp3, rdx: temp4, r12 loopcnt */
30071+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30072 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30073 .p2align 4
30074 .Lloop:
30075@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30076 adcq %r14, %rax
30077 adcq %r13, %rax
30078
30079- decl %r12d
30080+ decl %r15d
30081
30082 dest
30083 movq %rbx, (%rsi)
30084@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30085 .Lende:
30086 movq 2*8(%rsp), %rbx
30087 CFI_RESTORE rbx
30088- movq 3*8(%rsp), %r12
30089- CFI_RESTORE r12
30090+ movq 3*8(%rsp), %r15
30091+ CFI_RESTORE r15
30092 movq 4*8(%rsp), %r14
30093 CFI_RESTORE r14
30094 movq 5*8(%rsp), %r13
30095@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30096 CFI_RESTORE rbp
30097 addq $7*8, %rsp
30098 CFI_ADJUST_CFA_OFFSET -7*8
30099+ pax_force_retaddr
30100 ret
30101 CFI_RESTORE_STATE
30102
30103diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30104index 1318f75..44c30fd 100644
30105--- a/arch/x86/lib/csum-wrappers_64.c
30106+++ b/arch/x86/lib/csum-wrappers_64.c
30107@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30108 len -= 2;
30109 }
30110 }
30111+ pax_open_userland();
30112 stac();
30113- isum = csum_partial_copy_generic((__force const void *)src,
30114+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30115 dst, len, isum, errp, NULL);
30116 clac();
30117+ pax_close_userland();
30118 if (unlikely(*errp))
30119 goto out_err;
30120
30121@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30122 }
30123
30124 *errp = 0;
30125+ pax_open_userland();
30126 stac();
30127- ret = csum_partial_copy_generic(src, (void __force *)dst,
30128+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30129 len, isum, NULL, errp);
30130 clac();
30131+ pax_close_userland();
30132 return ret;
30133 }
30134 EXPORT_SYMBOL(csum_partial_copy_to_user);
30135diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30136index a451235..a74bfa3 100644
30137--- a/arch/x86/lib/getuser.S
30138+++ b/arch/x86/lib/getuser.S
30139@@ -33,17 +33,40 @@
30140 #include <asm/thread_info.h>
30141 #include <asm/asm.h>
30142 #include <asm/smap.h>
30143+#include <asm/segment.h>
30144+#include <asm/pgtable.h>
30145+#include <asm/alternative-asm.h>
30146+
30147+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30148+#define __copyuser_seg gs;
30149+#else
30150+#define __copyuser_seg
30151+#endif
30152
30153 .text
30154 ENTRY(__get_user_1)
30155 CFI_STARTPROC
30156+
30157+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30158 GET_THREAD_INFO(%_ASM_DX)
30159 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30160 jae bad_get_user
30161+
30162+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30163+ mov pax_user_shadow_base,%_ASM_DX
30164+ cmp %_ASM_DX,%_ASM_AX
30165+ jae 1234f
30166+ add %_ASM_DX,%_ASM_AX
30167+1234:
30168+#endif
30169+
30170+#endif
30171+
30172 ASM_STAC
30173-1: movzbl (%_ASM_AX),%edx
30174+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30175 xor %eax,%eax
30176 ASM_CLAC
30177+ pax_force_retaddr
30178 ret
30179 CFI_ENDPROC
30180 ENDPROC(__get_user_1)
30181@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30182 ENTRY(__get_user_2)
30183 CFI_STARTPROC
30184 add $1,%_ASM_AX
30185+
30186+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30187 jc bad_get_user
30188 GET_THREAD_INFO(%_ASM_DX)
30189 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30190 jae bad_get_user
30191+
30192+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30193+ mov pax_user_shadow_base,%_ASM_DX
30194+ cmp %_ASM_DX,%_ASM_AX
30195+ jae 1234f
30196+ add %_ASM_DX,%_ASM_AX
30197+1234:
30198+#endif
30199+
30200+#endif
30201+
30202 ASM_STAC
30203-2: movzwl -1(%_ASM_AX),%edx
30204+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30205 xor %eax,%eax
30206 ASM_CLAC
30207+ pax_force_retaddr
30208 ret
30209 CFI_ENDPROC
30210 ENDPROC(__get_user_2)
30211@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30212 ENTRY(__get_user_4)
30213 CFI_STARTPROC
30214 add $3,%_ASM_AX
30215+
30216+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30217 jc bad_get_user
30218 GET_THREAD_INFO(%_ASM_DX)
30219 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30220 jae bad_get_user
30221+
30222+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30223+ mov pax_user_shadow_base,%_ASM_DX
30224+ cmp %_ASM_DX,%_ASM_AX
30225+ jae 1234f
30226+ add %_ASM_DX,%_ASM_AX
30227+1234:
30228+#endif
30229+
30230+#endif
30231+
30232 ASM_STAC
30233-3: movl -3(%_ASM_AX),%edx
30234+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30235 xor %eax,%eax
30236 ASM_CLAC
30237+ pax_force_retaddr
30238 ret
30239 CFI_ENDPROC
30240 ENDPROC(__get_user_4)
30241@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30242 GET_THREAD_INFO(%_ASM_DX)
30243 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30244 jae bad_get_user
30245+
30246+#ifdef CONFIG_PAX_MEMORY_UDEREF
30247+ mov pax_user_shadow_base,%_ASM_DX
30248+ cmp %_ASM_DX,%_ASM_AX
30249+ jae 1234f
30250+ add %_ASM_DX,%_ASM_AX
30251+1234:
30252+#endif
30253+
30254 ASM_STAC
30255 4: movq -7(%_ASM_AX),%rdx
30256 xor %eax,%eax
30257 ASM_CLAC
30258+ pax_force_retaddr
30259 ret
30260 #else
30261 add $7,%_ASM_AX
30262@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30263 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30264 jae bad_get_user_8
30265 ASM_STAC
30266-4: movl -7(%_ASM_AX),%edx
30267-5: movl -3(%_ASM_AX),%ecx
30268+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30269+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30270 xor %eax,%eax
30271 ASM_CLAC
30272+ pax_force_retaddr
30273 ret
30274 #endif
30275 CFI_ENDPROC
30276@@ -113,6 +175,7 @@ bad_get_user:
30277 xor %edx,%edx
30278 mov $(-EFAULT),%_ASM_AX
30279 ASM_CLAC
30280+ pax_force_retaddr
30281 ret
30282 CFI_ENDPROC
30283 END(bad_get_user)
30284@@ -124,6 +187,7 @@ bad_get_user_8:
30285 xor %ecx,%ecx
30286 mov $(-EFAULT),%_ASM_AX
30287 ASM_CLAC
30288+ pax_force_retaddr
30289 ret
30290 CFI_ENDPROC
30291 END(bad_get_user_8)
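
On amd64, UDEREF has no spare segment register to dedicate to userland, so __get_user_* gains the shadow-area check seen above: a pointer already at or above pax_user_shadow_base is left alone, while anything below it (a real userland address) is rebased into the non-executable shadow mapping of userland before the access. A C model of that rebase, with an illustrative base constant:

#include <stdint.h>

static const uintptr_t user_shadow_base = 0xffff880000000000ULL; /* illustrative */

static uintptr_t rebase_user_ptr(uintptr_t addr)
{
        if (addr >= user_shadow_base)
                return addr;                    /* already in shadow/kernel range */
        return addr + user_shadow_base;         /* redirect into the shadow area */
}
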
30292diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30293index 1313ae6..84f25ea 100644
30294--- a/arch/x86/lib/insn.c
30295+++ b/arch/x86/lib/insn.c
30296@@ -20,8 +20,10 @@
30297
30298 #ifdef __KERNEL__
30299 #include <linux/string.h>
30300+#include <asm/pgtable_types.h>
30301 #else
30302 #include <string.h>
30303+#define ktla_ktva(addr) addr
30304 #endif
30305 #include <asm/inat.h>
30306 #include <asm/insn.h>
30307@@ -53,9 +55,9 @@
30308 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30309 {
30310 memset(insn, 0, sizeof(*insn));
30311- insn->kaddr = kaddr;
30312- insn->end_kaddr = kaddr + buf_len;
30313- insn->next_byte = kaddr;
30314+ insn->kaddr = ktla_ktva(kaddr);
30315+ insn->end_kaddr = insn->kaddr + buf_len;
30316+ insn->next_byte = insn->kaddr;
30317 insn->x86_64 = x86_64 ? 1 : 0;
30318 insn->opnd_bytes = 4;
30319 if (x86_64)
30320diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30321index 05a95e7..326f2fa 100644
30322--- a/arch/x86/lib/iomap_copy_64.S
30323+++ b/arch/x86/lib/iomap_copy_64.S
30324@@ -17,6 +17,7 @@
30325
30326 #include <linux/linkage.h>
30327 #include <asm/dwarf2.h>
30328+#include <asm/alternative-asm.h>
30329
30330 /*
30331 * override generic version in lib/iomap_copy.c
30332@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30333 CFI_STARTPROC
30334 movl %edx,%ecx
30335 rep movsd
30336+ pax_force_retaddr
30337 ret
30338 CFI_ENDPROC
30339 ENDPROC(__iowrite32_copy)
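The pax_force_retaddr added before each ret here (and throughout the lib/ hunks) comes from the KERNEXEC plugin support in alternative-asm.h, which this hunk includes. As a hedged sketch of the BTS flavour (an assumption; the exact expansion depends on the configured plugin method), the macro sets bit 63 of the saved return address and the plugin-instrumented call sites clear it again, so a return address overwritten by anything other than instrumented kernel code ends up non-canonical and faults instead of being executed:

	/* Hedged model of the BTS method (assumed: btsq $63,(%rsp)).
	 * force/clear are paired: the ret side sets the bit, the call
	 * site masks it off; anything unpaired traps as non-canonical. */
	static inline unsigned long force_retaddr(unsigned long retaddr)
	{
		return retaddr | (1UL << 63);	/* applied to the stack slot before ret */
	}

	static inline unsigned long clear_retaddr(unsigned long retaddr)
	{
		return retaddr & ~(1UL << 63);	/* applied at the instrumented call site */
	}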
30340diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30341index 89b53c9..97357ca 100644
30342--- a/arch/x86/lib/memcpy_64.S
30343+++ b/arch/x86/lib/memcpy_64.S
30344@@ -24,7 +24,7 @@
30345 * This gets patched over the unrolled variant (below) via the
30346 * alternative instructions framework:
30347 */
30348- .section .altinstr_replacement, "ax", @progbits
30349+ .section .altinstr_replacement, "a", @progbits
30350 .Lmemcpy_c:
30351 movq %rdi, %rax
30352 movq %rdx, %rcx
30353@@ -33,6 +33,7 @@
30354 rep movsq
30355 movl %edx, %ecx
30356 rep movsb
30357+ pax_force_retaddr
30358 ret
30359 .Lmemcpy_e:
30360 .previous
30361@@ -44,11 +45,12 @@
30362 * This gets patched over the unrolled variant (below) via the
30363 * alternative instructions framework:
30364 */
30365- .section .altinstr_replacement, "ax", @progbits
30366+ .section .altinstr_replacement, "a", @progbits
30367 .Lmemcpy_c_e:
30368 movq %rdi, %rax
30369 movq %rdx, %rcx
30370 rep movsb
30371+ pax_force_retaddr
30372 ret
30373 .Lmemcpy_e_e:
30374 .previous
30375@@ -138,6 +140,7 @@ ENTRY(memcpy)
30376 movq %r9, 1*8(%rdi)
30377 movq %r10, -2*8(%rdi, %rdx)
30378 movq %r11, -1*8(%rdi, %rdx)
30379+ pax_force_retaddr
30380 retq
30381 .p2align 4
30382 .Lless_16bytes:
30383@@ -150,6 +153,7 @@ ENTRY(memcpy)
30384 movq -1*8(%rsi, %rdx), %r9
30385 movq %r8, 0*8(%rdi)
30386 movq %r9, -1*8(%rdi, %rdx)
30387+ pax_force_retaddr
30388 retq
30389 .p2align 4
30390 .Lless_8bytes:
30391@@ -163,6 +167,7 @@ ENTRY(memcpy)
30392 movl -4(%rsi, %rdx), %r8d
30393 movl %ecx, (%rdi)
30394 movl %r8d, -4(%rdi, %rdx)
30395+ pax_force_retaddr
30396 retq
30397 .p2align 4
30398 .Lless_3bytes:
30399@@ -181,6 +186,7 @@ ENTRY(memcpy)
30400 movb %cl, (%rdi)
30401
30402 .Lend:
30403+ pax_force_retaddr
30404 retq
30405 CFI_ENDPROC
30406 ENDPROC(memcpy)
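The "ax" to "a" change on .altinstr_replacement recurs in the memmove and memset hunks below for the same reason: replacement snippets are only ever copied over the original instructions by the alternatives framework, never executed at their own address, so the section needs to be allocated but not mapped executable, which matters once KERNEXEC enforces strict text permissions. A stripped-down model of the boot-time patching, using hypothetical names (the real entry layout lives in <asm/alternative.h>):

	#include <string.h>

	/* Hypothetical miniature of an alternatives entry. */
	struct alt_entry {
		unsigned char *instr;		  /* site in executable .text */
		const unsigned char *replacement; /* bytes in .altinstr_replacement */
		unsigned char len;
	};

	static void apply_one(const struct alt_entry *a)
	{
		/* The replacement is only read as data here, so the "a"
		 * (alloc) section flag suffices; "x" is never needed. */
		memcpy(a->instr, a->replacement, a->len);
	}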
30407diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30408index 9c4b530..830b77a 100644
30409--- a/arch/x86/lib/memmove_64.S
30410+++ b/arch/x86/lib/memmove_64.S
30411@@ -205,14 +205,16 @@ ENTRY(__memmove)
30412 movb (%rsi), %r11b
30413 movb %r11b, (%rdi)
30414 13:
30415+ pax_force_retaddr
30416 retq
30417 CFI_ENDPROC
30418
30419- .section .altinstr_replacement,"ax"
30420+ .section .altinstr_replacement,"a"
30421 .Lmemmove_begin_forward_efs:
30422 /* Forward moving data. */
30423 movq %rdx, %rcx
30424 rep movsb
30425+ pax_force_retaddr
30426 retq
30427 .Lmemmove_end_forward_efs:
30428 .previous
30429diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30430index 6f44935..fbf5f6d 100644
30431--- a/arch/x86/lib/memset_64.S
30432+++ b/arch/x86/lib/memset_64.S
30433@@ -16,7 +16,7 @@
30434 *
30435 * rax original destination
30436 */
30437- .section .altinstr_replacement, "ax", @progbits
30438+ .section .altinstr_replacement, "a", @progbits
30439 .Lmemset_c:
30440 movq %rdi,%r9
30441 movq %rdx,%rcx
30442@@ -30,6 +30,7 @@
30443 movl %edx,%ecx
30444 rep stosb
30445 movq %r9,%rax
30446+ pax_force_retaddr
30447 ret
30448 .Lmemset_e:
30449 .previous
30450@@ -45,13 +46,14 @@
30451 *
30452 * rax original destination
30453 */
30454- .section .altinstr_replacement, "ax", @progbits
30455+ .section .altinstr_replacement, "a", @progbits
30456 .Lmemset_c_e:
30457 movq %rdi,%r9
30458 movb %sil,%al
30459 movq %rdx,%rcx
30460 rep stosb
30461 movq %r9,%rax
30462+ pax_force_retaddr
30463 ret
30464 .Lmemset_e_e:
30465 .previous
30466@@ -120,6 +122,7 @@ ENTRY(__memset)
30467
30468 .Lende:
30469 movq %r10,%rax
30470+ pax_force_retaddr
30471 ret
30472
30473 CFI_RESTORE_STATE
30474diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30475index c9f2d9b..e7fd2c0 100644
30476--- a/arch/x86/lib/mmx_32.c
30477+++ b/arch/x86/lib/mmx_32.c
30478@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30479 {
30480 void *p;
30481 int i;
30482+ unsigned long cr0;
30483
30484 if (unlikely(in_interrupt()))
30485 return __memcpy(to, from, len);
30486@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30487 kernel_fpu_begin();
30488
30489 __asm__ __volatile__ (
30490- "1: prefetch (%0)\n" /* This set is 28 bytes */
30491- " prefetch 64(%0)\n"
30492- " prefetch 128(%0)\n"
30493- " prefetch 192(%0)\n"
30494- " prefetch 256(%0)\n"
30495+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30496+ " prefetch 64(%1)\n"
30497+ " prefetch 128(%1)\n"
30498+ " prefetch 192(%1)\n"
30499+ " prefetch 256(%1)\n"
30500 "2: \n"
30501 ".section .fixup, \"ax\"\n"
30502- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30503+ "3: \n"
30504+
30505+#ifdef CONFIG_PAX_KERNEXEC
30506+ " movl %%cr0, %0\n"
30507+ " movl %0, %%eax\n"
30508+ " andl $0xFFFEFFFF, %%eax\n"
30509+ " movl %%eax, %%cr0\n"
30510+#endif
30511+
30512+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30513+
30514+#ifdef CONFIG_PAX_KERNEXEC
30515+ " movl %0, %%cr0\n"
30516+#endif
30517+
30518 " jmp 2b\n"
30519 ".previous\n"
30520 _ASM_EXTABLE(1b, 3b)
30521- : : "r" (from));
30522+ : "=&r" (cr0) : "r" (from) : "ax");
30523
30524 for ( ; i > 5; i--) {
30525 __asm__ __volatile__ (
30526- "1: prefetch 320(%0)\n"
30527- "2: movq (%0), %%mm0\n"
30528- " movq 8(%0), %%mm1\n"
30529- " movq 16(%0), %%mm2\n"
30530- " movq 24(%0), %%mm3\n"
30531- " movq %%mm0, (%1)\n"
30532- " movq %%mm1, 8(%1)\n"
30533- " movq %%mm2, 16(%1)\n"
30534- " movq %%mm3, 24(%1)\n"
30535- " movq 32(%0), %%mm0\n"
30536- " movq 40(%0), %%mm1\n"
30537- " movq 48(%0), %%mm2\n"
30538- " movq 56(%0), %%mm3\n"
30539- " movq %%mm0, 32(%1)\n"
30540- " movq %%mm1, 40(%1)\n"
30541- " movq %%mm2, 48(%1)\n"
30542- " movq %%mm3, 56(%1)\n"
30543+ "1: prefetch 320(%1)\n"
30544+ "2: movq (%1), %%mm0\n"
30545+ " movq 8(%1), %%mm1\n"
30546+ " movq 16(%1), %%mm2\n"
30547+ " movq 24(%1), %%mm3\n"
30548+ " movq %%mm0, (%2)\n"
30549+ " movq %%mm1, 8(%2)\n"
30550+ " movq %%mm2, 16(%2)\n"
30551+ " movq %%mm3, 24(%2)\n"
30552+ " movq 32(%1), %%mm0\n"
30553+ " movq 40(%1), %%mm1\n"
30554+ " movq 48(%1), %%mm2\n"
30555+ " movq 56(%1), %%mm3\n"
30556+ " movq %%mm0, 32(%2)\n"
30557+ " movq %%mm1, 40(%2)\n"
30558+ " movq %%mm2, 48(%2)\n"
30559+ " movq %%mm3, 56(%2)\n"
30560 ".section .fixup, \"ax\"\n"
30561- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30562+ "3:\n"
30563+
30564+#ifdef CONFIG_PAX_KERNEXEC
30565+ " movl %%cr0, %0\n"
30566+ " movl %0, %%eax\n"
30567+ " andl $0xFFFEFFFF, %%eax\n"
30568+ " movl %%eax, %%cr0\n"
30569+#endif
30570+
30571+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30572+
30573+#ifdef CONFIG_PAX_KERNEXEC
30574+ " movl %0, %%cr0\n"
30575+#endif
30576+
30577 " jmp 2b\n"
30578 ".previous\n"
30579 _ASM_EXTABLE(1b, 3b)
30580- : : "r" (from), "r" (to) : "memory");
30581+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30582
30583 from += 64;
30584 to += 64;
30585@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30586 static void fast_copy_page(void *to, void *from)
30587 {
30588 int i;
30589+ unsigned long cr0;
30590
30591 kernel_fpu_begin();
30592
30593@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30594 * but that is for later. -AV
30595 */
30596 __asm__ __volatile__(
30597- "1: prefetch (%0)\n"
30598- " prefetch 64(%0)\n"
30599- " prefetch 128(%0)\n"
30600- " prefetch 192(%0)\n"
30601- " prefetch 256(%0)\n"
30602+ "1: prefetch (%1)\n"
30603+ " prefetch 64(%1)\n"
30604+ " prefetch 128(%1)\n"
30605+ " prefetch 192(%1)\n"
30606+ " prefetch 256(%1)\n"
30607 "2: \n"
30608 ".section .fixup, \"ax\"\n"
30609- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30610+ "3: \n"
30611+
30612+#ifdef CONFIG_PAX_KERNEXEC
30613+ " movl %%cr0, %0\n"
30614+ " movl %0, %%eax\n"
30615+ " andl $0xFFFEFFFF, %%eax\n"
30616+ " movl %%eax, %%cr0\n"
30617+#endif
30618+
30619+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30620+
30621+#ifdef CONFIG_PAX_KERNEXEC
30622+ " movl %0, %%cr0\n"
30623+#endif
30624+
30625 " jmp 2b\n"
30626 ".previous\n"
30627- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30628+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30629
30630 for (i = 0; i < (4096-320)/64; i++) {
30631 __asm__ __volatile__ (
30632- "1: prefetch 320(%0)\n"
30633- "2: movq (%0), %%mm0\n"
30634- " movntq %%mm0, (%1)\n"
30635- " movq 8(%0), %%mm1\n"
30636- " movntq %%mm1, 8(%1)\n"
30637- " movq 16(%0), %%mm2\n"
30638- " movntq %%mm2, 16(%1)\n"
30639- " movq 24(%0), %%mm3\n"
30640- " movntq %%mm3, 24(%1)\n"
30641- " movq 32(%0), %%mm4\n"
30642- " movntq %%mm4, 32(%1)\n"
30643- " movq 40(%0), %%mm5\n"
30644- " movntq %%mm5, 40(%1)\n"
30645- " movq 48(%0), %%mm6\n"
30646- " movntq %%mm6, 48(%1)\n"
30647- " movq 56(%0), %%mm7\n"
30648- " movntq %%mm7, 56(%1)\n"
30649+ "1: prefetch 320(%1)\n"
30650+ "2: movq (%1), %%mm0\n"
30651+ " movntq %%mm0, (%2)\n"
30652+ " movq 8(%1), %%mm1\n"
30653+ " movntq %%mm1, 8(%2)\n"
30654+ " movq 16(%1), %%mm2\n"
30655+ " movntq %%mm2, 16(%2)\n"
30656+ " movq 24(%1), %%mm3\n"
30657+ " movntq %%mm3, 24(%2)\n"
30658+ " movq 32(%1), %%mm4\n"
30659+ " movntq %%mm4, 32(%2)\n"
30660+ " movq 40(%1), %%mm5\n"
30661+ " movntq %%mm5, 40(%2)\n"
30662+ " movq 48(%1), %%mm6\n"
30663+ " movntq %%mm6, 48(%2)\n"
30664+ " movq 56(%1), %%mm7\n"
30665+ " movntq %%mm7, 56(%2)\n"
30666 ".section .fixup, \"ax\"\n"
30667- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30668+ "3:\n"
30669+
30670+#ifdef CONFIG_PAX_KERNEXEC
30671+ " movl %%cr0, %0\n"
30672+ " movl %0, %%eax\n"
30673+ " andl $0xFFFEFFFF, %%eax\n"
30674+ " movl %%eax, %%cr0\n"
30675+#endif
30676+
30677+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30678+
30679+#ifdef CONFIG_PAX_KERNEXEC
30680+ " movl %0, %%cr0\n"
30681+#endif
30682+
30683 " jmp 2b\n"
30684 ".previous\n"
30685- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30686+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30687
30688 from += 64;
30689 to += 64;
30690@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30691 static void fast_copy_page(void *to, void *from)
30692 {
30693 int i;
30694+ unsigned long cr0;
30695
30696 kernel_fpu_begin();
30697
30698 __asm__ __volatile__ (
30699- "1: prefetch (%0)\n"
30700- " prefetch 64(%0)\n"
30701- " prefetch 128(%0)\n"
30702- " prefetch 192(%0)\n"
30703- " prefetch 256(%0)\n"
30704+ "1: prefetch (%1)\n"
30705+ " prefetch 64(%1)\n"
30706+ " prefetch 128(%1)\n"
30707+ " prefetch 192(%1)\n"
30708+ " prefetch 256(%1)\n"
30709 "2: \n"
30710 ".section .fixup, \"ax\"\n"
30711- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30712+ "3: \n"
30713+
30714+#ifdef CONFIG_PAX_KERNEXEC
30715+ " movl %%cr0, %0\n"
30716+ " movl %0, %%eax\n"
30717+ " andl $0xFFFEFFFF, %%eax\n"
30718+ " movl %%eax, %%cr0\n"
30719+#endif
30720+
30721+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30722+
30723+#ifdef CONFIG_PAX_KERNEXEC
30724+ " movl %0, %%cr0\n"
30725+#endif
30726+
30727 " jmp 2b\n"
30728 ".previous\n"
30729- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30730+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30731
30732 for (i = 0; i < 4096/64; i++) {
30733 __asm__ __volatile__ (
30734- "1: prefetch 320(%0)\n"
30735- "2: movq (%0), %%mm0\n"
30736- " movq 8(%0), %%mm1\n"
30737- " movq 16(%0), %%mm2\n"
30738- " movq 24(%0), %%mm3\n"
30739- " movq %%mm0, (%1)\n"
30740- " movq %%mm1, 8(%1)\n"
30741- " movq %%mm2, 16(%1)\n"
30742- " movq %%mm3, 24(%1)\n"
30743- " movq 32(%0), %%mm0\n"
30744- " movq 40(%0), %%mm1\n"
30745- " movq 48(%0), %%mm2\n"
30746- " movq 56(%0), %%mm3\n"
30747- " movq %%mm0, 32(%1)\n"
30748- " movq %%mm1, 40(%1)\n"
30749- " movq %%mm2, 48(%1)\n"
30750- " movq %%mm3, 56(%1)\n"
30751+ "1: prefetch 320(%1)\n"
30752+ "2: movq (%1), %%mm0\n"
30753+ " movq 8(%1), %%mm1\n"
30754+ " movq 16(%1), %%mm2\n"
30755+ " movq 24(%1), %%mm3\n"
30756+ " movq %%mm0, (%2)\n"
30757+ " movq %%mm1, 8(%2)\n"
30758+ " movq %%mm2, 16(%2)\n"
30759+ " movq %%mm3, 24(%2)\n"
30760+ " movq 32(%1), %%mm0\n"
30761+ " movq 40(%1), %%mm1\n"
30762+ " movq 48(%1), %%mm2\n"
30763+ " movq 56(%1), %%mm3\n"
30764+ " movq %%mm0, 32(%2)\n"
30765+ " movq %%mm1, 40(%2)\n"
30766+ " movq %%mm2, 48(%2)\n"
30767+ " movq %%mm3, 56(%2)\n"
30768 ".section .fixup, \"ax\"\n"
30769- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30770+ "3:\n"
30771+
30772+#ifdef CONFIG_PAX_KERNEXEC
30773+ " movl %%cr0, %0\n"
30774+ " movl %0, %%eax\n"
30775+ " andl $0xFFFEFFFF, %%eax\n"
30776+ " movl %%eax, %%cr0\n"
30777+#endif
30778+
30779+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30780+
30781+#ifdef CONFIG_PAX_KERNEXEC
30782+ " movl %0, %%cr0\n"
30783+#endif
30784+
30785 " jmp 2b\n"
30786 ".previous\n"
30787 _ASM_EXTABLE(1b, 3b)
30788- : : "r" (from), "r" (to) : "memory");
30789+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30790
30791 from += 64;
30792 to += 64;
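All six fixup paths in mmx_32.c now bracket their self-modifying movw with a CR0 dance, because the fixup writes a two-byte short jmp (0x1AEB encodes "jmp +26", 0x05EB "jmp +5", little-endian) straight over kernel text, and KERNEXEC keeps that text write-protected; the andl $0xFFFEFFFF clears exactly bit 16, the WP flag. A sketch of the bracketing, assuming it runs with preemption and interrupts already excluded as in the surrounding kernel_fpu_begin() region:

	#define CR0_WP	(1UL << 16)	/* ring-0 write-protect enforcement */

	static inline unsigned long cr0_wp_disable(void)
	{
		unsigned long cr0;

		asm volatile("mov %%cr0, %0" : "=r" (cr0));
		/* same effect as the patch's andl $0xFFFEFFFF */
		asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~CR0_WP));
		return cr0;		/* caller restores this value */
	}

	static inline void cr0_wp_restore(unsigned long cr0)
	{
		asm volatile("mov %0, %%cr0" : : "r" (cr0));
	}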
30793diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30794index f6d13ee..d789440 100644
30795--- a/arch/x86/lib/msr-reg.S
30796+++ b/arch/x86/lib/msr-reg.S
30797@@ -3,6 +3,7 @@
30798 #include <asm/dwarf2.h>
30799 #include <asm/asm.h>
30800 #include <asm/msr.h>
30801+#include <asm/alternative-asm.h>
30802
30803 #ifdef CONFIG_X86_64
30804 /*
30805@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30806 movl %edi, 28(%r10)
30807 popq_cfi %rbp
30808 popq_cfi %rbx
30809+ pax_force_retaddr
30810 ret
30811 3:
30812 CFI_RESTORE_STATE
30813diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30814index fc6ba17..14ad9a5 100644
30815--- a/arch/x86/lib/putuser.S
30816+++ b/arch/x86/lib/putuser.S
30817@@ -16,7 +16,9 @@
30818 #include <asm/errno.h>
30819 #include <asm/asm.h>
30820 #include <asm/smap.h>
30821-
30822+#include <asm/segment.h>
30823+#include <asm/pgtable.h>
30824+#include <asm/alternative-asm.h>
30825
30826 /*
30827 * __put_user_X
30828@@ -30,57 +32,125 @@
30829 * as they get called from within inline assembly.
30830 */
30831
30832-#define ENTER CFI_STARTPROC ; \
30833- GET_THREAD_INFO(%_ASM_BX)
30834-#define EXIT ASM_CLAC ; \
30835- ret ; \
30836+#define ENTER CFI_STARTPROC
30837+#define EXIT ASM_CLAC ; \
30838+ pax_force_retaddr ; \
30839+ ret ; \
30840 CFI_ENDPROC
30841
30842+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30843+#define _DEST %_ASM_CX,%_ASM_BX
30844+#else
30845+#define _DEST %_ASM_CX
30846+#endif
30847+
30848+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30849+#define __copyuser_seg gs;
30850+#else
30851+#define __copyuser_seg
30852+#endif
30853+
30854 .text
30855 ENTRY(__put_user_1)
30856 ENTER
30857+
30858+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30859+ GET_THREAD_INFO(%_ASM_BX)
30860 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30861 jae bad_put_user
30862+
30863+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30864+ mov pax_user_shadow_base,%_ASM_BX
30865+ cmp %_ASM_BX,%_ASM_CX
30866+ jb 1234f
30867+ xor %ebx,%ebx
30868+1234:
30869+#endif
30870+
30871+#endif
30872+
30873 ASM_STAC
30874-1: movb %al,(%_ASM_CX)
30875+1: __copyuser_seg movb %al,(_DEST)
30876 xor %eax,%eax
30877 EXIT
30878 ENDPROC(__put_user_1)
30879
30880 ENTRY(__put_user_2)
30881 ENTER
30882+
30883+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30884+ GET_THREAD_INFO(%_ASM_BX)
30885 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30886 sub $1,%_ASM_BX
30887 cmp %_ASM_BX,%_ASM_CX
30888 jae bad_put_user
30889+
30890+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30891+ mov pax_user_shadow_base,%_ASM_BX
30892+ cmp %_ASM_BX,%_ASM_CX
30893+ jb 1234f
30894+ xor %ebx,%ebx
30895+1234:
30896+#endif
30897+
30898+#endif
30899+
30900 ASM_STAC
30901-2: movw %ax,(%_ASM_CX)
30902+2: __copyuser_seg movw %ax,(_DEST)
30903 xor %eax,%eax
30904 EXIT
30905 ENDPROC(__put_user_2)
30906
30907 ENTRY(__put_user_4)
30908 ENTER
30909+
30910+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30911+ GET_THREAD_INFO(%_ASM_BX)
30912 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30913 sub $3,%_ASM_BX
30914 cmp %_ASM_BX,%_ASM_CX
30915 jae bad_put_user
30916+
30917+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30918+ mov pax_user_shadow_base,%_ASM_BX
30919+ cmp %_ASM_BX,%_ASM_CX
30920+ jb 1234f
30921+ xor %ebx,%ebx
30922+1234:
30923+#endif
30924+
30925+#endif
30926+
30927 ASM_STAC
30928-3: movl %eax,(%_ASM_CX)
30929+3: __copyuser_seg movl %eax,(_DEST)
30930 xor %eax,%eax
30931 EXIT
30932 ENDPROC(__put_user_4)
30933
30934 ENTRY(__put_user_8)
30935 ENTER
30936+
30937+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30938+ GET_THREAD_INFO(%_ASM_BX)
30939 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30940 sub $7,%_ASM_BX
30941 cmp %_ASM_BX,%_ASM_CX
30942 jae bad_put_user
30943+
30944+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30945+ mov pax_user_shadow_base,%_ASM_BX
30946+ cmp %_ASM_BX,%_ASM_CX
30947+ jb 1234f
30948+ xor %ebx,%ebx
30949+1234:
30950+#endif
30951+
30952+#endif
30953+
30954 ASM_STAC
30955-4: mov %_ASM_AX,(%_ASM_CX)
30956+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30957 #ifdef CONFIG_X86_32
30958-5: movl %edx,4(%_ASM_CX)
30959+5: __copyuser_seg movl %edx,4(_DEST)
30960 #endif
30961 xor %eax,%eax
30962 EXIT
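putuser.S takes a slightly different UDEREF approach than getuser.S: instead of rewriting the pointer in %_ASM_CX, it computes an offset register (%_ASM_BX is left holding pax_user_shadow_base for plain userland pointers and zeroed otherwise) and folds it into the store via the two-register _DEST addressing mode, so the incoming pointer survives unmodified. The equivalent decision in C, as a sketch:

	/* Sketch of the jb/xor sequence: pick the displacement that the
	 * base+index store will add to the unmodified user pointer. */
	static inline unsigned long put_user_disp(unsigned long addr,
						  unsigned long shadow_base)
	{
		/* the store then targets addr + disp, i.e. (%_ASM_CX,%_ASM_BX) */
		return addr < shadow_base ? shadow_base : 0;
	}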
30963diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30964index 5dff5f0..cadebf4 100644
30965--- a/arch/x86/lib/rwsem.S
30966+++ b/arch/x86/lib/rwsem.S
30967@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30968 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30969 CFI_RESTORE __ASM_REG(dx)
30970 restore_common_regs
30971+ pax_force_retaddr
30972 ret
30973 CFI_ENDPROC
30974 ENDPROC(call_rwsem_down_read_failed)
30975@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30976 movq %rax,%rdi
30977 call rwsem_down_write_failed
30978 restore_common_regs
30979+ pax_force_retaddr
30980 ret
30981 CFI_ENDPROC
30982 ENDPROC(call_rwsem_down_write_failed)
30983@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30984 movq %rax,%rdi
30985 call rwsem_wake
30986 restore_common_regs
30987-1: ret
30988+1: pax_force_retaddr
30989+ ret
30990 CFI_ENDPROC
30991 ENDPROC(call_rwsem_wake)
30992
30993@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30994 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30995 CFI_RESTORE __ASM_REG(dx)
30996 restore_common_regs
30997+ pax_force_retaddr
30998 ret
30999 CFI_ENDPROC
31000 ENDPROC(call_rwsem_downgrade_wake)
31001diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31002index b30b5eb..2b57052 100644
31003--- a/arch/x86/lib/thunk_64.S
31004+++ b/arch/x86/lib/thunk_64.S
31005@@ -9,6 +9,7 @@
31006 #include <asm/dwarf2.h>
31007 #include <asm/calling.h>
31008 #include <asm/asm.h>
31009+#include <asm/alternative-asm.h>
31010
31011 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31012 .macro THUNK name, func, put_ret_addr_in_rdi=0
31013@@ -16,11 +17,11 @@
31014 \name:
31015 CFI_STARTPROC
31016
31017- /* this one pushes 9 elems, the next one would be %rIP */
31018- SAVE_ARGS
31019+ /* this one pushes 15+1 elems, the next one would be %rIP */
31020+ SAVE_ARGS 8
31021
31022 .if \put_ret_addr_in_rdi
31023- movq_cfi_restore 9*8, rdi
31024+ movq_cfi_restore RIP, rdi
31025 .endif
31026
31027 call \func
31028@@ -47,9 +48,10 @@
31029
31030 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31031 CFI_STARTPROC
31032- SAVE_ARGS
31033+ SAVE_ARGS 8
31034 restore:
31035- RESTORE_ARGS
31036+ RESTORE_ARGS 1,8
31037+ pax_force_retaddr
31038 ret
31039 CFI_ENDPROC
31040 _ASM_NOKPROBE(restore)
31041diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31042index e2f5e21..4b22130 100644
31043--- a/arch/x86/lib/usercopy_32.c
31044+++ b/arch/x86/lib/usercopy_32.c
31045@@ -42,11 +42,13 @@ do { \
31046 int __d0; \
31047 might_fault(); \
31048 __asm__ __volatile__( \
31049+ __COPYUSER_SET_ES \
31050 ASM_STAC "\n" \
31051 "0: rep; stosl\n" \
31052 " movl %2,%0\n" \
31053 "1: rep; stosb\n" \
31054 "2: " ASM_CLAC "\n" \
31055+ __COPYUSER_RESTORE_ES \
31056 ".section .fixup,\"ax\"\n" \
31057 "3: lea 0(%2,%0,4),%0\n" \
31058 " jmp 2b\n" \
31059@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31060
31061 #ifdef CONFIG_X86_INTEL_USERCOPY
31062 static unsigned long
31063-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31064+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31065 {
31066 int d0, d1;
31067 __asm__ __volatile__(
31068@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31069 " .align 2,0x90\n"
31070 "3: movl 0(%4), %%eax\n"
31071 "4: movl 4(%4), %%edx\n"
31072- "5: movl %%eax, 0(%3)\n"
31073- "6: movl %%edx, 4(%3)\n"
31074+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31075+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31076 "7: movl 8(%4), %%eax\n"
31077 "8: movl 12(%4),%%edx\n"
31078- "9: movl %%eax, 8(%3)\n"
31079- "10: movl %%edx, 12(%3)\n"
31080+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31081+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31082 "11: movl 16(%4), %%eax\n"
31083 "12: movl 20(%4), %%edx\n"
31084- "13: movl %%eax, 16(%3)\n"
31085- "14: movl %%edx, 20(%3)\n"
31086+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31087+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31088 "15: movl 24(%4), %%eax\n"
31089 "16: movl 28(%4), %%edx\n"
31090- "17: movl %%eax, 24(%3)\n"
31091- "18: movl %%edx, 28(%3)\n"
31092+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31093+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31094 "19: movl 32(%4), %%eax\n"
31095 "20: movl 36(%4), %%edx\n"
31096- "21: movl %%eax, 32(%3)\n"
31097- "22: movl %%edx, 36(%3)\n"
31098+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31099+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31100 "23: movl 40(%4), %%eax\n"
31101 "24: movl 44(%4), %%edx\n"
31102- "25: movl %%eax, 40(%3)\n"
31103- "26: movl %%edx, 44(%3)\n"
31104+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31105+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31106 "27: movl 48(%4), %%eax\n"
31107 "28: movl 52(%4), %%edx\n"
31108- "29: movl %%eax, 48(%3)\n"
31109- "30: movl %%edx, 52(%3)\n"
31110+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31111+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31112 "31: movl 56(%4), %%eax\n"
31113 "32: movl 60(%4), %%edx\n"
31114- "33: movl %%eax, 56(%3)\n"
31115- "34: movl %%edx, 60(%3)\n"
31116+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31117+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31118 " addl $-64, %0\n"
31119 " addl $64, %4\n"
31120 " addl $64, %3\n"
31121@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31122 " shrl $2, %0\n"
31123 " andl $3, %%eax\n"
31124 " cld\n"
31125+ __COPYUSER_SET_ES
31126 "99: rep; movsl\n"
31127 "36: movl %%eax, %0\n"
31128 "37: rep; movsb\n"
31129 "100:\n"
31130+ __COPYUSER_RESTORE_ES
31131+ ".section .fixup,\"ax\"\n"
31132+ "101: lea 0(%%eax,%0,4),%0\n"
31133+ " jmp 100b\n"
31134+ ".previous\n"
31135+ _ASM_EXTABLE(1b,100b)
31136+ _ASM_EXTABLE(2b,100b)
31137+ _ASM_EXTABLE(3b,100b)
31138+ _ASM_EXTABLE(4b,100b)
31139+ _ASM_EXTABLE(5b,100b)
31140+ _ASM_EXTABLE(6b,100b)
31141+ _ASM_EXTABLE(7b,100b)
31142+ _ASM_EXTABLE(8b,100b)
31143+ _ASM_EXTABLE(9b,100b)
31144+ _ASM_EXTABLE(10b,100b)
31145+ _ASM_EXTABLE(11b,100b)
31146+ _ASM_EXTABLE(12b,100b)
31147+ _ASM_EXTABLE(13b,100b)
31148+ _ASM_EXTABLE(14b,100b)
31149+ _ASM_EXTABLE(15b,100b)
31150+ _ASM_EXTABLE(16b,100b)
31151+ _ASM_EXTABLE(17b,100b)
31152+ _ASM_EXTABLE(18b,100b)
31153+ _ASM_EXTABLE(19b,100b)
31154+ _ASM_EXTABLE(20b,100b)
31155+ _ASM_EXTABLE(21b,100b)
31156+ _ASM_EXTABLE(22b,100b)
31157+ _ASM_EXTABLE(23b,100b)
31158+ _ASM_EXTABLE(24b,100b)
31159+ _ASM_EXTABLE(25b,100b)
31160+ _ASM_EXTABLE(26b,100b)
31161+ _ASM_EXTABLE(27b,100b)
31162+ _ASM_EXTABLE(28b,100b)
31163+ _ASM_EXTABLE(29b,100b)
31164+ _ASM_EXTABLE(30b,100b)
31165+ _ASM_EXTABLE(31b,100b)
31166+ _ASM_EXTABLE(32b,100b)
31167+ _ASM_EXTABLE(33b,100b)
31168+ _ASM_EXTABLE(34b,100b)
31169+ _ASM_EXTABLE(35b,100b)
31170+ _ASM_EXTABLE(36b,100b)
31171+ _ASM_EXTABLE(37b,100b)
31172+ _ASM_EXTABLE(99b,101b)
31173+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31174+ : "1"(to), "2"(from), "0"(size)
31175+ : "eax", "edx", "memory");
31176+ return size;
31177+}
31178+
31179+static unsigned long
31180+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31181+{
31182+ int d0, d1;
31183+ __asm__ __volatile__(
31184+ " .align 2,0x90\n"
31185+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31186+ " cmpl $67, %0\n"
31187+ " jbe 3f\n"
31188+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31189+ " .align 2,0x90\n"
31190+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31191+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31192+ "5: movl %%eax, 0(%3)\n"
31193+ "6: movl %%edx, 4(%3)\n"
31194+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31195+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31196+ "9: movl %%eax, 8(%3)\n"
31197+ "10: movl %%edx, 12(%3)\n"
31198+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31199+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31200+ "13: movl %%eax, 16(%3)\n"
31201+ "14: movl %%edx, 20(%3)\n"
31202+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31203+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31204+ "17: movl %%eax, 24(%3)\n"
31205+ "18: movl %%edx, 28(%3)\n"
31206+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31207+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31208+ "21: movl %%eax, 32(%3)\n"
31209+ "22: movl %%edx, 36(%3)\n"
31210+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31211+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31212+ "25: movl %%eax, 40(%3)\n"
31213+ "26: movl %%edx, 44(%3)\n"
31214+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31215+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31216+ "29: movl %%eax, 48(%3)\n"
31217+ "30: movl %%edx, 52(%3)\n"
31218+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31219+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31220+ "33: movl %%eax, 56(%3)\n"
31221+ "34: movl %%edx, 60(%3)\n"
31222+ " addl $-64, %0\n"
31223+ " addl $64, %4\n"
31224+ " addl $64, %3\n"
31225+ " cmpl $63, %0\n"
31226+ " ja 1b\n"
31227+ "35: movl %0, %%eax\n"
31228+ " shrl $2, %0\n"
31229+ " andl $3, %%eax\n"
31230+ " cld\n"
31231+ "99: rep; "__copyuser_seg" movsl\n"
31232+ "36: movl %%eax, %0\n"
31233+ "37: rep; "__copyuser_seg" movsb\n"
31234+ "100:\n"
31235 ".section .fixup,\"ax\"\n"
31236 "101: lea 0(%%eax,%0,4),%0\n"
31237 " jmp 100b\n"
31238@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31239 int d0, d1;
31240 __asm__ __volatile__(
31241 " .align 2,0x90\n"
31242- "0: movl 32(%4), %%eax\n"
31243+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31244 " cmpl $67, %0\n"
31245 " jbe 2f\n"
31246- "1: movl 64(%4), %%eax\n"
31247+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31248 " .align 2,0x90\n"
31249- "2: movl 0(%4), %%eax\n"
31250- "21: movl 4(%4), %%edx\n"
31251+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31252+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31253 " movl %%eax, 0(%3)\n"
31254 " movl %%edx, 4(%3)\n"
31255- "3: movl 8(%4), %%eax\n"
31256- "31: movl 12(%4),%%edx\n"
31257+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31258+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31259 " movl %%eax, 8(%3)\n"
31260 " movl %%edx, 12(%3)\n"
31261- "4: movl 16(%4), %%eax\n"
31262- "41: movl 20(%4), %%edx\n"
31263+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31264+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31265 " movl %%eax, 16(%3)\n"
31266 " movl %%edx, 20(%3)\n"
31267- "10: movl 24(%4), %%eax\n"
31268- "51: movl 28(%4), %%edx\n"
31269+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31270+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31271 " movl %%eax, 24(%3)\n"
31272 " movl %%edx, 28(%3)\n"
31273- "11: movl 32(%4), %%eax\n"
31274- "61: movl 36(%4), %%edx\n"
31275+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31276+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31277 " movl %%eax, 32(%3)\n"
31278 " movl %%edx, 36(%3)\n"
31279- "12: movl 40(%4), %%eax\n"
31280- "71: movl 44(%4), %%edx\n"
31281+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31282+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31283 " movl %%eax, 40(%3)\n"
31284 " movl %%edx, 44(%3)\n"
31285- "13: movl 48(%4), %%eax\n"
31286- "81: movl 52(%4), %%edx\n"
31287+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31288+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31289 " movl %%eax, 48(%3)\n"
31290 " movl %%edx, 52(%3)\n"
31291- "14: movl 56(%4), %%eax\n"
31292- "91: movl 60(%4), %%edx\n"
31293+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31294+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31295 " movl %%eax, 56(%3)\n"
31296 " movl %%edx, 60(%3)\n"
31297 " addl $-64, %0\n"
31298@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31299 " shrl $2, %0\n"
31300 " andl $3, %%eax\n"
31301 " cld\n"
31302- "6: rep; movsl\n"
31303+ "6: rep; "__copyuser_seg" movsl\n"
31304 " movl %%eax,%0\n"
31305- "7: rep; movsb\n"
31306+ "7: rep; "__copyuser_seg" movsb\n"
31307 "8:\n"
31308 ".section .fixup,\"ax\"\n"
31309 "9: lea 0(%%eax,%0,4),%0\n"
31310@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31311
31312 __asm__ __volatile__(
31313 " .align 2,0x90\n"
31314- "0: movl 32(%4), %%eax\n"
31315+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31316 " cmpl $67, %0\n"
31317 " jbe 2f\n"
31318- "1: movl 64(%4), %%eax\n"
31319+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31320 " .align 2,0x90\n"
31321- "2: movl 0(%4), %%eax\n"
31322- "21: movl 4(%4), %%edx\n"
31323+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31324+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31325 " movnti %%eax, 0(%3)\n"
31326 " movnti %%edx, 4(%3)\n"
31327- "3: movl 8(%4), %%eax\n"
31328- "31: movl 12(%4),%%edx\n"
31329+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31330+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31331 " movnti %%eax, 8(%3)\n"
31332 " movnti %%edx, 12(%3)\n"
31333- "4: movl 16(%4), %%eax\n"
31334- "41: movl 20(%4), %%edx\n"
31335+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31336+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31337 " movnti %%eax, 16(%3)\n"
31338 " movnti %%edx, 20(%3)\n"
31339- "10: movl 24(%4), %%eax\n"
31340- "51: movl 28(%4), %%edx\n"
31341+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31342+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31343 " movnti %%eax, 24(%3)\n"
31344 " movnti %%edx, 28(%3)\n"
31345- "11: movl 32(%4), %%eax\n"
31346- "61: movl 36(%4), %%edx\n"
31347+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31348+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31349 " movnti %%eax, 32(%3)\n"
31350 " movnti %%edx, 36(%3)\n"
31351- "12: movl 40(%4), %%eax\n"
31352- "71: movl 44(%4), %%edx\n"
31353+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31354+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31355 " movnti %%eax, 40(%3)\n"
31356 " movnti %%edx, 44(%3)\n"
31357- "13: movl 48(%4), %%eax\n"
31358- "81: movl 52(%4), %%edx\n"
31359+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31360+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31361 " movnti %%eax, 48(%3)\n"
31362 " movnti %%edx, 52(%3)\n"
31363- "14: movl 56(%4), %%eax\n"
31364- "91: movl 60(%4), %%edx\n"
31365+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31366+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31367 " movnti %%eax, 56(%3)\n"
31368 " movnti %%edx, 60(%3)\n"
31369 " addl $-64, %0\n"
31370@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31371 " shrl $2, %0\n"
31372 " andl $3, %%eax\n"
31373 " cld\n"
31374- "6: rep; movsl\n"
31375+ "6: rep; "__copyuser_seg" movsl\n"
31376 " movl %%eax,%0\n"
31377- "7: rep; movsb\n"
31378+ "7: rep; "__copyuser_seg" movsb\n"
31379 "8:\n"
31380 ".section .fixup,\"ax\"\n"
31381 "9: lea 0(%%eax,%0,4),%0\n"
31382@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31383
31384 __asm__ __volatile__(
31385 " .align 2,0x90\n"
31386- "0: movl 32(%4), %%eax\n"
31387+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31388 " cmpl $67, %0\n"
31389 " jbe 2f\n"
31390- "1: movl 64(%4), %%eax\n"
31391+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31392 " .align 2,0x90\n"
31393- "2: movl 0(%4), %%eax\n"
31394- "21: movl 4(%4), %%edx\n"
31395+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31396+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31397 " movnti %%eax, 0(%3)\n"
31398 " movnti %%edx, 4(%3)\n"
31399- "3: movl 8(%4), %%eax\n"
31400- "31: movl 12(%4),%%edx\n"
31401+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31402+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31403 " movnti %%eax, 8(%3)\n"
31404 " movnti %%edx, 12(%3)\n"
31405- "4: movl 16(%4), %%eax\n"
31406- "41: movl 20(%4), %%edx\n"
31407+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31408+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31409 " movnti %%eax, 16(%3)\n"
31410 " movnti %%edx, 20(%3)\n"
31411- "10: movl 24(%4), %%eax\n"
31412- "51: movl 28(%4), %%edx\n"
31413+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31414+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31415 " movnti %%eax, 24(%3)\n"
31416 " movnti %%edx, 28(%3)\n"
31417- "11: movl 32(%4), %%eax\n"
31418- "61: movl 36(%4), %%edx\n"
31419+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31420+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31421 " movnti %%eax, 32(%3)\n"
31422 " movnti %%edx, 36(%3)\n"
31423- "12: movl 40(%4), %%eax\n"
31424- "71: movl 44(%4), %%edx\n"
31425+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31426+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31427 " movnti %%eax, 40(%3)\n"
31428 " movnti %%edx, 44(%3)\n"
31429- "13: movl 48(%4), %%eax\n"
31430- "81: movl 52(%4), %%edx\n"
31431+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31432+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31433 " movnti %%eax, 48(%3)\n"
31434 " movnti %%edx, 52(%3)\n"
31435- "14: movl 56(%4), %%eax\n"
31436- "91: movl 60(%4), %%edx\n"
31437+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31438+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31439 " movnti %%eax, 56(%3)\n"
31440 " movnti %%edx, 60(%3)\n"
31441 " addl $-64, %0\n"
31442@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31443 " shrl $2, %0\n"
31444 " andl $3, %%eax\n"
31445 " cld\n"
31446- "6: rep; movsl\n"
31447+ "6: rep; "__copyuser_seg" movsl\n"
31448 " movl %%eax,%0\n"
31449- "7: rep; movsb\n"
31450+ "7: rep; "__copyuser_seg" movsb\n"
31451 "8:\n"
31452 ".section .fixup,\"ax\"\n"
31453 "9: lea 0(%%eax,%0,4),%0\n"
31454@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31455 */
31456 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31457 unsigned long size);
31458-unsigned long __copy_user_intel(void __user *to, const void *from,
31459+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31460+ unsigned long size);
31461+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31462 unsigned long size);
31463 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31464 const void __user *from, unsigned long size);
31465 #endif /* CONFIG_X86_INTEL_USERCOPY */
31466
31467 /* Generic arbitrary sized copy. */
31468-#define __copy_user(to, from, size) \
31469+#define __copy_user(to, from, size, prefix, set, restore) \
31470 do { \
31471 int __d0, __d1, __d2; \
31472 __asm__ __volatile__( \
31473+ set \
31474 " cmp $7,%0\n" \
31475 " jbe 1f\n" \
31476 " movl %1,%0\n" \
31477 " negl %0\n" \
31478 " andl $7,%0\n" \
31479 " subl %0,%3\n" \
31480- "4: rep; movsb\n" \
31481+ "4: rep; "prefix"movsb\n" \
31482 " movl %3,%0\n" \
31483 " shrl $2,%0\n" \
31484 " andl $3,%3\n" \
31485 " .align 2,0x90\n" \
31486- "0: rep; movsl\n" \
31487+ "0: rep; "prefix"movsl\n" \
31488 " movl %3,%0\n" \
31489- "1: rep; movsb\n" \
31490+ "1: rep; "prefix"movsb\n" \
31491 "2:\n" \
31492+ restore \
31493 ".section .fixup,\"ax\"\n" \
31494 "5: addl %3,%0\n" \
31495 " jmp 2b\n" \
31496@@ -538,14 +650,14 @@ do { \
31497 " negl %0\n" \
31498 " andl $7,%0\n" \
31499 " subl %0,%3\n" \
31500- "4: rep; movsb\n" \
31501+ "4: rep; "__copyuser_seg"movsb\n" \
31502 " movl %3,%0\n" \
31503 " shrl $2,%0\n" \
31504 " andl $3,%3\n" \
31505 " .align 2,0x90\n" \
31506- "0: rep; movsl\n" \
31507+ "0: rep; "__copyuser_seg"movsl\n" \
31508 " movl %3,%0\n" \
31509- "1: rep; movsb\n" \
31510+ "1: rep; "__copyuser_seg"movsb\n" \
31511 "2:\n" \
31512 ".section .fixup,\"ax\"\n" \
31513 "5: addl %3,%0\n" \
31514@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31515 {
31516 stac();
31517 if (movsl_is_ok(to, from, n))
31518- __copy_user(to, from, n);
31519+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31520 else
31521- n = __copy_user_intel(to, from, n);
31522+ n = __generic_copy_to_user_intel(to, from, n);
31523 clac();
31524 return n;
31525 }
31526@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31527 {
31528 stac();
31529 if (movsl_is_ok(to, from, n))
31530- __copy_user(to, from, n);
31531+ __copy_user(to, from, n, __copyuser_seg, "", "");
31532 else
31533- n = __copy_user_intel((void __user *)to,
31534- (const void *)from, n);
31535+ n = __generic_copy_from_user_intel(to, from, n);
31536 clac();
31537 return n;
31538 }
31539@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31540 if (n > 64 && cpu_has_xmm2)
31541 n = __copy_user_intel_nocache(to, from, n);
31542 else
31543- __copy_user(to, from, n);
31544+ __copy_user(to, from, n, __copyuser_seg, "", "");
31545 #else
31546- __copy_user(to, from, n);
31547+ __copy_user(to, from, n, __copyuser_seg, "", "");
31548 #endif
31549 clac();
31550 return n;
31551 }
31552 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31553
31554-/**
31555- * copy_to_user: - Copy a block of data into user space.
31556- * @to: Destination address, in user space.
31557- * @from: Source address, in kernel space.
31558- * @n: Number of bytes to copy.
31559- *
31560- * Context: User context only. This function may sleep.
31561- *
31562- * Copy data from kernel space to user space.
31563- *
31564- * Returns number of bytes that could not be copied.
31565- * On success, this will be zero.
31566- */
31567-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31568+#ifdef CONFIG_PAX_MEMORY_UDEREF
31569+void __set_fs(mm_segment_t x)
31570 {
31571- if (access_ok(VERIFY_WRITE, to, n))
31572- n = __copy_to_user(to, from, n);
31573- return n;
31574+ switch (x.seg) {
31575+ case 0:
31576+ loadsegment(gs, 0);
31577+ break;
31578+ case TASK_SIZE_MAX:
31579+ loadsegment(gs, __USER_DS);
31580+ break;
31581+ case -1UL:
31582+ loadsegment(gs, __KERNEL_DS);
31583+ break;
31584+ default:
31585+ BUG();
31586+ }
31587 }
31588-EXPORT_SYMBOL(_copy_to_user);
31589+EXPORT_SYMBOL(__set_fs);
31590
31591-/**
31592- * copy_from_user: - Copy a block of data from user space.
31593- * @to: Destination address, in kernel space.
31594- * @from: Source address, in user space.
31595- * @n: Number of bytes to copy.
31596- *
31597- * Context: User context only. This function may sleep.
31598- *
31599- * Copy data from user space to kernel space.
31600- *
31601- * Returns number of bytes that could not be copied.
31602- * On success, this will be zero.
31603- *
31604- * If some data could not be copied, this function will pad the copied
31605- * data to the requested size using zero bytes.
31606- */
31607-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31608+void set_fs(mm_segment_t x)
31609 {
31610- if (access_ok(VERIFY_READ, from, n))
31611- n = __copy_from_user(to, from, n);
31612- else
31613- memset(to, 0, n);
31614- return n;
31615+ current_thread_info()->addr_limit = x;
31616+ __set_fs(x);
31617 }
31618-EXPORT_SYMBOL(_copy_from_user);
31619+EXPORT_SYMBOL(set_fs);
31620+#endif
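Two mechanisms carry the whole usercopy_32.c rework: the inline-asm templates gain a __copyuser_seg parameter that is pasted in as a gs segment prefix when UDEREF is active on i386, and set_fs() itself becomes a segment reload so the gs descriptor tracks the current addr_limit. The prefix injection relies on nothing more exotic than C string-literal concatenation, which this standalone snippet demonstrates:

	#include <stdio.h>

	#define __copyuser_seg "gs;"	/* the i386 UDEREF definition above */

	int main(void)
	{
		/* Adjacent string literals merge at compile time, so the
		 * segment prefix lands inside the asm template for free. */
		const char *tmpl = "6: rep; " __copyuser_seg " movsl\n";
		fputs(tmpl, stdout);	/* prints: 6: rep; gs; movsl */
		return 0;
	}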
31621diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31622index 1f33b3d..83c151d 100644
31623--- a/arch/x86/lib/usercopy_64.c
31624+++ b/arch/x86/lib/usercopy_64.c
31625@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31626 might_fault();
31627 /* no memory constraint because it doesn't change any memory gcc knows
31628 about */
31629+ pax_open_userland();
31630 stac();
31631 asm volatile(
31632 " testq %[size8],%[size8]\n"
31633@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31634 _ASM_EXTABLE(0b,3b)
31635 _ASM_EXTABLE(1b,2b)
31636 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31637- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31638+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31639 [zero] "r" (0UL), [eight] "r" (8UL));
31640 clac();
31641+ pax_close_userland();
31642 return size;
31643 }
31644 EXPORT_SYMBOL(__clear_user);
31645@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31646 }
31647 EXPORT_SYMBOL(clear_user);
31648
31649-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31650+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31651 {
31652- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31653- return copy_user_generic((__force void *)to, (__force void *)from, len);
31654- }
31655- return len;
31656+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31657+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31658+ return len;
31659 }
31660 EXPORT_SYMBOL(copy_in_user);
31661
31662@@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31663 * it is not necessary to optimize tail handling.
31664 */
31665 __visible unsigned long
31666-copy_user_handle_tail(char *to, char *from, unsigned len)
31667+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31668 {
31669+ clac();
31670+ pax_close_userland();
31671 for (; len; --len, to++) {
31672 char c;
31673
31674@@ -79,7 +82,6 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31675 if (__put_user_nocheck(c, to, sizeof(char)))
31676 break;
31677 }
31678- clac();
31679
31680 /* If the destination is a kernel buffer, we always clear the end */
31681 if ((unsigned long)to >= TASK_SIZE_MAX)
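usercopy_64.c shows the C-side counterparts of the asm tricks: pax_open_userland()/pax_close_userland() bracket the window in which userland is reachable, and ____m() rebases a user pointer into the kernel-visible shadow mapping before copy_user_generic() runs. The helper's real definition lives in the uaccess headers; the following is a plausible sketch consistent with the getuser.S logic earlier, offered purely as an assumption:

	#ifdef CONFIG_PAX_MEMORY_UDEREF
	extern unsigned long pax_user_shadow_base;

	/* Assumed behaviour only: rebase plain userland pointers, leave
	 * addresses already inside the shadow area untouched. */
	static inline unsigned long __uderef_m(unsigned long addr)
	{
		return addr < pax_user_shadow_base ? addr + pax_user_shadow_base
						   : addr;
	}
	#define ____m(x) ((typeof(x))__uderef_m((unsigned long)(x)))
	#else
	#define ____m(x) (x)
	#endif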
31682diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31683index c4cc740..60a7362 100644
31684--- a/arch/x86/mm/Makefile
31685+++ b/arch/x86/mm/Makefile
31686@@ -35,3 +35,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31687 obj-$(CONFIG_MEMTEST) += memtest.o
31688
31689 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31690+
31691+quote:="
31692+obj-$(CONFIG_X86_64) += uderef_64.o
31693+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31694diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31695index 903ec1e..c4166b2 100644
31696--- a/arch/x86/mm/extable.c
31697+++ b/arch/x86/mm/extable.c
31698@@ -6,12 +6,24 @@
31699 static inline unsigned long
31700 ex_insn_addr(const struct exception_table_entry *x)
31701 {
31702- return (unsigned long)&x->insn + x->insn;
31703+ unsigned long reloc = 0;
31704+
31705+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31706+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31707+#endif
31708+
31709+ return (unsigned long)&x->insn + x->insn + reloc;
31710 }
31711 static inline unsigned long
31712 ex_fixup_addr(const struct exception_table_entry *x)
31713 {
31714- return (unsigned long)&x->fixup + x->fixup;
31715+ unsigned long reloc = 0;
31716+
31717+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31718+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31719+#endif
31720+
31721+ return (unsigned long)&x->fixup + x->fixup + reloc;
31722 }
31723
31724 int fixup_exception(struct pt_regs *regs)
31725@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31726 unsigned long new_ip;
31727
31728 #ifdef CONFIG_PNPBIOS
31729- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31730+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31731 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31732 extern u32 pnp_bios_is_utter_crap;
31733 pnp_bios_is_utter_crap = 1;
31734@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31735 i += 4;
31736 p->fixup -= i;
31737 i += 4;
31738+
31739+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31740+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31741+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31742+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31743+#endif
31744+
31745 }
31746 }
31747
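The extable.c changes only make sense against the relative exception-table format: each 32-bit field stores the distance from the field's own address to the target, so the resolved address is field_address + stored_offset, and a KERNEXEC kernel relocated by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR must add that same delta back in at lookup time (and pre-subtract it once at sort time, as the second hunk does). A self-contained round-trip of the encoding:

	#include <stdio.h>

	static unsigned char blob[128];		/* stands in for kernel text */

	int main(void)
	{
		/* entry lives at blob+0, its target at blob+64 */
		int *insn_field = (int *)blob;
		*insn_field = 64;		/* self-relative offset */

		/* resolve it exactly the way ex_insn_addr() does */
		unsigned char *resolved = (unsigned char *)insn_field + *insn_field;
		printf("round-trip ok: %d\n", resolved == blob + 64);
		return 0;
	}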
31748diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31749index ede025f..380466b 100644
31750--- a/arch/x86/mm/fault.c
31751+++ b/arch/x86/mm/fault.c
31752@@ -13,12 +13,19 @@
31753 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31754 #include <linux/prefetch.h> /* prefetchw */
31755 #include <linux/context_tracking.h> /* exception_enter(), ... */
31756+#include <linux/unistd.h>
31757+#include <linux/compiler.h>
31758
31759 #include <asm/traps.h> /* dotraplinkage, ... */
31760 #include <asm/pgalloc.h> /* pgd_*(), ... */
31761 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31762 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31763 #include <asm/vsyscall.h> /* emulate_vsyscall */
31764+#include <asm/tlbflush.h>
31765+
31766+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31767+#include <asm/stacktrace.h>
31768+#endif
31769
31770 #define CREATE_TRACE_POINTS
31771 #include <asm/trace/exceptions.h>
31772@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31773 int ret = 0;
31774
31775 /* kprobe_running() needs smp_processor_id() */
31776- if (kprobes_built_in() && !user_mode_vm(regs)) {
31777+ if (kprobes_built_in() && !user_mode(regs)) {
31778 preempt_disable();
31779 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31780 ret = 1;
31781@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31782 return !instr_lo || (instr_lo>>1) == 1;
31783 case 0x00:
31784 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31785- if (probe_kernel_address(instr, opcode))
31786+ if (user_mode(regs)) {
31787+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31788+ return 0;
31789+ } else if (probe_kernel_address(instr, opcode))
31790 return 0;
31791
31792 *prefetch = (instr_lo == 0xF) &&
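This pattern, repeated in is_prefetch() in the next hunk, follows directly from UDEREF separating the address spaces: probe_kernel_address() can no longer read a faulting userland instruction stream, so the byte accessor is chosen by where the fault originated. Condensed into one helper as a sketch (kernel context assumed; both callees return nonzero on failure):

	static int read_insn_byte(struct pt_regs *regs,
				  const unsigned char *instr,
				  unsigned char *opcode)
	{
		if (user_mode(regs))
			/* user address: go through the user-access machinery */
			return __copy_from_user_inatomic(opcode,
					(const unsigned char __force_user *)instr, 1);
		/* kernel address: safe kernel-space probe */
		return probe_kernel_address(instr, *opcode);
	}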
31793@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31794 while (instr < max_instr) {
31795 unsigned char opcode;
31796
31797- if (probe_kernel_address(instr, opcode))
31798+ if (user_mode(regs)) {
31799+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31800+ break;
31801+ } else if (probe_kernel_address(instr, opcode))
31802 break;
31803
31804 instr++;
31805@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31806 force_sig_info(si_signo, &info, tsk);
31807 }
31808
31809+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31810+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31811+#endif
31812+
31813+#ifdef CONFIG_PAX_EMUTRAMP
31814+static int pax_handle_fetch_fault(struct pt_regs *regs);
31815+#endif
31816+
31817+#ifdef CONFIG_PAX_PAGEEXEC
31818+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31819+{
31820+ pgd_t *pgd;
31821+ pud_t *pud;
31822+ pmd_t *pmd;
31823+
31824+ pgd = pgd_offset(mm, address);
31825+ if (!pgd_present(*pgd))
31826+ return NULL;
31827+ pud = pud_offset(pgd, address);
31828+ if (!pud_present(*pud))
31829+ return NULL;
31830+ pmd = pmd_offset(pud, address);
31831+ if (!pmd_present(*pmd))
31832+ return NULL;
31833+ return pmd;
31834+}
31835+#endif
31836+
31837 DEFINE_SPINLOCK(pgd_lock);
31838 LIST_HEAD(pgd_list);
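pax_get_pmd() above is a plain top-down walk that gives up at the first non-present level; the PAGEEXEC handler further down uses it as the gateway to a locked PTE. A typical call sequence, mirroring that later hunk (a fragment, not standalone code):

	pmd_t *pmd = pax_get_pmd(mm, address);
	if (pmd) {
		spinlock_t *ptl;
		pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
		/* ... examine or adjust *pte under ptl ... */
		pte_unmap_unlock(pte, ptl);
	}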
31839
31840@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31841 for (address = VMALLOC_START & PMD_MASK;
31842 address >= TASK_SIZE && address < FIXADDR_TOP;
31843 address += PMD_SIZE) {
31844+
31845+#ifdef CONFIG_PAX_PER_CPU_PGD
31846+ unsigned long cpu;
31847+#else
31848 struct page *page;
31849+#endif
31850
31851 spin_lock(&pgd_lock);
31852+
31853+#ifdef CONFIG_PAX_PER_CPU_PGD
31854+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31855+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31856+ pmd_t *ret;
31857+
31858+ ret = vmalloc_sync_one(pgd, address);
31859+ if (!ret)
31860+ break;
31861+ pgd = get_cpu_pgd(cpu, kernel);
31862+#else
31863 list_for_each_entry(page, &pgd_list, lru) {
31864+ pgd_t *pgd;
31865 spinlock_t *pgt_lock;
31866 pmd_t *ret;
31867
31868@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31869 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31870
31871 spin_lock(pgt_lock);
31872- ret = vmalloc_sync_one(page_address(page), address);
31873+ pgd = page_address(page);
31874+#endif
31875+
31876+ ret = vmalloc_sync_one(pgd, address);
31877+
31878+#ifndef CONFIG_PAX_PER_CPU_PGD
31879 spin_unlock(pgt_lock);
31880+#endif
31881
31882 if (!ret)
31883 break;
31884@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31885 * an interrupt in the middle of a task switch..
31886 */
31887 pgd_paddr = read_cr3();
31888+
31889+#ifdef CONFIG_PAX_PER_CPU_PGD
31890+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31891+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31892+#endif
31893+
31894 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31895 if (!pmd_k)
31896 return -1;
31897@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31898 * happen within a race in page table update. In the later
31899 * case just flush:
31900 */
31901- pgd = pgd_offset(current->active_mm, address);
31902+
31903 pgd_ref = pgd_offset_k(address);
31904 if (pgd_none(*pgd_ref))
31905 return -1;
31906
31907+#ifdef CONFIG_PAX_PER_CPU_PGD
31908+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31909+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31910+ if (pgd_none(*pgd)) {
31911+ set_pgd(pgd, *pgd_ref);
31912+ arch_flush_lazy_mmu_mode();
31913+ } else {
31914+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31915+ }
31916+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31917+#else
31918+ pgd = pgd_offset(current->active_mm, address);
31919+#endif
31920+
31921 if (pgd_none(*pgd)) {
31922 set_pgd(pgd, *pgd_ref);
31923 arch_flush_lazy_mmu_mode();
31924@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31925 static int is_errata100(struct pt_regs *regs, unsigned long address)
31926 {
31927 #ifdef CONFIG_X86_64
31928- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31929+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31930 return 1;
31931 #endif
31932 return 0;
31933@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31934 }
31935
31936 static const char nx_warning[] = KERN_CRIT
31937-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31938+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31939 static const char smep_warning[] = KERN_CRIT
31940-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31941+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31942
31943 static void
31944 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31945@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31946 if (!oops_may_print())
31947 return;
31948
31949- if (error_code & PF_INSTR) {
31950+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31951 unsigned int level;
31952 pgd_t *pgd;
31953 pte_t *pte;
31954@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31955 pte = lookup_address_in_pgd(pgd, address, &level);
31956
31957 if (pte && pte_present(*pte) && !pte_exec(*pte))
31958- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31959+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31960 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31961 (pgd_flags(*pgd) & _PAGE_USER) &&
31962 (__read_cr4() & X86_CR4_SMEP))
31963- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31964+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31965 }
31966
31967+#ifdef CONFIG_PAX_KERNEXEC
31968+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31969+ if (current->signal->curr_ip)
31970+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31971+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31972+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31973+ else
31974+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31975+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31976+ }
31977+#endif
31978+
31979 printk(KERN_ALERT "BUG: unable to handle kernel ");
31980 if (address < PAGE_SIZE)
31981 printk(KERN_CONT "NULL pointer dereference");
31982@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31983 return;
31984 }
31985 #endif
31986+
31987+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31988+ if (pax_is_fetch_fault(regs, error_code, address)) {
31989+
31990+#ifdef CONFIG_PAX_EMUTRAMP
31991+ switch (pax_handle_fetch_fault(regs)) {
31992+ case 2:
31993+ return;
31994+ }
31995+#endif
31996+
31997+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31998+ do_group_exit(SIGKILL);
31999+ }
32000+#endif
32001+
32002 /* Kernel addresses are always protection faults: */
32003 if (address >= TASK_SIZE)
32004 error_code |= PF_PROT;
32005@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32006 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32007 printk(KERN_ERR
32008 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32009- tsk->comm, tsk->pid, address);
32010+ tsk->comm, task_pid_nr(tsk), address);
32011 code = BUS_MCEERR_AR;
32012 }
32013 #endif
32014@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32015 return 1;
32016 }
32017
32018+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32019+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32020+{
32021+ pte_t *pte;
32022+ pmd_t *pmd;
32023+ spinlock_t *ptl;
32024+ unsigned char pte_mask;
32025+
32026+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32027+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32028+ return 0;
32029+
32030+ /* PaX: it's our fault, let's handle it if we can */
32031+
32032+ /* PaX: take a look at read faults before acquiring any locks */
32033+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32034+ /* instruction fetch attempt from a protected page in user mode */
32035+ up_read(&mm->mmap_sem);
32036+
32037+#ifdef CONFIG_PAX_EMUTRAMP
32038+ switch (pax_handle_fetch_fault(regs)) {
32039+ case 2:
32040+ return 1;
32041+ }
32042+#endif
32043+
32044+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32045+ do_group_exit(SIGKILL);
32046+ }
32047+
32048+ pmd = pax_get_pmd(mm, address);
32049+ if (unlikely(!pmd))
32050+ return 0;
32051+
32052+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32053+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32054+ pte_unmap_unlock(pte, ptl);
32055+ return 0;
32056+ }
32057+
32058+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32059+ /* write attempt to a protected page in user mode */
32060+ pte_unmap_unlock(pte, ptl);
32061+ return 0;
32062+ }
32063+
32064+#ifdef CONFIG_SMP
32065+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32066+#else
32067+ if (likely(address > get_limit(regs->cs)))
32068+#endif
32069+ {
32070+ set_pte(pte, pte_mkread(*pte));
32071+ __flush_tlb_one(address);
32072+ pte_unmap_unlock(pte, ptl);
32073+ up_read(&mm->mmap_sem);
32074+ return 1;
32075+ }
32076+
32077+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32078+
32079+ /*
32080+ * PaX: fill DTLB with user rights and retry
32081+ */
32082+ __asm__ __volatile__ (
32083+ "orb %2,(%1)\n"
32084+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32085+/*
32086+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32087+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32088+ * page fault when examined during a TLB load attempt. this is true not only
32089+ * for PTEs holding a non-present entry but also present entries that will
32090+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32091+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32092+ * for our target pages since their PTEs are simply not in the TLBs at all.
32093+
32094+ * the best thing in omitting it is that we gain around 15-20% speed in the
32095+ * fast path of the page fault handler and can get rid of tracing since we
32096+ * can no longer flush unintended entries.
32097+ */
32098+ "invlpg (%0)\n"
32099+#endif
32100+ __copyuser_seg"testb $0,(%0)\n"
32101+ "xorb %3,(%1)\n"
32102+ :
32103+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32104+ : "memory", "cc");
32105+ pte_unmap_unlock(pte, ptl);
32106+ up_read(&mm->mmap_sem);
32107+ return 1;
32108+}
32109+#endif
32110+
32111 /*
32112 * Handle a spurious fault caused by a stale TLB entry.
32113 *
32114@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32115 static inline int
32116 access_error(unsigned long error_code, struct vm_area_struct *vma)
32117 {
32118+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32119+ return 1;
32120+
32121 if (error_code & PF_WRITE) {
32122 /* write, present and write, not present: */
32123 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32124@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32125 if (error_code & PF_USER)
32126 return false;
32127
32128- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32129+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32130 return false;
32131
32132 return true;
32133@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32134 tsk = current;
32135 mm = tsk->mm;
32136
32137+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32138+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32139+ if (!search_exception_tables(regs->ip)) {
32140+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32141+ bad_area_nosemaphore(regs, error_code, address);
32142+ return;
32143+ }
32144+ if (address < pax_user_shadow_base) {
32145+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32146+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32147+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32148+ } else
32149+ address -= pax_user_shadow_base;
32150+ }
32151+#endif
32152+
32153 /*
32154 * Detect and handle instructions that would cause a page fault for
32155 * both a tracked kernel page and a userspace page.
32156@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32157 * User-mode registers count as a user access even for any
32158 * potential system fault or CPU buglet:
32159 */
32160- if (user_mode_vm(regs)) {
32161+ if (user_mode(regs)) {
32162 local_irq_enable();
32163 error_code |= PF_USER;
32164 flags |= FAULT_FLAG_USER;
32165@@ -1187,6 +1411,11 @@ retry:
32166 might_sleep();
32167 }
32168
32169+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32170+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32171+ return;
32172+#endif
32173+
32174 vma = find_vma(mm, address);
32175 if (unlikely(!vma)) {
32176 bad_area(regs, error_code, address);
32177@@ -1198,18 +1427,24 @@ retry:
32178 bad_area(regs, error_code, address);
32179 return;
32180 }
32181- if (error_code & PF_USER) {
32182- /*
32183- * Accessing the stack below %sp is always a bug.
32184- * The large cushion allows instructions like enter
32185- * and pusha to work. ("enter $65535, $31" pushes
32186- * 32 pointers and then decrements %sp by 65535.)
32187- */
32188- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32189- bad_area(regs, error_code, address);
32190- return;
32191- }
32192+ /*
32193+ * Accessing the stack below %sp is always a bug.
32194+ * The large cushion allows instructions like enter
32195+ * and pusha to work. ("enter $65535, $31" pushes
32196+ * 32 pointers and then decrements %sp by 65535.)
32197+ */
32198+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32199+ bad_area(regs, error_code, address);
32200+ return;
32201 }
32202+
32203+#ifdef CONFIG_PAX_SEGMEXEC
32204+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32205+ bad_area(regs, error_code, address);
32206+ return;
32207+ }
32208+#endif
32209+
32210 if (unlikely(expand_stack(vma, address))) {
32211 bad_area(regs, error_code, address);
32212 return;
32213@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32214 }
32215 NOKPROBE_SYMBOL(trace_do_page_fault);
32216 #endif /* CONFIG_TRACING */
32217+
32218+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32219+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32220+{
32221+ struct mm_struct *mm = current->mm;
32222+ unsigned long ip = regs->ip;
32223+
32224+ if (v8086_mode(regs))
32225+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32226+
32227+#ifdef CONFIG_PAX_PAGEEXEC
32228+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32229+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32230+ return true;
32231+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32232+ return true;
32233+ return false;
32234+ }
32235+#endif
32236+
32237+#ifdef CONFIG_PAX_SEGMEXEC
32238+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32239+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32240+ return true;
32241+ return false;
32242+ }
32243+#endif
32244+
32245+ return false;
32246+}
32247+#endif
32248+
32249+#ifdef CONFIG_PAX_EMUTRAMP
32250+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32251+{
32252+ int err;
32253+
32254+ do { /* PaX: libffi trampoline emulation */
32255+ unsigned char mov, jmp;
32256+ unsigned int addr1, addr2;
32257+
32258+#ifdef CONFIG_X86_64
32259+ if ((regs->ip + 9) >> 32)
32260+ break;
32261+#endif
32262+
32263+ err = get_user(mov, (unsigned char __user *)regs->ip);
32264+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32265+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32266+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32267+
32268+ if (err)
32269+ break;
32270+
32271+ if (mov == 0xB8 && jmp == 0xE9) {
32272+ regs->ax = addr1;
32273+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32274+ return 2;
32275+ }
32276+ } while (0);
32277+
32278+ do { /* PaX: gcc trampoline emulation #1 */
32279+ unsigned char mov1, mov2;
32280+ unsigned short jmp;
32281+ unsigned int addr1, addr2;
32282+
32283+#ifdef CONFIG_X86_64
32284+ if ((regs->ip + 11) >> 32)
32285+ break;
32286+#endif
32287+
32288+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32289+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32290+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32291+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32292+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32293+
32294+ if (err)
32295+ break;
32296+
32297+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32298+ regs->cx = addr1;
32299+ regs->ax = addr2;
32300+ regs->ip = addr2;
32301+ return 2;
32302+ }
32303+ } while (0);
32304+
32305+ do { /* PaX: gcc trampoline emulation #2 */
32306+ unsigned char mov, jmp;
32307+ unsigned int addr1, addr2;
32308+
32309+#ifdef CONFIG_X86_64
32310+ if ((regs->ip + 9) >> 32)
32311+ break;
32312+#endif
32313+
32314+ err = get_user(mov, (unsigned char __user *)regs->ip);
32315+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32316+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32317+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32318+
32319+ if (err)
32320+ break;
32321+
32322+ if (mov == 0xB9 && jmp == 0xE9) {
32323+ regs->cx = addr1;
32324+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32325+ return 2;
32326+ }
32327+ } while (0);
32328+
32329+ return 1; /* PaX in action */
32330+}
32331+
32332+#ifdef CONFIG_X86_64
32333+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32334+{
32335+ int err;
32336+
32337+ do { /* PaX: libffi trampoline emulation */
32338+ unsigned short mov1, mov2, jmp1;
32339+ unsigned char stcclc, jmp2;
32340+ unsigned long addr1, addr2;
32341+
32342+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32343+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32344+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32345+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32346+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32347+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32348+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32349+
32350+ if (err)
32351+ break;
32352+
32353+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32354+ regs->r11 = addr1;
32355+ regs->r10 = addr2;
32356+ if (stcclc == 0xF8)
32357+ regs->flags &= ~X86_EFLAGS_CF;
32358+ else
32359+ regs->flags |= X86_EFLAGS_CF;
32360+ regs->ip = addr1;
32361+ return 2;
32362+ }
32363+ } while (0);
32364+
32365+ do { /* PaX: gcc trampoline emulation #1 */
32366+ unsigned short mov1, mov2, jmp1;
32367+ unsigned char jmp2;
32368+ unsigned int addr1;
32369+ unsigned long addr2;
32370+
32371+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32372+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32373+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32374+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32375+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32376+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32377+
32378+ if (err)
32379+ break;
32380+
32381+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32382+ regs->r11 = addr1;
32383+ regs->r10 = addr2;
32384+ regs->ip = addr1;
32385+ return 2;
32386+ }
32387+ } while (0);
32388+
32389+ do { /* PaX: gcc trampoline emulation #2 */
32390+ unsigned short mov1, mov2, jmp1;
32391+ unsigned char jmp2;
32392+ unsigned long addr1, addr2;
32393+
32394+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32395+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32396+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32397+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32398+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32399+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32400+
32401+ if (err)
32402+ break;
32403+
32404+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32405+ regs->r11 = addr1;
32406+ regs->r10 = addr2;
32407+ regs->ip = addr1;
32408+ return 2;
32409+ }
32410+ } while (0);
32411+
32412+ return 1; /* PaX in action */
32413+}
32414+#endif
32415+
32416+/*
32417+ * PaX: decide what to do with offenders (regs->ip = fault address)
32418+ *
32419+ * returns 1 when task should be killed
32420+ * 2 when gcc trampoline was detected
32421+ */
32422+static int pax_handle_fetch_fault(struct pt_regs *regs)
32423+{
32424+ if (v8086_mode(regs))
32425+ return 1;
32426+
32427+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32428+ return 1;
32429+
32430+#ifdef CONFIG_X86_32
32431+ return pax_handle_fetch_fault_32(regs);
32432+#else
32433+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32434+ return pax_handle_fetch_fault_32(regs);
32435+ else
32436+ return pax_handle_fetch_fault_64(regs);
32437+#endif
32438+}
32439+#endif
32440+
32441+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32442+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32443+{
32444+ long i;
32445+
32446+ printk(KERN_ERR "PAX: bytes at PC: ");
32447+ for (i = 0; i < 20; i++) {
32448+ unsigned char c;
32449+ if (get_user(c, (unsigned char __force_user *)pc+i))
32450+ printk(KERN_CONT "?? ");
32451+ else
32452+ printk(KERN_CONT "%02x ", c);
32453+ }
32454+ printk("\n");
32455+
32456+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32457+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32458+ unsigned long c;
32459+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32460+#ifdef CONFIG_X86_32
32461+ printk(KERN_CONT "???????? ");
32462+#else
32463+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32464+ printk(KERN_CONT "???????? ???????? ");
32465+ else
32466+ printk(KERN_CONT "???????????????? ");
32467+#endif
32468+ } else {
32469+#ifdef CONFIG_X86_64
32470+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32471+ printk(KERN_CONT "%08x ", (unsigned int)c);
32472+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32473+ } else
32474+#endif
32475+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32476+ }
32477+ }
32478+ printk("\n");
32479+}
32480+#endif
32481+
32482+/**
32483+ * probe_kernel_write(): safely attempt to write to a location
32484+ * @dst: address to write to
32485+ * @src: pointer to the data that shall be written
32486+ * @size: size of the data chunk
32487+ *
32488+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32489+ * happens, handle that and return -EFAULT.
32490+ */
32491+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32492+{
32493+ long ret;
32494+ mm_segment_t old_fs = get_fs();
32495+
32496+ set_fs(KERNEL_DS);
32497+ pagefault_disable();
32498+ pax_open_kernel();
32499+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32500+ pax_close_kernel();
32501+ pagefault_enable();
32502+ set_fs(old_fs);
32503+
32504+ return ret ? -EFAULT : 0;
32505+}
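
The trampoline emulation above keys entirely off exact byte patterns fetched with get_user(): 0xB8/0xB9 are the i386 "movl $imm32,%eax/%ecx" opcodes and 0xE9 is "jmp rel32", with the displacement taken relative to the end of the 10-byte pair. A minimal user-space sketch of the same matching (illustrative only, not part of the patch; the buffer contents and instruction pointer are invented, and a little-endian host is assumed):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Match the 10-byte gcc i386 trampoline "movl $imm32,%ecx; jmp rel32"
   the way pax_handle_fetch_fault_32() does: 0xB9 at +0, 0xE9 at +5. */
static int is_gcc_trampoline(const unsigned char *p, uint32_t ip)
{
        uint32_t addr1, addr2;

        if (p[0] != 0xB9 || p[5] != 0xE9)
                return 0;
        memcpy(&addr1, p + 1, 4);       /* static chain value -> %ecx */
        memcpy(&addr2, p + 6, 4);       /* jmp displacement */
        printf("would set ecx=%#x, ip=%#x\n", addr1, ip + addr2 + 10);
        return 1;
}

int main(void)
{
        /* movl $0x12345678,%ecx ; jmp .+0x100 (displacement 0xf6) */
        const unsigned char buf[10] = { 0xB9, 0x78, 0x56, 0x34, 0x12,
                                        0xE9, 0xF6, 0x00, 0x00, 0x00 };
        return is_gcc_trampoline(buf, 0x08048000) ? 0 : 1;
}
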
32506diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32507index 81bf3d2..7ef25c2 100644
32508--- a/arch/x86/mm/gup.c
32509+++ b/arch/x86/mm/gup.c
32510@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32511 addr = start;
32512 len = (unsigned long) nr_pages << PAGE_SHIFT;
32513 end = start + len;
32514- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32515+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32516 (void __user *)start, len)))
32517 return 0;
32518
32519@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32520 goto slow_irqon;
32521 #endif
32522
32523+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32524+ (void __user *)start, len)))
32525+ return 0;
32526+
32527 /*
32528 * XXX: batch / limit 'nr', to avoid large irq off latency
32529 * needs some instrumenting to determine the common sizes used by
32530diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32531index 4500142..53a363c 100644
32532--- a/arch/x86/mm/highmem_32.c
32533+++ b/arch/x86/mm/highmem_32.c
32534@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32535 idx = type + KM_TYPE_NR*smp_processor_id();
32536 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32537 BUG_ON(!pte_none(*(kmap_pte-idx)));
32538+
32539+ pax_open_kernel();
32540 set_pte(kmap_pte-idx, mk_pte(page, prot));
32541+ pax_close_kernel();
32542+
32543 arch_flush_lazy_mmu_mode();
32544
32545 return (void *)vaddr;
32546diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32547index 42982b2..7168fc3 100644
32548--- a/arch/x86/mm/hugetlbpage.c
32549+++ b/arch/x86/mm/hugetlbpage.c
32550@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32551 #ifdef CONFIG_HUGETLB_PAGE
32552 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32553 unsigned long addr, unsigned long len,
32554- unsigned long pgoff, unsigned long flags)
32555+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32556 {
32557 struct hstate *h = hstate_file(file);
32558 struct vm_unmapped_area_info info;
32559-
32560+
32561 info.flags = 0;
32562 info.length = len;
32563 info.low_limit = current->mm->mmap_legacy_base;
32564 info.high_limit = TASK_SIZE;
32565 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32566 info.align_offset = 0;
32567+ info.threadstack_offset = offset;
32568 return vm_unmapped_area(&info);
32569 }
32570
32571 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32572 unsigned long addr0, unsigned long len,
32573- unsigned long pgoff, unsigned long flags)
32574+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32575 {
32576 struct hstate *h = hstate_file(file);
32577 struct vm_unmapped_area_info info;
32578@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32579 info.high_limit = current->mm->mmap_base;
32580 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32581 info.align_offset = 0;
32582+ info.threadstack_offset = offset;
32583 addr = vm_unmapped_area(&info);
32584
32585 /*
32586@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32587 VM_BUG_ON(addr != -ENOMEM);
32588 info.flags = 0;
32589 info.low_limit = TASK_UNMAPPED_BASE;
32590+
32591+#ifdef CONFIG_PAX_RANDMMAP
32592+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32593+ info.low_limit += current->mm->delta_mmap;
32594+#endif
32595+
32596 info.high_limit = TASK_SIZE;
32597 addr = vm_unmapped_area(&info);
32598 }
32599@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32600 struct hstate *h = hstate_file(file);
32601 struct mm_struct *mm = current->mm;
32602 struct vm_area_struct *vma;
32603+ unsigned long pax_task_size = TASK_SIZE;
32604+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32605
32606 if (len & ~huge_page_mask(h))
32607 return -EINVAL;
32608- if (len > TASK_SIZE)
32609+
32610+#ifdef CONFIG_PAX_SEGMEXEC
32611+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32612+ pax_task_size = SEGMEXEC_TASK_SIZE;
32613+#endif
32614+
32615+ pax_task_size -= PAGE_SIZE;
32616+
32617+ if (len > pax_task_size)
32618 return -ENOMEM;
32619
32620 if (flags & MAP_FIXED) {
32621@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32622 return addr;
32623 }
32624
32625+#ifdef CONFIG_PAX_RANDMMAP
32626+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32627+#endif
32628+
32629 if (addr) {
32630 addr = ALIGN(addr, huge_page_size(h));
32631 vma = find_vma(mm, addr);
32632- if (TASK_SIZE - len >= addr &&
32633- (!vma || addr + len <= vma->vm_start))
32634+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32635 return addr;
32636 }
32637 if (mm->get_unmapped_area == arch_get_unmapped_area)
32638 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32639- pgoff, flags);
32640+ pgoff, flags, offset);
32641 else
32642 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32643- pgoff, flags);
32644+ pgoff, flags, offset);
32645 }
32646 #endif /* CONFIG_HUGETLB_PAGE */
32647
32648diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32649index a110efc..a31a18f 100644
32650--- a/arch/x86/mm/init.c
32651+++ b/arch/x86/mm/init.c
32652@@ -4,6 +4,7 @@
32653 #include <linux/swap.h>
32654 #include <linux/memblock.h>
32655 #include <linux/bootmem.h> /* for max_low_pfn */
32656+#include <linux/tboot.h>
32657
32658 #include <asm/cacheflush.h>
32659 #include <asm/e820.h>
32660@@ -17,6 +18,8 @@
32661 #include <asm/proto.h>
32662 #include <asm/dma.h> /* for MAX_DMA_PFN */
32663 #include <asm/microcode.h>
32664+#include <asm/desc.h>
32665+#include <asm/bios_ebda.h>
32666
32667 /*
32668 * We need to define the tracepoints somewhere, and tlb.c
32669@@ -620,7 +623,18 @@ void __init init_mem_mapping(void)
32670 early_ioremap_page_table_range_init();
32671 #endif
32672
32673+#ifdef CONFIG_PAX_PER_CPU_PGD
32674+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32675+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32676+ KERNEL_PGD_PTRS);
32677+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32678+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32679+ KERNEL_PGD_PTRS);
32680+ load_cr3(get_cpu_pgd(0, kernel));
32681+#else
32682 load_cr3(swapper_pg_dir);
32683+#endif
32684+
32685 __flush_tlb_all();
32686
32687 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32688@@ -636,10 +650,40 @@ void __init init_mem_mapping(void)
32689 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32690 * mmio resources as well as potential bios/acpi data regions.
32691 */
32692+
32693+#ifdef CONFIG_GRKERNSEC_KMEM
32694+static unsigned int ebda_start __read_only;
32695+static unsigned int ebda_end __read_only;
32696+#endif
32697+
32698 int devmem_is_allowed(unsigned long pagenr)
32699 {
32700- if (pagenr < 256)
32701+#ifdef CONFIG_GRKERNSEC_KMEM
32702+ /* allow BDA */
32703+ if (!pagenr)
32704 return 1;
32705+ /* allow EBDA */
32706+ if (pagenr >= ebda_start && pagenr < ebda_end)
32707+ return 1;
32708+ /* if tboot is in use, allow access to its hardcoded serial log range */
32709+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32710+ return 1;
32711+#else
32712+ if (!pagenr)
32713+ return 1;
32714+#ifdef CONFIG_VM86
32715+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32716+ return 1;
32717+#endif
32718+#endif
32719+
32720+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32721+ return 1;
32722+#ifdef CONFIG_GRKERNSEC_KMEM
32723+ /* throw out everything else below 1MB */
32724+ if (pagenr <= 256)
32725+ return 0;
32726+#endif
32727 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32728 return 0;
32729 if (!page_is_ram(pagenr))
32730@@ -685,8 +729,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32731 #endif
32732 }
32733
32734+#ifdef CONFIG_GRKERNSEC_KMEM
32735+static inline void gr_init_ebda(void)
32736+{
32737+ unsigned int ebda_addr;
32738+ unsigned int ebda_size = 0;
32739+
32740+ ebda_addr = get_bios_ebda();
32741+ if (ebda_addr) {
32742+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32743+ ebda_size <<= 10;
32744+ }
32745+ if (ebda_addr && ebda_size) {
32746+ ebda_start = ebda_addr >> PAGE_SHIFT;
32747+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32748+ } else {
32749+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32750+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32751+ }
32752+}
32753+#else
32754+static inline void gr_init_ebda(void) { }
32755+#endif
32756+
32757 void free_initmem(void)
32758 {
32759+#ifdef CONFIG_PAX_KERNEXEC
32760+#ifdef CONFIG_X86_32
32761+ /* PaX: limit KERNEL_CS to actual size */
32762+ unsigned long addr, limit;
32763+ struct desc_struct d;
32764+ int cpu;
32765+#else
32766+ pgd_t *pgd;
32767+ pud_t *pud;
32768+ pmd_t *pmd;
32769+ unsigned long addr, end;
32770+#endif
32771+#endif
32772+
32773+ gr_init_ebda();
32774+
32775+#ifdef CONFIG_PAX_KERNEXEC
32776+#ifdef CONFIG_X86_32
32777+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32778+ limit = (limit - 1UL) >> PAGE_SHIFT;
32779+
32780+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32781+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32782+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32783+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32784+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32785+ }
32786+
32787+ /* PaX: make KERNEL_CS read-only */
32788+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32789+ if (!paravirt_enabled())
32790+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32791+/*
32792+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32793+ pgd = pgd_offset_k(addr);
32794+ pud = pud_offset(pgd, addr);
32795+ pmd = pmd_offset(pud, addr);
32796+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32797+ }
32798+*/
32799+#ifdef CONFIG_X86_PAE
32800+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32801+/*
32802+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32803+ pgd = pgd_offset_k(addr);
32804+ pud = pud_offset(pgd, addr);
32805+ pmd = pmd_offset(pud, addr);
32806+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32807+ }
32808+*/
32809+#endif
32810+
32811+#ifdef CONFIG_MODULES
32812+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32813+#endif
32814+
32815+#else
32816+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32817+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32818+ pgd = pgd_offset_k(addr);
32819+ pud = pud_offset(pgd, addr);
32820+ pmd = pmd_offset(pud, addr);
32821+ if (!pmd_present(*pmd))
32822+ continue;
32823+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32824+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32825+ else
32826+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32827+ }
32828+
32829+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32830+ end = addr + KERNEL_IMAGE_SIZE;
32831+ for (; addr < end; addr += PMD_SIZE) {
32832+ pgd = pgd_offset_k(addr);
32833+ pud = pud_offset(pgd, addr);
32834+ pmd = pmd_offset(pud, addr);
32835+ if (!pmd_present(*pmd))
32836+ continue;
32837+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32838+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32839+ }
32840+#endif
32841+
32842+ flush_tlb_all();
32843+#endif
32844+
32845 free_init_pages("unused kernel",
32846 (unsigned long)(&__init_begin),
32847 (unsigned long)(&__init_end));
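
Under CONFIG_GRKERNSEC_KMEM the reworked devmem_is_allowed() above whitelists only the BIOS data area (page 0), the EBDA window, the legacy ISA hole, and pages above 1MB that pass the usual iomem checks. A stand-alone sketch of that page-number policy (illustrative only, not part of the patch; 4KB pages and the 0x9f000-0xa0000 fallback EBDA window are assumed, and the tboot and iomem_is_exclusive() cases are left out):

#include <stdio.h>

#define PAGE_SHIFT      12
#define ISA_START_PFN   (0xa0000 >> PAGE_SHIFT)        /* 0xa0 */
#define ISA_END_PFN     (0x100000 >> PAGE_SHIFT)       /* 0x100 */

static int kmem_devmem_is_allowed(unsigned long pagenr,
                                  unsigned long ebda_start,
                                  unsigned long ebda_end)
{
        if (!pagenr)                                    /* BIOS data area */
                return 1;
        if (pagenr >= ebda_start && pagenr < ebda_end)  /* EBDA */
                return 1;
        if (ISA_START_PFN <= pagenr && pagenr < ISA_END_PFN)
                return 1;                               /* legacy ISA hole */
        return pagenr > 256;    /* everything else below 1MB is denied */
}

int main(void)
{
        const unsigned long pfns[] = { 0, 1, 0x9f, 0xa0, 0xff, 0x100, 0x101 };

        for (unsigned int i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
                printf("pfn %#05lx -> %s\n", pfns[i],
                       kmem_devmem_is_allowed(pfns[i], 0x9f, 0xa0) ? "allow" : "deny");
        return 0;
}
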
32848diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32849index c8140e1..59257fc 100644
32850--- a/arch/x86/mm/init_32.c
32851+++ b/arch/x86/mm/init_32.c
32852@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32853 bool __read_mostly __vmalloc_start_set = false;
32854
32855 /*
32856- * Creates a middle page table and puts a pointer to it in the
32857- * given global directory entry. This only returns the gd entry
32858- * in non-PAE compilation mode, since the middle layer is folded.
32859- */
32860-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32861-{
32862- pud_t *pud;
32863- pmd_t *pmd_table;
32864-
32865-#ifdef CONFIG_X86_PAE
32866- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32867- pmd_table = (pmd_t *)alloc_low_page();
32868- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32869- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32870- pud = pud_offset(pgd, 0);
32871- BUG_ON(pmd_table != pmd_offset(pud, 0));
32872-
32873- return pmd_table;
32874- }
32875-#endif
32876- pud = pud_offset(pgd, 0);
32877- pmd_table = pmd_offset(pud, 0);
32878-
32879- return pmd_table;
32880-}
32881-
32882-/*
32883 * Create a page table and place a pointer to it in a middle page
32884 * directory entry:
32885 */
32886@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32887 pte_t *page_table = (pte_t *)alloc_low_page();
32888
32889 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32890+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32891+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32892+#else
32893 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32894+#endif
32895 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32896 }
32897
32898 return pte_offset_kernel(pmd, 0);
32899 }
32900
32901+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32902+{
32903+ pud_t *pud;
32904+ pmd_t *pmd_table;
32905+
32906+ pud = pud_offset(pgd, 0);
32907+ pmd_table = pmd_offset(pud, 0);
32908+
32909+ return pmd_table;
32910+}
32911+
32912 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32913 {
32914 int pgd_idx = pgd_index(vaddr);
32915@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32916 int pgd_idx, pmd_idx;
32917 unsigned long vaddr;
32918 pgd_t *pgd;
32919+ pud_t *pud;
32920 pmd_t *pmd;
32921 pte_t *pte = NULL;
32922 unsigned long count = page_table_range_init_count(start, end);
32923@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32924 pgd = pgd_base + pgd_idx;
32925
32926 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32927- pmd = one_md_table_init(pgd);
32928- pmd = pmd + pmd_index(vaddr);
32929+ pud = pud_offset(pgd, vaddr);
32930+ pmd = pmd_offset(pud, vaddr);
32931+
32932+#ifdef CONFIG_X86_PAE
32933+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32934+#endif
32935+
32936 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32937 pmd++, pmd_idx++) {
32938 pte = page_table_kmap_check(one_page_table_init(pmd),
32939@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32940 }
32941 }
32942
32943-static inline int is_kernel_text(unsigned long addr)
32944+static inline int is_kernel_text(unsigned long start, unsigned long end)
32945 {
32946- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32947- return 1;
32948- return 0;
32949+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32950+ end <= ktla_ktva((unsigned long)_stext)) &&
32951+ (start >= ktla_ktva((unsigned long)_einittext) ||
32952+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32953+
32954+#ifdef CONFIG_ACPI_SLEEP
32955+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32956+#endif
32957+
32958+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32959+ return 0;
32960+ return 1;
32961 }
32962
32963 /*
32964@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32965 unsigned long last_map_addr = end;
32966 unsigned long start_pfn, end_pfn;
32967 pgd_t *pgd_base = swapper_pg_dir;
32968- int pgd_idx, pmd_idx, pte_ofs;
32969+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32970 unsigned long pfn;
32971 pgd_t *pgd;
32972+ pud_t *pud;
32973 pmd_t *pmd;
32974 pte_t *pte;
32975 unsigned pages_2m, pages_4k;
32976@@ -291,8 +295,13 @@ repeat:
32977 pfn = start_pfn;
32978 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32979 pgd = pgd_base + pgd_idx;
32980- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32981- pmd = one_md_table_init(pgd);
32982+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32983+ pud = pud_offset(pgd, 0);
32984+ pmd = pmd_offset(pud, 0);
32985+
32986+#ifdef CONFIG_X86_PAE
32987+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32988+#endif
32989
32990 if (pfn >= end_pfn)
32991 continue;
32992@@ -304,14 +313,13 @@ repeat:
32993 #endif
32994 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32995 pmd++, pmd_idx++) {
32996- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32997+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32998
32999 /*
33000 * Map with big pages if possible, otherwise
33001 * create normal page tables:
33002 */
33003 if (use_pse) {
33004- unsigned int addr2;
33005 pgprot_t prot = PAGE_KERNEL_LARGE;
33006 /*
33007 * first pass will use the same initial
33008@@ -322,11 +330,7 @@ repeat:
33009 _PAGE_PSE);
33010
33011 pfn &= PMD_MASK >> PAGE_SHIFT;
33012- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33013- PAGE_OFFSET + PAGE_SIZE-1;
33014-
33015- if (is_kernel_text(addr) ||
33016- is_kernel_text(addr2))
33017+ if (is_kernel_text(address, address + PMD_SIZE))
33018 prot = PAGE_KERNEL_LARGE_EXEC;
33019
33020 pages_2m++;
33021@@ -343,7 +347,7 @@ repeat:
33022 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33023 pte += pte_ofs;
33024 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33025- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33026+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33027 pgprot_t prot = PAGE_KERNEL;
33028 /*
33029 * first pass will use the same initial
33030@@ -351,7 +355,7 @@ repeat:
33031 */
33032 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33033
33034- if (is_kernel_text(addr))
33035+ if (is_kernel_text(address, address + PAGE_SIZE))
33036 prot = PAGE_KERNEL_EXEC;
33037
33038 pages_4k++;
33039@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33040
33041 pud = pud_offset(pgd, va);
33042 pmd = pmd_offset(pud, va);
33043- if (!pmd_present(*pmd))
33044+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33045 break;
33046
33047 /* should not be large page here */
33048@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33049
33050 static void __init pagetable_init(void)
33051 {
33052- pgd_t *pgd_base = swapper_pg_dir;
33053-
33054- permanent_kmaps_init(pgd_base);
33055+ permanent_kmaps_init(swapper_pg_dir);
33056 }
33057
33058-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33059+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33060 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33061
33062 /* user-defined highmem size */
33063@@ -787,10 +789,10 @@ void __init mem_init(void)
33064 ((unsigned long)&__init_end -
33065 (unsigned long)&__init_begin) >> 10,
33066
33067- (unsigned long)&_etext, (unsigned long)&_edata,
33068- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33069+ (unsigned long)&_sdata, (unsigned long)&_edata,
33070+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33071
33072- (unsigned long)&_text, (unsigned long)&_etext,
33073+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33074 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33075
33076 /*
33077@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33078 if (!kernel_set_to_readonly)
33079 return;
33080
33081+ start = ktla_ktva(start);
33082 pr_debug("Set kernel text: %lx - %lx for read write\n",
33083 start, start+size);
33084
33085@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33086 if (!kernel_set_to_readonly)
33087 return;
33088
33089+ start = ktla_ktva(start);
33090 pr_debug("Set kernel text: %lx - %lx for read only\n",
33091 start, start+size);
33092
33093@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33094 unsigned long start = PFN_ALIGN(_text);
33095 unsigned long size = PFN_ALIGN(_etext) - start;
33096
33097+ start = ktla_ktva(start);
33098 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33099 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33100 size >> 10);
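
The rewritten is_kernel_text() above switches from probing single addresses to testing whether the candidate range [start,end) overlaps any protected region: kernel text, init text, the ACPI wakeup trampoline, or low BIOS memory. The overlap test is the standard interval check; a small sketch (illustrative only, not part of the patch; the region bounds are invented):

#include <stdio.h>

/* A range [start,end) overlaps a region [r_start,r_end) unless it lies
   entirely before or entirely after it. */
static int overlaps(unsigned long start, unsigned long end,
                    unsigned long r_start, unsigned long r_end)
{
        return !(start >= r_end || end <= r_start);
}

int main(void)
{
        /* pretend _stext.._etext is [0x1000, 0x5000) */
        printf("%d\n", overlaps(0x0000, 0x1000, 0x1000, 0x5000)); /* 0: ends at _stext */
        printf("%d\n", overlaps(0x4000, 0x6000, 0x1000, 0x5000)); /* 1: straddles _etext */
        printf("%d\n", overlaps(0x5000, 0x6000, 0x1000, 0x5000)); /* 0: starts at _etext */
        return 0;
}
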
33101diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33102index 30eb05a..ae671ac 100644
33103--- a/arch/x86/mm/init_64.c
33104+++ b/arch/x86/mm/init_64.c
33105@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33106 * around without checking the pgd every time.
33107 */
33108
33109-pteval_t __supported_pte_mask __read_mostly = ~0;
33110+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33111 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33112
33113 int force_personality32;
33114@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33115
33116 for (address = start; address <= end; address += PGDIR_SIZE) {
33117 const pgd_t *pgd_ref = pgd_offset_k(address);
33118+
33119+#ifdef CONFIG_PAX_PER_CPU_PGD
33120+ unsigned long cpu;
33121+#else
33122 struct page *page;
33123+#endif
33124
33125 /*
33126 * When it is called after memory hot remove, pgd_none()
33127@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33128 continue;
33129
33130 spin_lock(&pgd_lock);
33131+
33132+#ifdef CONFIG_PAX_PER_CPU_PGD
33133+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33134+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33135+
33136+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33137+ BUG_ON(pgd_page_vaddr(*pgd)
33138+ != pgd_page_vaddr(*pgd_ref));
33139+
33140+ if (removed) {
33141+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33142+ pgd_clear(pgd);
33143+ } else {
33144+ if (pgd_none(*pgd))
33145+ set_pgd(pgd, *pgd_ref);
33146+ }
33147+
33148+ pgd = pgd_offset_cpu(cpu, kernel, address);
33149+#else
33150 list_for_each_entry(page, &pgd_list, lru) {
33151 pgd_t *pgd;
33152 spinlock_t *pgt_lock;
33153@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33154 /* the pgt_lock only for Xen */
33155 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33156 spin_lock(pgt_lock);
33157+#endif
33158
33159 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33160 BUG_ON(pgd_page_vaddr(*pgd)
33161@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33162 set_pgd(pgd, *pgd_ref);
33163 }
33164
33165+#ifndef CONFIG_PAX_PER_CPU_PGD
33166 spin_unlock(pgt_lock);
33167+#endif
33168+
33169 }
33170 spin_unlock(&pgd_lock);
33171 }
33172@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33173 {
33174 if (pgd_none(*pgd)) {
33175 pud_t *pud = (pud_t *)spp_getpage();
33176- pgd_populate(&init_mm, pgd, pud);
33177+ pgd_populate_kernel(&init_mm, pgd, pud);
33178 if (pud != pud_offset(pgd, 0))
33179 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33180 pud, pud_offset(pgd, 0));
33181@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33182 {
33183 if (pud_none(*pud)) {
33184 pmd_t *pmd = (pmd_t *) spp_getpage();
33185- pud_populate(&init_mm, pud, pmd);
33186+ pud_populate_kernel(&init_mm, pud, pmd);
33187 if (pmd != pmd_offset(pud, 0))
33188 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33189 pmd, pmd_offset(pud, 0));
33190@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33191 pmd = fill_pmd(pud, vaddr);
33192 pte = fill_pte(pmd, vaddr);
33193
33194+ pax_open_kernel();
33195 set_pte(pte, new_pte);
33196+ pax_close_kernel();
33197
33198 /*
33199 * It's enough to flush this one mapping.
33200@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33201 pgd = pgd_offset_k((unsigned long)__va(phys));
33202 if (pgd_none(*pgd)) {
33203 pud = (pud_t *) spp_getpage();
33204- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33205- _PAGE_USER));
33206+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33207 }
33208 pud = pud_offset(pgd, (unsigned long)__va(phys));
33209 if (pud_none(*pud)) {
33210 pmd = (pmd_t *) spp_getpage();
33211- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33212- _PAGE_USER));
33213+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33214 }
33215 pmd = pmd_offset(pud, phys);
33216 BUG_ON(!pmd_none(*pmd));
33217@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33218 prot);
33219
33220 spin_lock(&init_mm.page_table_lock);
33221- pud_populate(&init_mm, pud, pmd);
33222+ pud_populate_kernel(&init_mm, pud, pmd);
33223 spin_unlock(&init_mm.page_table_lock);
33224 }
33225 __flush_tlb_all();
33226@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33227 page_size_mask);
33228
33229 spin_lock(&init_mm.page_table_lock);
33230- pgd_populate(&init_mm, pgd, pud);
33231+ pgd_populate_kernel(&init_mm, pgd, pud);
33232 spin_unlock(&init_mm.page_table_lock);
33233 pgd_changed = true;
33234 }
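
With CONFIG_PAX_PER_CPU_PGD each CPU owns a private page-global directory, so sync_global_pgds() above walks every CPU instead of the shared pgd_list when propagating or removing kernel mappings. A simplified model of that sync step (illustrative only, not part of the patch; entries are plain longs rather than pgd_t, zero stands in for pgd_none(), and locking is omitted):

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS_DEMO    4
#define PTRS_DEMO       512

static unsigned long cpu_pgd[NR_CPUS_DEMO][PTRS_DEMO];

/* Propagate one kernel pgd slot from the reference table into every
   per-CPU table, or clear it again when the mapping was removed. */
static void sync_pgd_slot(const unsigned long *pgd_ref, size_t idx, int removed)
{
        for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
                unsigned long *pgd = &cpu_pgd[cpu][idx];

                if (removed) {
                        if (!pgd_ref[idx] && *pgd)
                                *pgd = 0;               /* pgd_clear() */
                } else if (!*pgd) {
                        *pgd = pgd_ref[idx];            /* set_pgd(pgd, *pgd_ref) */
                }
        }
}

int main(void)
{
        static unsigned long pgd_ref[PTRS_DEMO];

        pgd_ref[300] = 0xdeadb000;
        sync_pgd_slot(pgd_ref, 300, 0);
        printf("cpu1 slot 300 = %#lx\n", cpu_pgd[1][300]);
        return 0;
}
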
33235diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33236index 9ca35fc..4b2b7b7 100644
33237--- a/arch/x86/mm/iomap_32.c
33238+++ b/arch/x86/mm/iomap_32.c
33239@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33240 type = kmap_atomic_idx_push();
33241 idx = type + KM_TYPE_NR * smp_processor_id();
33242 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33243+
33244+ pax_open_kernel();
33245 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33246+ pax_close_kernel();
33247+
33248 arch_flush_lazy_mmu_mode();
33249
33250 return (void *)vaddr;
33251diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33252index fdf617c..b9e85bc 100644
33253--- a/arch/x86/mm/ioremap.c
33254+++ b/arch/x86/mm/ioremap.c
33255@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33256 unsigned long i;
33257
33258 for (i = 0; i < nr_pages; ++i)
33259- if (pfn_valid(start_pfn + i) &&
33260- !PageReserved(pfn_to_page(start_pfn + i)))
33261+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33262+ !PageReserved(pfn_to_page(start_pfn + i))))
33263 return 1;
33264
33265 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33266@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33267 *
33268 * Caller must ensure there is only one unmapping for the same pointer.
33269 */
33270-void iounmap(volatile void __iomem *addr)
33271+void iounmap(const volatile void __iomem *addr)
33272 {
33273 struct vm_struct *p, *o;
33274
33275@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33276 */
33277 void *xlate_dev_mem_ptr(phys_addr_t phys)
33278 {
33279- void *addr;
33280- unsigned long start = phys & PAGE_MASK;
33281-
33282 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33283- if (page_is_ram(start >> PAGE_SHIFT))
33284+ if (page_is_ram(phys >> PAGE_SHIFT))
33285+#ifdef CONFIG_HIGHMEM
33286+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33287+#endif
33288 return __va(phys);
33289
33290- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33291- if (addr)
33292- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33293-
33294- return addr;
33295+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33296 }
33297
33298 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33299 {
33300 if (page_is_ram(phys >> PAGE_SHIFT))
33301+#ifdef CONFIG_HIGHMEM
33302+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33303+#endif
33304 return;
33305
33306 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33307 return;
33308 }
33309
33310-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33311+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33312
33313 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33314 {
33315@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33316 early_ioremap_setup();
33317
33318 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33319- memset(bm_pte, 0, sizeof(bm_pte));
33320- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33321+ pmd_populate_user(&init_mm, pmd, bm_pte);
33322
33323 /*
33324 * The boot-ioremap range spans multiple pmds, for which
33325diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33326index b4f2e7e..96c9c3e 100644
33327--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33328+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33329@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33330 * memory (e.g. tracked pages)? For now, we need this to avoid
33331 * invoking kmemcheck for PnP BIOS calls.
33332 */
33333- if (regs->flags & X86_VM_MASK)
33334+ if (v8086_mode(regs))
33335 return false;
33336- if (regs->cs != __KERNEL_CS)
33337+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33338 return false;
33339
33340 pte = kmemcheck_pte_lookup(address);
33341diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33342index df4552b..12c129c 100644
33343--- a/arch/x86/mm/mmap.c
33344+++ b/arch/x86/mm/mmap.c
33345@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33346 * Leave an at least ~128 MB hole with possible stack randomization.
33347 */
33348 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33349-#define MAX_GAP (TASK_SIZE/6*5)
33350+#define MAX_GAP (pax_task_size/6*5)
33351
33352 static int mmap_is_legacy(void)
33353 {
33354@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33355 return rnd << PAGE_SHIFT;
33356 }
33357
33358-static unsigned long mmap_base(void)
33359+static unsigned long mmap_base(struct mm_struct *mm)
33360 {
33361 unsigned long gap = rlimit(RLIMIT_STACK);
33362+ unsigned long pax_task_size = TASK_SIZE;
33363+
33364+#ifdef CONFIG_PAX_SEGMEXEC
33365+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33366+ pax_task_size = SEGMEXEC_TASK_SIZE;
33367+#endif
33368
33369 if (gap < MIN_GAP)
33370 gap = MIN_GAP;
33371 else if (gap > MAX_GAP)
33372 gap = MAX_GAP;
33373
33374- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33375+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33376 }
33377
33378 /*
33379 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33380 * does, but not when emulating X86_32
33381 */
33382-static unsigned long mmap_legacy_base(void)
33383+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33384 {
33385- if (mmap_is_ia32())
33386+ if (mmap_is_ia32()) {
33387+
33388+#ifdef CONFIG_PAX_SEGMEXEC
33389+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33390+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33391+ else
33392+#endif
33393+
33394 return TASK_UNMAPPED_BASE;
33395- else
33396+ } else
33397 return TASK_UNMAPPED_BASE + mmap_rnd();
33398 }
33399
33400@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33401 */
33402 void arch_pick_mmap_layout(struct mm_struct *mm)
33403 {
33404- mm->mmap_legacy_base = mmap_legacy_base();
33405- mm->mmap_base = mmap_base();
33406+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33407+ mm->mmap_base = mmap_base(mm);
33408+
33409+#ifdef CONFIG_PAX_RANDMMAP
33410+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33411+ mm->mmap_legacy_base += mm->delta_mmap;
33412+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33413+ }
33414+#endif
33415
33416 if (mmap_is_legacy()) {
33417 mm->mmap_base = mm->mmap_legacy_base;
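
mmap_base() above clamps the stack gap between MIN_GAP (~128MB plus stack randomization) and MAX_GAP (5/6 of the task size, now pax_task_size so SEGMEXEC's halved address space is honoured) and places the mmap base that far below the top. A worked example with made-up numbers (illustrative only, not part of the patch; mmap_rnd() and stack_maxrandom_size() are taken as zero):

#include <stdio.h>

#define TASK_SIZE_DEMO  0xC0000000UL            /* 3GB, i386-style */
#define MIN_GAP         (128UL << 20)           /* ignoring stack_maxrandom_size() */
#define MAX_GAP         (TASK_SIZE_DEMO / 6 * 5)
#define PAGE_ALIGN(x)   (((x) + 0xFFFUL) & ~0xFFFUL)

int main(void)
{
        unsigned long gap = 8UL << 20;          /* RLIMIT_STACK = 8MB */

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        /* mmap_rnd() omitted; with it, the base drops by the random delta */
        printf("mmap_base = %#lx\n", PAGE_ALIGN(TASK_SIZE_DEMO - gap));
        return 0;                               /* prints 0xb8000000 */
}
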
33418diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33419index 0057a7a..95c7edd 100644
33420--- a/arch/x86/mm/mmio-mod.c
33421+++ b/arch/x86/mm/mmio-mod.c
33422@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33423 break;
33424 default:
33425 {
33426- unsigned char *ip = (unsigned char *)instptr;
33427+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33428 my_trace->opcode = MMIO_UNKNOWN_OP;
33429 my_trace->width = 0;
33430 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33431@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33432 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33433 void __iomem *addr)
33434 {
33435- static atomic_t next_id;
33436+ static atomic_unchecked_t next_id;
33437 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33438 /* These are page-unaligned. */
33439 struct mmiotrace_map map = {
33440@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33441 .private = trace
33442 },
33443 .phys = offset,
33444- .id = atomic_inc_return(&next_id)
33445+ .id = atomic_inc_return_unchecked(&next_id)
33446 };
33447 map.map_id = trace->id;
33448
33449@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33450 ioremap_trace_core(offset, size, addr);
33451 }
33452
33453-static void iounmap_trace_core(volatile void __iomem *addr)
33454+static void iounmap_trace_core(const volatile void __iomem *addr)
33455 {
33456 struct mmiotrace_map map = {
33457 .phys = 0,
33458@@ -328,7 +328,7 @@ not_enabled:
33459 }
33460 }
33461
33462-void mmiotrace_iounmap(volatile void __iomem *addr)
33463+void mmiotrace_iounmap(const volatile void __iomem *addr)
33464 {
33465 might_sleep();
33466 if (is_enabled()) /* recheck and proper locking in *_core() */
33467diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33468index cd4785b..25188b6 100644
33469--- a/arch/x86/mm/numa.c
33470+++ b/arch/x86/mm/numa.c
33471@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33472 }
33473 }
33474
33475-static int __init numa_register_memblks(struct numa_meminfo *mi)
33476+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33477 {
33478 unsigned long uninitialized_var(pfn_align);
33479 int i, nid;
33480diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33481index 536ea2f..f42c293 100644
33482--- a/arch/x86/mm/pageattr.c
33483+++ b/arch/x86/mm/pageattr.c
33484@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33485 */
33486 #ifdef CONFIG_PCI_BIOS
33487 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33488- pgprot_val(forbidden) |= _PAGE_NX;
33489+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33490 #endif
33491
33492 /*
33493@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33494 * Does not cover __inittext since that is gone later on. On
33495 * 64bit we do not enforce !NX on the low mapping
33496 */
33497- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33498- pgprot_val(forbidden) |= _PAGE_NX;
33499+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33500+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33501
33502+#ifdef CONFIG_DEBUG_RODATA
33503 /*
33504 * The .rodata section needs to be read-only. Using the pfn
33505 * catches all aliases.
33506@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33507 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33508 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33509 pgprot_val(forbidden) |= _PAGE_RW;
33510+#endif
33511
33512 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33513 /*
33514@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33515 }
33516 #endif
33517
33518+#ifdef CONFIG_PAX_KERNEXEC
33519+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33520+ pgprot_val(forbidden) |= _PAGE_RW;
33521+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33522+ }
33523+#endif
33524+
33525 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33526
33527 return prot;
33528@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33529 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33530 {
33531 /* change init_mm */
33532+ pax_open_kernel();
33533 set_pte_atomic(kpte, pte);
33534+
33535 #ifdef CONFIG_X86_32
33536 if (!SHARED_KERNEL_PMD) {
33537+
33538+#ifdef CONFIG_PAX_PER_CPU_PGD
33539+ unsigned long cpu;
33540+#else
33541 struct page *page;
33542+#endif
33543
33544+#ifdef CONFIG_PAX_PER_CPU_PGD
33545+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33546+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33547+#else
33548 list_for_each_entry(page, &pgd_list, lru) {
33549- pgd_t *pgd;
33550+ pgd_t *pgd = (pgd_t *)page_address(page);
33551+#endif
33552+
33553 pud_t *pud;
33554 pmd_t *pmd;
33555
33556- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33557+ pgd += pgd_index(address);
33558 pud = pud_offset(pgd, address);
33559 pmd = pmd_offset(pud, address);
33560 set_pte_atomic((pte_t *)pmd, pte);
33561 }
33562 }
33563 #endif
33564+ pax_close_kernel();
33565 }
33566
33567 static int
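
The static_protections() changes above follow the function's existing pattern: accumulate bits into a "forbidden" mask, here _PAGE_RW and _PAGE_NX for the KERNEXEC kernel text range, then strip them from whatever protection the caller requested. A sketch of just that masking step (illustrative only, not part of the patch; a 64-bit host is assumed so _PAGE_NX fits in an unsigned long):

#include <stdio.h>

#define _PAGE_RW        (1UL << 1)      /* x86 bit positions */
#define _PAGE_NX        (1UL << 63)

/* Strip whatever the policy forbids from the requested protection,
   mirroring "prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden))". */
static unsigned long apply_static_protections(unsigned long prot, int in_kernexec_text)
{
        unsigned long forbidden = 0;

        if (in_kernexec_text) {
                forbidden |= _PAGE_RW;  /* text may never become writable... */
                forbidden |= _PAGE_NX;  /* ...nor lose its exec permission */
        }
        return prot & ~forbidden;
}

int main(void)
{
        unsigned long req = _PAGE_RW | _PAGE_NX;

        printf("%#lx -> %#lx\n", req, apply_static_protections(req, 1));
        return 0;
}
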
33568diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33569index 7ac6869..c0ba541 100644
33570--- a/arch/x86/mm/pat.c
33571+++ b/arch/x86/mm/pat.c
33572@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33573 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33574
33575 if (pg_flags == _PGMT_DEFAULT)
33576- return -1;
33577+ return _PAGE_CACHE_MODE_NUM;
33578 else if (pg_flags == _PGMT_WC)
33579 return _PAGE_CACHE_MODE_WC;
33580 else if (pg_flags == _PGMT_UC_MINUS)
33581@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33582
33583 page = pfn_to_page(pfn);
33584 type = get_page_memtype(page);
33585- if (type != -1) {
33586+ if (type != _PAGE_CACHE_MODE_NUM) {
33587 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33588 start, end - 1, type, req_type);
33589 if (new_type)
33590@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33591
33592 if (!entry) {
33593 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33594- current->comm, current->pid, start, end - 1);
33595+ current->comm, task_pid_nr(current), start, end - 1);
33596 return -EINVAL;
33597 }
33598
33599@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33600 page = pfn_to_page(paddr >> PAGE_SHIFT);
33601 rettype = get_page_memtype(page);
33602 /*
33603- * -1 from get_page_memtype() implies RAM page is in its
33604+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33605 * default state and not reserved, and hence of type WB
33606 */
33607- if (rettype == -1)
33608+ if (rettype == _PAGE_CACHE_MODE_NUM)
33609 rettype = _PAGE_CACHE_MODE_WB;
33610
33611 return rettype;
33612@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33613
33614 while (cursor < to) {
33615 if (!devmem_is_allowed(pfn)) {
33616- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33617- current->comm, from, to - 1);
33618+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33619+ current->comm, from, to - 1, cursor);
33620 return 0;
33621 }
33622 cursor += PAGE_SIZE;
33623@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33624 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33625 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33626 "for [mem %#010Lx-%#010Lx]\n",
33627- current->comm, current->pid,
33628+ current->comm, task_pid_nr(current),
33629 cattr_name(pcm),
33630 base, (unsigned long long)(base + size-1));
33631 return -EINVAL;
33632@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33633 pcm = lookup_memtype(paddr);
33634 if (want_pcm != pcm) {
33635 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33636- current->comm, current->pid,
33637+ current->comm, task_pid_nr(current),
33638 cattr_name(want_pcm),
33639 (unsigned long long)paddr,
33640 (unsigned long long)(paddr + size - 1),
33641@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33642 free_memtype(paddr, paddr + size);
33643 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33644 " for [mem %#010Lx-%#010Lx], got %s\n",
33645- current->comm, current->pid,
33646+ current->comm, task_pid_nr(current),
33647 cattr_name(want_pcm),
33648 (unsigned long long)paddr,
33649 (unsigned long long)(paddr + size - 1),
33650diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33651index 6582adc..fcc5d0b 100644
33652--- a/arch/x86/mm/pat_rbtree.c
33653+++ b/arch/x86/mm/pat_rbtree.c
33654@@ -161,7 +161,7 @@ success:
33655
33656 failure:
33657 printk(KERN_INFO "%s:%d conflicting memory types "
33658- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33659+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33660 end, cattr_name(found_type), cattr_name(match->type));
33661 return -EBUSY;
33662 }
33663diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33664index 9f0614d..92ae64a 100644
33665--- a/arch/x86/mm/pf_in.c
33666+++ b/arch/x86/mm/pf_in.c
33667@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33668 int i;
33669 enum reason_type rv = OTHERS;
33670
33671- p = (unsigned char *)ins_addr;
33672+ p = (unsigned char *)ktla_ktva(ins_addr);
33673 p += skip_prefix(p, &prf);
33674 p += get_opcode(p, &opcode);
33675
33676@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33677 struct prefix_bits prf;
33678 int i;
33679
33680- p = (unsigned char *)ins_addr;
33681+ p = (unsigned char *)ktla_ktva(ins_addr);
33682 p += skip_prefix(p, &prf);
33683 p += get_opcode(p, &opcode);
33684
33685@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33686 struct prefix_bits prf;
33687 int i;
33688
33689- p = (unsigned char *)ins_addr;
33690+ p = (unsigned char *)ktla_ktva(ins_addr);
33691 p += skip_prefix(p, &prf);
33692 p += get_opcode(p, &opcode);
33693
33694@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33695 struct prefix_bits prf;
33696 int i;
33697
33698- p = (unsigned char *)ins_addr;
33699+ p = (unsigned char *)ktla_ktva(ins_addr);
33700 p += skip_prefix(p, &prf);
33701 p += get_opcode(p, &opcode);
33702 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33703@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33704 struct prefix_bits prf;
33705 int i;
33706
33707- p = (unsigned char *)ins_addr;
33708+ p = (unsigned char *)ktla_ktva(ins_addr);
33709 p += skip_prefix(p, &prf);
33710 p += get_opcode(p, &opcode);
33711 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
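
The pf_in.c hunks wrap every instruction-decoder entry point so the probed address goes through ktla_ktva() before its bytes are read. Under PaX KERNEXEC on i386 the kernel code segment base is shifted, so an address taken from a fault frame (a kernel text logical address) is not directly dereferenceable through the data segment; ktla_ktva() rebases it to the matching virtual address first. A sketch of the idea, with the constants assumed from the PaX headers rather than quoted from them; without KERNEXEC the macro collapses to the identity:

    /* sketch, not the authoritative PaX definition */
    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)
    #endif
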
33712diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33713index 7b22ada..b11e66f 100644
33714--- a/arch/x86/mm/pgtable.c
33715+++ b/arch/x86/mm/pgtable.c
33716@@ -97,10 +97,75 @@ static inline void pgd_list_del(pgd_t *pgd)
33717 list_del(&page->lru);
33718 }
33719
33720-#define UNSHARED_PTRS_PER_PGD \
33721- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33722+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33723+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33724
33725+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33726+{
33727+ unsigned int count = USER_PGD_PTRS;
33728
33729+ if (!pax_user_shadow_base)
33730+ return;
33731+
33732+ while (count--)
33733+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33734+}
33735+#endif
33736+
33737+#ifdef CONFIG_PAX_PER_CPU_PGD
33738+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33739+{
33740+ unsigned int count = USER_PGD_PTRS;
33741+
33742+ while (count--) {
33743+ pgd_t pgd;
33744+
33745+#ifdef CONFIG_X86_64
33746+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33747+#else
33748+ pgd = *src++;
33749+#endif
33750+
33751+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33752+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33753+#endif
33754+
33755+ *dst++ = pgd;
33756+ }
33757+
33758+}
33759+#endif
33760+
33761+#ifdef CONFIG_X86_64
33762+#define pxd_t pud_t
33763+#define pyd_t pgd_t
33764+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33765+#define pgtable_pxd_page_ctor(page) true
33766+#define pgtable_pxd_page_dtor(page) do {} while (0)
33767+#define pxd_free(mm, pud) pud_free((mm), (pud))
33768+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33769+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33770+#define PYD_SIZE PGDIR_SIZE
33771+#define mm_inc_nr_pxds(mm) do {} while (0)
33772+#define mm_dec_nr_pxds(mm) do {} while (0)
33773+#else
33774+#define pxd_t pmd_t
33775+#define pyd_t pud_t
33776+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33777+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33778+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33779+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33780+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33781+#define pyd_offset(mm, address) pud_offset((mm), (address))
33782+#define PYD_SIZE PUD_SIZE
33783+#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
33784+#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
33785+#endif
33786+
33787+#ifdef CONFIG_PAX_PER_CPU_PGD
33788+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33789+static inline void pgd_dtor(pgd_t *pgd) {}
33790+#else
33791 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33792 {
33793 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33794@@ -141,6 +206,7 @@ static void pgd_dtor(pgd_t *pgd)
33795 pgd_list_del(pgd);
33796 spin_unlock(&pgd_lock);
33797 }
33798+#endif
33799
33800 /*
33801 * List of all pgd's needed for non-PAE so it can invalidate entries
33802@@ -153,7 +219,7 @@ static void pgd_dtor(pgd_t *pgd)
33803 * -- nyc
33804 */
33805
33806-#ifdef CONFIG_X86_PAE
33807+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33808 /*
33809 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33810 * updating the top-level pagetable entries to guarantee the
33811@@ -165,7 +231,7 @@ static void pgd_dtor(pgd_t *pgd)
33812 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33813 * and initialize the kernel pmds here.
33814 */
33815-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33816+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33817
33818 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33819 {
33820@@ -183,46 +249,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33821 */
33822 flush_tlb_mm(mm);
33823 }
33824+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33825+#define PREALLOCATED_PXDS USER_PGD_PTRS
33826 #else /* !CONFIG_X86_PAE */
33827
33828 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33829-#define PREALLOCATED_PMDS 0
33830+#define PREALLOCATED_PXDS 0
33831
33832 #endif /* CONFIG_X86_PAE */
33833
33834-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
33835+static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
33836 {
33837 int i;
33838
33839- for(i = 0; i < PREALLOCATED_PMDS; i++)
33840- if (pmds[i]) {
33841- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33842- free_page((unsigned long)pmds[i]);
33843- mm_dec_nr_pmds(mm);
33844+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33845+ if (pxds[i]) {
33846+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33847+ free_page((unsigned long)pxds[i]);
33848+ mm_dec_nr_pxds(mm);
33849 }
33850 }
33851
33852-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33853+static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
33854 {
33855 int i;
33856 bool failed = false;
33857
33858- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33859- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33860- if (!pmd)
33861+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33862+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33863+ if (!pxd)
33864 failed = true;
33865- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33866- free_page((unsigned long)pmd);
33867- pmd = NULL;
33868+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33869+ free_page((unsigned long)pxd);
33870+ pxd = NULL;
33871 failed = true;
33872 }
33873- if (pmd)
33874- mm_inc_nr_pmds(mm);
33875- pmds[i] = pmd;
33876+ if (pxd)
33877+ mm_inc_nr_pxds(mm);
33878+ pxds[i] = pxd;
33879 }
33880
33881 if (failed) {
33882- free_pmds(mm, pmds);
33883+ free_pxds(mm, pxds);
33884 return -ENOMEM;
33885 }
33886
33887@@ -235,50 +303,54 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33888 * preallocate which never got a corresponding vma will need to be
33889 * freed manually.
33890 */
33891-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33892+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33893 {
33894 int i;
33895
33896- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33897+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33898 pgd_t pgd = pgdp[i];
33899
33900 if (pgd_val(pgd) != 0) {
33901- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33902+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33903
33904- pgdp[i] = native_make_pgd(0);
33905+ set_pgd(pgdp + i, native_make_pgd(0));
33906
33907- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33908- pmd_free(mm, pmd);
33909- mm_dec_nr_pmds(mm);
33910+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33911+ pxd_free(mm, pxd);
33912+ mm_dec_nr_pxds(mm);
33913 }
33914 }
33915 }
33916
33917-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33918+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33919 {
33920- pud_t *pud;
33921+ pyd_t *pyd;
33922 int i;
33923
33924- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33925+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33926 return;
33927
33928- pud = pud_offset(pgd, 0);
33929+#ifdef CONFIG_X86_64
33930+ pyd = pyd_offset(mm, 0L);
33931+#else
33932+ pyd = pyd_offset(pgd, 0L);
33933+#endif
33934
33935- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33936- pmd_t *pmd = pmds[i];
33937+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33938+ pxd_t *pxd = pxds[i];
33939
33940 if (i >= KERNEL_PGD_BOUNDARY)
33941- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33942- sizeof(pmd_t) * PTRS_PER_PMD);
33943+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33944+ sizeof(pxd_t) * PTRS_PER_PMD);
33945
33946- pud_populate(mm, pud, pmd);
33947+ pyd_populate(mm, pyd, pxd);
33948 }
33949 }
33950
33951 pgd_t *pgd_alloc(struct mm_struct *mm)
33952 {
33953 pgd_t *pgd;
33954- pmd_t *pmds[PREALLOCATED_PMDS];
33955+ pxd_t *pxds[PREALLOCATED_PXDS];
33956
33957 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33958
33959@@ -287,11 +359,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33960
33961 mm->pgd = pgd;
33962
33963- if (preallocate_pmds(mm, pmds) != 0)
33964+ if (preallocate_pxds(mm, pxds) != 0)
33965 goto out_free_pgd;
33966
33967 if (paravirt_pgd_alloc(mm) != 0)
33968- goto out_free_pmds;
33969+ goto out_free_pxds;
33970
33971 /*
33972 * Make sure that pre-populating the pmds is atomic with
33973@@ -301,14 +373,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33974 spin_lock(&pgd_lock);
33975
33976 pgd_ctor(mm, pgd);
33977- pgd_prepopulate_pmd(mm, pgd, pmds);
33978+ pgd_prepopulate_pxd(mm, pgd, pxds);
33979
33980 spin_unlock(&pgd_lock);
33981
33982 return pgd;
33983
33984-out_free_pmds:
33985- free_pmds(mm, pmds);
33986+out_free_pxds:
33987+ free_pxds(mm, pxds);
33988 out_free_pgd:
33989 free_page((unsigned long)pgd);
33990 out:
33991@@ -317,7 +389,7 @@ out:
33992
33993 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33994 {
33995- pgd_mop_up_pmds(mm, pgd);
33996+ pgd_mop_up_pxds(mm, pgd);
33997 pgd_dtor(pgd);
33998 paravirt_pgd_free(mm, pgd);
33999 free_page((unsigned long)pgd);
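
The pgtable.c rework folds two preallocation schemes into one body by renaming the moving parts: pxd_t is the page-table level being preallocated, pyd_t the level that holds it. On x86_64 with PAX_PER_CPU_PGD that means PUDs hung off the PGD (PREALLOCATED_PXDS = USER_PGD_PTRS); the classic 32-bit PAE case keeps preallocating PMDs under PUDs. The aliasing lets free/preallocate/mop-up/prepopulate stay a single implementation, and the raw store in the mop-up path becomes set_pgd() so paravirt hooks observe the clear. A compressed sketch of the trick, using names from the hunk:

    /* one allocator body, two page-table levels, selected at compile time */
    #ifdef CONFIG_X86_64
    # define pxd_t                       pud_t  /* entries preallocated    */
    # define pyd_populate(mm, pyd, pxd)  pgd_populate((mm), (pyd), (pxd))
    # define PYD_SIZE                    PGDIR_SIZE
    #else
    # define pxd_t                       pmd_t
    # define pyd_populate(mm, pyd, pxd)  pud_populate((mm), (pyd), (pxd))
    # define PYD_SIZE                    PUD_SIZE
    #endif
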
34000diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34001index 75cc097..79a097f 100644
34002--- a/arch/x86/mm/pgtable_32.c
34003+++ b/arch/x86/mm/pgtable_32.c
34004@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34005 return;
34006 }
34007 pte = pte_offset_kernel(pmd, vaddr);
34008+
34009+ pax_open_kernel();
34010 if (pte_val(pteval))
34011 set_pte_at(&init_mm, vaddr, pte, pteval);
34012 else
34013 pte_clear(&init_mm, vaddr, pte);
34014+ pax_close_kernel();
34015
34016 /*
34017 * It's enough to flush this one mapping.
34018diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34019index e666cbb..61788c45 100644
34020--- a/arch/x86/mm/physaddr.c
34021+++ b/arch/x86/mm/physaddr.c
34022@@ -10,7 +10,7 @@
34023 #ifdef CONFIG_X86_64
34024
34025 #ifdef CONFIG_DEBUG_VIRTUAL
34026-unsigned long __phys_addr(unsigned long x)
34027+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34028 {
34029 unsigned long y = x - __START_KERNEL_map;
34030
34031@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34032 #else
34033
34034 #ifdef CONFIG_DEBUG_VIRTUAL
34035-unsigned long __phys_addr(unsigned long x)
34036+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34037 {
34038 unsigned long phys_addr = x - PAGE_OFFSET;
34039 /* VMALLOC_* aren't constants */
34040diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34041index 90555bf..f5f1828 100644
34042--- a/arch/x86/mm/setup_nx.c
34043+++ b/arch/x86/mm/setup_nx.c
34044@@ -5,8 +5,10 @@
34045 #include <asm/pgtable.h>
34046 #include <asm/proto.h>
34047
34048+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34049 static int disable_nx;
34050
34051+#ifndef CONFIG_PAX_PAGEEXEC
34052 /*
34053 * noexec = on|off
34054 *
34055@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34056 return 0;
34057 }
34058 early_param("noexec", noexec_setup);
34059+#endif
34060+
34061+#endif
34062
34063 void x86_configure_nx(void)
34064 {
34065+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34066 if (cpu_has_nx && !disable_nx)
34067 __supported_pte_mask |= _PAGE_NX;
34068 else
34069+#endif
34070 __supported_pte_mask &= ~_PAGE_NX;
34071 }
34072
34073diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34074index 3250f23..7a97ba2 100644
34075--- a/arch/x86/mm/tlb.c
34076+++ b/arch/x86/mm/tlb.c
34077@@ -45,7 +45,11 @@ void leave_mm(int cpu)
34078 BUG();
34079 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34080 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34081+
34082+#ifndef CONFIG_PAX_PER_CPU_PGD
34083 load_cr3(swapper_pg_dir);
34084+#endif
34085+
34086 /*
34087 * This gets called in the idle path where RCU
34088 * functions differently. Tracing normally
34089diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34090new file mode 100644
34091index 0000000..dace51c
34092--- /dev/null
34093+++ b/arch/x86/mm/uderef_64.c
34094@@ -0,0 +1,37 @@
34095+#include <linux/mm.h>
34096+#include <asm/pgtable.h>
34097+#include <asm/uaccess.h>
34098+
34099+#ifdef CONFIG_PAX_MEMORY_UDEREF
34100+/* PaX: due to the special call convention these functions must
34101+ * - remain leaf functions under all configurations,
34102+ * - never be called directly, only dereferenced from the wrappers.
34103+ */
34104+void __pax_open_userland(void)
34105+{
34106+ unsigned int cpu;
34107+
34108+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34109+ return;
34110+
34111+ cpu = raw_get_cpu();
34112+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34113+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34114+ raw_put_cpu_no_resched();
34115+}
34116+EXPORT_SYMBOL(__pax_open_userland);
34117+
34118+void __pax_close_userland(void)
34119+{
34120+ unsigned int cpu;
34121+
34122+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34123+ return;
34124+
34125+ cpu = raw_get_cpu();
34126+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34127+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34128+ raw_put_cpu_no_resched();
34129+}
34130+EXPORT_SYMBOL(__pax_close_userland);
34131+#endif
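
The new uderef_64.c is the UDEREF fast path on amd64: opening userland switches CR3 to the per-CPU user PGD tagged with PCID_USER, closing it switches back to the kernel PGD with PCID_KERNEL, and both writes set the architectural no-flush bit so TLB entries tagged with the other PCID survive the switch. The BUG_ON()s assert the expected PCID before each transition, and per the comment these leaf functions are only reached through wrapper macros (pax_open_userland()/pax_close_userland() elsewhere in the PaX patch), never called directly. A sketch of what the CR3 value encodes; PCID_* and get_cpu_pgd() are names from the patch:

    unsigned long cr3;

    cr3  = __pa(get_cpu_pgd(cpu, user)); /* per-CPU user page-table root    */
    cr3 |= PCID_USER;                    /* low bits: the PCID tag          */
    cr3 |= PCID_NOFLUSH;                 /* bit 63: keep tagged TLB entries */
    write_cr3(cr3);
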
34132diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34133index 6440221..f84b5c7 100644
34134--- a/arch/x86/net/bpf_jit.S
34135+++ b/arch/x86/net/bpf_jit.S
34136@@ -9,6 +9,7 @@
34137 */
34138 #include <linux/linkage.h>
34139 #include <asm/dwarf2.h>
34140+#include <asm/alternative-asm.h>
34141
34142 /*
34143 * Calling convention :
34144@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34145 jle bpf_slow_path_word
34146 mov (SKBDATA,%rsi),%eax
34147 bswap %eax /* ntohl() */
34148+ pax_force_retaddr
34149 ret
34150
34151 sk_load_half:
34152@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34153 jle bpf_slow_path_half
34154 movzwl (SKBDATA,%rsi),%eax
34155 rol $8,%ax # ntohs()
34156+ pax_force_retaddr
34157 ret
34158
34159 sk_load_byte:
34160@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34161 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34162 jle bpf_slow_path_byte
34163 movzbl (SKBDATA,%rsi),%eax
34164+ pax_force_retaddr
34165 ret
34166
34167 /* rsi contains offset and can be scratched */
34168@@ -90,6 +94,7 @@ bpf_slow_path_word:
34169 js bpf_error
34170 mov - MAX_BPF_STACK + 32(%rbp),%eax
34171 bswap %eax
34172+ pax_force_retaddr
34173 ret
34174
34175 bpf_slow_path_half:
34176@@ -98,12 +103,14 @@ bpf_slow_path_half:
34177 mov - MAX_BPF_STACK + 32(%rbp),%ax
34178 rol $8,%ax
34179 movzwl %ax,%eax
34180+ pax_force_retaddr
34181 ret
34182
34183 bpf_slow_path_byte:
34184 bpf_slow_path_common(1)
34185 js bpf_error
34186 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34187+ pax_force_retaddr
34188 ret
34189
34190 #define sk_negative_common(SIZE) \
34191@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34192 sk_negative_common(4)
34193 mov (%rax), %eax
34194 bswap %eax
34195+ pax_force_retaddr
34196 ret
34197
34198 bpf_slow_path_half_neg:
34199@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34200 mov (%rax),%ax
34201 rol $8,%ax
34202 movzwl %ax,%eax
34203+ pax_force_retaddr
34204 ret
34205
34206 bpf_slow_path_byte_neg:
34207@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34208 .globl sk_load_byte_negative_offset
34209 sk_negative_common(1)
34210 movzbl (%rax), %eax
34211+ pax_force_retaddr
34212 ret
34213
34214 bpf_error:
34215@@ -156,4 +166,5 @@ bpf_error:
34216 mov - MAX_BPF_STACK + 16(%rbp),%r14
34217 mov - MAX_BPF_STACK + 24(%rbp),%r15
34218 leaveq
34219+ pax_force_retaddr
34220 ret
34221diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34222index 9875143..00f6656 100644
34223--- a/arch/x86/net/bpf_jit_comp.c
34224+++ b/arch/x86/net/bpf_jit_comp.c
34225@@ -13,7 +13,11 @@
34226 #include <linux/if_vlan.h>
34227 #include <asm/cacheflush.h>
34228
34229+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34230+int bpf_jit_enable __read_only;
34231+#else
34232 int bpf_jit_enable __read_mostly;
34233+#endif
34234
34235 /*
34236 * assembly code in arch/x86/net/bpf_jit.S
34237@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34238 static void jit_fill_hole(void *area, unsigned int size)
34239 {
34240 /* fill whole space with int3 instructions */
34241+ pax_open_kernel();
34242 memset(area, 0xcc, size);
34243+ pax_close_kernel();
34244 }
34245
34246 struct jit_context {
34247@@ -896,7 +902,9 @@ common_load:
34248 pr_err("bpf_jit_compile fatal error\n");
34249 return -EFAULT;
34250 }
34251+ pax_open_kernel();
34252 memcpy(image + proglen, temp, ilen);
34253+ pax_close_kernel();
34254 }
34255 proglen += ilen;
34256 addrs[i] = proglen;
34257@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34258
34259 if (image) {
34260 bpf_flush_icache(header, image + proglen);
34261- set_memory_ro((unsigned long)header, header->pages);
34262 prog->bpf_func = (void *)image;
34263 prog->jited = true;
34264 }
34265@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34266 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34267 struct bpf_binary_header *header = (void *)addr;
34268
34269- if (!fp->jited)
34270- goto free_filter;
34271+ if (fp->jited)
34272+ bpf_jit_binary_free(header);
34273
34274- set_memory_rw(addr, header->pages);
34275- bpf_jit_binary_free(header);
34276-
34277-free_filter:
34278 bpf_prog_unlock_free(fp);
34279 }
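
Two things happen in bpf_jit_comp.c. First, under GRKERNSEC_BPF_HARDEN the bpf_jit_enable knob becomes __read_only, so it can no longer be flipped at runtime. Second, because KERNEXEC keeps the JIT image read-only for its whole lifetime, the explicit set_memory_ro()/set_memory_rw() calls are dropped and the two writes into the image (the int3 padding in jit_fill_hole() and the instruction copy) are bracketed with the PaX helpers instead. A sketch of that write pattern; on x86 the helpers are assumed to toggle CR0.WP around the store:

    /* write into KERNEXEC-protected memory through the sanctioned window */
    pax_open_kernel();
    memcpy(image + proglen, temp, ilen);   /* emit the JITed instructions */
    pax_close_kernel();
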
34280diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34281index 5d04be5..2beeaa2 100644
34282--- a/arch/x86/oprofile/backtrace.c
34283+++ b/arch/x86/oprofile/backtrace.c
34284@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34285 struct stack_frame_ia32 *fp;
34286 unsigned long bytes;
34287
34288- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34289+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34290 if (bytes != 0)
34291 return NULL;
34292
34293- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34294+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34295
34296 oprofile_add_trace(bufhead[0].return_address);
34297
34298@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34299 struct stack_frame bufhead[2];
34300 unsigned long bytes;
34301
34302- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34303+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34304 if (bytes != 0)
34305 return NULL;
34306
34307@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34308 {
34309 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34310
34311- if (!user_mode_vm(regs)) {
34312+ if (!user_mode(regs)) {
34313 unsigned long stack = kernel_stack_pointer(regs);
34314 if (depth)
34315 dump_trace(NULL, regs, (unsigned long *)stack, 0,
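
The backtrace.c hunk does two things: copy_from_user_nmi() now takes pointers annotated for the user address space, and the mode test swaps user_mode_vm() for user_mode(), matching the patch's reworked mode checks elsewhere. The __force_user/__force_kernel markers only matter to sparse; a sketch of how such shorthands are typically spelled (assumed, not quoted from the patch):

    /* sparse sees the address-space change; gcc sees nothing */
    #define __force_user    __force __user
    #define __force_kernel  __force __kernel
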
34316diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34317index 1d2e639..f6ef82a 100644
34318--- a/arch/x86/oprofile/nmi_int.c
34319+++ b/arch/x86/oprofile/nmi_int.c
34320@@ -23,6 +23,7 @@
34321 #include <asm/nmi.h>
34322 #include <asm/msr.h>
34323 #include <asm/apic.h>
34324+#include <asm/pgtable.h>
34325
34326 #include "op_counter.h"
34327 #include "op_x86_model.h"
34328@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34329 if (ret)
34330 return ret;
34331
34332- if (!model->num_virt_counters)
34333- model->num_virt_counters = model->num_counters;
34334+ if (!model->num_virt_counters) {
34335+ pax_open_kernel();
34336+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34337+ pax_close_kernel();
34338+ }
34339
34340 mux_init(ops);
34341
34342diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34343index 50d86c0..7985318 100644
34344--- a/arch/x86/oprofile/op_model_amd.c
34345+++ b/arch/x86/oprofile/op_model_amd.c
34346@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34347 num_counters = AMD64_NUM_COUNTERS;
34348 }
34349
34350- op_amd_spec.num_counters = num_counters;
34351- op_amd_spec.num_controls = num_counters;
34352- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34353+ pax_open_kernel();
34354+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34355+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34356+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34357+ pax_close_kernel();
34358
34359 return 0;
34360 }
34361diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34362index d90528e..0127e2b 100644
34363--- a/arch/x86/oprofile/op_model_ppro.c
34364+++ b/arch/x86/oprofile/op_model_ppro.c
34365@@ -19,6 +19,7 @@
34366 #include <asm/msr.h>
34367 #include <asm/apic.h>
34368 #include <asm/nmi.h>
34369+#include <asm/pgtable.h>
34370
34371 #include "op_x86_model.h"
34372 #include "op_counter.h"
34373@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34374
34375 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34376
34377- op_arch_perfmon_spec.num_counters = num_counters;
34378- op_arch_perfmon_spec.num_controls = num_counters;
34379+ pax_open_kernel();
34380+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34381+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34382+ pax_close_kernel();
34383 }
34384
34385 static int arch_perfmon_init(struct oprofile_operations *ignore)
34386diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34387index 71e8a67..6a313bb 100644
34388--- a/arch/x86/oprofile/op_x86_model.h
34389+++ b/arch/x86/oprofile/op_x86_model.h
34390@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34391 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34392 struct op_msrs const * const msrs);
34393 #endif
34394-};
34395+} __do_const;
34396
34397 struct op_counter_config;
34398
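
Marking op_x86_model_spec with __do_const tells the PaX constify plugin to treat every instance as read-only data; the cost is visible in the nmi_int.c and op_model_*.c hunks above, where the few fields legitimately written once at init (num_counters and friends) must be stored through a de-consting cast inside a pax_open_kernel() section. A compressed sketch of the pattern, with struct and field names invented for illustration:

    struct ops {
            unsigned int num_counters;
            int (*init)(void);
    } __do_const;                        /* constify plugin: force .rodata */

    static struct ops amd_ops = { .init = amd_init };

    static void late_fixup(unsigned int n)
    {
            pax_open_kernel();
            *(unsigned int *)&amd_ops.num_counters = n;  /* sanctioned write */
            pax_close_kernel();
    }
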
34399diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34400index 852aa4c..71613f2 100644
34401--- a/arch/x86/pci/intel_mid_pci.c
34402+++ b/arch/x86/pci/intel_mid_pci.c
34403@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34404 pci_mmcfg_late_init();
34405 pcibios_enable_irq = intel_mid_pci_irq_enable;
34406 pcibios_disable_irq = intel_mid_pci_irq_disable;
34407- pci_root_ops = intel_mid_pci_ops;
34408+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34409 pci_soc_mode = 1;
34410 /* Continue with standard init */
34411 return 1;
34412diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34413index 5dc6ca5..25c03f5 100644
34414--- a/arch/x86/pci/irq.c
34415+++ b/arch/x86/pci/irq.c
34416@@ -51,7 +51,7 @@ struct irq_router {
34417 struct irq_router_handler {
34418 u16 vendor;
34419 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34420-};
34421+} __do_const;
34422
34423 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34424 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34425@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34426 return 0;
34427 }
34428
34429-static __initdata struct irq_router_handler pirq_routers[] = {
34430+static __initconst const struct irq_router_handler pirq_routers[] = {
34431 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34432 { PCI_VENDOR_ID_AL, ali_router_probe },
34433 { PCI_VENDOR_ID_ITE, ite_router_probe },
34434@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34435 static void __init pirq_find_router(struct irq_router *r)
34436 {
34437 struct irq_routing_table *rt = pirq_table;
34438- struct irq_router_handler *h;
34439+ const struct irq_router_handler *h;
34440
34441 #ifdef CONFIG_PCI_BIOS
34442 if (!rt->signature) {
34443@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34444 return 0;
34445 }
34446
34447-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34448+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34449 {
34450 .callback = fix_broken_hp_bios_irq9,
34451 .ident = "HP Pavilion N5400 Series Laptop",
34452diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34453index 9b83b90..4112152 100644
34454--- a/arch/x86/pci/pcbios.c
34455+++ b/arch/x86/pci/pcbios.c
34456@@ -79,7 +79,7 @@ union bios32 {
34457 static struct {
34458 unsigned long address;
34459 unsigned short segment;
34460-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34461+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34462
34463 /*
34464 * Returns the entry point for the given service, NULL on error
34465@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34466 unsigned long length; /* %ecx */
34467 unsigned long entry; /* %edx */
34468 unsigned long flags;
34469+ struct desc_struct d, *gdt;
34470
34471 local_irq_save(flags);
34472- __asm__("lcall *(%%edi); cld"
34473+
34474+ gdt = get_cpu_gdt_table(smp_processor_id());
34475+
34476+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34477+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34478+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34479+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34480+
34481+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34482 : "=a" (return_code),
34483 "=b" (address),
34484 "=c" (length),
34485 "=d" (entry)
34486 : "0" (service),
34487 "1" (0),
34488- "D" (&bios32_indirect));
34489+ "D" (&bios32_indirect),
34490+ "r"(__PCIBIOS_DS)
34491+ : "memory");
34492+
34493+ pax_open_kernel();
34494+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34495+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34496+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34497+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34498+ pax_close_kernel();
34499+
34500 local_irq_restore(flags);
34501
34502 switch (return_code) {
34503- case 0:
34504- return address + entry;
34505- case 0x80: /* Not present */
34506- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34507- return 0;
34508- default: /* Shouldn't happen */
34509- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34510- service, return_code);
34511+ case 0: {
34512+ int cpu;
34513+ unsigned char flags;
34514+
34515+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34516+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34517+ printk(KERN_WARNING "bios32_service: not valid\n");
34518 return 0;
34519+ }
34520+ address = address + PAGE_OFFSET;
34521+ length += 16UL; /* some BIOSes underreport this... */
34522+ flags = 4;
34523+ if (length >= 64*1024*1024) {
34524+ length >>= PAGE_SHIFT;
34525+ flags |= 8;
34526+ }
34527+
34528+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34529+ gdt = get_cpu_gdt_table(cpu);
34530+ pack_descriptor(&d, address, length, 0x9b, flags);
34531+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34532+ pack_descriptor(&d, address, length, 0x93, flags);
34533+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34534+ }
34535+ return entry;
34536+ }
34537+ case 0x80: /* Not present */
34538+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34539+ return 0;
34540+ default: /* Shouldn't happen */
34541+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34542+ service, return_code);
34543+ return 0;
34544 }
34545 }
34546
34547 static struct {
34548 unsigned long address;
34549 unsigned short segment;
34550-} pci_indirect = { 0, __KERNEL_CS };
34551+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34552
34553-static int pci_bios_present;
34554+static int pci_bios_present __read_only;
34555
34556 static int __init check_pcibios(void)
34557 {
34558@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34559 unsigned long flags, pcibios_entry;
34560
34561 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34562- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34563+ pci_indirect.address = pcibios_entry;
34564
34565 local_irq_save(flags);
34566- __asm__(
34567- "lcall *(%%edi); cld\n\t"
34568+ __asm__("movw %w6, %%ds\n\t"
34569+ "lcall *%%ss:(%%edi); cld\n\t"
34570+ "push %%ss\n\t"
34571+ "pop %%ds\n\t"
34572 "jc 1f\n\t"
34573 "xor %%ah, %%ah\n"
34574 "1:"
34575@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34576 "=b" (ebx),
34577 "=c" (ecx)
34578 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34579- "D" (&pci_indirect)
34580+ "D" (&pci_indirect),
34581+ "r" (__PCIBIOS_DS)
34582 : "memory");
34583 local_irq_restore(flags);
34584
34585@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34586
34587 switch (len) {
34588 case 1:
34589- __asm__("lcall *(%%esi); cld\n\t"
34590+ __asm__("movw %w6, %%ds\n\t"
34591+ "lcall *%%ss:(%%esi); cld\n\t"
34592+ "push %%ss\n\t"
34593+ "pop %%ds\n\t"
34594 "jc 1f\n\t"
34595 "xor %%ah, %%ah\n"
34596 "1:"
34597@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34598 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34599 "b" (bx),
34600 "D" ((long)reg),
34601- "S" (&pci_indirect));
34602+ "S" (&pci_indirect),
34603+ "r" (__PCIBIOS_DS));
34604 /*
34605 * Zero-extend the result beyond 8 bits, do not trust the
34606 * BIOS having done it:
34607@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34608 *value &= 0xff;
34609 break;
34610 case 2:
34611- __asm__("lcall *(%%esi); cld\n\t"
34612+ __asm__("movw %w6, %%ds\n\t"
34613+ "lcall *%%ss:(%%esi); cld\n\t"
34614+ "push %%ss\n\t"
34615+ "pop %%ds\n\t"
34616 "jc 1f\n\t"
34617 "xor %%ah, %%ah\n"
34618 "1:"
34619@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34620 : "1" (PCIBIOS_READ_CONFIG_WORD),
34621 "b" (bx),
34622 "D" ((long)reg),
34623- "S" (&pci_indirect));
34624+ "S" (&pci_indirect),
34625+ "r" (__PCIBIOS_DS));
34626 /*
34627 * Zero-extend the result beyond 16 bits, do not trust the
34628 * BIOS having done it:
34629@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34630 *value &= 0xffff;
34631 break;
34632 case 4:
34633- __asm__("lcall *(%%esi); cld\n\t"
34634+ __asm__("movw %w6, %%ds\n\t"
34635+ "lcall *%%ss:(%%esi); cld\n\t"
34636+ "push %%ss\n\t"
34637+ "pop %%ds\n\t"
34638 "jc 1f\n\t"
34639 "xor %%ah, %%ah\n"
34640 "1:"
34641@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34642 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34643 "b" (bx),
34644 "D" ((long)reg),
34645- "S" (&pci_indirect));
34646+ "S" (&pci_indirect),
34647+ "r" (__PCIBIOS_DS));
34648 break;
34649 }
34650
34651@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34652
34653 switch (len) {
34654 case 1:
34655- __asm__("lcall *(%%esi); cld\n\t"
34656+ __asm__("movw %w6, %%ds\n\t"
34657+ "lcall *%%ss:(%%esi); cld\n\t"
34658+ "push %%ss\n\t"
34659+ "pop %%ds\n\t"
34660 "jc 1f\n\t"
34661 "xor %%ah, %%ah\n"
34662 "1:"
34663@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34664 "c" (value),
34665 "b" (bx),
34666 "D" ((long)reg),
34667- "S" (&pci_indirect));
34668+ "S" (&pci_indirect),
34669+ "r" (__PCIBIOS_DS));
34670 break;
34671 case 2:
34672- __asm__("lcall *(%%esi); cld\n\t"
34673+ __asm__("movw %w6, %%ds\n\t"
34674+ "lcall *%%ss:(%%esi); cld\n\t"
34675+ "push %%ss\n\t"
34676+ "pop %%ds\n\t"
34677 "jc 1f\n\t"
34678 "xor %%ah, %%ah\n"
34679 "1:"
34680@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34681 "c" (value),
34682 "b" (bx),
34683 "D" ((long)reg),
34684- "S" (&pci_indirect));
34685+ "S" (&pci_indirect),
34686+ "r" (__PCIBIOS_DS));
34687 break;
34688 case 4:
34689- __asm__("lcall *(%%esi); cld\n\t"
34690+ __asm__("movw %w6, %%ds\n\t"
34691+ "lcall *%%ss:(%%esi); cld\n\t"
34692+ "push %%ss\n\t"
34693+ "pop %%ds\n\t"
34694 "jc 1f\n\t"
34695 "xor %%ah, %%ah\n"
34696 "1:"
34697@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34698 "c" (value),
34699 "b" (bx),
34700 "D" ((long)reg),
34701- "S" (&pci_indirect));
34702+ "S" (&pci_indirect),
34703+ "r" (__PCIBIOS_DS));
34704 break;
34705 }
34706
34707@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34708
34709 DBG("PCI: Fetching IRQ routing table... ");
34710 __asm__("push %%es\n\t"
34711+ "movw %w8, %%ds\n\t"
34712 "push %%ds\n\t"
34713 "pop %%es\n\t"
34714- "lcall *(%%esi); cld\n\t"
34715+ "lcall *%%ss:(%%esi); cld\n\t"
34716 "pop %%es\n\t"
34717+ "push %%ss\n\t"
34718+ "pop %%ds\n"
34719 "jc 1f\n\t"
34720 "xor %%ah, %%ah\n"
34721 "1:"
34722@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34723 "1" (0),
34724 "D" ((long) &opt),
34725 "S" (&pci_indirect),
34726- "m" (opt)
34727+ "m" (opt),
34728+ "r" (__PCIBIOS_DS)
34729 : "memory");
34730 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34731 if (ret & 0xff00)
34732@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34733 {
34734 int ret;
34735
34736- __asm__("lcall *(%%esi); cld\n\t"
34737+ __asm__("movw %w5, %%ds\n\t"
34738+ "lcall *%%ss:(%%esi); cld\n\t"
34739+ "push %%ss\n\t"
34740+ "pop %%ds\n"
34741 "jc 1f\n\t"
34742 "xor %%ah, %%ah\n"
34743 "1:"
34744@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34745 : "0" (PCIBIOS_SET_PCI_HW_INT),
34746 "b" ((dev->bus->number << 8) | dev->devfn),
34747 "c" ((irq << 8) | (pin + 10)),
34748- "S" (&pci_indirect));
34749+ "S" (&pci_indirect),
34750+ "r" (__PCIBIOS_DS));
34751 return !(ret & 0xff00);
34752 }
34753 EXPORT_SYMBOL(pcibios_set_irq_routing);
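
The pcbios.c rewrite stops calling the BIOS32 entry through the flat __KERNEL_CS/DS. bios32_service() now validates the base/length/entry triple the BIOS returns, then installs dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors on every CPU, sized to just the reported service area; each lcall site loads the restricted %ds first and restores it from %ss afterwards. One motivation is presumably that under KERNEXEC the kernel segments are no longer flat, so the BIOS thunk needs its own addressable window. The descriptor setup is ordinary GDT plumbing; a sketch of one entry, with the type/flag values copied from the hunk and GDT_ENTRY_PCIBIOS_CS defined elsewhere in the patch:

    struct desc_struct d;

    /* present, DPL0, 32-bit, exec/read code segment over the BIOS area */
    pack_descriptor(&d, address, length, 0x9b, flags);
    write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
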
34754diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34755index 40e7cda..c7e6672 100644
34756--- a/arch/x86/platform/efi/efi_32.c
34757+++ b/arch/x86/platform/efi/efi_32.c
34758@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34759 {
34760 struct desc_ptr gdt_descr;
34761
34762+#ifdef CONFIG_PAX_KERNEXEC
34763+ struct desc_struct d;
34764+#endif
34765+
34766 local_irq_save(efi_rt_eflags);
34767
34768 load_cr3(initial_page_table);
34769 __flush_tlb_all();
34770
34771+#ifdef CONFIG_PAX_KERNEXEC
34772+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34773+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34774+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34775+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34776+#endif
34777+
34778 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34779 gdt_descr.size = GDT_SIZE - 1;
34780 load_gdt(&gdt_descr);
34781@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34782 {
34783 struct desc_ptr gdt_descr;
34784
34785+#ifdef CONFIG_PAX_KERNEXEC
34786+ struct desc_struct d;
34787+
34788+ memset(&d, 0, sizeof d);
34789+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34790+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34791+#endif
34792+
34793 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34794 gdt_descr.size = GDT_SIZE - 1;
34795 load_gdt(&gdt_descr);
34796
34797+#ifdef CONFIG_PAX_PER_CPU_PGD
34798+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34799+#else
34800 load_cr3(swapper_pg_dir);
34801+#endif
34802+
34803 __flush_tlb_all();
34804
34805 local_irq_restore(efi_rt_eflags);
34806diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34807index 17e80d8..9fa6e41 100644
34808--- a/arch/x86/platform/efi/efi_64.c
34809+++ b/arch/x86/platform/efi/efi_64.c
34810@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34811 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34812 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34813 }
34814+
34815+#ifdef CONFIG_PAX_PER_CPU_PGD
34816+ load_cr3(swapper_pg_dir);
34817+#endif
34818+
34819 __flush_tlb_all();
34820 }
34821
34822@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34823 for (pgd = 0; pgd < n_pgds; pgd++)
34824 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34825 kfree(save_pgd);
34826+
34827+#ifdef CONFIG_PAX_PER_CPU_PGD
34828+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34829+#endif
34830+
34831 __flush_tlb_all();
34832 local_irq_restore(efi_flags);
34833 early_code_mapping_set_exec(0);
34834@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34835 unsigned npages;
34836 pgd_t *pgd;
34837
34838- if (efi_enabled(EFI_OLD_MEMMAP))
34839+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34840+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34841+ * able to execute the EFI services.
34842+ */
34843+ if (__supported_pte_mask & _PAGE_NX) {
34844+ unsigned long addr = (unsigned long) __va(0);
34845+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34846+
34847+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34848+#ifdef CONFIG_PAX_PER_CPU_PGD
34849+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34850+#endif
34851+ set_pgd(pgd_offset_k(addr), pe);
34852+ }
34853+
34854 return 0;
34855+ }
34856
34857 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34858 pgd = __va(efi_scratch.efi_pgt);
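
The EFI changes follow one rule: whenever the firmware calling environment is entered or left, the correct page-table root must be reinstated by hand, because with PAX_PER_CPU_PGD the kernel no longer runs on swapper_pg_dir. The 32-bit prolog additionally installs temporary KERNEXEC code/data descriptors for the flat-mode call and wipes them in the epilog, while the 64-bit efi=old_map path clears NX on the low direct mapping so the firmware regions stay executable. A sketch of that last step, mirroring the hunk:

    /* strip NX from the PGD entry covering the low direct map */
    if (__supported_pte_mask & _PAGE_NX) {
            unsigned long addr = (unsigned long)__va(0);
            pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);

            set_pgd(pgd_offset_k(addr), pe);
    }
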
34859diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34860index 040192b..7d3300f 100644
34861--- a/arch/x86/platform/efi/efi_stub_32.S
34862+++ b/arch/x86/platform/efi/efi_stub_32.S
34863@@ -6,7 +6,9 @@
34864 */
34865
34866 #include <linux/linkage.h>
34867+#include <linux/init.h>
34868 #include <asm/page_types.h>
34869+#include <asm/segment.h>
34870
34871 /*
34872 * efi_call_phys(void *, ...) is a function with variable parameters.
34873@@ -20,7 +22,7 @@
34874 * service functions will comply with gcc calling convention, too.
34875 */
34876
34877-.text
34878+__INIT
34879 ENTRY(efi_call_phys)
34880 /*
34881 * 0. The function can only be called in Linux kernel. So CS has been
34882@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34883 * The mapping of lower virtual memory has been created in prolog and
34884 * epilog.
34885 */
34886- movl $1f, %edx
34887- subl $__PAGE_OFFSET, %edx
34888- jmp *%edx
34889+#ifdef CONFIG_PAX_KERNEXEC
34890+ movl $(__KERNEXEC_EFI_DS), %edx
34891+ mov %edx, %ds
34892+ mov %edx, %es
34893+ mov %edx, %ss
34894+ addl $2f,(1f)
34895+ ljmp *(1f)
34896+
34897+__INITDATA
34898+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34899+.previous
34900+
34901+2:
34902+ subl $2b,(1b)
34903+#else
34904+ jmp 1f-__PAGE_OFFSET
34905 1:
34906+#endif
34907
34908 /*
34909 * 2. Now on the top of stack is the return
34910@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34911 * parameter 2, ..., param n. To make things easy, we save the return
34912 * address of efi_call_phys in a global variable.
34913 */
34914- popl %edx
34915- movl %edx, saved_return_addr
34916- /* get the function pointer into ECX*/
34917- popl %ecx
34918- movl %ecx, efi_rt_function_ptr
34919- movl $2f, %edx
34920- subl $__PAGE_OFFSET, %edx
34921- pushl %edx
34922+ popl (saved_return_addr)
34923+ popl (efi_rt_function_ptr)
34924
34925 /*
34926 * 3. Clear PG bit in %CR0.
34927@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34928 /*
34929 * 5. Call the physical function.
34930 */
34931- jmp *%ecx
34932+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34933
34934-2:
34935 /*
34936 * 6. After EFI runtime service returns, control will return to
34937 * following instruction. We'd better readjust stack pointer first.
34938@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34939 movl %cr0, %edx
34940 orl $0x80000000, %edx
34941 movl %edx, %cr0
34942- jmp 1f
34943-1:
34944+
34945 /*
34946 * 8. Now restore the virtual mode from flat mode by
34947 * adding EIP with PAGE_OFFSET.
34948 */
34949- movl $1f, %edx
34950- jmp *%edx
34951+#ifdef CONFIG_PAX_KERNEXEC
34952+ movl $(__KERNEL_DS), %edx
34953+ mov %edx, %ds
34954+ mov %edx, %es
34955+ mov %edx, %ss
34956+ ljmp $(__KERNEL_CS),$1f
34957+#else
34958+ jmp 1f+__PAGE_OFFSET
34959+#endif
34960 1:
34961
34962 /*
34963 * 9. Balance the stack. And because EAX contain the return value,
34964 * we'd better not clobber it.
34965 */
34966- leal efi_rt_function_ptr, %edx
34967- movl (%edx), %ecx
34968- pushl %ecx
34969+ pushl (efi_rt_function_ptr)
34970
34971 /*
34972- * 10. Push the saved return address onto the stack and return.
34973+ * 10. Return to the saved return address.
34974 */
34975- leal saved_return_addr, %edx
34976- movl (%edx), %ecx
34977- pushl %ecx
34978- ret
34979+ jmpl *(saved_return_addr)
34980 ENDPROC(efi_call_phys)
34981 .previous
34982
34983-.data
34984+__INITDATA
34985 saved_return_addr:
34986 .long 0
34987 efi_rt_function_ptr:
34988diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34989index 86d0f9e..6d499f4 100644
34990--- a/arch/x86/platform/efi/efi_stub_64.S
34991+++ b/arch/x86/platform/efi/efi_stub_64.S
34992@@ -11,6 +11,7 @@
34993 #include <asm/msr.h>
34994 #include <asm/processor-flags.h>
34995 #include <asm/page_types.h>
34996+#include <asm/alternative-asm.h>
34997
34998 #define SAVE_XMM \
34999 mov %rsp, %rax; \
35000@@ -88,6 +89,7 @@ ENTRY(efi_call)
35001 RESTORE_PGT
35002 addq $48, %rsp
35003 RESTORE_XMM
35004+ pax_force_retaddr 0, 1
35005 ret
35006 ENDPROC(efi_call)
35007
35008diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35009index 3005f0c..d06aeb0 100644
35010--- a/arch/x86/platform/intel-mid/intel-mid.c
35011+++ b/arch/x86/platform/intel-mid/intel-mid.c
35012@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35013 /* intel_mid_ops to store sub arch ops */
35014 struct intel_mid_ops *intel_mid_ops;
35015 /* getter function for sub arch ops*/
35016-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35017+static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35018 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35019 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35020
35021@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35022 {
35023 };
35024
35025-static void intel_mid_reboot(void)
35026+static void __noreturn intel_mid_reboot(void)
35027 {
35028 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35029+ BUG();
35030 }
35031
35032 static unsigned long __init intel_mid_calibrate_tsc(void)
35033diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35034index 3c1c386..59a68ed 100644
35035--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35036+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35037@@ -13,6 +13,6 @@
35038 /* For every CPU addition a new get_<cpuname>_ops interface needs
35039 * to be added.
35040 */
35041-extern void *get_penwell_ops(void);
35042-extern void *get_cloverview_ops(void);
35043-extern void *get_tangier_ops(void);
35044+extern const void *get_penwell_ops(void);
35045+extern const void *get_cloverview_ops(void);
35046+extern const void *get_tangier_ops(void);
35047diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35048index 23381d2..8ddc10e 100644
35049--- a/arch/x86/platform/intel-mid/mfld.c
35050+++ b/arch/x86/platform/intel-mid/mfld.c
35051@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35052 pm_power_off = mfld_power_off;
35053 }
35054
35055-void *get_penwell_ops(void)
35056+const void *get_penwell_ops(void)
35057 {
35058 return &penwell_ops;
35059 }
35060
35061-void *get_cloverview_ops(void)
35062+const void *get_cloverview_ops(void)
35063 {
35064 return &penwell_ops;
35065 }
35066diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35067index aaca917..66eadbc 100644
35068--- a/arch/x86/platform/intel-mid/mrfl.c
35069+++ b/arch/x86/platform/intel-mid/mrfl.c
35070@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35071 .arch_setup = tangier_arch_setup,
35072 };
35073
35074-void *get_tangier_ops(void)
35075+const void *get_tangier_ops(void)
35076 {
35077 return &tangier_ops;
35078 }
35079diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35080index c9a0838..fae0977 100644
35081--- a/arch/x86/platform/intel-quark/imr_selftest.c
35082+++ b/arch/x86/platform/intel-quark/imr_selftest.c
35083@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35084 */
35085 static void __init imr_self_test(void)
35086 {
35087- phys_addr_t base = virt_to_phys(&_text);
35088+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35089 size_t size = virt_to_phys(&__end_rodata) - base;
35090 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35091 int ret;
35092diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35093index d6ee929..3637cb5 100644
35094--- a/arch/x86/platform/olpc/olpc_dt.c
35095+++ b/arch/x86/platform/olpc/olpc_dt.c
35096@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35097 return res;
35098 }
35099
35100-static struct of_pdt_ops prom_olpc_ops __initdata = {
35101+static struct of_pdt_ops prom_olpc_ops __initconst = {
35102 .nextprop = olpc_dt_nextprop,
35103 .getproplen = olpc_dt_getproplen,
35104 .getproperty = olpc_dt_getproperty,
35105diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35106index 3e32ed5..cc0adc5 100644
35107--- a/arch/x86/power/cpu.c
35108+++ b/arch/x86/power/cpu.c
35109@@ -134,11 +134,8 @@ static void do_fpu_end(void)
35110 static void fix_processor_context(void)
35111 {
35112 int cpu = smp_processor_id();
35113- struct tss_struct *t = &per_cpu(init_tss, cpu);
35114-#ifdef CONFIG_X86_64
35115- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35116- tss_desc tss;
35117-#endif
35118+ struct tss_struct *t = init_tss + cpu;
35119+
35120 set_tss_desc(cpu, t); /*
35121 * This just modifies memory; should not be
35122 * necessary. But... This is necessary, because
35123@@ -147,10 +144,6 @@ static void fix_processor_context(void)
35124 */
35125
35126 #ifdef CONFIG_X86_64
35127- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35128- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35129- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35130-
35131 syscall_init(); /* This sets MSR_*STAR and related */
35132 #endif
35133 load_TR_desc(); /* This does ltr */
35134diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35135index 0b7a63d..0d0f2c2 100644
35136--- a/arch/x86/realmode/init.c
35137+++ b/arch/x86/realmode/init.c
35138@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35139 __va(real_mode_header->trampoline_header);
35140
35141 #ifdef CONFIG_X86_32
35142- trampoline_header->start = __pa_symbol(startup_32_smp);
35143+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35144+
35145+#ifdef CONFIG_PAX_KERNEXEC
35146+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35147+#endif
35148+
35149+ trampoline_header->boot_cs = __BOOT_CS;
35150 trampoline_header->gdt_limit = __BOOT_DS + 7;
35151 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35152 #else
35153@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35154 *trampoline_cr4_features = __read_cr4();
35155
35156 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35157- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35158+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35159 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35160 #endif
35161 }
35162diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35163index 2730d77..2e4cd19 100644
35164--- a/arch/x86/realmode/rm/Makefile
35165+++ b/arch/x86/realmode/rm/Makefile
35166@@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35167
35168 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35169 -I$(srctree)/arch/x86/boot
35170+ifdef CONSTIFY_PLUGIN
35171+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35172+endif
35173 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35174 GCOV_PROFILE := n
35175diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35176index a28221d..93c40f1 100644
35177--- a/arch/x86/realmode/rm/header.S
35178+++ b/arch/x86/realmode/rm/header.S
35179@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35180 #endif
35181 /* APM/BIOS reboot */
35182 .long pa_machine_real_restart_asm
35183-#ifdef CONFIG_X86_64
35184+#ifdef CONFIG_X86_32
35185+ .long __KERNEL_CS
35186+#else
35187 .long __KERNEL32_CS
35188 #endif
35189 END(real_mode_header)
35190diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35191index 48ddd76..c26749f 100644
35192--- a/arch/x86/realmode/rm/trampoline_32.S
35193+++ b/arch/x86/realmode/rm/trampoline_32.S
35194@@ -24,6 +24,12 @@
35195 #include <asm/page_types.h>
35196 #include "realmode.h"
35197
35198+#ifdef CONFIG_PAX_KERNEXEC
35199+#define ta(X) (X)
35200+#else
35201+#define ta(X) (pa_ ## X)
35202+#endif
35203+
35204 .text
35205 .code16
35206
35207@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35208
35209 cli # We should be safe anyway
35210
35211- movl tr_start, %eax # where we need to go
35212-
35213 movl $0xA5A5A5A5, trampoline_status
35214 # write marker for master knows we're running
35215
35216@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35217 movw $1, %dx # protected mode (PE) bit
35218 lmsw %dx # into protected mode
35219
35220- ljmpl $__BOOT_CS, $pa_startup_32
35221+ ljmpl *(trampoline_header)
35222
35223 .section ".text32","ax"
35224 .code32
35225@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35226 .balign 8
35227 GLOBAL(trampoline_header)
35228 tr_start: .space 4
35229- tr_gdt_pad: .space 2
35230+ tr_boot_cs: .space 2
35231 tr_gdt: .space 6
35232 END(trampoline_header)
35233
35234diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35235index dac7b20..72dbaca 100644
35236--- a/arch/x86/realmode/rm/trampoline_64.S
35237+++ b/arch/x86/realmode/rm/trampoline_64.S
35238@@ -93,6 +93,7 @@ ENTRY(startup_32)
35239 movl %edx, %gs
35240
35241 movl pa_tr_cr4, %eax
35242+ andl $~X86_CR4_PCIDE, %eax
35243 movl %eax, %cr4 # Enable PAE mode
35244
35245 # Setup trampoline 4 level pagetables
35246@@ -106,7 +107,7 @@ ENTRY(startup_32)
35247 wrmsr
35248
35249 # Enable paging and in turn activate Long Mode
35250- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35251+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35252 movl %eax, %cr0
35253
35254 /*
35255diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35256index 9e7e147..25a4158 100644
35257--- a/arch/x86/realmode/rm/wakeup_asm.S
35258+++ b/arch/x86/realmode/rm/wakeup_asm.S
35259@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35260 lgdtl pmode_gdt
35261
35262 /* This really couldn't... */
35263- movl pmode_entry, %eax
35264 movl pmode_cr0, %ecx
35265 movl %ecx, %cr0
35266- ljmpl $__KERNEL_CS, $pa_startup_32
35267- /* -> jmp *%eax in trampoline_32.S */
35268+
35269+ ljmpl *pmode_entry
35270 #else
35271 jmp trampoline_start
35272 #endif
35273diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35274index 604a37e..e49702a 100644
35275--- a/arch/x86/tools/Makefile
35276+++ b/arch/x86/tools/Makefile
35277@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35278
35279 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35280
35281-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35282+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35283 hostprogs-y += relocs
35284 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35285 PHONY += relocs
35286diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35287index 0c2fae8..88036b7 100644
35288--- a/arch/x86/tools/relocs.c
35289+++ b/arch/x86/tools/relocs.c
35290@@ -1,5 +1,7 @@
35291 /* This is included from relocs_32/64.c */
35292
35293+#include "../../../include/generated/autoconf.h"
35294+
35295 #define ElfW(type) _ElfW(ELF_BITS, type)
35296 #define _ElfW(bits, type) __ElfW(bits, type)
35297 #define __ElfW(bits, type) Elf##bits##_##type
35298@@ -11,6 +13,7 @@
35299 #define Elf_Sym ElfW(Sym)
35300
35301 static Elf_Ehdr ehdr;
35302+static Elf_Phdr *phdr;
35303
35304 struct relocs {
35305 uint32_t *offset;
35306@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35307 }
35308 }
35309
35310+static void read_phdrs(FILE *fp)
35311+{
35312+ unsigned int i;
35313+
35314+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35315+ if (!phdr) {
35316+ die("Unable to allocate %d program headers\n",
35317+ ehdr.e_phnum);
35318+ }
35319+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35320+ die("Seek to %d failed: %s\n",
35321+ ehdr.e_phoff, strerror(errno));
35322+ }
35323+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35324+ die("Cannot read ELF program headers: %s\n",
35325+ strerror(errno));
35326+ }
35327+ for(i = 0; i < ehdr.e_phnum; i++) {
35328+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35329+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35330+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35331+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35332+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35333+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35334+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35335+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35336+ }
35337+
35338+}
35339+
35340 static void read_shdrs(FILE *fp)
35341 {
35342- int i;
35343+ unsigned int i;
35344 Elf_Shdr shdr;
35345
35346 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35347@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35348
35349 static void read_strtabs(FILE *fp)
35350 {
35351- int i;
35352+ unsigned int i;
35353 for (i = 0; i < ehdr.e_shnum; i++) {
35354 struct section *sec = &secs[i];
35355 if (sec->shdr.sh_type != SHT_STRTAB) {
35356@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35357
35358 static void read_symtabs(FILE *fp)
35359 {
35360- int i,j;
35361+ unsigned int i,j;
35362 for (i = 0; i < ehdr.e_shnum; i++) {
35363 struct section *sec = &secs[i];
35364 if (sec->shdr.sh_type != SHT_SYMTAB) {
35365@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35366 }
35367
35368
35369-static void read_relocs(FILE *fp)
35370+static void read_relocs(FILE *fp, int use_real_mode)
35371 {
35372- int i,j;
35373+ unsigned int i,j;
35374+ uint32_t base;
35375+
35376 for (i = 0; i < ehdr.e_shnum; i++) {
35377 struct section *sec = &secs[i];
35378 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35379@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35380 die("Cannot read symbol table: %s\n",
35381 strerror(errno));
35382 }
35383+ base = 0;
35384+
35385+#ifdef CONFIG_X86_32
35386+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35387+ if (phdr[j].p_type != PT_LOAD )
35388+ continue;
35389+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35390+ continue;
35391+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35392+ break;
35393+ }
35394+#endif
35395+
35396 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35397 Elf_Rel *rel = &sec->reltab[j];
35398- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35399+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35400 rel->r_info = elf_xword_to_cpu(rel->r_info);
35401 #if (SHT_REL_TYPE == SHT_RELA)
35402 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35403@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35404
35405 static void print_absolute_symbols(void)
35406 {
35407- int i;
35408+ unsigned int i;
35409 const char *format;
35410
35411 if (ELF_BITS == 64)
35412@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35413 for (i = 0; i < ehdr.e_shnum; i++) {
35414 struct section *sec = &secs[i];
35415 char *sym_strtab;
35416- int j;
35417+ unsigned int j;
35418
35419 if (sec->shdr.sh_type != SHT_SYMTAB) {
35420 continue;
35421@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35422
35423 static void print_absolute_relocs(void)
35424 {
35425- int i, printed = 0;
35426+ unsigned int i, printed = 0;
35427 const char *format;
35428
35429 if (ELF_BITS == 64)
35430@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35431 struct section *sec_applies, *sec_symtab;
35432 char *sym_strtab;
35433 Elf_Sym *sh_symtab;
35434- int j;
35435+ unsigned int j;
35436 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35437 continue;
35438 }
35439@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35440 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35441 Elf_Sym *sym, const char *symname))
35442 {
35443- int i;
35444+ unsigned int i;
35445 /* Walk through the relocations */
35446 for (i = 0; i < ehdr.e_shnum; i++) {
35447 char *sym_strtab;
35448 Elf_Sym *sh_symtab;
35449 struct section *sec_applies, *sec_symtab;
35450- int j;
35451+ unsigned int j;
35452 struct section *sec = &secs[i];
35453
35454 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35455@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35456 {
35457 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35458 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35459+ char *sym_strtab = sec->link->link->strtab;
35460+
35461+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35462+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35463+ return 0;
35464+
35465+#ifdef CONFIG_PAX_KERNEXEC
35466+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35467+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35468+ return 0;
35469+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35470+ return 0;
35471+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35472+ return 0;
35473+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35474+ return 0;
35475+#endif
35476
35477 switch (r_type) {
35478 case R_386_NONE:
35479@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35480
35481 static void emit_relocs(int as_text, int use_real_mode)
35482 {
35483- int i;
35484+ unsigned int i;
35485 int (*write_reloc)(uint32_t, FILE *) = write32;
35486 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35487 const char *symname);
35488@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35489 {
35490 regex_init(use_real_mode);
35491 read_ehdr(fp);
35492+ read_phdrs(fp);
35493 read_shdrs(fp);
35494 read_strtabs(fp);
35495 read_symtabs(fp);
35496- read_relocs(fp);
35497+ read_relocs(fp, use_real_mode);
35498 if (ELF_BITS == 64)
35499 percpu_init();
35500 if (show_absolute_syms) {
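The relocs-tool hunks above do two things: read_phdrs() loads and byte-swaps the ELF program headers, and the CONFIG_X86_32 block in read_relocs() rebases every relocation offset by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr of the PT_LOAD segment containing the section, mapping file offsets back to the kernel's virtual addresses. A minimal user-space sketch of the same header read (error handling trimmed, the elf_*_to_cpu byte-swaps omitted):

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

/* Read the ELF32 program header table: seek to e_phoff, read e_phnum
 * entries, exactly the sequence read_phdrs() performs above. */
static Elf32_Phdr *load_phdrs(FILE *fp, const Elf32_Ehdr *ehdr)
{
	Elf32_Phdr *phdr = calloc(ehdr->e_phnum, sizeof(*phdr));

	if (!phdr)
		return NULL;
	if (fseek(fp, ehdr->e_phoff, SEEK_SET) < 0 ||
	    fread(phdr, sizeof(*phdr), ehdr->e_phnum, fp) != ehdr->e_phnum) {
		free(phdr);
		return NULL;
	}
	return phdr;
}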
35501diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35502index f40281e..92728c9 100644
35503--- a/arch/x86/um/mem_32.c
35504+++ b/arch/x86/um/mem_32.c
35505@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35506 gate_vma.vm_start = FIXADDR_USER_START;
35507 gate_vma.vm_end = FIXADDR_USER_END;
35508 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35509- gate_vma.vm_page_prot = __P101;
35510+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35511
35512 return 0;
35513 }
35514diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35515index 80ffa5b..a33bd15 100644
35516--- a/arch/x86/um/tls_32.c
35517+++ b/arch/x86/um/tls_32.c
35518@@ -260,7 +260,7 @@ out:
35519 if (unlikely(task == current &&
35520 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35521 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35522- "without flushed TLS.", current->pid);
35523+ "without flushed TLS.", task_pid_nr(current));
35524 }
35525
35526 return 0;
35527diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35528index 7b9be98..39bb57f 100644
35529--- a/arch/x86/vdso/Makefile
35530+++ b/arch/x86/vdso/Makefile
35531@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35532 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35533 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35534
35535-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35536+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35537 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35538 GCOV_PROFILE := n
35539
35540diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35541index 0224987..c7d65a5 100644
35542--- a/arch/x86/vdso/vdso2c.h
35543+++ b/arch/x86/vdso/vdso2c.h
35544@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35545 unsigned long load_size = -1; /* Work around bogus warning */
35546 unsigned long mapping_size;
35547 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35548- int i;
35549+ unsigned int i;
35550 unsigned long j;
35551 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35552 *alt_sec = NULL;
35553diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35554index e904c27..b9eaa03 100644
35555--- a/arch/x86/vdso/vdso32-setup.c
35556+++ b/arch/x86/vdso/vdso32-setup.c
35557@@ -14,6 +14,7 @@
35558 #include <asm/cpufeature.h>
35559 #include <asm/processor.h>
35560 #include <asm/vdso.h>
35561+#include <asm/mman.h>
35562
35563 #ifdef CONFIG_COMPAT_VDSO
35564 #define VDSO_DEFAULT 0
35565diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35566index 1c9f750..cfddb1a 100644
35567--- a/arch/x86/vdso/vma.c
35568+++ b/arch/x86/vdso/vma.c
35569@@ -19,10 +19,7 @@
35570 #include <asm/page.h>
35571 #include <asm/hpet.h>
35572 #include <asm/desc.h>
35573-
35574-#if defined(CONFIG_X86_64)
35575-unsigned int __read_mostly vdso64_enabled = 1;
35576-#endif
35577+#include <asm/mman.h>
35578
35579 void __init init_vdso_image(const struct vdso_image *image)
35580 {
35581@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35582 .pages = no_pages,
35583 };
35584
35585+#ifdef CONFIG_PAX_RANDMMAP
35586+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35587+ calculate_addr = false;
35588+#endif
35589+
35590 if (calculate_addr) {
35591 addr = vdso_addr(current->mm->start_stack,
35592 image->size - image->sym_vvar_start);
35593@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35594 down_write(&mm->mmap_sem);
35595
35596 addr = get_unmapped_area(NULL, addr,
35597- image->size - image->sym_vvar_start, 0, 0);
35598+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35599 if (IS_ERR_VALUE(addr)) {
35600 ret = addr;
35601 goto up_fail;
35602 }
35603
35604 text_start = addr - image->sym_vvar_start;
35605- current->mm->context.vdso = (void __user *)text_start;
35606+ mm->context.vdso = text_start;
35607
35608 /*
35609 * MAYWRITE to allow gdb to COW and set breakpoints
35610@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35611 hpet_address >> PAGE_SHIFT,
35612 PAGE_SIZE,
35613 pgprot_noncached(PAGE_READONLY));
35614-
35615- if (ret)
35616- goto up_fail;
35617 }
35618 #endif
35619
35620 up_fail:
35621 if (ret)
35622- current->mm->context.vdso = NULL;
35623+ current->mm->context.vdso = 0;
35624
35625 up_write(&mm->mmap_sem);
35626 return ret;
35627@@ -191,8 +190,8 @@ static int load_vdso32(void)
35628
35629 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35630 current_thread_info()->sysenter_return =
35631- current->mm->context.vdso +
35632- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35633+ (void __force_user *)(current->mm->context.vdso +
35634+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35635
35636 return 0;
35637 }
35638@@ -201,9 +200,6 @@ static int load_vdso32(void)
35639 #ifdef CONFIG_X86_64
35640 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35641 {
35642- if (!vdso64_enabled)
35643- return 0;
35644-
35645 return map_vdso(&vdso_image_64, true);
35646 }
35647
35648@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35649 int uses_interp)
35650 {
35651 #ifdef CONFIG_X86_X32_ABI
35652- if (test_thread_flag(TIF_X32)) {
35653- if (!vdso64_enabled)
35654- return 0;
35655-
35656+ if (test_thread_flag(TIF_X32))
35657 return map_vdso(&vdso_image_x32, true);
35658- }
35659 #endif
35660
35661 return load_vdso32();
35662@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35663 #endif
35664
35665 #ifdef CONFIG_X86_64
35666-static __init int vdso_setup(char *s)
35667-{
35668- vdso64_enabled = simple_strtoul(s, NULL, 0);
35669- return 0;
35670-}
35671-__setup("vdso=", vdso_setup);
35672-#endif
35673-
35674-#ifdef CONFIG_X86_64
35675 static void vgetcpu_cpu_init(void *arg)
35676 {
35677 int cpu = smp_processor_id();
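The vma.c hunks above make map_vdso() ignore the stack-relative address hint whenever PAX_RANDMMAP is active, so the vDSO is placed by the (randomized) get_unmapped_area() instead of being derived from the stack top. A runnable user-space check of where the kernel actually put it, via the standard auxiliary vector:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR is the address of the vDSO's ELF header; under a
	 * randomizing policy it changes from run to run. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	printf("vDSO mapped at %#lx\n", vdso);
	return 0;
}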
35678diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35679index e88fda8..76ce7ce 100644
35680--- a/arch/x86/xen/Kconfig
35681+++ b/arch/x86/xen/Kconfig
35682@@ -9,6 +9,7 @@ config XEN
35683 select XEN_HAVE_PVMMU
35684 depends on X86_64 || (X86_32 && X86_PAE)
35685 depends on X86_TSC
35686+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35687 help
35688 This is the Linux Xen port. Enabling this will allow the
35689 kernel to boot in a paravirtualized environment under the
35690diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35691index 5240f56..0c12163 100644
35692--- a/arch/x86/xen/enlighten.c
35693+++ b/arch/x86/xen/enlighten.c
35694@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35695
35696 struct shared_info xen_dummy_shared_info;
35697
35698-void *xen_initial_gdt;
35699-
35700 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35701 __read_mostly int xen_have_vector_callback;
35702 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35703@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35704 {
35705 unsigned long va = dtr->address;
35706 unsigned int size = dtr->size + 1;
35707- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35708- unsigned long frames[pages];
35709+ unsigned long frames[65536 / PAGE_SIZE];
35710 int f;
35711
35712 /*
35713@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35714 {
35715 unsigned long va = dtr->address;
35716 unsigned int size = dtr->size + 1;
35717- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35718- unsigned long frames[pages];
35719+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35720 int f;
35721
35722 /*
35723@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35724 * 8-byte entries, or 16 4k pages..
35725 */
35726
35727- BUG_ON(size > 65536);
35728+ BUG_ON(size > GDT_SIZE);
35729 BUG_ON(va & ~PAGE_MASK);
35730
35731 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35732@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35733 return 0;
35734 }
35735
35736-static void set_xen_basic_apic_ops(void)
35737+static void __init set_xen_basic_apic_ops(void)
35738 {
35739 apic->read = xen_apic_read;
35740 apic->write = xen_apic_write;
35741@@ -1308,30 +1304,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35742 #endif
35743 };
35744
35745-static void xen_reboot(int reason)
35746+static __noreturn void xen_reboot(int reason)
35747 {
35748 struct sched_shutdown r = { .reason = reason };
35749
35750- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35751- BUG();
35752+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35753+ BUG();
35754 }
35755
35756-static void xen_restart(char *msg)
35757+static __noreturn void xen_restart(char *msg)
35758 {
35759 xen_reboot(SHUTDOWN_reboot);
35760 }
35761
35762-static void xen_emergency_restart(void)
35763+static __noreturn void xen_emergency_restart(void)
35764 {
35765 xen_reboot(SHUTDOWN_reboot);
35766 }
35767
35768-static void xen_machine_halt(void)
35769+static __noreturn void xen_machine_halt(void)
35770 {
35771 xen_reboot(SHUTDOWN_poweroff);
35772 }
35773
35774-static void xen_machine_power_off(void)
35775+static __noreturn void xen_machine_power_off(void)
35776 {
35777 if (pm_power_off)
35778 pm_power_off();
35779@@ -1484,8 +1480,11 @@ static void __ref xen_setup_gdt(int cpu)
35780 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35781 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35782
35783- setup_stack_canary_segment(0);
35784- switch_to_new_gdt(0);
35785+ setup_stack_canary_segment(cpu);
35786+#ifdef CONFIG_X86_64
35787+ load_percpu_segment(cpu);
35788+#endif
35789+ switch_to_new_gdt(cpu);
35790
35791 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35792 pv_cpu_ops.load_gdt = xen_load_gdt;
35793@@ -1600,7 +1599,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35794 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35795
35796 /* Work out if we support NX */
35797- x86_configure_nx();
35798+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35799+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35800+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35801+ unsigned l, h;
35802+
35803+ __supported_pte_mask |= _PAGE_NX;
35804+ rdmsr(MSR_EFER, l, h);
35805+ l |= EFER_NX;
35806+ wrmsr(MSR_EFER, l, h);
35807+ }
35808+#endif
35809
35810 /* Get mfn list */
35811 xen_build_dynamic_phys_to_machine();
35812@@ -1628,13 +1637,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35813
35814 machine_ops = xen_machine_ops;
35815
35816- /*
35817- * The only reliable way to retain the initial address of the
35818- * percpu gdt_page is to remember it here, so we can go and
35819- * mark it RW later, when the initial percpu area is freed.
35820- */
35821- xen_initial_gdt = &per_cpu(gdt_page, 0);
35822-
35823 xen_smp_init();
35824
35825 #ifdef CONFIG_ACPI_NUMA
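Two enlighten.c hunks above replace the stack VLA frames[pages] with a fixed array sized for the documented GDT maximum, keeping BUG_ON() as the run-time limit. A sketch of the pattern under the stated assumption that GDT_SIZE is 16 pages, with assert() standing in for BUG_ON() and a placeholder where the real code stores virt_to_mfn() values:

#include <assert.h>

#define PAGE_SIZE 4096UL
#define GDT_SIZE  (16 * PAGE_SIZE)

static void load_frames(unsigned long va, unsigned long size)
{
	/* Fixed bound known at compile time, unlike frames[pages]. */
	unsigned long frames[GDT_SIZE / PAGE_SIZE];
	unsigned int f;

	assert(size <= GDT_SIZE);		/* BUG_ON(size > GDT_SIZE) */
	for (f = 0; f * PAGE_SIZE < size; f++)
		frames[f] = va + f * PAGE_SIZE;	/* placeholder for virt_to_mfn() */
	(void)frames;
}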
35826diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35827index adca9e2..35d6a98 100644
35828--- a/arch/x86/xen/mmu.c
35829+++ b/arch/x86/xen/mmu.c
35830@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35831 return val;
35832 }
35833
35834-static pteval_t pte_pfn_to_mfn(pteval_t val)
35835+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35836 {
35837 if (val & _PAGE_PRESENT) {
35838 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35839@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35840 * L3_k[511] -> level2_fixmap_pgt */
35841 convert_pfn_mfn(level3_kernel_pgt);
35842
35843+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35844+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35845+ convert_pfn_mfn(level3_vmemmap_pgt);
35846 /* L3_k[511][506] -> level1_fixmap_pgt */
35847+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35848 convert_pfn_mfn(level2_fixmap_pgt);
35849 }
35850 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35851@@ -1860,11 +1864,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35852 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35853 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35854 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35855+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35856+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35857+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35858 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35859 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35860+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35861 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35862 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35863 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35864+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35865
35866 /* Pin down new L4 */
35867 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35868@@ -2048,6 +2057,7 @@ static void __init xen_post_allocator_init(void)
35869 pv_mmu_ops.set_pud = xen_set_pud;
35870 #if PAGETABLE_LEVELS == 4
35871 pv_mmu_ops.set_pgd = xen_set_pgd;
35872+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35873 #endif
35874
35875 /* This will work as long as patching hasn't happened yet
35876@@ -2126,6 +2136,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35877 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35878 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35879 .set_pgd = xen_set_pgd_hyper,
35880+ .set_pgd_batched = xen_set_pgd_hyper,
35881
35882 .alloc_pud = xen_alloc_pmd_init,
35883 .release_pud = xen_release_pmd_init,
35884diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35885index 08e8489..b1e182f 100644
35886--- a/arch/x86/xen/smp.c
35887+++ b/arch/x86/xen/smp.c
35888@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35889
35890 if (xen_pv_domain()) {
35891 if (!xen_feature(XENFEAT_writable_page_tables))
35892- /* We've switched to the "real" per-cpu gdt, so make
35893- * sure the old memory can be recycled. */
35894- make_lowmem_page_readwrite(xen_initial_gdt);
35895-
35896 #ifdef CONFIG_X86_32
35897 /*
35898 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35899 * expects __USER_DS
35900 */
35901- loadsegment(ds, __USER_DS);
35902- loadsegment(es, __USER_DS);
35903+ loadsegment(ds, __KERNEL_DS);
35904+ loadsegment(es, __KERNEL_DS);
35905 #endif
35906
35907 xen_filter_cpu_maps();
35908@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35909 #ifdef CONFIG_X86_32
35910 /* Note: PVH is not yet supported on x86_32. */
35911 ctxt->user_regs.fs = __KERNEL_PERCPU;
35912- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35913+ savesegment(gs, ctxt->user_regs.gs);
35914 #endif
35915 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35916
35917@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35918 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35919 ctxt->flags = VGCF_IN_KERNEL;
35920 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35921- ctxt->user_regs.ds = __USER_DS;
35922- ctxt->user_regs.es = __USER_DS;
35923+ ctxt->user_regs.ds = __KERNEL_DS;
35924+ ctxt->user_regs.es = __KERNEL_DS;
35925 ctxt->user_regs.ss = __KERNEL_DS;
35926
35927 xen_copy_trap_info(ctxt->trap_ctxt);
35928@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35929 int rc;
35930
35931 per_cpu(current_task, cpu) = idle;
35932+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35933 #ifdef CONFIG_X86_32
35934 irq_ctx_init(cpu);
35935 #else
35936 clear_tsk_thread_flag(idle, TIF_FORK);
35937 #endif
35938- per_cpu(kernel_stack, cpu) =
35939- (unsigned long)task_stack_page(idle) -
35940- KERNEL_STACK_OFFSET + THREAD_SIZE;
35941+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35942
35943 xen_setup_runstate_info(cpu);
35944 xen_setup_timer(cpu);
35945@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35946
35947 void __init xen_smp_init(void)
35948 {
35949- smp_ops = xen_smp_ops;
35950+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35951 xen_fill_possible_map();
35952 }
35953
35954diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35955index fd92a64..1f72641 100644
35956--- a/arch/x86/xen/xen-asm_32.S
35957+++ b/arch/x86/xen/xen-asm_32.S
35958@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35959 pushw %fs
35960 movl $(__KERNEL_PERCPU), %eax
35961 movl %eax, %fs
35962- movl %fs:xen_vcpu, %eax
35963+ mov PER_CPU_VAR(xen_vcpu), %eax
35964 POP_FS
35965 #else
35966 movl %ss:xen_vcpu, %eax
35967diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35968index 674b2225..f1f5dc1 100644
35969--- a/arch/x86/xen/xen-head.S
35970+++ b/arch/x86/xen/xen-head.S
35971@@ -39,6 +39,17 @@ ENTRY(startup_xen)
35972 #ifdef CONFIG_X86_32
35973 mov %esi,xen_start_info
35974 mov $init_thread_union+THREAD_SIZE,%esp
35975+#ifdef CONFIG_SMP
35976+ movl $cpu_gdt_table,%edi
35977+ movl $__per_cpu_load,%eax
35978+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35979+ rorl $16,%eax
35980+ movb %al,__KERNEL_PERCPU + 4(%edi)
35981+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35982+ movl $__per_cpu_end - 1,%eax
35983+ subl $__per_cpu_start,%eax
35984+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35985+#endif
35986 #else
35987 mov %rsi,xen_start_info
35988 mov $init_thread_union+THREAD_SIZE,%rsp
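The startup_xen hunk patches the __KERNEL_PERCPU descriptor in cpu_gdt_table by hand: the segment limit goes into bytes 0-1, base bits 0-15 into bytes 2-3, bits 16-23 into byte 4, and bits 24-31 into byte 7. The same store sequence as a C sketch over one 8-byte x86 descriptor:

#include <stdint.h>

static void set_desc_base_limit(uint8_t *desc, uint32_t base, uint32_t limit)
{
	desc[0] = limit & 0xff;		/* movw: limit 0..15 at offset 0 */
	desc[1] = (limit >> 8) & 0xff;
	desc[2] = base & 0xff;		/* movw: base 0..15 at offset 2 */
	desc[3] = (base >> 8) & 0xff;
	desc[4] = (base >> 16) & 0xff;	/* movb %al after rorl: base 16..23 */
	desc[7] = (base >> 24) & 0xff;	/* movb %ah: base 24..31 */
}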
35989diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35990index 9e195c6..523ed36 100644
35991--- a/arch/x86/xen/xen-ops.h
35992+++ b/arch/x86/xen/xen-ops.h
35993@@ -16,8 +16,6 @@ void xen_syscall_target(void);
35994 void xen_syscall32_target(void);
35995 #endif
35996
35997-extern void *xen_initial_gdt;
35998-
35999 struct trap_info;
36000 void xen_copy_trap_info(struct trap_info *traps);
36001
36002diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36003index 525bd3d..ef888b1 100644
36004--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36005+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36006@@ -119,9 +119,9 @@
36007 ----------------------------------------------------------------------*/
36008
36009 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36010-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36011 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36012 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36013+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36014
36015 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36016 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36017diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36018index 2f33760..835e50a 100644
36019--- a/arch/xtensa/variants/fsf/include/variant/core.h
36020+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36021@@ -11,6 +11,7 @@
36022 #ifndef _XTENSA_CORE_H
36023 #define _XTENSA_CORE_H
36024
36025+#include <linux/const.h>
36026
36027 /****************************************************************************
36028 Parameters Useful for Any Code, USER or PRIVILEGED
36029@@ -112,9 +113,9 @@
36030 ----------------------------------------------------------------------*/
36031
36032 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36033-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36034 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36035 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36036+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36037
36038 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36039 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
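Both xtensa hunks derive XCHAL_DCACHE_LINESIZE from XCHAL_DCACHE_LINEWIDTH (its log2), so the two constants cannot drift apart and the value stays usable from assembly via _AC(). The relation reduced to plain C:

#define DCACHE_LINEWIDTH 5				/* log2 of the line size */
#define DCACHE_LINESIZE  (1UL << DCACHE_LINEWIDTH)	/* 32 bytes */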
36040diff --git a/block/bio.c b/block/bio.c
36041index f66a4ea..73ddf55 100644
36042--- a/block/bio.c
36043+++ b/block/bio.c
36044@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36045 /*
36046 * Overflow, abort
36047 */
36048- if (end < start)
36049+ if (end < start || end - start > INT_MAX - nr_pages)
36050 return ERR_PTR(-EINVAL);
36051
36052 nr_pages += end - start;
36053@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36054 /*
36055 * Overflow, abort
36056 */
36057- if (end < start)
36058+ if (end < start || end - start > INT_MAX - nr_pages)
36059 return ERR_PTR(-EINVAL);
36060
36061 nr_pages += end - start;
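Both bio.c hunks strengthen the wrap check: besides end < start, they reject any range whose page count would push the accumulating nr_pages past INT_MAX. A self-contained sketch of the guarded accumulation, assuming 4 KiB pages and a non-negative running count:

#include <limits.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

static int add_range_pages(int nr_pages, uintptr_t uaddr, size_t len, int *out)
{
	uintptr_t start = uaddr >> PAGE_SHIFT;
	uintptr_t end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Reject wrap-around of the range itself and of the total. */
	if (end < start || end - start > (uintptr_t)(INT_MAX - nr_pages))
		return -1;	/* the kernel code returns ERR_PTR(-EINVAL) */
	*out = nr_pages + (int)(end - start);
	return 0;
}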
36062diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36063index 0736729..2ec3b48 100644
36064--- a/block/blk-iopoll.c
36065+++ b/block/blk-iopoll.c
36066@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36067 }
36068 EXPORT_SYMBOL(blk_iopoll_complete);
36069
36070-static void blk_iopoll_softirq(struct softirq_action *h)
36071+static __latent_entropy void blk_iopoll_softirq(void)
36072 {
36073 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36074 int rearm = 0, budget = blk_iopoll_budget;
36075diff --git a/block/blk-map.c b/block/blk-map.c
36076index b8d2725..08c52b0 100644
36077--- a/block/blk-map.c
36078+++ b/block/blk-map.c
36079@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36080 if (!len || !kbuf)
36081 return -EINVAL;
36082
36083- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36084+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36085 if (do_copy)
36086 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36087 else
36088diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36089index 53b1737..08177d2e 100644
36090--- a/block/blk-softirq.c
36091+++ b/block/blk-softirq.c
36092@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36093 * Softirq action handler - move entries to local list and loop over them
36094 * while passing them to the queue registered handler.
36095 */
36096-static void blk_done_softirq(struct softirq_action *h)
36097+static __latent_entropy void blk_done_softirq(void)
36098 {
36099 struct list_head *cpu_list, local_list;
36100
36101diff --git a/block/bsg.c b/block/bsg.c
36102index d214e92..9649863 100644
36103--- a/block/bsg.c
36104+++ b/block/bsg.c
36105@@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36106 struct sg_io_v4 *hdr, struct bsg_device *bd,
36107 fmode_t has_write_perm)
36108 {
36109+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36110+ unsigned char *cmdptr;
36111+
36112 if (hdr->request_len > BLK_MAX_CDB) {
36113 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36114 if (!rq->cmd)
36115 return -ENOMEM;
36116- }
36117+ cmdptr = rq->cmd;
36118+ } else
36119+ cmdptr = tmpcmd;
36120
36121- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36122+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36123 hdr->request_len))
36124 return -EFAULT;
36125
36126+ if (cmdptr != rq->cmd)
36127+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36128+
36129 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36130 if (blk_verify_command(rq->cmd, has_write_perm))
36131 return -EPERM;
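The bsg.c hunk is a double-fetch fix: the SCSI CDB is read from user space exactly once, into a stack buffer (or the freshly allocated oversized one), and only afterwards installed into rq->cmd, so a thread racing on the user buffer cannot swap in a different command after blk_verify_command() has approved it. A sketch of the copy-then-verify shape; copy_in and verify are hypothetical callbacks standing in for copy_from_user() and blk_verify_command():

#include <string.h>

static int fetch_and_verify_cmd(unsigned char *dst, size_t dst_len,
				const void *user_src, size_t len,
				int (*copy_in)(void *, const void *, size_t),
				int (*verify)(const unsigned char *, size_t))
{
	unsigned char snap[16];	/* like tmpcmd above, sized for short CDBs */

	if (len > sizeof(snap) || len > dst_len)
		return -1;
	if (copy_in(snap, user_src, len))
		return -1;			/* -EFAULT in the kernel */
	if (verify(snap, len))
		return -1;			/* -EPERM in the kernel */
	memcpy(dst, snap, len);	/* install the already-verified snapshot */
	return 0;
}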
36132diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36133index f678c73..f35aa18 100644
36134--- a/block/compat_ioctl.c
36135+++ b/block/compat_ioctl.c
36136@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36137 cgc = compat_alloc_user_space(sizeof(*cgc));
36138 cgc32 = compat_ptr(arg);
36139
36140- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36141+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36142 get_user(data, &cgc32->buffer) ||
36143 put_user(compat_ptr(data), &cgc->buffer) ||
36144 copy_in_user(&cgc->buflen, &cgc32->buflen,
36145@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36146 err |= __get_user(f->spec1, &uf->spec1);
36147 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36148 err |= __get_user(name, &uf->name);
36149- f->name = compat_ptr(name);
36150+ f->name = (void __force_kernel *)compat_ptr(name);
36151 if (err) {
36152 err = -EFAULT;
36153 goto out;
36154diff --git a/block/genhd.c b/block/genhd.c
36155index 0a536dc..b8f7aca 100644
36156--- a/block/genhd.c
36157+++ b/block/genhd.c
36158@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36159
36160 /*
36161 * Register device numbers dev..(dev+range-1)
36162- * range must be nonzero
36163+ * Noop if @range is zero.
36164 * The hash chain is sorted on range, so that subranges can override.
36165 */
36166 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36167 struct kobject *(*probe)(dev_t, int *, void *),
36168 int (*lock)(dev_t, void *), void *data)
36169 {
36170- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36171+ if (range)
36172+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36173 }
36174
36175 EXPORT_SYMBOL(blk_register_region);
36176
36177+/* undo blk_register_region(), noop if @range is zero */
36178 void blk_unregister_region(dev_t devt, unsigned long range)
36179 {
36180- kobj_unmap(bdev_map, devt, range);
36181+ if (range)
36182+ kobj_unmap(bdev_map, devt, range);
36183 }
36184
36185 EXPORT_SYMBOL(blk_unregister_region);
36186diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36187index 26cb624..a49c3a5 100644
36188--- a/block/partitions/efi.c
36189+++ b/block/partitions/efi.c
36190@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36191 if (!gpt)
36192 return NULL;
36193
36194+ if (!le32_to_cpu(gpt->num_partition_entries))
36195+ return NULL;
36196+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36197+ if (!pte)
36198+ return NULL;
36199+
36200 count = le32_to_cpu(gpt->num_partition_entries) *
36201 le32_to_cpu(gpt->sizeof_partition_entry);
36202- if (!count)
36203- return NULL;
36204- pte = kmalloc(count, GFP_KERNEL);
36205- if (!pte)
36206- return NULL;
36207-
36208 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36209 (u8 *) pte, count) < count) {
36210 kfree(pte);
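The efi.c hunk swaps kmalloc(n * size) for kcalloc(n, size): both factors come from an on-disk GPT header, so the open-coded 32-bit product can wrap to a small allocation that the following read_lba() then overflows. calloc-style allocators check the multiplication; the same guard in user-space form:

#include <stdint.h>
#include <stdlib.h>

static void *alloc_entries(uint32_t nmemb, uint32_t size)
{
	if (nmemb == 0 || size == 0)
		return NULL;		/* mirrors the early NULL return above */
	if (nmemb > SIZE_MAX / size)
		return NULL;		/* product would wrap size_t */
	return calloc(nmemb, size);	/* calloc performs this check itself */
}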
36211diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36212index e1f71c3..02d295a 100644
36213--- a/block/scsi_ioctl.c
36214+++ b/block/scsi_ioctl.c
36215@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36216 return put_user(0, p);
36217 }
36218
36219-static int sg_get_timeout(struct request_queue *q)
36220+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36221 {
36222 return jiffies_to_clock_t(q->sg_timeout);
36223 }
36224@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36225 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36226 struct sg_io_hdr *hdr, fmode_t mode)
36227 {
36228- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36229+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36230+ unsigned char *cmdptr;
36231+
36232+ if (rq->cmd != rq->__cmd)
36233+ cmdptr = rq->cmd;
36234+ else
36235+ cmdptr = tmpcmd;
36236+
36237+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36238 return -EFAULT;
36239+
36240+ if (cmdptr != rq->cmd)
36241+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36242+
36243 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36244 return -EPERM;
36245
36246@@ -422,6 +434,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36247 int err;
36248 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36249 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36250+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36251+ unsigned char *cmdptr;
36252
36253 if (!sic)
36254 return -EINVAL;
36255@@ -460,9 +474,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36256 */
36257 err = -EFAULT;
36258 rq->cmd_len = cmdlen;
36259- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36260+
36261+ if (rq->cmd != rq->__cmd)
36262+ cmdptr = rq->cmd;
36263+ else
36264+ cmdptr = tmpcmd;
36265+
36266+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36267 goto error;
36268
36269+ if (rq->cmd != cmdptr)
36270+ memcpy(rq->cmd, cmdptr, cmdlen);
36271+
36272 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36273 goto error;
36274
36275diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36276index 650afac1..f3307de 100644
36277--- a/crypto/cryptd.c
36278+++ b/crypto/cryptd.c
36279@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36280
36281 struct cryptd_blkcipher_request_ctx {
36282 crypto_completion_t complete;
36283-};
36284+} __no_const;
36285
36286 struct cryptd_hash_ctx {
36287 struct crypto_shash *child;
36288@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36289
36290 struct cryptd_aead_request_ctx {
36291 crypto_completion_t complete;
36292-};
36293+} __no_const;
36294
36295 static void cryptd_queue_worker(struct work_struct *work);
36296
36297diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36298index c305d41..a96de79 100644
36299--- a/crypto/pcrypt.c
36300+++ b/crypto/pcrypt.c
36301@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36302 int ret;
36303
36304 pinst->kobj.kset = pcrypt_kset;
36305- ret = kobject_add(&pinst->kobj, NULL, name);
36306+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36307 if (!ret)
36308 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36309
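The pcrypt.c hunk fixes a format-string bug: kobject_add() takes a printf-style format, so handing it the instance name directly lets any '%' sequences in that name be interpreted. The bug class in its smallest form:

#include <stdio.h>

void log_name_bad(const char *untrusted)
{
	printf(untrusted);		/* name is interpreted as a format */
}

void log_name_good(const char *untrusted)
{
	printf("%s", untrusted);	/* name is plain data, as in the fix */
}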
36310diff --git a/crypto/zlib.c b/crypto/zlib.c
36311index 0eefa9d..0fa3d29 100644
36312--- a/crypto/zlib.c
36313+++ b/crypto/zlib.c
36314@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36315 zlib_comp_exit(ctx);
36316
36317 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36318- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36319+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36320 : MAX_WBITS;
36321 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36322- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36323+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36324 : DEF_MEM_LEVEL;
36325
36326 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
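The zlib.c hunk reads ZLIB_COMP_WINDOWBITS and ZLIB_COMP_MEMLEVEL as signed values: zlib defines negative windowBits to mean a raw deflate stream with no header, and nla_get_u32() would turn such a value into a huge positive one. A minimal call against the real zlib API showing negative windowBits in use:

#include <string.h>
#include <zlib.h>

int init_raw_deflate(z_stream *strm)
{
	memset(strm, 0, sizeof(*strm));
	/* -15 selects a raw stream with a 32 KiB window; 8 is the
	 * conventional default memLevel. */
	return deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			    -15, 8, Z_DEFAULT_STRATEGY);
}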
36327diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36328index 3b37676..898edfa 100644
36329--- a/drivers/acpi/acpica/hwxfsleep.c
36330+++ b/drivers/acpi/acpica/hwxfsleep.c
36331@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36332 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36333
36334 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36335- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36336- acpi_hw_extended_sleep},
36337- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36338- acpi_hw_extended_wake_prep},
36339- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36340+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36341+ .extended_function = acpi_hw_extended_sleep},
36342+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36343+ .extended_function = acpi_hw_extended_wake_prep},
36344+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36345+ .extended_function = acpi_hw_extended_wake}
36346 };
36347
36348 /*
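The hwxfsleep.c hunk converts the dispatch table from positional to designated initializers, which stays correct if struct acpi_sleep_functions is ever reordered or grows a field. The idiom in a self-contained form, over a hypothetical struct of the same shape:

#include <stddef.h>

struct sleep_functions {
	int (*legacy_function)(unsigned char state);
	int (*extended_function)(unsigned char state);
};

static int ext_sleep(unsigned char state) { return (int)state; }

static const struct sleep_functions dispatch[] = {
	{ .legacy_function   = NULL,	/* optional on reduced hardware */
	  .extended_function = ext_sleep },
};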
36349diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36350index 16129c7..8b675cd 100644
36351--- a/drivers/acpi/apei/apei-internal.h
36352+++ b/drivers/acpi/apei/apei-internal.h
36353@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36354 struct apei_exec_ins_type {
36355 u32 flags;
36356 apei_exec_ins_func_t run;
36357-};
36358+} __do_const;
36359
36360 struct apei_exec_context {
36361 u32 ip;
36362diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36363index e82d097..0c855c1 100644
36364--- a/drivers/acpi/apei/ghes.c
36365+++ b/drivers/acpi/apei/ghes.c
36366@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36367 const struct acpi_hest_generic *generic,
36368 const struct acpi_hest_generic_status *estatus)
36369 {
36370- static atomic_t seqno;
36371+ static atomic_unchecked_t seqno;
36372 unsigned int curr_seqno;
36373 char pfx_seq[64];
36374
36375@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36376 else
36377 pfx = KERN_ERR;
36378 }
36379- curr_seqno = atomic_inc_return(&seqno);
36380+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36381 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36382 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36383 pfx_seq, generic->header.source_id);
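This is the first of many conversions in the drivers below: counters that are pure statistics (sequence numbers, tx/rx/error tallies) become atomic_unchecked_t, because under the REFCOUNT hardening elsewhere in this patch an atomic_t that overflows traps, and these counters are allowed to wrap. A conceptual model using C11 atomics; atomic_unchecked_t itself is a patch-defined type, this is only an illustration:

#include <stdatomic.h>

static atomic_uint seqno;	/* "unchecked": wrap-around is harmless */

unsigned int next_seqno(void)
{
	/* Behaves like atomic_inc_return_unchecked(): unsigned overflow
	 * is well defined and tolerated for a statistic. */
	return atomic_fetch_add(&seqno, 1) + 1;
}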
36384diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36385index a83e3c6..c3d617f 100644
36386--- a/drivers/acpi/bgrt.c
36387+++ b/drivers/acpi/bgrt.c
36388@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36389 if (!bgrt_image)
36390 return -ENODEV;
36391
36392- bin_attr_image.private = bgrt_image;
36393- bin_attr_image.size = bgrt_image_size;
36394+ pax_open_kernel();
36395+ *(void **)&bin_attr_image.private = bgrt_image;
36396+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36397+ pax_close_kernel();
36398
36399 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36400 if (!bgrt_kobj)
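The bgrt.c hunk writes through casts bracketed by pax_open_kernel()/pax_close_kernel(): constification has made bin_attr_image read-only at run time, and the bracket opens a short write window for exactly these two stores. A user-space analogue with mprotect(), assuming page is page-aligned:

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int patch_field(void *page, size_t off, const void *val, size_t n)
{
	size_t ps = (size_t)sysconf(_SC_PAGESIZE);

	if (mprotect(page, ps, PROT_READ | PROT_WRITE))
		return -1;			/* "pax_open_kernel()" */
	memcpy((char *)page + off, val, n);	/* the one permitted store */
	return mprotect(page, ps, PROT_READ);	/* "pax_close_kernel()" */
}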
36401diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36402index 9b693d5..8953d54 100644
36403--- a/drivers/acpi/blacklist.c
36404+++ b/drivers/acpi/blacklist.c
36405@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36406 u32 is_critical_error;
36407 };
36408
36409-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36410+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36411
36412 /*
36413 * POLICY: If *anything* doesn't work, put it on the blacklist.
36414@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36415 return 0;
36416 }
36417
36418-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36419+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36420 {
36421 .callback = dmi_disable_osi_vista,
36422 .ident = "Fujitsu Siemens",
36423diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36424index 8b67bd0..b59593e 100644
36425--- a/drivers/acpi/bus.c
36426+++ b/drivers/acpi/bus.c
36427@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36428 }
36429 #endif
36430
36431-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36432+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36433 /*
36434 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36435 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36436@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36437 {}
36438 };
36439 #else
36440-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36441+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36442 {}
36443 };
36444 #endif
36445diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36446index c68e724..e863008 100644
36447--- a/drivers/acpi/custom_method.c
36448+++ b/drivers/acpi/custom_method.c
36449@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36450 struct acpi_table_header table;
36451 acpi_status status;
36452
36453+#ifdef CONFIG_GRKERNSEC_KMEM
36454+ return -EPERM;
36455+#endif
36456+
36457 if (!(*ppos)) {
36458 /* parse the table header to get the table length */
36459 if (count <= sizeof(struct acpi_table_header))
36460diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36461index 735db11..91e07ff 100644
36462--- a/drivers/acpi/device_pm.c
36463+++ b/drivers/acpi/device_pm.c
36464@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36465
36466 #endif /* CONFIG_PM_SLEEP */
36467
36468+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36469+
36470 static struct dev_pm_domain acpi_general_pm_domain = {
36471 .ops = {
36472 .runtime_suspend = acpi_subsys_runtime_suspend,
36473@@ -1041,6 +1043,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36474 .restore_early = acpi_subsys_resume_early,
36475 #endif
36476 },
36477+ .detach = acpi_dev_pm_detach
36478 };
36479
36480 /**
36481@@ -1110,7 +1113,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36482 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36483 }
36484
36485- dev->pm_domain->detach = acpi_dev_pm_detach;
36486 return 0;
36487 }
36488 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36489diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36490index a8dd2f7..e15950e 100644
36491--- a/drivers/acpi/ec.c
36492+++ b/drivers/acpi/ec.c
36493@@ -1242,7 +1242,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36494 return 0;
36495 }
36496
36497-static struct dmi_system_id ec_dmi_table[] __initdata = {
36498+static const struct dmi_system_id ec_dmi_table[] __initconst = {
36499 {
36500 ec_skip_dsdt_scan, "Compal JFL92", {
36501 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36502diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36503index 139d9e4..9a9d799 100644
36504--- a/drivers/acpi/pci_slot.c
36505+++ b/drivers/acpi/pci_slot.c
36506@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36507 return 0;
36508 }
36509
36510-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36511+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36512 /*
36513 * Fujitsu Primequest machines will return 1023 to indicate an
36514 * error if the _SUN method is evaluated on SxFy objects that
36515diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36516index d9f7158..168e742 100644
36517--- a/drivers/acpi/processor_driver.c
36518+++ b/drivers/acpi/processor_driver.c
36519@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36520 return NOTIFY_OK;
36521 }
36522
36523-static struct notifier_block __refdata acpi_cpu_notifier = {
36524+static struct notifier_block __refconst acpi_cpu_notifier = {
36525 .notifier_call = acpi_cpu_soft_notify,
36526 };
36527
36528diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36529index f98db0b..8309c83 100644
36530--- a/drivers/acpi/processor_idle.c
36531+++ b/drivers/acpi/processor_idle.c
36532@@ -912,7 +912,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36533 {
36534 int i, count = CPUIDLE_DRIVER_STATE_START;
36535 struct acpi_processor_cx *cx;
36536- struct cpuidle_state *state;
36537+ cpuidle_state_no_const *state;
36538 struct cpuidle_driver *drv = &acpi_idle_driver;
36539
36540 if (!pr->flags.power_setup_done)
36541diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36542index e5dd808..1eceed1 100644
36543--- a/drivers/acpi/processor_pdc.c
36544+++ b/drivers/acpi/processor_pdc.c
36545@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36546 return 0;
36547 }
36548
36549-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36550+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36551 {
36552 set_no_mwait, "Extensa 5220", {
36553 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36554diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36555index 7f251dd..47b262c 100644
36556--- a/drivers/acpi/sleep.c
36557+++ b/drivers/acpi/sleep.c
36558@@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36559 return 0;
36560 }
36561
36562-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36563+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36564 {
36565 .callback = init_old_suspend_ordering,
36566 .ident = "Abit KN9 (nForce4 variant)",
36567diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36568index 13e577c..cef11ee 100644
36569--- a/drivers/acpi/sysfs.c
36570+++ b/drivers/acpi/sysfs.c
36571@@ -423,11 +423,11 @@ static u32 num_counters;
36572 static struct attribute **all_attrs;
36573 static u32 acpi_gpe_count;
36574
36575-static struct attribute_group interrupt_stats_attr_group = {
36576+static attribute_group_no_const interrupt_stats_attr_group = {
36577 .name = "interrupts",
36578 };
36579
36580-static struct kobj_attribute *counter_attrs;
36581+static kobj_attribute_no_const *counter_attrs;
36582
36583 static void delete_gpe_attr_array(void)
36584 {
36585diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36586index d24fa19..782f1e6 100644
36587--- a/drivers/acpi/thermal.c
36588+++ b/drivers/acpi/thermal.c
36589@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36590 return 0;
36591 }
36592
36593-static struct dmi_system_id thermal_dmi_table[] __initdata = {
36594+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36595 /*
36596 * Award BIOS on this AOpen makes thermal control almost worthless.
36597 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36598diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36599index 26eb70c..4d66ddf 100644
36600--- a/drivers/acpi/video.c
36601+++ b/drivers/acpi/video.c
36602@@ -418,7 +418,7 @@ static int __init video_disable_native_backlight(const struct dmi_system_id *d)
36603 return 0;
36604 }
36605
36606-static struct dmi_system_id video_dmi_table[] __initdata = {
36607+static const struct dmi_system_id video_dmi_table[] __initconst = {
36608 /*
36609 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36610 */
36611diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36612index 61a9c07..ea98fa1 100644
36613--- a/drivers/ata/libahci.c
36614+++ b/drivers/ata/libahci.c
36615@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36616 }
36617 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36618
36619-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36620+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36621 struct ata_taskfile *tf, int is_cmd, u16 flags,
36622 unsigned long timeout_msec)
36623 {
36624diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36625index 23dac3b..89ada44 100644
36626--- a/drivers/ata/libata-core.c
36627+++ b/drivers/ata/libata-core.c
36628@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36629 static void ata_dev_xfermask(struct ata_device *dev);
36630 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36631
36632-atomic_t ata_print_id = ATOMIC_INIT(0);
36633+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36634
36635 struct ata_force_param {
36636 const char *name;
36637@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36638 struct ata_port *ap;
36639 unsigned int tag;
36640
36641- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36642+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36643 ap = qc->ap;
36644
36645 qc->flags = 0;
36646@@ -4797,7 +4797,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36647 struct ata_port *ap;
36648 struct ata_link *link;
36649
36650- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36651+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36652 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36653 ap = qc->ap;
36654 link = qc->dev->link;
36655@@ -5901,6 +5901,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36656 return;
36657
36658 spin_lock(&lock);
36659+ pax_open_kernel();
36660
36661 for (cur = ops->inherits; cur; cur = cur->inherits) {
36662 void **inherit = (void **)cur;
36663@@ -5914,8 +5915,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36664 if (IS_ERR(*pp))
36665 *pp = NULL;
36666
36667- ops->inherits = NULL;
36668+ *(struct ata_port_operations **)&ops->inherits = NULL;
36669
36670+ pax_close_kernel();
36671 spin_unlock(&lock);
36672 }
36673
36674@@ -6111,7 +6113,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36675
36676 /* give ports names and add SCSI hosts */
36677 for (i = 0; i < host->n_ports; i++) {
36678- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36679+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36680 host->ports[i]->local_port_no = i + 1;
36681 }
36682
36683diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36684index b061ba2..fdcd85f 100644
36685--- a/drivers/ata/libata-scsi.c
36686+++ b/drivers/ata/libata-scsi.c
36687@@ -4172,7 +4172,7 @@ int ata_sas_port_init(struct ata_port *ap)
36688
36689 if (rc)
36690 return rc;
36691- ap->print_id = atomic_inc_return(&ata_print_id);
36692+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36693 return 0;
36694 }
36695 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36696diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36697index f840ca1..edd6ef3 100644
36698--- a/drivers/ata/libata.h
36699+++ b/drivers/ata/libata.h
36700@@ -53,7 +53,7 @@ enum {
36701 ATA_DNXFER_QUIET = (1 << 31),
36702 };
36703
36704-extern atomic_t ata_print_id;
36705+extern atomic_unchecked_t ata_print_id;
36706 extern int atapi_passthru16;
36707 extern int libata_fua;
36708 extern int libata_noacpi;
36709diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36710index a9b0c82..207d97d 100644
36711--- a/drivers/ata/pata_arasan_cf.c
36712+++ b/drivers/ata/pata_arasan_cf.c
36713@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36714 /* Handle platform specific quirks */
36715 if (quirk) {
36716 if (quirk & CF_BROKEN_PIO) {
36717- ap->ops->set_piomode = NULL;
36718+ pax_open_kernel();
36719+ *(void **)&ap->ops->set_piomode = NULL;
36720+ pax_close_kernel();
36721 ap->pio_mask = 0;
36722 }
36723 if (quirk & CF_BROKEN_MWDMA)
36724diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36725index f9b983a..887b9d8 100644
36726--- a/drivers/atm/adummy.c
36727+++ b/drivers/atm/adummy.c
36728@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36729 vcc->pop(vcc, skb);
36730 else
36731 dev_kfree_skb_any(skb);
36732- atomic_inc(&vcc->stats->tx);
36733+ atomic_inc_unchecked(&vcc->stats->tx);
36734
36735 return 0;
36736 }
36737diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36738index f1a9198..f466a4a 100644
36739--- a/drivers/atm/ambassador.c
36740+++ b/drivers/atm/ambassador.c
36741@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36742 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36743
36744 // VC layer stats
36745- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36746+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36747
36748 // free the descriptor
36749 kfree (tx_descr);
36750@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36751 dump_skb ("<<<", vc, skb);
36752
36753 // VC layer stats
36754- atomic_inc(&atm_vcc->stats->rx);
36755+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36756 __net_timestamp(skb);
36757 // end of our responsibility
36758 atm_vcc->push (atm_vcc, skb);
36759@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36760 } else {
36761 PRINTK (KERN_INFO, "dropped over-size frame");
36762 // should we count this?
36763- atomic_inc(&atm_vcc->stats->rx_drop);
36764+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36765 }
36766
36767 } else {
36768@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36769 }
36770
36771 if (check_area (skb->data, skb->len)) {
36772- atomic_inc(&atm_vcc->stats->tx_err);
36773+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36774 return -ENOMEM; // ?
36775 }
36776
36777diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36778index 480fa6f..947067c 100644
36779--- a/drivers/atm/atmtcp.c
36780+++ b/drivers/atm/atmtcp.c
36781@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36782 if (vcc->pop) vcc->pop(vcc,skb);
36783 else dev_kfree_skb(skb);
36784 if (dev_data) return 0;
36785- atomic_inc(&vcc->stats->tx_err);
36786+ atomic_inc_unchecked(&vcc->stats->tx_err);
36787 return -ENOLINK;
36788 }
36789 size = skb->len+sizeof(struct atmtcp_hdr);
36790@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36791 if (!new_skb) {
36792 if (vcc->pop) vcc->pop(vcc,skb);
36793 else dev_kfree_skb(skb);
36794- atomic_inc(&vcc->stats->tx_err);
36795+ atomic_inc_unchecked(&vcc->stats->tx_err);
36796 return -ENOBUFS;
36797 }
36798 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36799@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36800 if (vcc->pop) vcc->pop(vcc,skb);
36801 else dev_kfree_skb(skb);
36802 out_vcc->push(out_vcc,new_skb);
36803- atomic_inc(&vcc->stats->tx);
36804- atomic_inc(&out_vcc->stats->rx);
36805+ atomic_inc_unchecked(&vcc->stats->tx);
36806+ atomic_inc_unchecked(&out_vcc->stats->rx);
36807 return 0;
36808 }
36809
36810@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36811 read_unlock(&vcc_sklist_lock);
36812 if (!out_vcc) {
36813 result = -EUNATCH;
36814- atomic_inc(&vcc->stats->tx_err);
36815+ atomic_inc_unchecked(&vcc->stats->tx_err);
36816 goto done;
36817 }
36818 skb_pull(skb,sizeof(struct atmtcp_hdr));
36819@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36820 __net_timestamp(new_skb);
36821 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36822 out_vcc->push(out_vcc,new_skb);
36823- atomic_inc(&vcc->stats->tx);
36824- atomic_inc(&out_vcc->stats->rx);
36825+ atomic_inc_unchecked(&vcc->stats->tx);
36826+ atomic_inc_unchecked(&out_vcc->stats->rx);
36827 done:
36828 if (vcc->pop) vcc->pop(vcc,skb);
36829 else dev_kfree_skb(skb);
36830diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36831index 6339efd..2b441d5 100644
36832--- a/drivers/atm/eni.c
36833+++ b/drivers/atm/eni.c
36834@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36835 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36836 vcc->dev->number);
36837 length = 0;
36838- atomic_inc(&vcc->stats->rx_err);
36839+ atomic_inc_unchecked(&vcc->stats->rx_err);
36840 }
36841 else {
36842 length = ATM_CELL_SIZE-1; /* no HEC */
36843@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36844 size);
36845 }
36846 eff = length = 0;
36847- atomic_inc(&vcc->stats->rx_err);
36848+ atomic_inc_unchecked(&vcc->stats->rx_err);
36849 }
36850 else {
36851 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36852@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36853 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36854 vcc->dev->number,vcc->vci,length,size << 2,descr);
36855 length = eff = 0;
36856- atomic_inc(&vcc->stats->rx_err);
36857+ atomic_inc_unchecked(&vcc->stats->rx_err);
36858 }
36859 }
36860 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36861@@ -770,7 +770,7 @@ rx_dequeued++;
36862 vcc->push(vcc,skb);
36863 pushed++;
36864 }
36865- atomic_inc(&vcc->stats->rx);
36866+ atomic_inc_unchecked(&vcc->stats->rx);
36867 }
36868 wake_up(&eni_dev->rx_wait);
36869 }
36870@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36871 DMA_TO_DEVICE);
36872 if (vcc->pop) vcc->pop(vcc,skb);
36873 else dev_kfree_skb_irq(skb);
36874- atomic_inc(&vcc->stats->tx);
36875+ atomic_inc_unchecked(&vcc->stats->tx);
36876 wake_up(&eni_dev->tx_wait);
36877 dma_complete++;
36878 }
36879diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36880index 82f2ae0..f205c02 100644
36881--- a/drivers/atm/firestream.c
36882+++ b/drivers/atm/firestream.c
36883@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36884 }
36885 }
36886
36887- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36888+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36889
36890 fs_dprintk (FS_DEBUG_TXMEM, "i");
36891 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36892@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36893 #endif
36894 skb_put (skb, qe->p1 & 0xffff);
36895 ATM_SKB(skb)->vcc = atm_vcc;
36896- atomic_inc(&atm_vcc->stats->rx);
36897+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36898 __net_timestamp(skb);
36899 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36900 atm_vcc->push (atm_vcc, skb);
36901@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36902 kfree (pe);
36903 }
36904 if (atm_vcc)
36905- atomic_inc(&atm_vcc->stats->rx_drop);
36906+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36907 break;
36908 case 0x1f: /* Reassembly abort: no buffers. */
36909 /* Silently increment error counter. */
36910 if (atm_vcc)
36911- atomic_inc(&atm_vcc->stats->rx_drop);
36912+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36913 break;
36914 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36915 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36916diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36917index 75dde90..4309ead 100644
36918--- a/drivers/atm/fore200e.c
36919+++ b/drivers/atm/fore200e.c
36920@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36921 #endif
36922 /* check error condition */
36923 if (*entry->status & STATUS_ERROR)
36924- atomic_inc(&vcc->stats->tx_err);
36925+ atomic_inc_unchecked(&vcc->stats->tx_err);
36926 else
36927- atomic_inc(&vcc->stats->tx);
36928+ atomic_inc_unchecked(&vcc->stats->tx);
36929 }
36930 }
36931
36932@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36933 if (skb == NULL) {
36934 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36935
36936- atomic_inc(&vcc->stats->rx_drop);
36937+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36938 return -ENOMEM;
36939 }
36940
36941@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36942
36943 dev_kfree_skb_any(skb);
36944
36945- atomic_inc(&vcc->stats->rx_drop);
36946+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36947 return -ENOMEM;
36948 }
36949
36950 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36951
36952 vcc->push(vcc, skb);
36953- atomic_inc(&vcc->stats->rx);
36954+ atomic_inc_unchecked(&vcc->stats->rx);
36955
36956 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36957
36958@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36959 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36960 fore200e->atm_dev->number,
36961 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36962- atomic_inc(&vcc->stats->rx_err);
36963+ atomic_inc_unchecked(&vcc->stats->rx_err);
36964 }
36965 }
36966
36967@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36968 goto retry_here;
36969 }
36970
36971- atomic_inc(&vcc->stats->tx_err);
36972+ atomic_inc_unchecked(&vcc->stats->tx_err);
36973
36974 fore200e->tx_sat++;
36975 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36976diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36977index 93dca2e..c5daa69 100644
36978--- a/drivers/atm/he.c
36979+++ b/drivers/atm/he.c
36980@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36981
36982 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36983 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36984- atomic_inc(&vcc->stats->rx_drop);
36985+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36986 goto return_host_buffers;
36987 }
36988
36989@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36990 RBRQ_LEN_ERR(he_dev->rbrq_head)
36991 ? "LEN_ERR" : "",
36992 vcc->vpi, vcc->vci);
36993- atomic_inc(&vcc->stats->rx_err);
36994+ atomic_inc_unchecked(&vcc->stats->rx_err);
36995 goto return_host_buffers;
36996 }
36997
36998@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36999 vcc->push(vcc, skb);
37000 spin_lock(&he_dev->global_lock);
37001
37002- atomic_inc(&vcc->stats->rx);
37003+ atomic_inc_unchecked(&vcc->stats->rx);
37004
37005 return_host_buffers:
37006 ++pdus_assembled;
37007@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37008 tpd->vcc->pop(tpd->vcc, tpd->skb);
37009 else
37010 dev_kfree_skb_any(tpd->skb);
37011- atomic_inc(&tpd->vcc->stats->tx_err);
37012+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37013 }
37014 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37015 return;
37016@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37017 vcc->pop(vcc, skb);
37018 else
37019 dev_kfree_skb_any(skb);
37020- atomic_inc(&vcc->stats->tx_err);
37021+ atomic_inc_unchecked(&vcc->stats->tx_err);
37022 return -EINVAL;
37023 }
37024
37025@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37026 vcc->pop(vcc, skb);
37027 else
37028 dev_kfree_skb_any(skb);
37029- atomic_inc(&vcc->stats->tx_err);
37030+ atomic_inc_unchecked(&vcc->stats->tx_err);
37031 return -EINVAL;
37032 }
37033 #endif
37034@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37035 vcc->pop(vcc, skb);
37036 else
37037 dev_kfree_skb_any(skb);
37038- atomic_inc(&vcc->stats->tx_err);
37039+ atomic_inc_unchecked(&vcc->stats->tx_err);
37040 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37041 return -ENOMEM;
37042 }
37043@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37044 vcc->pop(vcc, skb);
37045 else
37046 dev_kfree_skb_any(skb);
37047- atomic_inc(&vcc->stats->tx_err);
37048+ atomic_inc_unchecked(&vcc->stats->tx_err);
37049 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37050 return -ENOMEM;
37051 }
37052@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37053 __enqueue_tpd(he_dev, tpd, cid);
37054 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37055
37056- atomic_inc(&vcc->stats->tx);
37057+ atomic_inc_unchecked(&vcc->stats->tx);
37058
37059 return 0;
37060 }
37061diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37062index 527bbd5..96570c8 100644
37063--- a/drivers/atm/horizon.c
37064+++ b/drivers/atm/horizon.c
37065@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37066 {
37067 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37068 // VC layer stats
37069- atomic_inc(&vcc->stats->rx);
37070+ atomic_inc_unchecked(&vcc->stats->rx);
37071 __net_timestamp(skb);
37072 // end of our responsibility
37073 vcc->push (vcc, skb);
37074@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37075 dev->tx_iovec = NULL;
37076
37077 // VC layer stats
37078- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37079+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37080
37081 // free the skb
37082 hrz_kfree_skb (skb);
37083diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37084index 074616b..d6b3d5f 100644
37085--- a/drivers/atm/idt77252.c
37086+++ b/drivers/atm/idt77252.c
37087@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37088 else
37089 dev_kfree_skb(skb);
37090
37091- atomic_inc(&vcc->stats->tx);
37092+ atomic_inc_unchecked(&vcc->stats->tx);
37093 }
37094
37095 atomic_dec(&scq->used);
37096@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37097 if ((sb = dev_alloc_skb(64)) == NULL) {
37098 printk("%s: Can't allocate buffers for aal0.\n",
37099 card->name);
37100- atomic_add(i, &vcc->stats->rx_drop);
37101+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37102 break;
37103 }
37104 if (!atm_charge(vcc, sb->truesize)) {
37105 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37106 card->name);
37107- atomic_add(i - 1, &vcc->stats->rx_drop);
37108+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37109 dev_kfree_skb(sb);
37110 break;
37111 }
37112@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37113 ATM_SKB(sb)->vcc = vcc;
37114 __net_timestamp(sb);
37115 vcc->push(vcc, sb);
37116- atomic_inc(&vcc->stats->rx);
37117+ atomic_inc_unchecked(&vcc->stats->rx);
37118
37119 cell += ATM_CELL_PAYLOAD;
37120 }
37121@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37122 "(CDC: %08x)\n",
37123 card->name, len, rpp->len, readl(SAR_REG_CDC));
37124 recycle_rx_pool_skb(card, rpp);
37125- atomic_inc(&vcc->stats->rx_err);
37126+ atomic_inc_unchecked(&vcc->stats->rx_err);
37127 return;
37128 }
37129 if (stat & SAR_RSQE_CRC) {
37130 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37131 recycle_rx_pool_skb(card, rpp);
37132- atomic_inc(&vcc->stats->rx_err);
37133+ atomic_inc_unchecked(&vcc->stats->rx_err);
37134 return;
37135 }
37136 if (skb_queue_len(&rpp->queue) > 1) {
37137@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37138 RXPRINTK("%s: Can't alloc RX skb.\n",
37139 card->name);
37140 recycle_rx_pool_skb(card, rpp);
37141- atomic_inc(&vcc->stats->rx_err);
37142+ atomic_inc_unchecked(&vcc->stats->rx_err);
37143 return;
37144 }
37145 if (!atm_charge(vcc, skb->truesize)) {
37146@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37147 __net_timestamp(skb);
37148
37149 vcc->push(vcc, skb);
37150- atomic_inc(&vcc->stats->rx);
37151+ atomic_inc_unchecked(&vcc->stats->rx);
37152
37153 return;
37154 }
37155@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37156 __net_timestamp(skb);
37157
37158 vcc->push(vcc, skb);
37159- atomic_inc(&vcc->stats->rx);
37160+ atomic_inc_unchecked(&vcc->stats->rx);
37161
37162 if (skb->truesize > SAR_FB_SIZE_3)
37163 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37164@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37165 if (vcc->qos.aal != ATM_AAL0) {
37166 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37167 card->name, vpi, vci);
37168- atomic_inc(&vcc->stats->rx_drop);
37169+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37170 goto drop;
37171 }
37172
37173 if ((sb = dev_alloc_skb(64)) == NULL) {
37174 printk("%s: Can't allocate buffers for AAL0.\n",
37175 card->name);
37176- atomic_inc(&vcc->stats->rx_err);
37177+ atomic_inc_unchecked(&vcc->stats->rx_err);
37178 goto drop;
37179 }
37180
37181@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37182 ATM_SKB(sb)->vcc = vcc;
37183 __net_timestamp(sb);
37184 vcc->push(vcc, sb);
37185- atomic_inc(&vcc->stats->rx);
37186+ atomic_inc_unchecked(&vcc->stats->rx);
37187
37188 drop:
37189 skb_pull(queue, 64);
37190@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37191
37192 if (vc == NULL) {
37193 printk("%s: NULL connection in send().\n", card->name);
37194- atomic_inc(&vcc->stats->tx_err);
37195+ atomic_inc_unchecked(&vcc->stats->tx_err);
37196 dev_kfree_skb(skb);
37197 return -EINVAL;
37198 }
37199 if (!test_bit(VCF_TX, &vc->flags)) {
37200 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37201- atomic_inc(&vcc->stats->tx_err);
37202+ atomic_inc_unchecked(&vcc->stats->tx_err);
37203 dev_kfree_skb(skb);
37204 return -EINVAL;
37205 }
37206@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37207 break;
37208 default:
37209 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37210- atomic_inc(&vcc->stats->tx_err);
37211+ atomic_inc_unchecked(&vcc->stats->tx_err);
37212 dev_kfree_skb(skb);
37213 return -EINVAL;
37214 }
37215
37216 if (skb_shinfo(skb)->nr_frags != 0) {
37217 printk("%s: No scatter-gather yet.\n", card->name);
37218- atomic_inc(&vcc->stats->tx_err);
37219+ atomic_inc_unchecked(&vcc->stats->tx_err);
37220 dev_kfree_skb(skb);
37221 return -EINVAL;
37222 }
37223@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37224
37225 err = queue_skb(card, vc, skb, oam);
37226 if (err) {
37227- atomic_inc(&vcc->stats->tx_err);
37228+ atomic_inc_unchecked(&vcc->stats->tx_err);
37229 dev_kfree_skb(skb);
37230 return err;
37231 }
37232@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37233 skb = dev_alloc_skb(64);
37234 if (!skb) {
37235 printk("%s: Out of memory in send_oam().\n", card->name);
37236- atomic_inc(&vcc->stats->tx_err);
37237+ atomic_inc_unchecked(&vcc->stats->tx_err);
37238 return -ENOMEM;
37239 }
37240 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37241diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37242index 924f8e2..3375a3e 100644
37243--- a/drivers/atm/iphase.c
37244+++ b/drivers/atm/iphase.c
37245@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37246 status = (u_short) (buf_desc_ptr->desc_mode);
37247 if (status & (RX_CER | RX_PTE | RX_OFL))
37248 {
37249- atomic_inc(&vcc->stats->rx_err);
37250+ atomic_inc_unchecked(&vcc->stats->rx_err);
37251 IF_ERR(printk("IA: bad packet, dropping it");)
37252 if (status & RX_CER) {
37253 IF_ERR(printk(" cause: packet CRC error\n");)
37254@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37255 len = dma_addr - buf_addr;
37256 if (len > iadev->rx_buf_sz) {
37257 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37258- atomic_inc(&vcc->stats->rx_err);
37259+ atomic_inc_unchecked(&vcc->stats->rx_err);
37260 goto out_free_desc;
37261 }
37262
37263@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37264 ia_vcc = INPH_IA_VCC(vcc);
37265 if (ia_vcc == NULL)
37266 {
37267- atomic_inc(&vcc->stats->rx_err);
37268+ atomic_inc_unchecked(&vcc->stats->rx_err);
37269 atm_return(vcc, skb->truesize);
37270 dev_kfree_skb_any(skb);
37271 goto INCR_DLE;
37272@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37273 if ((length > iadev->rx_buf_sz) || (length >
37274 (skb->len - sizeof(struct cpcs_trailer))))
37275 {
37276- atomic_inc(&vcc->stats->rx_err);
37277+ atomic_inc_unchecked(&vcc->stats->rx_err);
37278 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37279 length, skb->len);)
37280 atm_return(vcc, skb->truesize);
37281@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37282
37283 IF_RX(printk("rx_dle_intr: skb push");)
37284 vcc->push(vcc,skb);
37285- atomic_inc(&vcc->stats->rx);
37286+ atomic_inc_unchecked(&vcc->stats->rx);
37287 iadev->rx_pkt_cnt++;
37288 }
37289 INCR_DLE:
37290@@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37291 {
37292 struct k_sonet_stats *stats;
37293 stats = &PRIV(_ia_dev[board])->sonet_stats;
37294- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37295- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37296- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37297- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37298- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37299- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37300- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37301- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37302- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37303+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37304+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37305+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37306+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37307+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37308+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37309+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37310+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37311+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37312 }
37313 ia_cmds.status = 0;
37314 break;
37315@@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37316 if ((desc == 0) || (desc > iadev->num_tx_desc))
37317 {
37318 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37319- atomic_inc(&vcc->stats->tx);
37320+ atomic_inc_unchecked(&vcc->stats->tx);
37321 if (vcc->pop)
37322 vcc->pop(vcc, skb);
37323 else
37324@@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37325 ATM_DESC(skb) = vcc->vci;
37326 skb_queue_tail(&iadev->tx_dma_q, skb);
37327
37328- atomic_inc(&vcc->stats->tx);
37329+ atomic_inc_unchecked(&vcc->stats->tx);
37330 iadev->tx_pkt_cnt++;
37331 /* Increment transaction counter */
37332 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37333
37334 #if 0
37335 /* add flow control logic */
37336- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37337+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37338 if (iavcc->vc_desc_cnt > 10) {
37339 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37340 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37341diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37342index ce43ae3..969de38 100644
37343--- a/drivers/atm/lanai.c
37344+++ b/drivers/atm/lanai.c
37345@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37346 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37347 lanai_endtx(lanai, lvcc);
37348 lanai_free_skb(lvcc->tx.atmvcc, skb);
37349- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37350+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37351 }
37352
37353 /* Try to fill the buffer - don't call unless there is backlog */
37354@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37355 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37356 __net_timestamp(skb);
37357 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37358- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37359+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37360 out:
37361 lvcc->rx.buf.ptr = end;
37362 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37363@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37364 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37365 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37366 lanai->stats.service_rxnotaal5++;
37367- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37368+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37369 return 0;
37370 }
37371 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37372@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37373 int bytes;
37374 read_unlock(&vcc_sklist_lock);
37375 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37376- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37377+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37378 lvcc->stats.x.aal5.service_trash++;
37379 bytes = (SERVICE_GET_END(s) * 16) -
37380 (((unsigned long) lvcc->rx.buf.ptr) -
37381@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37382 }
37383 if (s & SERVICE_STREAM) {
37384 read_unlock(&vcc_sklist_lock);
37385- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37386+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37387 lvcc->stats.x.aal5.service_stream++;
37388 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37389 "PDU on VCI %d!\n", lanai->number, vci);
37390@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37391 return 0;
37392 }
37393 DPRINTK("got rx crc error on vci %d\n", vci);
37394- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37395+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37396 lvcc->stats.x.aal5.service_rxcrc++;
37397 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37398 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37399diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37400index b7e1cc0..eb336bfe 100644
37401--- a/drivers/atm/nicstar.c
37402+++ b/drivers/atm/nicstar.c
37403@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37404 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37405 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37406 card->index);
37407- atomic_inc(&vcc->stats->tx_err);
37408+ atomic_inc_unchecked(&vcc->stats->tx_err);
37409 dev_kfree_skb_any(skb);
37410 return -EINVAL;
37411 }
37412@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37413 if (!vc->tx) {
37414 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37415 card->index);
37416- atomic_inc(&vcc->stats->tx_err);
37417+ atomic_inc_unchecked(&vcc->stats->tx_err);
37418 dev_kfree_skb_any(skb);
37419 return -EINVAL;
37420 }
37421@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37422 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37423 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37424 card->index);
37425- atomic_inc(&vcc->stats->tx_err);
37426+ atomic_inc_unchecked(&vcc->stats->tx_err);
37427 dev_kfree_skb_any(skb);
37428 return -EINVAL;
37429 }
37430
37431 if (skb_shinfo(skb)->nr_frags != 0) {
37432 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37433- atomic_inc(&vcc->stats->tx_err);
37434+ atomic_inc_unchecked(&vcc->stats->tx_err);
37435 dev_kfree_skb_any(skb);
37436 return -EINVAL;
37437 }
37438@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37439 }
37440
37441 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37442- atomic_inc(&vcc->stats->tx_err);
37443+ atomic_inc_unchecked(&vcc->stats->tx_err);
37444 dev_kfree_skb_any(skb);
37445 return -EIO;
37446 }
37447- atomic_inc(&vcc->stats->tx);
37448+ atomic_inc_unchecked(&vcc->stats->tx);
37449
37450 return 0;
37451 }
37452@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37453 printk
37454 ("nicstar%d: Can't allocate buffers for aal0.\n",
37455 card->index);
37456- atomic_add(i, &vcc->stats->rx_drop);
37457+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37458 break;
37459 }
37460 if (!atm_charge(vcc, sb->truesize)) {
37461 RXPRINTK
37462 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37463 card->index);
37464- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37465+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37466 dev_kfree_skb_any(sb);
37467 break;
37468 }
37469@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37470 ATM_SKB(sb)->vcc = vcc;
37471 __net_timestamp(sb);
37472 vcc->push(vcc, sb);
37473- atomic_inc(&vcc->stats->rx);
37474+ atomic_inc_unchecked(&vcc->stats->rx);
37475 cell += ATM_CELL_PAYLOAD;
37476 }
37477
37478@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37479 if (iovb == NULL) {
37480 printk("nicstar%d: Out of iovec buffers.\n",
37481 card->index);
37482- atomic_inc(&vcc->stats->rx_drop);
37483+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37484 recycle_rx_buf(card, skb);
37485 return;
37486 }
37487@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37488 small or large buffer itself. */
37489 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37490 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37491- atomic_inc(&vcc->stats->rx_err);
37492+ atomic_inc_unchecked(&vcc->stats->rx_err);
37493 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37494 NS_MAX_IOVECS);
37495 NS_PRV_IOVCNT(iovb) = 0;
37496@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37497 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37498 card->index);
37499 which_list(card, skb);
37500- atomic_inc(&vcc->stats->rx_err);
37501+ atomic_inc_unchecked(&vcc->stats->rx_err);
37502 recycle_rx_buf(card, skb);
37503 vc->rx_iov = NULL;
37504 recycle_iov_buf(card, iovb);
37505@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37506 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37507 card->index);
37508 which_list(card, skb);
37509- atomic_inc(&vcc->stats->rx_err);
37510+ atomic_inc_unchecked(&vcc->stats->rx_err);
37511 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37512 NS_PRV_IOVCNT(iovb));
37513 vc->rx_iov = NULL;
37514@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37515 printk(" - PDU size mismatch.\n");
37516 else
37517 printk(".\n");
37518- atomic_inc(&vcc->stats->rx_err);
37519+ atomic_inc_unchecked(&vcc->stats->rx_err);
37520 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37521 NS_PRV_IOVCNT(iovb));
37522 vc->rx_iov = NULL;
37523@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37524 /* skb points to a small buffer */
37525 if (!atm_charge(vcc, skb->truesize)) {
37526 push_rxbufs(card, skb);
37527- atomic_inc(&vcc->stats->rx_drop);
37528+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37529 } else {
37530 skb_put(skb, len);
37531 dequeue_sm_buf(card, skb);
37532@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37533 ATM_SKB(skb)->vcc = vcc;
37534 __net_timestamp(skb);
37535 vcc->push(vcc, skb);
37536- atomic_inc(&vcc->stats->rx);
37537+ atomic_inc_unchecked(&vcc->stats->rx);
37538 }
37539 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37540 struct sk_buff *sb;
37541@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37542 if (len <= NS_SMBUFSIZE) {
37543 if (!atm_charge(vcc, sb->truesize)) {
37544 push_rxbufs(card, sb);
37545- atomic_inc(&vcc->stats->rx_drop);
37546+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37547 } else {
37548 skb_put(sb, len);
37549 dequeue_sm_buf(card, sb);
37550@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37551 ATM_SKB(sb)->vcc = vcc;
37552 __net_timestamp(sb);
37553 vcc->push(vcc, sb);
37554- atomic_inc(&vcc->stats->rx);
37555+ atomic_inc_unchecked(&vcc->stats->rx);
37556 }
37557
37558 push_rxbufs(card, skb);
37559@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37560
37561 if (!atm_charge(vcc, skb->truesize)) {
37562 push_rxbufs(card, skb);
37563- atomic_inc(&vcc->stats->rx_drop);
37564+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37565 } else {
37566 dequeue_lg_buf(card, skb);
37567 #ifdef NS_USE_DESTRUCTORS
37568@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37569 ATM_SKB(skb)->vcc = vcc;
37570 __net_timestamp(skb);
37571 vcc->push(vcc, skb);
37572- atomic_inc(&vcc->stats->rx);
37573+ atomic_inc_unchecked(&vcc->stats->rx);
37574 }
37575
37576 push_rxbufs(card, sb);
37577@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37578 printk
37579 ("nicstar%d: Out of huge buffers.\n",
37580 card->index);
37581- atomic_inc(&vcc->stats->rx_drop);
37582+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37583 recycle_iovec_rx_bufs(card,
37584 (struct iovec *)
37585 iovb->data,
37586@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37587 card->hbpool.count++;
37588 } else
37589 dev_kfree_skb_any(hb);
37590- atomic_inc(&vcc->stats->rx_drop);
37591+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37592 } else {
37593 /* Copy the small buffer to the huge buffer */
37594 sb = (struct sk_buff *)iov->iov_base;
37595@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37596 #endif /* NS_USE_DESTRUCTORS */
37597 __net_timestamp(hb);
37598 vcc->push(vcc, hb);
37599- atomic_inc(&vcc->stats->rx);
37600+ atomic_inc_unchecked(&vcc->stats->rx);
37601 }
37602 }
37603
37604diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37605index 74e18b0..f16afa0 100644
37606--- a/drivers/atm/solos-pci.c
37607+++ b/drivers/atm/solos-pci.c
37608@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37609 }
37610 atm_charge(vcc, skb->truesize);
37611 vcc->push(vcc, skb);
37612- atomic_inc(&vcc->stats->rx);
37613+ atomic_inc_unchecked(&vcc->stats->rx);
37614 break;
37615
37616 case PKT_STATUS:
37617@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37618 vcc = SKB_CB(oldskb)->vcc;
37619
37620 if (vcc) {
37621- atomic_inc(&vcc->stats->tx);
37622+ atomic_inc_unchecked(&vcc->stats->tx);
37623 solos_pop(vcc, oldskb);
37624 } else {
37625 dev_kfree_skb_irq(oldskb);
37626diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37627index 0215934..ce9f5b1 100644
37628--- a/drivers/atm/suni.c
37629+++ b/drivers/atm/suni.c
37630@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37631
37632
37633 #define ADD_LIMITED(s,v) \
37634- atomic_add((v),&stats->s); \
37635- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37636+ atomic_add_unchecked((v),&stats->s); \
37637+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37638
37639
37640 static void suni_hz(unsigned long from_timer)
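
[annotation] ADD_LIMITED is the clearest example of why these counters must not trap: it deliberately lets the sum wrap and then saturates at INT_MAX, so under PAX_REFCOUNT the old atomic_add() would fire on exactly the overflow the macro is designed to absorb (uPD98402.c below carries the same macro and gets the same treatment). A standalone illustration of the saturation idiom, with a hypothetical helper name; the unsigned detour keeps the wrap well-defined in plain C, whereas the in-tree macro can wrap the int directly because the kernel builds with -fno-strict-overflow:

#include <limits.h>
#include <stdio.h>

/* add, then pin at INT_MAX if the sum wrapped negative */
static void add_limited(int *ctr, int v)
{
	*ctr = (int)((unsigned int)*ctr + (unsigned int)v);
	if (*ctr < 0)
		*ctr = INT_MAX;
}

int main(void)
{
	int s = INT_MAX - 2;

	add_limited(&s, 10);	/* wraps past INT_MAX, then saturates */
	printf("%d\n", s);	/* prints 2147483647 */
	return 0;
}
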
37641diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37642index 5120a96..e2572bd 100644
37643--- a/drivers/atm/uPD98402.c
37644+++ b/drivers/atm/uPD98402.c
37645@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37646 struct sonet_stats tmp;
37647 int error = 0;
37648
37649- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37650+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37651 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37652 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37653 if (zero && !error) {
37654@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37655
37656
37657 #define ADD_LIMITED(s,v) \
37658- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37659- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37660- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37661+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37662+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37663+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37664
37665
37666 static void stat_event(struct atm_dev *dev)
37667@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37668 if (reason & uPD98402_INT_PFM) stat_event(dev);
37669 if (reason & uPD98402_INT_PCO) {
37670 (void) GET(PCOCR); /* clear interrupt cause */
37671- atomic_add(GET(HECCT),
37672+ atomic_add_unchecked(GET(HECCT),
37673 &PRIV(dev)->sonet_stats.uncorr_hcs);
37674 }
37675 if ((reason & uPD98402_INT_RFO) &&
37676@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37677 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37678 uPD98402_INT_LOS),PIMR); /* enable them */
37679 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37680- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37681- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37682- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37683+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37684+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37685+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37686 return 0;
37687 }
37688
37689diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37690index cecfb94..87009ec 100644
37691--- a/drivers/atm/zatm.c
37692+++ b/drivers/atm/zatm.c
37693@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37694 }
37695 if (!size) {
37696 dev_kfree_skb_irq(skb);
37697- if (vcc) atomic_inc(&vcc->stats->rx_err);
37698+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37699 continue;
37700 }
37701 if (!atm_charge(vcc,skb->truesize)) {
37702@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37703 skb->len = size;
37704 ATM_SKB(skb)->vcc = vcc;
37705 vcc->push(vcc,skb);
37706- atomic_inc(&vcc->stats->rx);
37707+ atomic_inc_unchecked(&vcc->stats->rx);
37708 }
37709 zout(pos & 0xffff,MTA(mbx));
37710 #if 0 /* probably a stupid idea */
37711@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37712 skb_queue_head(&zatm_vcc->backlog,skb);
37713 break;
37714 }
37715- atomic_inc(&vcc->stats->tx);
37716+ atomic_inc_unchecked(&vcc->stats->tx);
37717 wake_up(&zatm_vcc->tx_wait);
37718 }
37719
37720diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37721index 876bae5..8978785 100644
37722--- a/drivers/base/bus.c
37723+++ b/drivers/base/bus.c
37724@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37725 return -EINVAL;
37726
37727 mutex_lock(&subsys->p->mutex);
37728- list_add_tail(&sif->node, &subsys->p->interfaces);
37729+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37730 if (sif->add_dev) {
37731 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37732 while ((dev = subsys_dev_iter_next(&iter)))
37733@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37734 subsys = sif->subsys;
37735
37736 mutex_lock(&subsys->p->mutex);
37737- list_del_init(&sif->node);
37738+ pax_list_del_init((struct list_head *)&sif->node);
37739 if (sif->remove_dev) {
37740 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37741 while ((dev = subsys_dev_iter_next(&iter)))
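
[annotation] The pax_list_* helpers used here (and again in base/syscore.c below) exist because, once structures such as struct subsys_interface and struct syscore_ops are constified, their embedded list_head lives in read-only memory and a plain list_add_tail() would fault. The helpers do the usual pointer surgery inside a window that temporarily lifts kernel write protection. A simplified sketch, not the verbatim PaX implementation:

void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	struct list_head *prev = head->prev;

	/* the stores below may target read-only (constified) objects */
	pax_open_kernel();
	new->next = head;
	new->prev = prev;
	prev->next = new;
	head->prev = new;
	pax_close_kernel();
}
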
37742diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37743index 25798db..15f130e 100644
37744--- a/drivers/base/devtmpfs.c
37745+++ b/drivers/base/devtmpfs.c
37746@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37747 if (!thread)
37748 return 0;
37749
37750- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37751+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37752 if (err)
37753 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37754 else
37755@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37756 *err = sys_unshare(CLONE_NEWNS);
37757 if (*err)
37758 goto out;
37759- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37760+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37761 if (*err)
37762 goto out;
37763- sys_chdir("/.."); /* will traverse into overmounted root */
37764- sys_chroot(".");
37765+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37766+ sys_chroot((char __force_user *)".");
37767 complete(&setup_done);
37768 while (1) {
37769 spin_lock(&req_lock);
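
[annotation] sys_mount()/sys_chdir()/sys_chroot() are declared to take __user pointers, and the grsecurity patch tightens the user/kernel pointer split (sparse address spaces at build time, UDEREF at runtime), so in-kernel callers handing them kernel strings now need an explicit cast. __force_user is shorthand for that cast; a sketch of the sparse-visible definitions, which compile away in normal builds (the __force_user spelling is grsecurity's convenience macro):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* "I really mean to hand this kernel pointer to a __user interface" */
#define __force_user	__force __user
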
37770diff --git a/drivers/base/node.c b/drivers/base/node.c
37771index 36fabe43..8cfc112 100644
37772--- a/drivers/base/node.c
37773+++ b/drivers/base/node.c
37774@@ -615,7 +615,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37775 struct node_attr {
37776 struct device_attribute attr;
37777 enum node_states state;
37778-};
37779+} __do_const;
37780
37781 static ssize_t show_node_state(struct device *dev,
37782 struct device_attribute *attr, char *buf)
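
[annotation] __do_const is an annotation consumed by the PaX constify gcc plugin: structures made up purely of function pointers are constified automatically, while mixed ones such as node_attr (an attribute with ops plus an enum) must be tagged by hand so that every instance lands in read-only memory. Approximately — the attribute name here is recalled from the plugin, not quoted from this patch:

#ifdef CONSTIFY_PLUGIN
/* the plugin recognizes this attribute and forces every instance of the
 * marked type to be const, moving it out of writable memory */
# define __do_const	__attribute__((do_const))
#else
# define __do_const
#endif
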
37783diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37784index 45937f8..b9a342e 100644
37785--- a/drivers/base/power/domain.c
37786+++ b/drivers/base/power/domain.c
37787@@ -1698,7 +1698,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37788 {
37789 struct cpuidle_driver *cpuidle_drv;
37790 struct gpd_cpuidle_data *cpuidle_data;
37791- struct cpuidle_state *idle_state;
37792+ cpuidle_state_no_const *idle_state;
37793 int ret = 0;
37794
37795 if (IS_ERR_OR_NULL(genpd) || state < 0)
37796@@ -1766,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37797 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37798 {
37799 struct gpd_cpuidle_data *cpuidle_data;
37800- struct cpuidle_state *idle_state;
37801+ cpuidle_state_no_const *idle_state;
37802 int ret = 0;
37803
37804 if (IS_ERR_OR_NULL(genpd))
37805@@ -2195,7 +2195,10 @@ int genpd_dev_pm_attach(struct device *dev)
37806 return ret;
37807 }
37808
37809- dev->pm_domain->detach = genpd_dev_pm_detach;
37810+ pax_open_kernel();
37811+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37812+ pax_close_kernel();
37813+
37814 pm_genpd_poweron(pd);
37815
37816 return 0;
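
[annotation] dev->pm_domain is constified, so its detach hook can no longer be assigned with an ordinary store: the write is wrapped in pax_open_kernel()/pax_close_kernel(), and the *(void **)& cast strips the const the plugin injected. On x86 the open/close pair boils down to toggling the CR0 write-protect bit with preemption held off; roughly, omitting arch plumbing:

static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: ring 0 may write r/o pages */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;
}

static inline unsigned long pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* set WP back */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}
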
37817diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37818index d2be3f9..0a3167a 100644
37819--- a/drivers/base/power/sysfs.c
37820+++ b/drivers/base/power/sysfs.c
37821@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37822 return -EIO;
37823 }
37824 }
37825- return sprintf(buf, p);
37826+ return sprintf(buf, "%s", p);
37827 }
37828
37829 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
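
[annotation] This one is a plain format-string bug fix rather than a hardening annotation: sprintf(buf, p) lets any '%' in p be interpreted as a conversion, reading (or, with %n, writing) through varargs that were never passed; pinning the format to "%s" copies p verbatim. In miniature, with a hypothetical string:

#include <stdio.h>

int main(void)
{
	char buf[64];
	const char *p = "status %s";	/* imagine attacker-shaped data */

	/* sprintf(buf, p); would parse "%s" and consume a vararg that was
	 * never supplied: undefined behavior */
	sprintf(buf, "%s", p);		/* safe: p is treated as data */
	printf("%s\n", buf);
	return 0;
}
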
37830diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37831index aab7158..b172db2 100644
37832--- a/drivers/base/power/wakeup.c
37833+++ b/drivers/base/power/wakeup.c
37834@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37835 * They need to be modified together atomically, so it's better to use one
37836 * atomic variable to hold them both.
37837 */
37838-static atomic_t combined_event_count = ATOMIC_INIT(0);
37839+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37840
37841 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37842 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37843
37844 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37845 {
37846- unsigned int comb = atomic_read(&combined_event_count);
37847+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37848
37849 *cnt = (comb >> IN_PROGRESS_BITS);
37850 *inpr = comb & MAX_IN_PROGRESS;
37851@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37852 ws->start_prevent_time = ws->last_time;
37853
37854 /* Increment the counter of events in progress. */
37855- cec = atomic_inc_return(&combined_event_count);
37856+ cec = atomic_inc_return_unchecked(&combined_event_count);
37857
37858 trace_wakeup_source_activate(ws->name, cec);
37859 }
37860@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37861 * Increment the counter of registered wakeup events and decrement the
37862 * couter of wakeup events in progress simultaneously.
37863 */
37864- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37865+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37866 trace_wakeup_source_deactivate(ws->name, cec);
37867
37868 split_counters(&cnt, &inpr);
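
[annotation] combined_event_count packs two values into one atomic word: the in-progress count in the low sizeof(int)*4 bits and the total registered-event count above it. That packing is what makes the single atomic_add_return(MAX_IN_PROGRESS, ...) work: adding 2^16 - 1 (for a 32-bit int) equals +2^16 in the high half and -1 in the low half, so "one event finished" updates both counters in one atomic operation — which is also why the counter only needs the _unchecked flavor. A userspace rehearsal of the arithmetic, assuming 32-bit int:

#include <stdio.h>

#define IN_PROGRESS_BITS	16
#define MAX_IN_PROGRESS		((1u << IN_PROGRESS_BITS) - 1)

int main(void)
{
	unsigned int comb = 0;

	comb += 1;			/* activate: inpr 0 -> 1 */
	comb += MAX_IN_PROGRESS;	/* deactivate: cnt 0 -> 1, inpr 1 -> 0 */
	printf("cnt=%u inpr=%u\n",
	       comb >> IN_PROGRESS_BITS, comb & MAX_IN_PROGRESS);
	/* prints: cnt=1 inpr=0 */
	return 0;
}
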
37869diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37870index 8d98a32..61d3165 100644
37871--- a/drivers/base/syscore.c
37872+++ b/drivers/base/syscore.c
37873@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37874 void register_syscore_ops(struct syscore_ops *ops)
37875 {
37876 mutex_lock(&syscore_ops_lock);
37877- list_add_tail(&ops->node, &syscore_ops_list);
37878+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37879 mutex_unlock(&syscore_ops_lock);
37880 }
37881 EXPORT_SYMBOL_GPL(register_syscore_ops);
37882@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37883 void unregister_syscore_ops(struct syscore_ops *ops)
37884 {
37885 mutex_lock(&syscore_ops_lock);
37886- list_del(&ops->node);
37887+ pax_list_del((struct list_head *)&ops->node);
37888 mutex_unlock(&syscore_ops_lock);
37889 }
37890 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37891diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37892index ff20f19..018f1da 100644
37893--- a/drivers/block/cciss.c
37894+++ b/drivers/block/cciss.c
37895@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37896 while (!list_empty(&h->reqQ)) {
37897 c = list_entry(h->reqQ.next, CommandList_struct, list);
37898 /* can't do anything if fifo is full */
37899- if ((h->access.fifo_full(h))) {
37900+ if ((h->access->fifo_full(h))) {
37901 dev_warn(&h->pdev->dev, "fifo full\n");
37902 break;
37903 }
37904@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37905 h->Qdepth--;
37906
37907 /* Tell the controller execute command */
37908- h->access.submit_command(h, c);
37909+ h->access->submit_command(h, c);
37910
37911 /* Put job onto the completed Q */
37912 addQ(&h->cmpQ, c);
37913@@ -3444,17 +3444,17 @@ startio:
37914
37915 static inline unsigned long get_next_completion(ctlr_info_t *h)
37916 {
37917- return h->access.command_completed(h);
37918+ return h->access->command_completed(h);
37919 }
37920
37921 static inline int interrupt_pending(ctlr_info_t *h)
37922 {
37923- return h->access.intr_pending(h);
37924+ return h->access->intr_pending(h);
37925 }
37926
37927 static inline long interrupt_not_for_us(ctlr_info_t *h)
37928 {
37929- return ((h->access.intr_pending(h) == 0) ||
37930+ return ((h->access->intr_pending(h) == 0) ||
37931 (h->interrupts_enabled == 0));
37932 }
37933
37934@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37935 u32 a;
37936
37937 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37938- return h->access.command_completed(h);
37939+ return h->access->command_completed(h);
37940
37941 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37942 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37943@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37944 trans_support & CFGTBL_Trans_use_short_tags);
37945
37946 /* Change the access methods to the performant access methods */
37947- h->access = SA5_performant_access;
37948+ h->access = &SA5_performant_access;
37949 h->transMethod = CFGTBL_Trans_Performant;
37950
37951 return;
37952@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37953 if (prod_index < 0)
37954 return -ENODEV;
37955 h->product_name = products[prod_index].product_name;
37956- h->access = *(products[prod_index].access);
37957+ h->access = products[prod_index].access;
37958
37959 if (cciss_board_disabled(h)) {
37960 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37961@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37962 }
37963
37964 /* make sure the board interrupts are off */
37965- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37966+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37967 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37968 if (rc)
37969 goto clean2;
37970@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37971 * fake ones to scoop up any residual completions.
37972 */
37973 spin_lock_irqsave(&h->lock, flags);
37974- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37975+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37976 spin_unlock_irqrestore(&h->lock, flags);
37977 free_irq(h->intr[h->intr_mode], h);
37978 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37979@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37980 dev_info(&h->pdev->dev, "Board READY.\n");
37981 dev_info(&h->pdev->dev,
37982 "Waiting for stale completions to drain.\n");
37983- h->access.set_intr_mask(h, CCISS_INTR_ON);
37984+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37985 msleep(10000);
37986- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37987+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37988
37989 rc = controller_reset_failed(h->cfgtable);
37990 if (rc)
37991@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37992 cciss_scsi_setup(h);
37993
37994 /* Turn the interrupts on so we can service requests */
37995- h->access.set_intr_mask(h, CCISS_INTR_ON);
37996+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37997
37998 /* Get the firmware version */
37999 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38000@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38001 kfree(flush_buf);
38002 if (return_code != IO_OK)
38003 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38004- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38005+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38006 free_irq(h->intr[h->intr_mode], h);
38007 }
38008
38009diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38010index 7fda30e..2f27946 100644
38011--- a/drivers/block/cciss.h
38012+++ b/drivers/block/cciss.h
38013@@ -101,7 +101,7 @@ struct ctlr_info
38014 /* information about each logical volume */
38015 drive_info_struct *drv[CISS_MAX_LUN];
38016
38017- struct access_method access;
38018+ struct access_method *access;
38019
38020 /* queue and queue Info */
38021 struct list_head reqQ;
38022@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38023 }
38024
38025 static struct access_method SA5_access = {
38026- SA5_submit_command,
38027- SA5_intr_mask,
38028- SA5_fifo_full,
38029- SA5_intr_pending,
38030- SA5_completed,
38031+ .submit_command = SA5_submit_command,
38032+ .set_intr_mask = SA5_intr_mask,
38033+ .fifo_full = SA5_fifo_full,
38034+ .intr_pending = SA5_intr_pending,
38035+ .command_completed = SA5_completed,
38036 };
38037
38038 static struct access_method SA5B_access = {
38039- SA5_submit_command,
38040- SA5B_intr_mask,
38041- SA5_fifo_full,
38042- SA5B_intr_pending,
38043- SA5_completed,
38044+ .submit_command = SA5_submit_command,
38045+ .set_intr_mask = SA5B_intr_mask,
38046+ .fifo_full = SA5_fifo_full,
38047+ .intr_pending = SA5B_intr_pending,
38048+ .command_completed = SA5_completed,
38049 };
38050
38051 static struct access_method SA5_performant_access = {
38052- SA5_submit_command,
38053- SA5_performant_intr_mask,
38054- SA5_fifo_full,
38055- SA5_performant_intr_pending,
38056- SA5_performant_completed,
38057+ .submit_command = SA5_submit_command,
38058+ .set_intr_mask = SA5_performant_intr_mask,
38059+ .fifo_full = SA5_fifo_full,
38060+ .intr_pending = SA5_performant_intr_pending,
38061+ .command_completed = SA5_performant_completed,
38062 };
38063
38064 struct board_type {
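
[annotation] The cciss change (and cpqarray, next) is the standard recipe for letting the constify plugin take an ops table read-only: stop copying the access_method by value into each writable ctlr_info, keep a pointer to the shared static table instead, and convert every h->access.fn() call site to h->access->fn(); the positional initializers become designated ones at the same time, which also guards against field reordering. A minimal compilable illustration of the pattern, with invented names:

#include <stdio.h>

struct ops {
	void (*submit)(int arg);
};

static void real_submit(int arg) { printf("submit %d\n", arg); }

/* one shared, read-only method table — what the constify plugin wants */
static const struct ops sa5_ops = { .submit = real_submit };

struct ctlr {
	const struct ops *access;	/* pointer, not an embedded copy */
};

int main(void)
{
	struct ctlr h = { .access = &sa5_ops };

	h.access->submit(42);	/* was: h.access.submit(42) on a writable copy */
	return 0;
}
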
38065diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38066index 2b94403..fd6ad1f 100644
38067--- a/drivers/block/cpqarray.c
38068+++ b/drivers/block/cpqarray.c
38069@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38070 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38071 goto Enomem4;
38072 }
38073- hba[i]->access.set_intr_mask(hba[i], 0);
38074+ hba[i]->access->set_intr_mask(hba[i], 0);
38075 if (request_irq(hba[i]->intr, do_ida_intr,
38076 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38077 {
38078@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38079 add_timer(&hba[i]->timer);
38080
38081 /* Enable IRQ now that spinlock and rate limit timer are set up */
38082- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38083+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38084
38085 for(j=0; j<NWD; j++) {
38086 struct gendisk *disk = ida_gendisk[i][j];
38087@@ -694,7 +694,7 @@ DBGINFO(
38088 for(i=0; i<NR_PRODUCTS; i++) {
38089 if (board_id == products[i].board_id) {
38090 c->product_name = products[i].product_name;
38091- c->access = *(products[i].access);
38092+ c->access = products[i].access;
38093 break;
38094 }
38095 }
38096@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38097 hba[ctlr]->intr = intr;
38098 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38099 hba[ctlr]->product_name = products[j].product_name;
38100- hba[ctlr]->access = *(products[j].access);
38101+ hba[ctlr]->access = products[j].access;
38102 hba[ctlr]->ctlr = ctlr;
38103 hba[ctlr]->board_id = board_id;
38104 hba[ctlr]->pci_dev = NULL; /* not PCI */
38105@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38106
38107 while((c = h->reqQ) != NULL) {
38108 /* Can't do anything if we're busy */
38109- if (h->access.fifo_full(h) == 0)
38110+ if (h->access->fifo_full(h) == 0)
38111 return;
38112
38113 /* Get the first entry from the request Q */
38114@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38115 h->Qdepth--;
38116
38117 /* Tell the controller to do our bidding */
38118- h->access.submit_command(h, c);
38119+ h->access->submit_command(h, c);
38120
38121 /* Get onto the completion Q */
38122 addQ(&h->cmpQ, c);
38123@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38124 unsigned long flags;
38125 __u32 a,a1;
38126
38127- istat = h->access.intr_pending(h);
38128+ istat = h->access->intr_pending(h);
38129 /* Is this interrupt for us? */
38130 if (istat == 0)
38131 return IRQ_NONE;
38132@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38133 */
38134 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38135 if (istat & FIFO_NOT_EMPTY) {
38136- while((a = h->access.command_completed(h))) {
38137+ while((a = h->access->command_completed(h))) {
38138 a1 = a; a &= ~3;
38139 if ((c = h->cmpQ) == NULL)
38140 {
38141@@ -1448,11 +1448,11 @@ static int sendcmd(
38142 /*
38143 * Disable interrupt
38144 */
38145- info_p->access.set_intr_mask(info_p, 0);
38146+ info_p->access->set_intr_mask(info_p, 0);
38147 /* Make sure there is room in the command FIFO */
38148 /* Actually it should be completely empty at this time. */
38149 for (i = 200000; i > 0; i--) {
38150- temp = info_p->access.fifo_full(info_p);
38151+ temp = info_p->access->fifo_full(info_p);
38152 if (temp != 0) {
38153 break;
38154 }
38155@@ -1465,7 +1465,7 @@ DBG(
38156 /*
38157 * Send the cmd
38158 */
38159- info_p->access.submit_command(info_p, c);
38160+ info_p->access->submit_command(info_p, c);
38161 complete = pollcomplete(ctlr);
38162
38163 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38164@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38165 * we check the new geometry. Then turn interrupts back on when
38166 * we're done.
38167 */
38168- host->access.set_intr_mask(host, 0);
38169+ host->access->set_intr_mask(host, 0);
38170 getgeometry(ctlr);
38171- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38172+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38173
38174 for(i=0; i<NWD; i++) {
38175 struct gendisk *disk = ida_gendisk[ctlr][i];
38176@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38177 /* Wait (up to 2 seconds) for a command to complete */
38178
38179 for (i = 200000; i > 0; i--) {
38180- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38181+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38182 if (done == 0) {
38183 udelay(10); /* a short fixed delay */
38184 } else
38185diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38186index be73e9d..7fbf140 100644
38187--- a/drivers/block/cpqarray.h
38188+++ b/drivers/block/cpqarray.h
38189@@ -99,7 +99,7 @@ struct ctlr_info {
38190 drv_info_t drv[NWD];
38191 struct proc_dir_entry *proc;
38192
38193- struct access_method access;
38194+ struct access_method *access;
38195
38196 cmdlist_t *reqQ;
38197 cmdlist_t *cmpQ;
38198diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38199index 434c77d..6d3219a 100644
38200--- a/drivers/block/drbd/drbd_bitmap.c
38201+++ b/drivers/block/drbd/drbd_bitmap.c
38202@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38203 submit_bio(rw, bio);
38204 /* this should not count as user activity and cause the
38205 * resync to throttle -- see drbd_rs_should_slow_down(). */
38206- atomic_add(len >> 9, &device->rs_sect_ev);
38207+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38208 }
38209 }
38210
38211diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38212index b905e98..0812ed8 100644
38213--- a/drivers/block/drbd/drbd_int.h
38214+++ b/drivers/block/drbd/drbd_int.h
38215@@ -385,7 +385,7 @@ struct drbd_epoch {
38216 struct drbd_connection *connection;
38217 struct list_head list;
38218 unsigned int barrier_nr;
38219- atomic_t epoch_size; /* increased on every request added. */
38220+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38221 atomic_t active; /* increased on every req. added, and dec on every finished. */
38222 unsigned long flags;
38223 };
38224@@ -946,7 +946,7 @@ struct drbd_device {
38225 unsigned int al_tr_number;
38226 int al_tr_cycle;
38227 wait_queue_head_t seq_wait;
38228- atomic_t packet_seq;
38229+ atomic_unchecked_t packet_seq;
38230 unsigned int peer_seq;
38231 spinlock_t peer_seq_lock;
38232 unsigned long comm_bm_set; /* communicated number of set bits. */
38233@@ -955,8 +955,8 @@ struct drbd_device {
38234 struct mutex own_state_mutex;
38235 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38236 char congestion_reason; /* Why we where congested... */
38237- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38238- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38239+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38240+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38241 int rs_last_sect_ev; /* counter to compare with */
38242 int rs_last_events; /* counter of read or write "events" (unit sectors)
38243 * on the lower level device when we last looked. */
38244diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38245index 1fc8342..7e7742b 100644
38246--- a/drivers/block/drbd/drbd_main.c
38247+++ b/drivers/block/drbd/drbd_main.c
38248@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38249 p->sector = sector;
38250 p->block_id = block_id;
38251 p->blksize = blksize;
38252- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38253+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38254 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38255 }
38256
38257@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38258 return -EIO;
38259 p->sector = cpu_to_be64(req->i.sector);
38260 p->block_id = (unsigned long)req;
38261- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38262+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38263 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38264 if (device->state.conn >= C_SYNC_SOURCE &&
38265 device->state.conn <= C_PAUSED_SYNC_T)
38266@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38267 atomic_set(&device->unacked_cnt, 0);
38268 atomic_set(&device->local_cnt, 0);
38269 atomic_set(&device->pp_in_use_by_net, 0);
38270- atomic_set(&device->rs_sect_in, 0);
38271- atomic_set(&device->rs_sect_ev, 0);
38272+ atomic_set_unchecked(&device->rs_sect_in, 0);
38273+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38274 atomic_set(&device->ap_in_flight, 0);
38275 atomic_set(&device->md_io.in_use, 0);
38276
38277@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38278 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38279 struct drbd_resource *resource = connection->resource;
38280
38281- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38282- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38283+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38284+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38285 kfree(connection->current_epoch);
38286
38287 idr_destroy(&connection->peer_devices);
38288diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38289index 74df8cf..e41fc24 100644
38290--- a/drivers/block/drbd/drbd_nl.c
38291+++ b/drivers/block/drbd/drbd_nl.c
38292@@ -3637,13 +3637,13 @@ finish:
38293
38294 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38295 {
38296- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38297+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38298 struct sk_buff *msg;
38299 struct drbd_genlmsghdr *d_out;
38300 unsigned seq;
38301 int err = -ENOMEM;
38302
38303- seq = atomic_inc_return(&drbd_genl_seq);
38304+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38305 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38306 if (!msg)
38307 goto failed;
38308diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38309index cee2035..22f66bd 100644
38310--- a/drivers/block/drbd/drbd_receiver.c
38311+++ b/drivers/block/drbd/drbd_receiver.c
38312@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38313 struct drbd_device *device = peer_device->device;
38314 int err;
38315
38316- atomic_set(&device->packet_seq, 0);
38317+ atomic_set_unchecked(&device->packet_seq, 0);
38318 device->peer_seq = 0;
38319
38320 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38321@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38322 do {
38323 next_epoch = NULL;
38324
38325- epoch_size = atomic_read(&epoch->epoch_size);
38326+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38327
38328 switch (ev & ~EV_CLEANUP) {
38329 case EV_PUT:
38330@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38331 rv = FE_DESTROYED;
38332 } else {
38333 epoch->flags = 0;
38334- atomic_set(&epoch->epoch_size, 0);
38335+ atomic_set_unchecked(&epoch->epoch_size, 0);
38336 /* atomic_set(&epoch->active, 0); is already zero */
38337 if (rv == FE_STILL_LIVE)
38338 rv = FE_RECYCLED;
38339@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38340 conn_wait_active_ee_empty(connection);
38341 drbd_flush(connection);
38342
38343- if (atomic_read(&connection->current_epoch->epoch_size)) {
38344+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38345 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38346 if (epoch)
38347 break;
38348@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38349 }
38350
38351 epoch->flags = 0;
38352- atomic_set(&epoch->epoch_size, 0);
38353+ atomic_set_unchecked(&epoch->epoch_size, 0);
38354 atomic_set(&epoch->active, 0);
38355
38356 spin_lock(&connection->epoch_lock);
38357- if (atomic_read(&connection->current_epoch->epoch_size)) {
38358+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38359 list_add(&epoch->list, &connection->current_epoch->list);
38360 connection->current_epoch = epoch;
38361 connection->epochs++;
38362@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38363 list_add_tail(&peer_req->w.list, &device->sync_ee);
38364 spin_unlock_irq(&device->resource->req_lock);
38365
38366- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38367+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38368 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38369 return 0;
38370
38371@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38372 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38373 }
38374
38375- atomic_add(pi->size >> 9, &device->rs_sect_in);
38376+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38377
38378 return err;
38379 }
38380@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38381
38382 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38383 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38384- atomic_inc(&connection->current_epoch->epoch_size);
38385+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38386 err2 = drbd_drain_block(peer_device, pi->size);
38387 if (!err)
38388 err = err2;
38389@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38390
38391 spin_lock(&connection->epoch_lock);
38392 peer_req->epoch = connection->current_epoch;
38393- atomic_inc(&peer_req->epoch->epoch_size);
38394+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38395 atomic_inc(&peer_req->epoch->active);
38396 spin_unlock(&connection->epoch_lock);
38397
38398@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38399
38400 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38401 (int)part_stat_read(&disk->part0, sectors[1]) -
38402- atomic_read(&device->rs_sect_ev);
38403+ atomic_read_unchecked(&device->rs_sect_ev);
38404
38405 if (atomic_read(&device->ap_actlog_cnt)
38406 || curr_events - device->rs_last_events > 64) {
38407@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38408 device->use_csums = true;
38409 } else if (pi->cmd == P_OV_REPLY) {
38410 /* track progress, we may need to throttle */
38411- atomic_add(size >> 9, &device->rs_sect_in);
38412+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38413 peer_req->w.cb = w_e_end_ov_reply;
38414 dec_rs_pending(device);
38415 /* drbd_rs_begin_io done when we sent this request,
38416@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38417 goto out_free_e;
38418
38419 submit_for_resync:
38420- atomic_add(size >> 9, &device->rs_sect_ev);
38421+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38422
38423 submit:
38424 update_receiver_timing_details(connection, drbd_submit_peer_request);
38425@@ -4564,7 +4564,7 @@ struct data_cmd {
38426 int expect_payload;
38427 size_t pkt_size;
38428 int (*fn)(struct drbd_connection *, struct packet_info *);
38429-};
38430+} __do_const;
38431
38432 static struct data_cmd drbd_cmd_handler[] = {
38433 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38434@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38435 if (!list_empty(&connection->current_epoch->list))
38436 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38437 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38438- atomic_set(&connection->current_epoch->epoch_size, 0);
38439+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38440 connection->send.seen_any_write_yet = false;
38441
38442 drbd_info(connection, "Connection closed\n");
38443@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38444 put_ldev(device);
38445 }
38446 dec_rs_pending(device);
38447- atomic_add(blksize >> 9, &device->rs_sect_in);
38448+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38449
38450 return 0;
38451 }
38452@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38453 struct asender_cmd {
38454 size_t pkt_size;
38455 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38456-};
38457+} __do_const;
38458
38459 static struct asender_cmd asender_tbl[] = {
38460 [P_PING] = { 0, got_Ping },
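
Note: tagging data_cmd and asender_cmd with __do_const feeds PaX's constify gcc plugin. A structure containing only function pointers and constants is forced const everywhere, so dispatch tables like drbd_cmd_handler[] and asender_tbl[] land in read-only memory and cannot be retargeted by a kernel write primitive. A plausible shape of the annotation (the real definition lives in the grsecurity compiler headers; this is an assumption, not a quote):

    /* Assumed wiring: a plugin-recognised attribute when the constify
     * plugin is active, nothing otherwise. */
    #ifdef CONSTIFY_PLUGIN
    #define __do_const __attribute__((do_const))
    #else
    #define __do_const
    #endif
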
38461diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38462index d0fae55..4469096 100644
38463--- a/drivers/block/drbd/drbd_worker.c
38464+++ b/drivers/block/drbd/drbd_worker.c
38465@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38466 list_add_tail(&peer_req->w.list, &device->read_ee);
38467 spin_unlock_irq(&device->resource->req_lock);
38468
38469- atomic_add(size >> 9, &device->rs_sect_ev);
38470+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38471 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38472 return 0;
38473
38474@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38475 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38476 int number, mxb;
38477
38478- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38479+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38480 device->rs_in_flight -= sect_in;
38481
38482 rcu_read_lock();
38483@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38484 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38485 struct fifo_buffer *plan;
38486
38487- atomic_set(&device->rs_sect_in, 0);
38488- atomic_set(&device->rs_sect_ev, 0);
38489+ atomic_set_unchecked(&device->rs_sect_in, 0);
38490+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38491 device->rs_in_flight = 0;
38492 device->rs_last_events =
38493 (int)part_stat_read(&disk->part0, sectors[0]) +
38494diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38495index d1f168b..8f8cc52 100644
38496--- a/drivers/block/loop.c
38497+++ b/drivers/block/loop.c
38498@@ -234,7 +234,7 @@ static int __do_lo_send_write(struct file *file,
38499
38500 file_start_write(file);
38501 set_fs(get_ds());
38502- bw = file->f_op->write(file, buf, len, &pos);
38503+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38504 set_fs(old_fs);
38505 file_end_write(file);
38506 if (likely(bw == len))
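
Note: loop.c pushes a kernel buffer through file->f_op->write() while set_fs(get_ds()) temporarily widens the usercopy window, so a kernel pointer is deliberately passed where a __user pointer is expected. With PaX's stricter user/kernel pointer separation that crossing must be spelled out; in the grsecurity tree the cast macros are essentially:

    /* Silence the address-space checker while keeping the deliberate
     * kernel<->user crossing greppable. */
    #define __force_user   __force __user
    #define __force_kernel __force __kernel

The same __force_user/__force_kernel casts recur in the agp, mem.c and virtio_console hunks further down.
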
38507diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38508index 09e628da..7607aaa 100644
38509--- a/drivers/block/pktcdvd.c
38510+++ b/drivers/block/pktcdvd.c
38511@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38512
38513 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38514 {
38515- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38516+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38517 }
38518
38519 /*
38520@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38521 return -EROFS;
38522 }
38523 pd->settings.fp = ti.fp;
38524- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38525+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38526
38527 if (ti.nwa_v) {
38528 pd->nwa = be32_to_cpu(ti.next_writable);
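
Note: writing pd->settings.size - 1UL instead of - 1 promotes the mask arithmetic to unsigned long before the sector_t cast. For sane power-of-two zone sizes the result is identical; the suffix appears intended to keep the size_overflow plugin from flagging the narrow subtraction. The computation itself, with concrete numbers:

    /* settings.size is the packet zone length in sectors (a power of
     * two). Example with size = 2048 (0x800) and offset = 0:
     *   mask = ~(sector_t)(2048 - 1UL) = ~0x7ff
     *   get_zone(5000) = 5000 & ~0x7ff = 4096, the start of its zone. */
    sector_t zone = (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
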
38529diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38530index b40af32..5fa825d 100644
38531--- a/drivers/block/rbd.c
38532+++ b/drivers/block/rbd.c
38533@@ -64,7 +64,7 @@
38534 * If the counter is already at its maximum value returns
38535 * -EINVAL without updating it.
38536 */
38537-static int atomic_inc_return_safe(atomic_t *v)
38538+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38539 {
38540 unsigned int counter;
38541
38542diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38543index e5565fb..71be10b4 100644
38544--- a/drivers/block/smart1,2.h
38545+++ b/drivers/block/smart1,2.h
38546@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38547 }
38548
38549 static struct access_method smart4_access = {
38550- smart4_submit_command,
38551- smart4_intr_mask,
38552- smart4_fifo_full,
38553- smart4_intr_pending,
38554- smart4_completed,
38555+ .submit_command = smart4_submit_command,
38556+ .set_intr_mask = smart4_intr_mask,
38557+ .fifo_full = smart4_fifo_full,
38558+ .intr_pending = smart4_intr_pending,
38559+ .command_completed = smart4_completed,
38560 };
38561
38562 /*
38563@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38564 }
38565
38566 static struct access_method smart2_access = {
38567- smart2_submit_command,
38568- smart2_intr_mask,
38569- smart2_fifo_full,
38570- smart2_intr_pending,
38571- smart2_completed,
38572+ .submit_command = smart2_submit_command,
38573+ .set_intr_mask = smart2_intr_mask,
38574+ .fifo_full = smart2_fifo_full,
38575+ .intr_pending = smart2_intr_pending,
38576+ .command_completed = smart2_completed,
38577 };
38578
38579 /*
38580@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38581 }
38582
38583 static struct access_method smart2e_access = {
38584- smart2e_submit_command,
38585- smart2e_intr_mask,
38586- smart2e_fifo_full,
38587- smart2e_intr_pending,
38588- smart2e_completed,
38589+ .submit_command = smart2e_submit_command,
38590+ .set_intr_mask = smart2e_intr_mask,
38591+ .fifo_full = smart2e_fifo_full,
38592+ .intr_pending = smart2e_intr_pending,
38593+ .command_completed = smart2e_completed,
38594 };
38595
38596 /*
38597@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38598 }
38599
38600 static struct access_method smart1_access = {
38601- smart1_submit_command,
38602- smart1_intr_mask,
38603- smart1_fifo_full,
38604- smart1_intr_pending,
38605- smart1_completed,
38606+ .submit_command = smart1_submit_command,
38607+ .set_intr_mask = smart1_intr_mask,
38608+ .fifo_full = smart1_fifo_full,
38609+ .intr_pending = smart1_intr_pending,
38610+ .command_completed = smart1_completed,
38611 };
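
Note: converting the smart*_access tables from positional to designated initializers changes nothing at runtime, but the table can no longer be silently mis-wired if struct access_method gains or reorders a member, and the constify tooling gets named slots to reason about. Assuming a declaration along the lines of the cpqarray one:

    struct access_method {
        void (*submit_command)(ctlr_info_t *h, cmdlist_t *c);
        void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
        unsigned long (*fifo_full)(ctlr_info_t *h);
        unsigned long (*intr_pending)(ctlr_info_t *h);
        unsigned long (*command_completed)(ctlr_info_t *h);
    };

a positional initializer keeps compiling but calls the wrong function if the fields are ever reordered; the designated form stays correct by construction.
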
38612diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38613index 55c135b..9f8d60c 100644
38614--- a/drivers/bluetooth/btwilink.c
38615+++ b/drivers/bluetooth/btwilink.c
38616@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38617
38618 static int bt_ti_probe(struct platform_device *pdev)
38619 {
38620- static struct ti_st *hst;
38621+ struct ti_st *hst;
38622 struct hci_dev *hdev;
38623 int err;
38624
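
Note: the btwilink change fixes a plain bug rather than hardening anything. hst was declared static inside the probe routine, so every probed device shared, and overwrote, the same pointer. Reduced to its essence:

    static int probe_one(struct platform_device *pdev)
    {
        static struct ti_st *hst;   /* BUG: one copy shared by all probes */

        hst = kzalloc(sizeof(*hst), GFP_KERNEL);
        if (!hst)
            return -ENOMEM;
        /* a second probe has just clobbered the first device's pointer */
        return 0;
    }

Dropping the static keyword makes hst an ordinary per-call local again.
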
38625diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38626index 5d28a45..a538f90 100644
38627--- a/drivers/cdrom/cdrom.c
38628+++ b/drivers/cdrom/cdrom.c
38629@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38630 ENSURE(reset, CDC_RESET);
38631 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38632 cdi->mc_flags = 0;
38633- cdo->n_minors = 0;
38634 cdi->options = CDO_USE_FFLAGS;
38635
38636 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38637@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38638 else
38639 cdi->cdda_method = CDDA_OLD;
38640
38641- if (!cdo->generic_packet)
38642- cdo->generic_packet = cdrom_dummy_generic_packet;
38643+ if (!cdo->generic_packet) {
38644+ pax_open_kernel();
38645+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38646+ pax_close_kernel();
38647+ }
38648
38649 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38650 mutex_lock(&cdrom_mutex);
38651@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38652 if (cdi->exit)
38653 cdi->exit(cdi);
38654
38655- cdi->ops->n_minors--;
38656 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38657 }
38658
38659@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38660 */
38661 nr = nframes;
38662 do {
38663- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38664+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38665 if (cgc.buffer)
38666 break;
38667
38668@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38669 struct cdrom_device_info *cdi;
38670 int ret;
38671
38672- ret = scnprintf(info + *pos, max_size - *pos, header);
38673+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38674 if (!ret)
38675 return 1;
38676
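
Note: two distinct fixes sit in the cdrom hunks. The scnprintf() change is a classic format-string repair, since header is data, not a format, and any '%' in it would previously have been interpreted:

    scnprintf(buf, len, header);        /* unsafe if header contains '%' */
    scnprintf(buf, len, "%s", header);  /* safe: header is plain data    */

The generic_packet assignment, meanwhile, now goes through pax_open_kernel()/pax_close_kernel() because constification has moved cdrom_device_ops into read-only memory (a sketch of that pair follows the socfpga clk-pll hunks below); removing the n_minors writes accompanies the ops structure becoming const.
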
38677diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38678index 584bc31..e64a12c 100644
38679--- a/drivers/cdrom/gdrom.c
38680+++ b/drivers/cdrom/gdrom.c
38681@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38682 .audio_ioctl = gdrom_audio_ioctl,
38683 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38684 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38685- .n_minors = 1,
38686 };
38687
38688 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38689diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38690index a4af822..ed58cd1 100644
38691--- a/drivers/char/Kconfig
38692+++ b/drivers/char/Kconfig
38693@@ -17,7 +17,8 @@ config DEVMEM
38694
38695 config DEVKMEM
38696 bool "/dev/kmem virtual device support"
38697- default y
38698+ default n
38699+ depends on !GRKERNSEC_KMEM
38700 help
38701 Say Y here if you want to support the /dev/kmem device. The
38702 /dev/kmem device is rarely used, but can be used for certain
38703@@ -586,6 +587,7 @@ config DEVPORT
38704 bool
38705 depends on !M68K
38706 depends on ISA || PCI
38707+ depends on !GRKERNSEC_KMEM
38708 default y
38709
38710 source "drivers/s390/char/Kconfig"
38711diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38712index a48e05b..6bac831 100644
38713--- a/drivers/char/agp/compat_ioctl.c
38714+++ b/drivers/char/agp/compat_ioctl.c
38715@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38716 return -ENOMEM;
38717 }
38718
38719- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38720+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38721 sizeof(*usegment) * ureserve.seg_count)) {
38722 kfree(usegment);
38723 kfree(ksegment);
38724diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38725index 09f17eb..8531d2f 100644
38726--- a/drivers/char/agp/frontend.c
38727+++ b/drivers/char/agp/frontend.c
38728@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38729 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38730 return -EFAULT;
38731
38732- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38733+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38734 return -EFAULT;
38735
38736 client = agp_find_client_by_pid(reserve.pid);
38737@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38738 if (segment == NULL)
38739 return -ENOMEM;
38740
38741- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38742+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38743 sizeof(struct agp_segment) * reserve.seg_count)) {
38744 kfree(segment);
38745 return -EFAULT;
38746diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38747index 4f94375..413694e 100644
38748--- a/drivers/char/genrtc.c
38749+++ b/drivers/char/genrtc.c
38750@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38751 switch (cmd) {
38752
38753 case RTC_PLL_GET:
38754+ memset(&pll, 0, sizeof(pll));
38755 if (get_rtc_pll(&pll))
38756 return -EINVAL;
38757 else
38758diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38759index 5c0baa9..44011b1 100644
38760--- a/drivers/char/hpet.c
38761+++ b/drivers/char/hpet.c
38762@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38763 }
38764
38765 static int
38766-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38767+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38768 struct hpet_info *info)
38769 {
38770 struct hpet_timer __iomem *timer;
38771diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
38772index 24cc4ed..f9807cf 100644
38773--- a/drivers/char/i8k.c
38774+++ b/drivers/char/i8k.c
38775@@ -788,7 +788,7 @@ static const struct i8k_config_data i8k_config_data[] = {
38776 },
38777 };
38778
38779-static struct dmi_system_id i8k_dmi_table[] __initdata = {
38780+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
38781 {
38782 .ident = "Dell Inspiron",
38783 .matches = {
38784diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38785index 9bb5928..57a7801 100644
38786--- a/drivers/char/ipmi/ipmi_msghandler.c
38787+++ b/drivers/char/ipmi/ipmi_msghandler.c
38788@@ -436,7 +436,7 @@ struct ipmi_smi {
38789 struct proc_dir_entry *proc_dir;
38790 char proc_dir_name[10];
38791
38792- atomic_t stats[IPMI_NUM_STATS];
38793+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38794
38795 /*
38796 * run_to_completion duplicate of smb_info, smi_info
38797@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38798 static DEFINE_MUTEX(smi_watchers_mutex);
38799
38800 #define ipmi_inc_stat(intf, stat) \
38801- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38802+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38803 #define ipmi_get_stat(intf, stat) \
38804- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38805+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38806
38807 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38808 "ACPI", "SMBIOS", "PCI",
38809@@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38810 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38811 init_waitqueue_head(&intf->waitq);
38812 for (i = 0; i < IPMI_NUM_STATS; i++)
38813- atomic_set(&intf->stats[i], 0);
38814+ atomic_set_unchecked(&intf->stats[i], 0);
38815
38816 intf->proc_dir = NULL;
38817
38818diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38819index 518585c..6c985cef 100644
38820--- a/drivers/char/ipmi/ipmi_si_intf.c
38821+++ b/drivers/char/ipmi/ipmi_si_intf.c
38822@@ -289,7 +289,7 @@ struct smi_info {
38823 unsigned char slave_addr;
38824
38825 /* Counters and things for the proc filesystem. */
38826- atomic_t stats[SI_NUM_STATS];
38827+ atomic_unchecked_t stats[SI_NUM_STATS];
38828
38829 struct task_struct *thread;
38830
38831@@ -298,9 +298,9 @@ struct smi_info {
38832 };
38833
38834 #define smi_inc_stat(smi, stat) \
38835- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38836+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38837 #define smi_get_stat(smi, stat) \
38838- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38839+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38840
38841 #define SI_MAX_PARMS 4
38842
38843@@ -3498,7 +3498,7 @@ static int try_smi_init(struct smi_info *new_smi)
38844 atomic_set(&new_smi->req_events, 0);
38845 new_smi->run_to_completion = false;
38846 for (i = 0; i < SI_NUM_STATS; i++)
38847- atomic_set(&new_smi->stats[i], 0);
38848+ atomic_set_unchecked(&new_smi->stats[i], 0);
38849
38850 new_smi->interrupt_disabled = true;
38851 atomic_set(&new_smi->need_watch, 0);
38852diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38853index 297110c..3f69b43 100644
38854--- a/drivers/char/mem.c
38855+++ b/drivers/char/mem.c
38856@@ -18,6 +18,7 @@
38857 #include <linux/raw.h>
38858 #include <linux/tty.h>
38859 #include <linux/capability.h>
38860+#include <linux/security.h>
38861 #include <linux/ptrace.h>
38862 #include <linux/device.h>
38863 #include <linux/highmem.h>
38864@@ -36,6 +37,10 @@
38865
38866 #define DEVPORT_MINOR 4
38867
38868+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38869+extern const struct file_operations grsec_fops;
38870+#endif
38871+
38872 static inline unsigned long size_inside_page(unsigned long start,
38873 unsigned long size)
38874 {
38875@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38876
38877 while (cursor < to) {
38878 if (!devmem_is_allowed(pfn)) {
38879+#ifdef CONFIG_GRKERNSEC_KMEM
38880+ gr_handle_mem_readwrite(from, to);
38881+#else
38882 printk(KERN_INFO
38883 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38884 current->comm, from, to);
38885+#endif
38886 return 0;
38887 }
38888 cursor += PAGE_SIZE;
38889@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38890 }
38891 return 1;
38892 }
38893+#elif defined(CONFIG_GRKERNSEC_KMEM)
38894+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38895+{
38896+ return 0;
38897+}
38898 #else
38899 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38900 {
38901@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38902 #endif
38903
38904 while (count > 0) {
38905- unsigned long remaining;
38906+ unsigned long remaining = 0;
38907+ char *temp;
38908
38909 sz = size_inside_page(p, count);
38910
38911@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38912 if (!ptr)
38913 return -EFAULT;
38914
38915- remaining = copy_to_user(buf, ptr, sz);
38916+#ifdef CONFIG_PAX_USERCOPY
38917+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38918+ if (!temp) {
38919+ unxlate_dev_mem_ptr(p, ptr);
38920+ return -ENOMEM;
38921+ }
38922+ remaining = probe_kernel_read(temp, ptr, sz);
38923+#else
38924+ temp = ptr;
38925+#endif
38926+
38927+ if (!remaining)
38928+ remaining = copy_to_user(buf, temp, sz);
38929+
38930+#ifdef CONFIG_PAX_USERCOPY
38931+ kfree(temp);
38932+#endif
38933+
38934 unxlate_dev_mem_ptr(p, ptr);
38935 if (remaining)
38936 return -EFAULT;
38937@@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38938 size_t count, loff_t *ppos)
38939 {
38940 unsigned long p = *ppos;
38941- ssize_t low_count, read, sz;
38942+ ssize_t low_count, read, sz, err = 0;
38943 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38944- int err = 0;
38945
38946 read = 0;
38947 if (p < (unsigned long) high_memory) {
38948@@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38949 }
38950 #endif
38951 while (low_count > 0) {
38952+ char *temp;
38953+
38954 sz = size_inside_page(p, low_count);
38955
38956 /*
38957@@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38958 */
38959 kbuf = xlate_dev_kmem_ptr((void *)p);
38960
38961- if (copy_to_user(buf, kbuf, sz))
38962+#ifdef CONFIG_PAX_USERCOPY
38963+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38964+ if (!temp)
38965+ return -ENOMEM;
38966+ err = probe_kernel_read(temp, kbuf, sz);
38967+#else
38968+ temp = kbuf;
38969+#endif
38970+
38971+ if (!err)
38972+ err = copy_to_user(buf, temp, sz);
38973+
38974+#ifdef CONFIG_PAX_USERCOPY
38975+ kfree(temp);
38976+#endif
38977+
38978+ if (err)
38979 return -EFAULT;
38980 buf += sz;
38981 p += sz;
38982@@ -804,6 +853,9 @@ static const struct memdev {
38983 #ifdef CONFIG_PRINTK
38984 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
38985 #endif
38986+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38987+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
38988+#endif
38989 };
38990
38991 static int memory_open(struct inode *inode, struct file *filp)
38992@@ -865,7 +917,7 @@ static int __init chr_dev_init(void)
38993 continue;
38994
38995 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38996- NULL, devlist[minor].name);
38997+ NULL, "%s", devlist[minor].name);
38998 }
38999
39000 return tty_init();
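
Note: under PAX_USERCOPY the mem.c read paths stop copying straight from an arbitrary kernel or physical mapping into userspace. They bounce through a freshly allocated buffer filled with probe_kernel_read(), so the usercopy checker sees a heap object of known size and an unreadable source page becomes a clean error rather than an oops. The pattern, reduced from the hunks above (a sketch, with the unxlate bookkeeping trimmed):

    static ssize_t bounce_to_user(char __user *ubuf, const void *src, size_t sz)
    {
        char *tmp = kmalloc(sz, GFP_KERNEL);
        long err;

        if (!tmp)
            return -ENOMEM;
        err = probe_kernel_read(tmp, src, sz);  /* faults become errors */
        if (!err && copy_to_user(ubuf, tmp, sz))
            err = -EFAULT;
        kfree(tmp);
        return err;
    }

GFP_USERCOPY in the real hunk is a grsecurity addition: it asks the slab allocator to place the object in a region from which copies to userspace are explicitly permitted.
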
39001diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39002index 9df78e2..01ba9ae 100644
39003--- a/drivers/char/nvram.c
39004+++ b/drivers/char/nvram.c
39005@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39006
39007 spin_unlock_irq(&rtc_lock);
39008
39009- if (copy_to_user(buf, contents, tmp - contents))
39010+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39011 return -EFAULT;
39012
39013 *ppos = i;
39014diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39015index 0ea9986..e7b07e4 100644
39016--- a/drivers/char/pcmcia/synclink_cs.c
39017+++ b/drivers/char/pcmcia/synclink_cs.c
39018@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39019
39020 if (debug_level >= DEBUG_LEVEL_INFO)
39021 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39022- __FILE__, __LINE__, info->device_name, port->count);
39023+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39024
39025 if (tty_port_close_start(port, tty, filp) == 0)
39026 goto cleanup;
39027@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39028 cleanup:
39029 if (debug_level >= DEBUG_LEVEL_INFO)
39030 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39031- tty->driver->name, port->count);
39032+ tty->driver->name, atomic_read(&port->count));
39033 }
39034
39035 /* Wait until the transmitter is empty.
39036@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39037
39038 if (debug_level >= DEBUG_LEVEL_INFO)
39039 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39040- __FILE__, __LINE__, tty->driver->name, port->count);
39041+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39042
39043 /* If port is closing, signal caller to try again */
39044 if (port->flags & ASYNC_CLOSING){
39045@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39046 goto cleanup;
39047 }
39048 spin_lock(&port->lock);
39049- port->count++;
39050+ atomic_inc(&port->count);
39051 spin_unlock(&port->lock);
39052 spin_unlock_irqrestore(&info->netlock, flags);
39053
39054- if (port->count == 1) {
39055+ if (atomic_read(&port->count) == 1) {
39056 /* 1st open on this device, init hardware */
39057 retval = startup(info, tty);
39058 if (retval < 0)
39059@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39060 unsigned short new_crctype;
39061
39062 /* return error if TTY interface open */
39063- if (info->port.count)
39064+ if (atomic_read(&info->port.count))
39065 return -EBUSY;
39066
39067 switch (encoding)
39068@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39069
39070 /* arbitrate between network and tty opens */
39071 spin_lock_irqsave(&info->netlock, flags);
39072- if (info->port.count != 0 || info->netcount != 0) {
39073+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39074 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39075 spin_unlock_irqrestore(&info->netlock, flags);
39076 return -EBUSY;
39077@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39078 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39079
39080 /* return error if TTY interface open */
39081- if (info->port.count)
39082+ if (atomic_read(&info->port.count))
39083 return -EBUSY;
39084
39085 if (cmd != SIOCWANDEV)
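
Note: the synclink hunks convert the tty port open count from a plain int, incremented under port->lock but read locklessly in the hdlcdev paths, to an atomic_t, so the reads in hdlcdev_open()/hdlcdev_ioctl() no longer race with open/close. The shape of the change:

    spin_lock(&port->lock);
    atomic_inc(&port->count);             /* was: port->count++ */
    spin_unlock(&port->lock);

    if (atomic_read(&port->count) == 1)   /* now a safe lockless read */
        first_open_setup();               /* stands in for startup(info, tty) */
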
39086diff --git a/drivers/char/random.c b/drivers/char/random.c
39087index 9cd6968..6416f00 100644
39088--- a/drivers/char/random.c
39089+++ b/drivers/char/random.c
39090@@ -289,9 +289,6 @@
39091 /*
39092 * To allow fractional bits to be tracked, the entropy_count field is
39093 * denominated in units of 1/8th bits.
39094- *
39095- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39096- * credit_entropy_bits() needs to be 64 bits wide.
39097 */
39098 #define ENTROPY_SHIFT 3
39099 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39100@@ -439,9 +436,9 @@ struct entropy_store {
39101 };
39102
39103 static void push_to_pool(struct work_struct *work);
39104-static __u32 input_pool_data[INPUT_POOL_WORDS];
39105-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39106-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39107+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39108+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39109+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39110
39111 static struct entropy_store input_pool = {
39112 .poolinfo = &poolinfo_table[0],
39113@@ -635,7 +632,7 @@ retry:
39114 /* The +2 corresponds to the /4 in the denominator */
39115
39116 do {
39117- unsigned int anfrac = min(pnfrac, pool_size/2);
39118+ u64 anfrac = min(pnfrac, pool_size/2);
39119 unsigned int add =
39120 ((pool_size - entropy_count)*anfrac*3) >> s;
39121
39122@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39123
39124 extract_buf(r, tmp);
39125 i = min_t(int, nbytes, EXTRACT_SIZE);
39126- if (copy_to_user(buf, tmp, i)) {
39127+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39128 ret = -EFAULT;
39129 break;
39130 }
39131@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39132 static int proc_do_uuid(struct ctl_table *table, int write,
39133 void __user *buffer, size_t *lenp, loff_t *ppos)
39134 {
39135- struct ctl_table fake_table;
39136+ ctl_table_no_const fake_table;
39137 unsigned char buf[64], tmp_uuid[16], *uuid;
39138
39139 uuid = table->data;
39140@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39141 static int proc_do_entropy(struct ctl_table *table, int write,
39142 void __user *buffer, size_t *lenp, loff_t *ppos)
39143 {
39144- struct ctl_table fake_table;
39145+ ctl_table_no_const fake_table;
39146 int entropy_count;
39147
39148 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
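
Note: widening anfrac to u64 makes the three-way product in the entropy credit path 64-bit, which is why the deleted header comment, which constrained 2*(ENTROPY_SHIFT + log2(poolbits)) to at most 31, is no longer needed. With the current constants the 32-bit product was already borderline:

    /* Input pool: 128 words * 32 bits = 4096 bits
     *   pool_size = 4096 << ENTROPY_SHIFT(3) = 32768 eighth-bits
     *   anfrac   <= pool_size / 2           = 16384
     *   worst case (pool_size - entropy_count) * anfrac * 3
     *            = 32768 * 16384 * 3 = 1,610,612,736  (just under 2^31)
     * Any larger pool or shift would overflow 32-bit arithmetic; with
     * anfrac as u64 the whole expression is evaluated in 64 bits. */
    u64 anfrac = min(pnfrac, pool_size / 2);

The __latent_entropy markings on the pool arrays, by contrast, only ask the latent_entropy gcc plugin to seed those arrays with build- and boot-time noise.
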
39149diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39150index e496dae..3db53b6 100644
39151--- a/drivers/char/sonypi.c
39152+++ b/drivers/char/sonypi.c
39153@@ -54,6 +54,7 @@
39154
39155 #include <asm/uaccess.h>
39156 #include <asm/io.h>
39157+#include <asm/local.h>
39158
39159 #include <linux/sonypi.h>
39160
39161@@ -490,7 +491,7 @@ static struct sonypi_device {
39162 spinlock_t fifo_lock;
39163 wait_queue_head_t fifo_proc_list;
39164 struct fasync_struct *fifo_async;
39165- int open_count;
39166+ local_t open_count;
39167 int model;
39168 struct input_dev *input_jog_dev;
39169 struct input_dev *input_key_dev;
39170@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39171 static int sonypi_misc_release(struct inode *inode, struct file *file)
39172 {
39173 mutex_lock(&sonypi_device.lock);
39174- sonypi_device.open_count--;
39175+ local_dec(&sonypi_device.open_count);
39176 mutex_unlock(&sonypi_device.lock);
39177 return 0;
39178 }
39179@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39180 {
39181 mutex_lock(&sonypi_device.lock);
39182 /* Flush input queue on first open */
39183- if (!sonypi_device.open_count)
39184+ if (!local_read(&sonypi_device.open_count))
39185 kfifo_reset(&sonypi_device.fifo);
39186- sonypi_device.open_count++;
39187+ local_inc(&sonypi_device.open_count);
39188 mutex_unlock(&sonypi_device.lock);
39189
39190 return 0;
39191@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39192
39193 static struct platform_device *sonypi_platform_device;
39194
39195-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39196+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39197 {
39198 .ident = "Sony Vaio",
39199 .matches = {
39200diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39201index 565a947..dcdc06e 100644
39202--- a/drivers/char/tpm/tpm_acpi.c
39203+++ b/drivers/char/tpm/tpm_acpi.c
39204@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39205 virt = acpi_os_map_iomem(start, len);
39206 if (!virt) {
39207 kfree(log->bios_event_log);
39208+ log->bios_event_log = NULL;
39209 printk("%s: ERROR - Unable to map memory\n", __func__);
39210 return -EIO;
39211 }
39212
39213- memcpy_fromio(log->bios_event_log, virt, len);
39214+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39215
39216 acpi_os_unmap_iomem(virt, len);
39217 return 0;
39218diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39219index 3a56a13..f8cbd25 100644
39220--- a/drivers/char/tpm/tpm_eventlog.c
39221+++ b/drivers/char/tpm/tpm_eventlog.c
39222@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39223 event = addr;
39224
39225 if ((event->event_type == 0 && event->event_size == 0) ||
39226- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39227+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39228 return NULL;
39229
39230 return addr;
39231@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39232 return NULL;
39233
39234 if ((event->event_type == 0 && event->event_size == 0) ||
39235- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39236+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39237 return NULL;
39238
39239 (*pos)++;
39240@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39241 int i;
39242
39243 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39244- seq_putc(m, data[i]);
39245+ if (!seq_putc(m, data[i]))
39246+ return -EFAULT;
39247
39248 return 0;
39249 }
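
Note: the tpm_eventlog checks are rearranged to dodge integer overflow in attacker-influenced arithmetic. event_size comes from log data, so addr + sizeof(struct tcpa_event) + event->event_size can wrap and compare less than limit even though the event does not fit; moving the additions to the known-good side keeps every intermediate value in range:

    /* Before (overflow-prone): a huge event_size wraps the sum. */
    if (addr + sizeof(struct tcpa_event) + event->event_size >= limit)
        return NULL;

    /* After (safe): addr plus the fixed header is already known to lie
     * below limit, so the subtraction cannot wrap. */
    if (event->event_size >= limit - addr - sizeof(struct tcpa_event))
        return NULL;

The seq_putc() return-value check in the same file closes a quieter gap: a failed write into the seq_file buffer is now propagated as an error instead of being ignored.
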
39250diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39251index 72d7028..1586601 100644
39252--- a/drivers/char/virtio_console.c
39253+++ b/drivers/char/virtio_console.c
39254@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39255 if (to_user) {
39256 ssize_t ret;
39257
39258- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39259+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39260 if (ret)
39261 return -EFAULT;
39262 } else {
39263@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39264 if (!port_has_data(port) && !port->host_connected)
39265 return 0;
39266
39267- return fill_readbuf(port, ubuf, count, true);
39268+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39269 }
39270
39271 static int wait_port_writable(struct port *port, bool nonblock)
39272diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39273index 956b7e5..b655045 100644
39274--- a/drivers/clk/clk-composite.c
39275+++ b/drivers/clk/clk-composite.c
39276@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39277 struct clk *clk;
39278 struct clk_init_data init;
39279 struct clk_composite *composite;
39280- struct clk_ops *clk_composite_ops;
39281+ clk_ops_no_const *clk_composite_ops;
39282
39283 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39284 if (!composite) {
39285diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39286index dd3a78c..386d49c 100644
39287--- a/drivers/clk/socfpga/clk-gate.c
39288+++ b/drivers/clk/socfpga/clk-gate.c
39289@@ -22,6 +22,7 @@
39290 #include <linux/mfd/syscon.h>
39291 #include <linux/of.h>
39292 #include <linux/regmap.h>
39293+#include <asm/pgtable.h>
39294
39295 #include "clk.h"
39296
39297@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39298 return 0;
39299 }
39300
39301-static struct clk_ops gateclk_ops = {
39302+static clk_ops_no_const gateclk_ops __read_only = {
39303 .prepare = socfpga_clk_prepare,
39304 .recalc_rate = socfpga_clk_recalc_rate,
39305 .get_parent = socfpga_clk_get_parent,
39306@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39307 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39308 socfpga_clk->hw.bit_idx = clk_gate[1];
39309
39310- gateclk_ops.enable = clk_gate_ops.enable;
39311- gateclk_ops.disable = clk_gate_ops.disable;
39312+ pax_open_kernel();
39313+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39314+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39315+ pax_close_kernel();
39316 }
39317
39318 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39319diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39320index de6da95..c98278b 100644
39321--- a/drivers/clk/socfpga/clk-pll.c
39322+++ b/drivers/clk/socfpga/clk-pll.c
39323@@ -21,6 +21,7 @@
39324 #include <linux/io.h>
39325 #include <linux/of.h>
39326 #include <linux/of_address.h>
39327+#include <asm/pgtable.h>
39328
39329 #include "clk.h"
39330
39331@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39332 CLK_MGR_PLL_CLK_SRC_MASK;
39333 }
39334
39335-static struct clk_ops clk_pll_ops = {
39336+static clk_ops_no_const clk_pll_ops __read_only = {
39337 .recalc_rate = clk_pll_recalc_rate,
39338 .get_parent = clk_pll_get_parent,
39339 };
39340@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39341 pll_clk->hw.hw.init = &init;
39342
39343 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39344- clk_pll_ops.enable = clk_gate_ops.enable;
39345- clk_pll_ops.disable = clk_gate_ops.disable;
39346+ pax_open_kernel();
39347+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39348+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39349+ pax_close_kernel();
39350
39351 clk = clk_register(NULL, &pll_clk->hw.hw);
39352 if (WARN_ON(IS_ERR(clk))) {
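
Note: the socfpga clock hunks show the standard grsecurity pattern for ops structures that must be patched exactly once at init. The struct is declared with the non-const clk_ops_no_const typedef, placed in a read-only section via __read_only, and the two late assignments are funnelled through pax_open_kernel()/pax_close_kernel(). A simplified x86 sketch of that pair (the real helpers also handle preemption and KERNEXEC details):

    static inline unsigned long pax_open_kernel(void)
    {
        unsigned long cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP);  /* let the kernel write RO pages */
        barrier();
        return cr0;
    }

    static inline unsigned long pax_close_kernel(void)
    {
        barrier();
        write_cr0(read_cr0() | X86_CR0_WP);
        return 0;
    }

The *(void **)&ops.field = ... casts seen throughout these hunks only sidestep compile-time const-ness where the target has been constified; the write-protect toggle is what actually permits the store.
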
39353diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39354index b0c18ed..1713a80 100644
39355--- a/drivers/cpufreq/acpi-cpufreq.c
39356+++ b/drivers/cpufreq/acpi-cpufreq.c
39357@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39358 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39359 per_cpu(acfreq_data, cpu) = data;
39360
39361- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39362- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39363+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39364+ pax_open_kernel();
39365+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39366+ pax_close_kernel();
39367+ }
39368
39369 result = acpi_processor_register_performance(data->acpi_data, cpu);
39370 if (result)
39371@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39372 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39373 break;
39374 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39375- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39376+ pax_open_kernel();
39377+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39378+ pax_close_kernel();
39379 break;
39380 default:
39381 break;
39382@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39383 if (!msrs)
39384 return;
39385
39386- acpi_cpufreq_driver.boost_supported = true;
39387- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39388+ pax_open_kernel();
39389+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39390+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39391+ pax_close_kernel();
39392
39393 cpu_notifier_register_begin();
39394
39395diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39396index bab67db..91af7e3 100644
39397--- a/drivers/cpufreq/cpufreq-dt.c
39398+++ b/drivers/cpufreq/cpufreq-dt.c
39399@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39400 if (!IS_ERR(cpu_reg))
39401 regulator_put(cpu_reg);
39402
39403- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39404+ pax_open_kernel();
39405+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39406+ pax_close_kernel();
39407
39408 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39409 if (ret)
39410diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39411index 8ae655c..3141442 100644
39412--- a/drivers/cpufreq/cpufreq.c
39413+++ b/drivers/cpufreq/cpufreq.c
39414@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39415 }
39416
39417 mutex_lock(&cpufreq_governor_mutex);
39418- list_del(&governor->governor_list);
39419+ pax_list_del(&governor->governor_list);
39420 mutex_unlock(&cpufreq_governor_mutex);
39421 return;
39422 }
39423@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39424 return NOTIFY_OK;
39425 }
39426
39427-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39428+static struct notifier_block cpufreq_cpu_notifier = {
39429 .notifier_call = cpufreq_cpu_callback,
39430 };
39431
39432@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39433 return 0;
39434
39435 write_lock_irqsave(&cpufreq_driver_lock, flags);
39436- cpufreq_driver->boost_enabled = state;
39437+ pax_open_kernel();
39438+ *(bool *)&cpufreq_driver->boost_enabled = state;
39439+ pax_close_kernel();
39440 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39441
39442 ret = cpufreq_driver->set_boost(state);
39443 if (ret) {
39444 write_lock_irqsave(&cpufreq_driver_lock, flags);
39445- cpufreq_driver->boost_enabled = !state;
39446+ pax_open_kernel();
39447+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39448+ pax_close_kernel();
39449 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39450
39451 pr_err("%s: Cannot %s BOOST\n",
39452@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39453 cpufreq_driver = driver_data;
39454 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39455
39456- if (driver_data->setpolicy)
39457- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39458+ if (driver_data->setpolicy) {
39459+ pax_open_kernel();
39460+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39461+ pax_close_kernel();
39462+ }
39463
39464 if (cpufreq_boost_supported()) {
39465 /*
39466 * Check if driver provides function to enable boost -
39467 * if not, use cpufreq_boost_set_sw as default
39468 */
39469- if (!cpufreq_driver->set_boost)
39470- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39471+ if (!cpufreq_driver->set_boost) {
39472+ pax_open_kernel();
39473+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39474+ pax_close_kernel();
39475+ }
39476
39477 ret = cpufreq_sysfs_create_file(&boost.attr);
39478 if (ret) {
39479diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39480index 1b44496..b80ff5e 100644
39481--- a/drivers/cpufreq/cpufreq_governor.c
39482+++ b/drivers/cpufreq/cpufreq_governor.c
39483@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39484 struct dbs_data *dbs_data;
39485 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39486 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39487- struct od_ops *od_ops = NULL;
39488+ const struct od_ops *od_ops = NULL;
39489 struct od_dbs_tuners *od_tuners = NULL;
39490 struct cs_dbs_tuners *cs_tuners = NULL;
39491 struct cpu_dbs_common_info *cpu_cdbs;
39492@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39493
39494 if ((cdata->governor == GOV_CONSERVATIVE) &&
39495 (!policy->governor->initialized)) {
39496- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39497+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39498
39499 cpufreq_register_notifier(cs_ops->notifier_block,
39500 CPUFREQ_TRANSITION_NOTIFIER);
39501@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39502
39503 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39504 (policy->governor->initialized == 1)) {
39505- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39506+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39507
39508 cpufreq_unregister_notifier(cs_ops->notifier_block,
39509 CPUFREQ_TRANSITION_NOTIFIER);
39510diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39511index cc401d1..8197340 100644
39512--- a/drivers/cpufreq/cpufreq_governor.h
39513+++ b/drivers/cpufreq/cpufreq_governor.h
39514@@ -212,7 +212,7 @@ struct common_dbs_data {
39515 void (*exit)(struct dbs_data *dbs_data);
39516
39517 /* Governor specific ops, see below */
39518- void *gov_ops;
39519+ const void *gov_ops;
39520 };
39521
39522 /* Governor Per policy data */
39523@@ -232,7 +232,7 @@ struct od_ops {
39524 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39525 unsigned int freq_next, unsigned int relation);
39526 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39527-};
39528+} __no_const;
39529
39530 struct cs_ops {
39531 struct notifier_block *notifier_block;
39532diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39533index ad3f38f..8f086cd 100644
39534--- a/drivers/cpufreq/cpufreq_ondemand.c
39535+++ b/drivers/cpufreq/cpufreq_ondemand.c
39536@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39537
39538 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39539
39540-static struct od_ops od_ops = {
39541+static struct od_ops od_ops __read_only = {
39542 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39543 .powersave_bias_target = generic_powersave_bias_target,
39544 .freq_increase = dbs_freq_increase,
39545@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39546 (struct cpufreq_policy *, unsigned int, unsigned int),
39547 unsigned int powersave_bias)
39548 {
39549- od_ops.powersave_bias_target = f;
39550+ pax_open_kernel();
39551+ *(void **)&od_ops.powersave_bias_target = f;
39552+ pax_close_kernel();
39553 od_set_powersave_bias(powersave_bias);
39554 }
39555 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39556
39557 void od_unregister_powersave_bias_handler(void)
39558 {
39559- od_ops.powersave_bias_target = generic_powersave_bias_target;
39560+ pax_open_kernel();
39561+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39562+ pax_close_kernel();
39563 od_set_powersave_bias(0);
39564 }
39565 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39566diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39567index 872c577..5fb3c20 100644
39568--- a/drivers/cpufreq/intel_pstate.c
39569+++ b/drivers/cpufreq/intel_pstate.c
39570@@ -133,10 +133,10 @@ struct pstate_funcs {
39571 struct cpu_defaults {
39572 struct pstate_adjust_policy pid_policy;
39573 struct pstate_funcs funcs;
39574-};
39575+} __do_const;
39576
39577 static struct pstate_adjust_policy pid_params;
39578-static struct pstate_funcs pstate_funcs;
39579+static struct pstate_funcs *pstate_funcs;
39580 static int hwp_active;
39581
39582 struct perf_limits {
39583@@ -690,18 +690,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39584
39585 cpu->pstate.current_pstate = pstate;
39586
39587- pstate_funcs.set(cpu, pstate);
39588+ pstate_funcs->set(cpu, pstate);
39589 }
39590
39591 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39592 {
39593- cpu->pstate.min_pstate = pstate_funcs.get_min();
39594- cpu->pstate.max_pstate = pstate_funcs.get_max();
39595- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39596- cpu->pstate.scaling = pstate_funcs.get_scaling();
39597+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39598+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39599+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39600+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39601
39602- if (pstate_funcs.get_vid)
39603- pstate_funcs.get_vid(cpu);
39604+ if (pstate_funcs->get_vid)
39605+ pstate_funcs->get_vid(cpu);
39606 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39607 }
39608
39609@@ -1030,9 +1030,9 @@ static int intel_pstate_msrs_not_valid(void)
39610 rdmsrl(MSR_IA32_APERF, aperf);
39611 rdmsrl(MSR_IA32_MPERF, mperf);
39612
39613- if (!pstate_funcs.get_max() ||
39614- !pstate_funcs.get_min() ||
39615- !pstate_funcs.get_turbo())
39616+ if (!pstate_funcs->get_max() ||
39617+ !pstate_funcs->get_min() ||
39618+ !pstate_funcs->get_turbo())
39619 return -ENODEV;
39620
39621 rdmsrl(MSR_IA32_APERF, tmp);
39622@@ -1046,7 +1046,7 @@ static int intel_pstate_msrs_not_valid(void)
39623 return 0;
39624 }
39625
39626-static void copy_pid_params(struct pstate_adjust_policy *policy)
39627+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39628 {
39629 pid_params.sample_rate_ms = policy->sample_rate_ms;
39630 pid_params.p_gain_pct = policy->p_gain_pct;
39631@@ -1058,12 +1058,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39632
39633 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39634 {
39635- pstate_funcs.get_max = funcs->get_max;
39636- pstate_funcs.get_min = funcs->get_min;
39637- pstate_funcs.get_turbo = funcs->get_turbo;
39638- pstate_funcs.get_scaling = funcs->get_scaling;
39639- pstate_funcs.set = funcs->set;
39640- pstate_funcs.get_vid = funcs->get_vid;
39641+ pstate_funcs = funcs;
39642 }
39643
39644 #if IS_ENABLED(CONFIG_ACPI)
39645diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39646index 529cfd9..0e28fff 100644
39647--- a/drivers/cpufreq/p4-clockmod.c
39648+++ b/drivers/cpufreq/p4-clockmod.c
39649@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39650 case 0x0F: /* Core Duo */
39651 case 0x16: /* Celeron Core */
39652 case 0x1C: /* Atom */
39653- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39654+ pax_open_kernel();
39655+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39656+ pax_close_kernel();
39657 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39658 case 0x0D: /* Pentium M (Dothan) */
39659- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39660+ pax_open_kernel();
39661+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39662+ pax_close_kernel();
39663 /* fall through */
39664 case 0x09: /* Pentium M (Banias) */
39665 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39666@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39667
39668 /* on P-4s, the TSC runs with constant frequency independent whether
39669 * throttling is active or not. */
39670- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39671+ pax_open_kernel();
39672+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39673+ pax_close_kernel();
39674
39675 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39676 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39677diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39678index 9bb42ba..b01b4a2 100644
39679--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39680+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39681@@ -18,14 +18,12 @@
39682 #include <asm/head.h>
39683 #include <asm/timer.h>
39684
39685-static struct cpufreq_driver *cpufreq_us3_driver;
39686-
39687 struct us3_freq_percpu_info {
39688 struct cpufreq_frequency_table table[4];
39689 };
39690
39691 /* Indexed by cpu number. */
39692-static struct us3_freq_percpu_info *us3_freq_table;
39693+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39694
39695 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39696 * in the Safari config register.
39697@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39698
39699 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39700 {
39701- if (cpufreq_us3_driver)
39702- us3_freq_target(policy, 0);
39703+ us3_freq_target(policy, 0);
39704
39705 return 0;
39706 }
39707
39708+static int __init us3_freq_init(void);
39709+static void __exit us3_freq_exit(void);
39710+
39711+static struct cpufreq_driver cpufreq_us3_driver = {
39712+ .init = us3_freq_cpu_init,
39713+ .verify = cpufreq_generic_frequency_table_verify,
39714+ .target_index = us3_freq_target,
39715+ .get = us3_freq_get,
39716+ .exit = us3_freq_cpu_exit,
39717+ .name = "UltraSPARC-III",
39718+
39719+};
39720+
39721 static int __init us3_freq_init(void)
39722 {
39723 unsigned long manuf, impl, ver;
39724- int ret;
39725
39726 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39727 return -ENODEV;
39728@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39729 (impl == CHEETAH_IMPL ||
39730 impl == CHEETAH_PLUS_IMPL ||
39731 impl == JAGUAR_IMPL ||
39732- impl == PANTHER_IMPL)) {
39733- struct cpufreq_driver *driver;
39734-
39735- ret = -ENOMEM;
39736- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39737- if (!driver)
39738- goto err_out;
39739-
39740- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39741- GFP_KERNEL);
39742- if (!us3_freq_table)
39743- goto err_out;
39744-
39745- driver->init = us3_freq_cpu_init;
39746- driver->verify = cpufreq_generic_frequency_table_verify;
39747- driver->target_index = us3_freq_target;
39748- driver->get = us3_freq_get;
39749- driver->exit = us3_freq_cpu_exit;
39750- strcpy(driver->name, "UltraSPARC-III");
39751-
39752- cpufreq_us3_driver = driver;
39753- ret = cpufreq_register_driver(driver);
39754- if (ret)
39755- goto err_out;
39756-
39757- return 0;
39758-
39759-err_out:
39760- if (driver) {
39761- kfree(driver);
39762- cpufreq_us3_driver = NULL;
39763- }
39764- kfree(us3_freq_table);
39765- us3_freq_table = NULL;
39766- return ret;
39767- }
39768+ impl == PANTHER_IMPL))
39769+ return cpufreq_register_driver(&cpufreq_us3_driver);
39770
39771 return -ENODEV;
39772 }
39773
39774 static void __exit us3_freq_exit(void)
39775 {
39776- if (cpufreq_us3_driver) {
39777- cpufreq_unregister_driver(cpufreq_us3_driver);
39778- kfree(cpufreq_us3_driver);
39779- cpufreq_us3_driver = NULL;
39780- kfree(us3_freq_table);
39781- us3_freq_table = NULL;
39782- }
39783+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39784 }
39785
39786 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39787diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39788index 7d4a315..21bb886 100644
39789--- a/drivers/cpufreq/speedstep-centrino.c
39790+++ b/drivers/cpufreq/speedstep-centrino.c
39791@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39792 !cpu_has(cpu, X86_FEATURE_EST))
39793 return -ENODEV;
39794
39795- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39796- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39797+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39798+ pax_open_kernel();
39799+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39800+ pax_close_kernel();
39801+ }
39802
39803 if (policy->cpu != 0)
39804 return -ENODEV;
39805diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39806index 2697e87..c32476c 100644
39807--- a/drivers/cpuidle/driver.c
39808+++ b/drivers/cpuidle/driver.c
39809@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39810
39811 static void poll_idle_init(struct cpuidle_driver *drv)
39812 {
39813- struct cpuidle_state *state = &drv->states[0];
39814+ cpuidle_state_no_const *state = &drv->states[0];
39815
39816 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39817 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39818diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39819index fb9f511..213e6cc 100644
39820--- a/drivers/cpuidle/governor.c
39821+++ b/drivers/cpuidle/governor.c
39822@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39823 mutex_lock(&cpuidle_lock);
39824 if (__cpuidle_find_governor(gov->name) == NULL) {
39825 ret = 0;
39826- list_add_tail(&gov->governor_list, &cpuidle_governors);
39827+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39828 if (!cpuidle_curr_governor ||
39829 cpuidle_curr_governor->rating < gov->rating)
39830 cpuidle_switch_governor(gov);
39831diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39832index 832a2c3..1794080 100644
39833--- a/drivers/cpuidle/sysfs.c
39834+++ b/drivers/cpuidle/sysfs.c
39835@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39836 NULL
39837 };
39838
39839-static struct attribute_group cpuidle_attr_group = {
39840+static attribute_group_no_const cpuidle_attr_group = {
39841 .attrs = cpuidle_default_attrs,
39842 .name = "cpuidle",
39843 };
39844diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39845index 8d2a772..33826c9 100644
39846--- a/drivers/crypto/hifn_795x.c
39847+++ b/drivers/crypto/hifn_795x.c
39848@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39849 MODULE_PARM_DESC(hifn_pll_ref,
39850 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39851
39852-static atomic_t hifn_dev_number;
39853+static atomic_unchecked_t hifn_dev_number;
39854
39855 #define ACRYPTO_OP_DECRYPT 0
39856 #define ACRYPTO_OP_ENCRYPT 1
39857@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39858 goto err_out_disable_pci_device;
39859
39860 snprintf(name, sizeof(name), "hifn%d",
39861- atomic_inc_return(&hifn_dev_number)-1);
39862+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39863
39864 err = pci_request_regions(pdev, name);
39865 if (err)
39866diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39867index 30b538d8..1610d75 100644
39868--- a/drivers/devfreq/devfreq.c
39869+++ b/drivers/devfreq/devfreq.c
39870@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39871 goto err_out;
39872 }
39873
39874- list_add(&governor->node, &devfreq_governor_list);
39875+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39876
39877 list_for_each_entry(devfreq, &devfreq_list, node) {
39878 int ret = 0;
39879@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39880 }
39881 }
39882
39883- list_del(&governor->node);
39884+ pax_list_del((struct list_head *)&governor->node);
39885 err_out:
39886 mutex_unlock(&devfreq_list_lock);
39887
39888diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39889index 8ee383d..736b5de 100644
39890--- a/drivers/dma/sh/shdma-base.c
39891+++ b/drivers/dma/sh/shdma-base.c
39892@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39893 schan->slave_id = -EINVAL;
39894 }
39895
39896- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39897- sdev->desc_size, GFP_KERNEL);
39898+ schan->desc = kcalloc(sdev->desc_size,
39899+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39900 if (!schan->desc) {
39901 ret = -ENOMEM;
39902 goto edescalloc;
39903diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39904index 9f1d4c7..fceff78 100644
39905--- a/drivers/dma/sh/shdmac.c
39906+++ b/drivers/dma/sh/shdmac.c
39907@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39908 return ret;
39909 }
39910
39911-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39912+static struct notifier_block sh_dmae_nmi_notifier = {
39913 .notifier_call = sh_dmae_nmi_handler,
39914
39915 /* Run before NMI debug handler and KGDB */
39916diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39917index 592af5f..bb1d583 100644
39918--- a/drivers/edac/edac_device.c
39919+++ b/drivers/edac/edac_device.c
39920@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39921 */
39922 int edac_device_alloc_index(void)
39923 {
39924- static atomic_t device_indexes = ATOMIC_INIT(0);
39925+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39926
39927- return atomic_inc_return(&device_indexes) - 1;
39928+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39929 }
39930 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39931
39932diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39933index c84eecb..4d7381d 100644
39934--- a/drivers/edac/edac_mc_sysfs.c
39935+++ b/drivers/edac/edac_mc_sysfs.c
39936@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39937 struct dev_ch_attribute {
39938 struct device_attribute attr;
39939 int channel;
39940-};
39941+} __do_const;
39942
39943 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39944 static struct dev_ch_attribute dev_attr_legacy_##_name = \
39945@@ -1009,15 +1009,17 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39946 }
39947
39948 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39949+ pax_open_kernel();
39950 if (mci->get_sdram_scrub_rate) {
39951- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39952- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39953+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39954+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39955 }
39956
39957 if (mci->set_sdram_scrub_rate) {
39958- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39959- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39960+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39961+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39962 }
39963+ pax_close_kernel();
39964
39965 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
39966 if (err) {
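
The edac_mc_sysfs hunk combines the two constification tools: __do_const asks the gcc constify plugin to treat every instance of the attribute wrapper as read-only data, and the surviving legitimate writes to dev_attr_sdram_scrub_rate are rewritten as member-address casts inside a pax_open_kernel() window. A userspace approximation of the cast style; the object stays writable here so the program runs, whereas in the kernel the write window provides that:

    #include <stdio.h>

    struct attr { unsigned short mode; void (*show)(void); };

    static struct attr dev_attr_scrub;            /* read-only in-kernel */
    static void show_scrub(void) { puts("scrub rate"); }

    int main(void)
    {
        const struct attr *ro = &dev_attr_scrub;  /* the constified view */

        /* ro->mode |= 0444;  would not compile through a const view */
        *(unsigned short *)&ro->mode |= 0444;     /* cast, as the patch does */
        *(void **)&ro->show = (void *)show_scrub; /* POSIX-style fn-ptr cast */

        printf("mode = %o\n", dev_attr_scrub.mode);
        dev_attr_scrub.show();
        return 0;
    }
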
39967diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39968index 2cf44b4d..6dd2dc7 100644
39969--- a/drivers/edac/edac_pci.c
39970+++ b/drivers/edac/edac_pci.c
39971@@ -29,7 +29,7 @@
39972
39973 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39974 static LIST_HEAD(edac_pci_list);
39975-static atomic_t pci_indexes = ATOMIC_INIT(0);
39976+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39977
39978 /*
39979 * edac_pci_alloc_ctl_info
39980@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39981 */
39982 int edac_pci_alloc_index(void)
39983 {
39984- return atomic_inc_return(&pci_indexes) - 1;
39985+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39986 }
39987 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39988
39989diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39990index 24d877f..4e30133 100644
39991--- a/drivers/edac/edac_pci_sysfs.c
39992+++ b/drivers/edac/edac_pci_sysfs.c
39993@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39994 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39995 static int edac_pci_poll_msec = 1000; /* one second workq period */
39996
39997-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39998-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39999+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40000+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40001
40002 static struct kobject *edac_pci_top_main_kobj;
40003 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40004@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40005 void *value;
40006 ssize_t(*show) (void *, char *);
40007 ssize_t(*store) (void *, const char *, size_t);
40008-};
40009+} __do_const;
40010
40011 /* Set of show/store abstract level functions for PCI Parity object */
40012 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40013@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40014 edac_printk(KERN_CRIT, EDAC_PCI,
40015 "Signaled System Error on %s\n",
40016 pci_name(dev));
40017- atomic_inc(&pci_nonparity_count);
40018+ atomic_inc_unchecked(&pci_nonparity_count);
40019 }
40020
40021 if (status & (PCI_STATUS_PARITY)) {
40022@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40023 "Master Data Parity Error on %s\n",
40024 pci_name(dev));
40025
40026- atomic_inc(&pci_parity_count);
40027+ atomic_inc_unchecked(&pci_parity_count);
40028 }
40029
40030 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40031@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40032 "Detected Parity Error on %s\n",
40033 pci_name(dev));
40034
40035- atomic_inc(&pci_parity_count);
40036+ atomic_inc_unchecked(&pci_parity_count);
40037 }
40038 }
40039
40040@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40041 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40042 "Signaled System Error on %s\n",
40043 pci_name(dev));
40044- atomic_inc(&pci_nonparity_count);
40045+ atomic_inc_unchecked(&pci_nonparity_count);
40046 }
40047
40048 if (status & (PCI_STATUS_PARITY)) {
40049@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40050 "Master Data Parity Error on "
40051 "%s\n", pci_name(dev));
40052
40053- atomic_inc(&pci_parity_count);
40054+ atomic_inc_unchecked(&pci_parity_count);
40055 }
40056
40057 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40058@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40059 "Detected Parity Error on %s\n",
40060 pci_name(dev));
40061
40062- atomic_inc(&pci_parity_count);
40063+ atomic_inc_unchecked(&pci_parity_count);
40064 }
40065 }
40066 }
40067@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40068 if (!check_pci_errors)
40069 return;
40070
40071- before_count = atomic_read(&pci_parity_count);
40072+ before_count = atomic_read_unchecked(&pci_parity_count);
40073
40074 /* scan all PCI devices looking for a Parity Error on devices and
40075 * bridges.
40076@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40077 /* Only if operator has selected panic on PCI Error */
40078 if (edac_pci_get_panic_on_pe()) {
40079 /* If the count is different 'after' from 'before' */
40080- if (before_count != atomic_read(&pci_parity_count))
40081+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40082 panic("EDAC: PCI Parity Error");
40083 }
40084 }
40085diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40086index c2359a1..8bd119d 100644
40087--- a/drivers/edac/mce_amd.h
40088+++ b/drivers/edac/mce_amd.h
40089@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40090 bool (*mc0_mce)(u16, u8);
40091 bool (*mc1_mce)(u16, u8);
40092 bool (*mc2_mce)(u16, u8);
40093-};
40094+} __no_const;
40095
40096 void amd_report_gart_errors(bool);
40097 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
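
__no_const is the escape hatch paired with __do_const: the constify plugin would otherwise make any ops structure of function pointers read-only, and amd_decoder_ops is filled in at runtime according to CPU family, so it must stay writable. The fw_card_driver_no_const typedef in the firewire hunks below serves the same purpose for a local copy. Illustrated with the attribute reduced to a no-op macro, which is all a stock compiler sees:

    #include <stdio.h>

    #define __no_const /* plugin attribute; no effect for a stock compiler */

    struct decoder_ops { int (*mc0_mce)(int); } __no_const; /* runtime-assigned */

    static int mc0_family15(int x) { return x + 1; }

    static struct decoder_ops ops;    /* stays writable thanks to __no_const */

    int main(void)
    {
        ops.mc0_mce = mc0_family15;   /* per-CPU selection at runtime */
        printf("%d\n", ops.mc0_mce(1));
        return 0;
    }
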
40098diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40099index 57ea7f4..af06b76 100644
40100--- a/drivers/firewire/core-card.c
40101+++ b/drivers/firewire/core-card.c
40102@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40103 const struct fw_card_driver *driver,
40104 struct device *device)
40105 {
40106- static atomic_t index = ATOMIC_INIT(-1);
40107+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40108
40109- card->index = atomic_inc_return(&index);
40110+ card->index = atomic_inc_return_unchecked(&index);
40111 card->driver = driver;
40112 card->device = device;
40113 card->current_tlabel = 0;
40114@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40115
40116 void fw_core_remove_card(struct fw_card *card)
40117 {
40118- struct fw_card_driver dummy_driver = dummy_driver_template;
40119+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40120
40121 card->driver->update_phy_reg(card, 4,
40122 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40123diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40124index f9e3aee..269dbdb 100644
40125--- a/drivers/firewire/core-device.c
40126+++ b/drivers/firewire/core-device.c
40127@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40128 struct config_rom_attribute {
40129 struct device_attribute attr;
40130 u32 key;
40131-};
40132+} __do_const;
40133
40134 static ssize_t show_immediate(struct device *dev,
40135 struct device_attribute *dattr, char *buf)
40136diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40137index d6a09b9..18e90dd 100644
40138--- a/drivers/firewire/core-transaction.c
40139+++ b/drivers/firewire/core-transaction.c
40140@@ -38,6 +38,7 @@
40141 #include <linux/timer.h>
40142 #include <linux/types.h>
40143 #include <linux/workqueue.h>
40144+#include <linux/sched.h>
40145
40146 #include <asm/byteorder.h>
40147
40148diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40149index e1480ff6..1a429bd 100644
40150--- a/drivers/firewire/core.h
40151+++ b/drivers/firewire/core.h
40152@@ -111,6 +111,7 @@ struct fw_card_driver {
40153
40154 int (*stop_iso)(struct fw_iso_context *ctx);
40155 };
40156+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40157
40158 void fw_card_initialize(struct fw_card *card,
40159 const struct fw_card_driver *driver, struct device *device);
40160diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40161index f51d376..b118e40 100644
40162--- a/drivers/firewire/ohci.c
40163+++ b/drivers/firewire/ohci.c
40164@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40165 be32_to_cpu(ohci->next_header));
40166 }
40167
40168+#ifndef CONFIG_GRKERNSEC
40169 if (param_remote_dma) {
40170 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40171 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40172 }
40173+#endif
40174
40175 spin_unlock_irq(&ohci->lock);
40176
40177@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40178 unsigned long flags;
40179 int n, ret = 0;
40180
40181+#ifndef CONFIG_GRKERNSEC
40182 if (param_remote_dma)
40183 return 0;
40184+#endif
40185
40186 /*
40187 * FIXME: Make sure this bitmask is cleared when we clear the busReset
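
The two ohci.c hunks are policy rather than annotation: FireWire physical ("remote") DMA lets an attached device read and write arbitrary host memory, a classic DMA attack vector, so grsecurity compiles the capability out entirely rather than leaving it behind a module parameter. The build-time pattern as a runnable toy; compile with and without -DCONFIG_GRKERNSEC:

    #include <stdio.h>

    static int param_remote_dma = 1;   /* stand-in for the ohci module param */

    int main(void)
    {
    #ifndef CONFIG_GRKERNSEC
        if (param_remote_dma)
            puts("physical DMA filters opened");  /* reachable upstream   */
    #else
        (void)param_remote_dma;
        puts("remote DMA compiled out");          /* grsecurity behaviour */
    #endif
        return 0;
    }
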
40188diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40189index 94a58a0..f5eba42 100644
40190--- a/drivers/firmware/dmi-id.c
40191+++ b/drivers/firmware/dmi-id.c
40192@@ -16,7 +16,7 @@
40193 struct dmi_device_attribute{
40194 struct device_attribute dev_attr;
40195 int field;
40196-};
40197+} __do_const;
40198 #define to_dmi_dev_attr(_dev_attr) \
40199 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40200
40201diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40202index 2eebd28b..4261350 100644
40203--- a/drivers/firmware/dmi_scan.c
40204+++ b/drivers/firmware/dmi_scan.c
40205@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40206 if (buf == NULL)
40207 return -1;
40208
40209- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40210+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40211
40212 dmi_unmap(buf);
40213 return 0;
40214diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40215index 4fd9961..52d60ce 100644
40216--- a/drivers/firmware/efi/cper.c
40217+++ b/drivers/firmware/efi/cper.c
40218@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40219 */
40220 u64 cper_next_record_id(void)
40221 {
40222- static atomic64_t seq;
40223+ static atomic64_unchecked_t seq;
40224
40225- if (!atomic64_read(&seq))
40226- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40227+ if (!atomic64_read_unchecked(&seq))
40228+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40229
40230- return atomic64_inc_return(&seq);
40231+ return atomic64_inc_return_unchecked(&seq);
40232 }
40233 EXPORT_SYMBOL_GPL(cper_next_record_id);
40234
40235diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40236index 3061bb8..92b5fcc 100644
40237--- a/drivers/firmware/efi/efi.c
40238+++ b/drivers/firmware/efi/efi.c
40239@@ -160,14 +160,16 @@ static struct attribute_group efi_subsys_attr_group = {
40240 };
40241
40242 static struct efivars generic_efivars;
40243-static struct efivar_operations generic_ops;
40244+static efivar_operations_no_const generic_ops __read_only;
40245
40246 static int generic_ops_register(void)
40247 {
40248- generic_ops.get_variable = efi.get_variable;
40249- generic_ops.set_variable = efi.set_variable;
40250- generic_ops.get_next_variable = efi.get_next_variable;
40251- generic_ops.query_variable_store = efi_query_variable_store;
40252+ pax_open_kernel();
40253+ *(void **)&generic_ops.get_variable = efi.get_variable;
40254+ *(void **)&generic_ops.set_variable = efi.set_variable;
40255+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40256+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40257+ pax_close_kernel();
40258
40259 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40260 }
40261diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40262index 7b2e049..a253334 100644
40263--- a/drivers/firmware/efi/efivars.c
40264+++ b/drivers/firmware/efi/efivars.c
40265@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40266 static int
40267 create_efivars_bin_attributes(void)
40268 {
40269- struct bin_attribute *attr;
40270+ bin_attribute_no_const *attr;
40271 int error;
40272
40273 /* new_var */
40274diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40275index 87b8e3b..c4afb35 100644
40276--- a/drivers/firmware/efi/runtime-map.c
40277+++ b/drivers/firmware/efi/runtime-map.c
40278@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40279 kfree(entry);
40280 }
40281
40282-static struct kobj_type __refdata map_ktype = {
40283+static const struct kobj_type __refconst map_ktype = {
40284 .sysfs_ops = &map_attr_ops,
40285 .default_attrs = def_attrs,
40286 .release = map_release,
40287diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40288index f1ab05e..ab51228 100644
40289--- a/drivers/firmware/google/gsmi.c
40290+++ b/drivers/firmware/google/gsmi.c
40291@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40292 return local_hash_64(input, 32);
40293 }
40294
40295-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40296+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40297 {
40298 .ident = "Google Board",
40299 .matches = {
40300diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40301index 2f569aa..26e4f39 100644
40302--- a/drivers/firmware/google/memconsole.c
40303+++ b/drivers/firmware/google/memconsole.c
40304@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40305 return false;
40306 }
40307
40308-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40309+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40310 {
40311 .ident = "Google Board",
40312 .matches = {
40313@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40314 if (!found_memconsole())
40315 return -ENODEV;
40316
40317- memconsole_bin_attr.size = memconsole_length;
40318+ pax_open_kernel();
40319+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40320+ pax_close_kernel();
40321+
40322 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40323 }
40324
40325diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40326index cc016c61..d35279e 100644
40327--- a/drivers/firmware/memmap.c
40328+++ b/drivers/firmware/memmap.c
40329@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40330 kfree(entry);
40331 }
40332
40333-static struct kobj_type __refdata memmap_ktype = {
40334+static const struct kobj_type __refconst memmap_ktype = {
40335 .release = release_firmware_map_entry,
40336 .sysfs_ops = &memmap_attr_ops,
40337 .default_attrs = def_attrs,
40338diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40339index 3cfcfc6..09d6f117 100644
40340--- a/drivers/gpio/gpio-em.c
40341+++ b/drivers/gpio/gpio-em.c
40342@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40343 struct em_gio_priv *p;
40344 struct resource *io[2], *irq[2];
40345 struct gpio_chip *gpio_chip;
40346- struct irq_chip *irq_chip;
40347+ irq_chip_no_const *irq_chip;
40348 const char *name = dev_name(&pdev->dev);
40349 int ret;
40350
40351diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40352index 7818cd1..1be40e5 100644
40353--- a/drivers/gpio/gpio-ich.c
40354+++ b/drivers/gpio/gpio-ich.c
40355@@ -94,7 +94,7 @@ struct ichx_desc {
40356 * this option allows driver caching written output values
40357 */
40358 bool use_outlvl_cache;
40359-};
40360+} __do_const;
40361
40362 static struct {
40363 spinlock_t lock;
40364diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40365index f476ae2..05e1bdd 100644
40366--- a/drivers/gpio/gpio-omap.c
40367+++ b/drivers/gpio/gpio-omap.c
40368@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40369 const struct omap_gpio_platform_data *pdata;
40370 struct resource *res;
40371 struct gpio_bank *bank;
40372- struct irq_chip *irqc;
40373+ irq_chip_no_const *irqc;
40374 int ret;
40375
40376 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40377diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40378index c49522e..9a7ee54 100644
40379--- a/drivers/gpio/gpio-rcar.c
40380+++ b/drivers/gpio/gpio-rcar.c
40381@@ -348,7 +348,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40382 struct gpio_rcar_priv *p;
40383 struct resource *io, *irq;
40384 struct gpio_chip *gpio_chip;
40385- struct irq_chip *irq_chip;
40386+ irq_chip_no_const *irq_chip;
40387 struct device *dev = &pdev->dev;
40388 const char *name = dev_name(dev);
40389 int ret;
40390diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40391index c1caa45..f0f97d2 100644
40392--- a/drivers/gpio/gpio-vr41xx.c
40393+++ b/drivers/gpio/gpio-vr41xx.c
40394@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40395 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40396 maskl, pendl, maskh, pendh);
40397
40398- atomic_inc(&irq_err_count);
40399+ atomic_inc_unchecked(&irq_err_count);
40400
40401 return -EINVAL;
40402 }
40403diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40404index 1ca9295..9f3d481 100644
40405--- a/drivers/gpio/gpiolib.c
40406+++ b/drivers/gpio/gpiolib.c
40407@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40408 }
40409
40410 if (gpiochip->irqchip) {
40411- gpiochip->irqchip->irq_request_resources = NULL;
40412- gpiochip->irqchip->irq_release_resources = NULL;
40413+ pax_open_kernel();
40414+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40415+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40416+ pax_close_kernel();
40417 gpiochip->irqchip = NULL;
40418 }
40419 }
40420@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40421 gpiochip->irqchip = NULL;
40422 return -EINVAL;
40423 }
40424- irqchip->irq_request_resources = gpiochip_irq_reqres;
40425- irqchip->irq_release_resources = gpiochip_irq_relres;
40426+
40427+ pax_open_kernel();
40428+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40429+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40430+ pax_close_kernel();
40431
40432 /*
40433 * Prepare the mapping since the irqchip shall be orthogonal to
40434diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40435index 488f51d..301d462 100644
40436--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40437+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40438@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40439 enum cache_policy alternate_policy,
40440 void __user *alternate_aperture_base,
40441 uint64_t alternate_aperture_size);
40442-};
40443+} __no_const;
40444
40445 /**
40446 * struct device_queue_manager
40447diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40448index 5940531..a75b0e5 100644
40449--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40450+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40451@@ -62,7 +62,7 @@ struct kernel_queue_ops {
40452
40453 void (*submit_packet)(struct kernel_queue *kq);
40454 void (*rollback_packet)(struct kernel_queue *kq);
40455-};
40456+} __no_const;
40457
40458 struct kernel_queue {
40459 struct kernel_queue_ops ops;
40460diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40461index b6f076b..2918de2 100644
40462--- a/drivers/gpu/drm/drm_crtc.c
40463+++ b/drivers/gpu/drm/drm_crtc.c
40464@@ -4118,7 +4118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40465 goto done;
40466 }
40467
40468- if (copy_to_user(&enum_ptr[copied].name,
40469+ if (copy_to_user(enum_ptr[copied].name,
40470 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40471 ret = -EFAULT;
40472 goto done;
40473diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40474index d512134..046f258 100644
40475--- a/drivers/gpu/drm/drm_drv.c
40476+++ b/drivers/gpu/drm/drm_drv.c
40477@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40478
40479 drm_device_set_unplugged(dev);
40480
40481- if (dev->open_count == 0) {
40482+ if (local_read(&dev->open_count) == 0) {
40483 drm_put_dev(dev);
40484 }
40485 mutex_unlock(&drm_global_mutex);
40486diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40487index 076dd60..e4a4ba7 100644
40488--- a/drivers/gpu/drm/drm_fops.c
40489+++ b/drivers/gpu/drm/drm_fops.c
40490@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40491 return PTR_ERR(minor);
40492
40493 dev = minor->dev;
40494- if (!dev->open_count++)
40495+ if (local_inc_return(&dev->open_count) == 1)
40496 need_setup = 1;
40497
40498 /* share address_space across all char-devs of a single device */
40499@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40500 return 0;
40501
40502 err_undo:
40503- dev->open_count--;
40504+ local_dec(&dev->open_count);
40505 drm_minor_release(minor);
40506 return retcode;
40507 }
40508@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40509
40510 mutex_lock(&drm_global_mutex);
40511
40512- DRM_DEBUG("open_count = %d\n", dev->open_count);
40513+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40514
40515 mutex_lock(&dev->struct_mutex);
40516 list_del(&file_priv->lhead);
40517@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40518 * Begin inline drm_release
40519 */
40520
40521- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40522+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40523 task_pid_nr(current),
40524 (long)old_encode_dev(file_priv->minor->kdev->devt),
40525- dev->open_count);
40526+ local_read(&dev->open_count));
40527
40528 /* Release any auth tokens that might point to this file_priv,
40529 (do that under the drm_global_mutex) */
40530@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40531 * End inline drm_release
40532 */
40533
40534- if (!--dev->open_count) {
40535+ if (local_dec_and_test(&dev->open_count)) {
40536 retcode = drm_lastclose(dev);
40537 if (drm_device_is_unplugged(dev))
40538 drm_put_dev(dev);
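
Across drm_fops.c (and the i915, imx and nouveau switcheroo hunks below) dev->open_count changes from a plain int to a local_t manipulated with local_inc_return()/local_dec_and_test(), so the first-open/last-close bookkeeping no longer silently depends on every caller agreeing on a lock, and the DRM_DEBUG format strings widen to %ld to match. A rough C11-atomics model of that logic; local_t is per-CPU flavoured in the kernel, so this is shape, not equivalence:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long open_count;   /* stands in for dev->open_count */

    static void drm_open_model(void)
    {
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
            puts("first open: run setup");
    }

    static void drm_release_model(void)
    {
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
            puts("last close: run lastclose/teardown");
    }

    int main(void)
    {
        drm_open_model();
        drm_open_model();
        drm_release_model();
        drm_release_model();
        return 0;
    }
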
40539diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40540index 3d2e91c..d31c4c9 100644
40541--- a/drivers/gpu/drm/drm_global.c
40542+++ b/drivers/gpu/drm/drm_global.c
40543@@ -36,7 +36,7 @@
40544 struct drm_global_item {
40545 struct mutex mutex;
40546 void *object;
40547- int refcount;
40548+ atomic_t refcount;
40549 };
40550
40551 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40552@@ -49,7 +49,7 @@ void drm_global_init(void)
40553 struct drm_global_item *item = &glob[i];
40554 mutex_init(&item->mutex);
40555 item->object = NULL;
40556- item->refcount = 0;
40557+ atomic_set(&item->refcount, 0);
40558 }
40559 }
40560
40561@@ -59,7 +59,7 @@ void drm_global_release(void)
40562 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40563 struct drm_global_item *item = &glob[i];
40564 BUG_ON(item->object != NULL);
40565- BUG_ON(item->refcount != 0);
40566+ BUG_ON(atomic_read(&item->refcount) != 0);
40567 }
40568 }
40569
40570@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40571 struct drm_global_item *item = &glob[ref->global_type];
40572
40573 mutex_lock(&item->mutex);
40574- if (item->refcount == 0) {
40575+ if (atomic_read(&item->refcount) == 0) {
40576 item->object = kzalloc(ref->size, GFP_KERNEL);
40577 if (unlikely(item->object == NULL)) {
40578 ret = -ENOMEM;
40579@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40580 goto out_err;
40581
40582 }
40583- ++item->refcount;
40584+ atomic_inc(&item->refcount);
40585 ref->object = item->object;
40586 mutex_unlock(&item->mutex);
40587 return 0;
40588@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40589 struct drm_global_item *item = &glob[ref->global_type];
40590
40591 mutex_lock(&item->mutex);
40592- BUG_ON(item->refcount == 0);
40593+ BUG_ON(atomic_read(&item->refcount) == 0);
40594 BUG_ON(ref->object != item->object);
40595- if (--item->refcount == 0) {
40596+ if (atomic_dec_and_test(&item->refcount)) {
40597 ref->release(ref);
40598 item->object = NULL;
40599 }
40600diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40601index f1b32f9..394f791 100644
40602--- a/drivers/gpu/drm/drm_info.c
40603+++ b/drivers/gpu/drm/drm_info.c
40604@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40605 struct drm_local_map *map;
40606 struct drm_map_list *r_list;
40607
40608- /* Hardcoded from _DRM_FRAME_BUFFER,
40609- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40610- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40611- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40612+ static const char * const types[] = {
40613+ [_DRM_FRAME_BUFFER] = "FB",
40614+ [_DRM_REGISTERS] = "REG",
40615+ [_DRM_SHM] = "SHM",
40616+ [_DRM_AGP] = "AGP",
40617+ [_DRM_SCATTER_GATHER] = "SG",
40618+ [_DRM_CONSISTENT] = "PCI"};
40619 const char *type;
40620 int i;
40621
40622@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40623 map = r_list->map;
40624 if (!map)
40625 continue;
40626- if (map->type < 0 || map->type > 5)
40627+ if (map->type >= ARRAY_SIZE(types))
40628 type = "??";
40629 else
40630 type = types[map->type];
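
Beyond constifying the table, the drm_info.c hunk fixes two latent problems: the string table was kept in sync with the _DRM_* enum only by a comment, and the old "type < 0 || type > 5" test hardcodes the size (and is half dead once the type is unsigned). Designated initializers plus an ARRAY_SIZE() bound solve both, as in this reduced version:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum map_type { FRAME_BUFFER, REGISTERS, SHM, AGP, SG, CONSISTENT };

    /* Designated indexes keep the table tied to the enum values. */
    static const char * const types[] = {
        [FRAME_BUFFER] = "FB",  [REGISTERS]  = "REG",
        [SHM]          = "SHM", [AGP]        = "AGP",
        [SG]           = "SG",  [CONSISTENT] = "PCI",
    };

    static const char *type_name(unsigned int t)
    {
        return t < ARRAY_SIZE(types) ? types[t] : "??";
    }

    int main(void)
    {
        printf("%s %s\n", type_name(AGP), type_name(42));
        return 0;
    }
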
40631diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40632index 2f4c4343..dd12cd2 100644
40633--- a/drivers/gpu/drm/drm_ioc32.c
40634+++ b/drivers/gpu/drm/drm_ioc32.c
40635@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40636 request = compat_alloc_user_space(nbytes);
40637 if (!access_ok(VERIFY_WRITE, request, nbytes))
40638 return -EFAULT;
40639- list = (struct drm_buf_desc *) (request + 1);
40640+ list = (struct drm_buf_desc __user *) (request + 1);
40641
40642 if (__put_user(count, &request->count)
40643 || __put_user(list, &request->list))
40644@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40645 request = compat_alloc_user_space(nbytes);
40646 if (!access_ok(VERIFY_WRITE, request, nbytes))
40647 return -EFAULT;
40648- list = (struct drm_buf_pub *) (request + 1);
40649+ list = (struct drm_buf_pub __user *) (request + 1);
40650
40651 if (__put_user(count, &request->count)
40652 || __put_user(list, &request->list))
40653@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40654 return 0;
40655 }
40656
40657-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40658+drm_ioctl_compat_t drm_compat_ioctls[] = {
40659 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40660 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40661 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40662@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40663 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40664 {
40665 unsigned int nr = DRM_IOCTL_NR(cmd);
40666- drm_ioctl_compat_t *fn;
40667 int ret;
40668
40669 /* Assume that ioctls without an explicit compat routine will just
40670@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40671 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40672 return drm_ioctl(filp, cmd, arg);
40673
40674- fn = drm_compat_ioctls[nr];
40675-
40676- if (fn != NULL)
40677- ret = (*fn) (filp, cmd, arg);
40678+ if (drm_compat_ioctls[nr] != NULL)
40679+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40680 else
40681 ret = drm_ioctl(filp, cmd, arg);
40682
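
The drm_ioc32.c change, repeated for i915, mga and r128 below, drops a level of indirection: drm_ioctl_compat_t evidently becomes a function-pointer type in its own right, so the dispatch table is a flat array the constify plugin can lock down, and the writable temporary fn disappears from the common path. A standalone model of that table shape; the typedef and names here are illustrative:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Pointer typedef: the table holds handlers directly and can be
     * const-qualified wholesale. */
    typedef long (*drm_ioctl_compat_t)(unsigned int cmd, unsigned long arg);

    static long compat_version(unsigned int cmd, unsigned long arg)
    {
        (void)cmd; (void)arg;
        return 0;
    }

    static const drm_ioctl_compat_t compat_ioctls[] = {
        [0] = compat_version,
    };

    static long dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
    {
        if (nr < ARRAY_SIZE(compat_ioctls) && compat_ioctls[nr] != NULL)
            return compat_ioctls[nr](cmd, arg);  /* direct call, no temp */
        return -25;                              /* -ENOTTY-ish fallback */
    }

    int main(void)
    {
        printf("%ld %ld\n", dispatch(0, 0, 0), dispatch(7, 0, 0));
        return 0;
    }
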
40683diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40684index 3785d66..1c489ef 100644
40685--- a/drivers/gpu/drm/drm_ioctl.c
40686+++ b/drivers/gpu/drm/drm_ioctl.c
40687@@ -655,7 +655,7 @@ long drm_ioctl(struct file *filp,
40688 struct drm_file *file_priv = filp->private_data;
40689 struct drm_device *dev;
40690 const struct drm_ioctl_desc *ioctl = NULL;
40691- drm_ioctl_t *func;
40692+ drm_ioctl_no_const_t func;
40693 unsigned int nr = DRM_IOCTL_NR(cmd);
40694 int retcode = -EINVAL;
40695 char stack_kdata[128];
40696diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40697index d4813e0..6c1ab4d 100644
40698--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40699+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40700@@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
40701 u32 pipeconf_reg = PIPEACONF;
40702 u32 dspcntr_reg = DSPACNTR;
40703
40704- u32 pipeconf = dev_priv->pipeconf[pipe];
40705- u32 dspcntr = dev_priv->dspcntr[pipe];
40706+ u32 pipeconf;
40707+ u32 dspcntr;
40708 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
40709
40710+ if (pipe == -1)
40711+ return;
40712+
40713+ pipeconf = dev_priv->pipeconf[pipe];
40714+ dspcntr = dev_priv->dspcntr[pipe];
40715+
40716 if (pipe) {
40717 pipeconf_reg = PIPECCONF;
40718 dspcntr_reg = DSPCCNTR;
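
The mdfld_dsi_dpi.c hunk is a plain bug fix carried by the patch: pipe can legitimately be -1, and the old code used it to index dev_priv->pipeconf[] and dspcntr[] before any check. Hoisting an early return above the loads closes the out-of-bounds read, as in this miniature:

    #include <stdio.h>

    static const unsigned int pipeconf[3] = { 0x10, 0x20, 0x30 };

    /* The fix in miniature: reject pipe == -1 before it indexes anything. */
    static int mode_set(int pipe, unsigned int *out)
    {
        if (pipe == -1)
            return -1;             /* early return added by the patch */
        *out = pipeconf[pipe];     /* only reached with a valid index */
        return 0;
    }

    int main(void)
    {
        unsigned int v = 0;
        printf("pipe -1 -> %d\n", mode_set(-1, &v));
        if (mode_set(1, &v) == 0)
            printf("pipe  1 -> %#x\n", v);
        return 0;
    }
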
40719diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40720index 93ec5dc..82acbaf 100644
40721--- a/drivers/gpu/drm/i810/i810_drv.h
40722+++ b/drivers/gpu/drm/i810/i810_drv.h
40723@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40724 int page_flipping;
40725
40726 wait_queue_head_t irq_queue;
40727- atomic_t irq_received;
40728- atomic_t irq_emitted;
40729+ atomic_unchecked_t irq_received;
40730+ atomic_unchecked_t irq_emitted;
40731
40732 int front_offset;
40733 } drm_i810_private_t;
40734diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40735index 1a46787..0ff2ff4 100644
40736--- a/drivers/gpu/drm/i915/i915_dma.c
40737+++ b/drivers/gpu/drm/i915/i915_dma.c
40738@@ -362,7 +362,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40739 * locking inversion with the driver load path. And the access here is
40740 * completely racy anyway. So don't bother with locking for now.
40741 */
40742- return dev->open_count == 0;
40743+ return local_read(&dev->open_count) == 0;
40744 }
40745
40746 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40747diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40748index 38a7425..5322b16 100644
40749--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40750+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40751@@ -872,12 +872,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40752 static int
40753 validate_exec_list(struct drm_device *dev,
40754 struct drm_i915_gem_exec_object2 *exec,
40755- int count)
40756+ unsigned int count)
40757 {
40758 unsigned relocs_total = 0;
40759 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40760 unsigned invalid_flags;
40761- int i;
40762+ unsigned int i;
40763
40764 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40765 if (USES_FULL_PPGTT(dev))
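
validate_exec_list() iterates over a count that originates in a user-supplied 32-bit field; as a signed int, a value above INT_MAX arrives negative, "i < count" is immediately false, and validation is silently skipped for every buffer. Making count and the induction variable unsigned removes that failure mode, which this toy makes visible:

    #include <stdio.h>

    /* A 32-bit count copied from userspace, viewed signed vs unsigned. */
    static unsigned long run_loop(long long raw_count, int use_unsigned)
    {
        unsigned long iterations = 0;

        if (use_unsigned) {
            for (unsigned int i = 0; i < (unsigned int)raw_count && i < 8; i++)
                iterations++;      /* per-object validation would go here */
        } else {
            for (int i = 0; i < (int)raw_count && i < 8; i++)
                iterations++;
        }
        return iterations;
    }

    int main(void)
    {
        long long evil = 0x80000000LL;                            /* > INT_MAX */
        printf("signed:   %lu iterations\n", run_loop(evil, 0));  /* 0 */
        printf("unsigned: %lu iterations\n", run_loop(evil, 1));  /* 8 */
        return 0;
    }
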
40766diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40767index 176de63..1ef9ac7 100644
40768--- a/drivers/gpu/drm/i915/i915_ioc32.c
40769+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40770@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40771 (unsigned long)request);
40772 }
40773
40774-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40775+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40776 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40777 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40778 [DRM_I915_GETPARAM] = compat_i915_getparam,
40779@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40780 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40781 {
40782 unsigned int nr = DRM_IOCTL_NR(cmd);
40783- drm_ioctl_compat_t *fn = NULL;
40784 int ret;
40785
40786 if (nr < DRM_COMMAND_BASE)
40787 return drm_compat_ioctl(filp, cmd, arg);
40788
40789- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40790- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40791-
40792- if (fn != NULL)
40793+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40794+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40795 ret = (*fn) (filp, cmd, arg);
40796- else
40797+ } else
40798 ret = drm_ioctl(filp, cmd, arg);
40799
40800 return ret;
40801diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40802index f75173c..f283e45 100644
40803--- a/drivers/gpu/drm/i915/intel_display.c
40804+++ b/drivers/gpu/drm/i915/intel_display.c
40805@@ -13056,13 +13056,13 @@ struct intel_quirk {
40806 int subsystem_vendor;
40807 int subsystem_device;
40808 void (*hook)(struct drm_device *dev);
40809-};
40810+} __do_const;
40811
40812 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40813 struct intel_dmi_quirk {
40814 void (*hook)(struct drm_device *dev);
40815 const struct dmi_system_id (*dmi_id_list)[];
40816-};
40817+} __do_const;
40818
40819 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40820 {
40821@@ -13070,18 +13070,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40822 return 1;
40823 }
40824
40825-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40826+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40827 {
40828- .dmi_id_list = &(const struct dmi_system_id[]) {
40829- {
40830- .callback = intel_dmi_reverse_brightness,
40831- .ident = "NCR Corporation",
40832- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40833- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40834- },
40835- },
40836- { } /* terminating entry */
40837+ .callback = intel_dmi_reverse_brightness,
40838+ .ident = "NCR Corporation",
40839+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40840+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40841 },
40842+ },
40843+ { } /* terminating entry */
40844+};
40845+
40846+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40847+ {
40848+ .dmi_id_list = &intel_dmi_quirks_table,
40849 .hook = quirk_invert_brightness,
40850 },
40851 };
40852diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40853index a002f53..0d60514 100644
40854--- a/drivers/gpu/drm/imx/imx-drm-core.c
40855+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40856@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40857 if (imxdrm->pipes >= MAX_CRTC)
40858 return -EINVAL;
40859
40860- if (imxdrm->drm->open_count)
40861+ if (local_read(&imxdrm->drm->open_count))
40862 return -EBUSY;
40863
40864 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40865diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40866index b4a20149..219ab78 100644
40867--- a/drivers/gpu/drm/mga/mga_drv.h
40868+++ b/drivers/gpu/drm/mga/mga_drv.h
40869@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40870 u32 clear_cmd;
40871 u32 maccess;
40872
40873- atomic_t vbl_received; /**< Number of vblanks received. */
40874+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40875 wait_queue_head_t fence_queue;
40876- atomic_t last_fence_retired;
40877+ atomic_unchecked_t last_fence_retired;
40878 u32 next_fence_to_post;
40879
40880 unsigned int fb_cpp;
40881diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40882index 729bfd5..ead8823 100644
40883--- a/drivers/gpu/drm/mga/mga_ioc32.c
40884+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40885@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40886 return 0;
40887 }
40888
40889-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40890+drm_ioctl_compat_t mga_compat_ioctls[] = {
40891 [DRM_MGA_INIT] = compat_mga_init,
40892 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40893 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40894@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40895 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40896 {
40897 unsigned int nr = DRM_IOCTL_NR(cmd);
40898- drm_ioctl_compat_t *fn = NULL;
40899 int ret;
40900
40901 if (nr < DRM_COMMAND_BASE)
40902 return drm_compat_ioctl(filp, cmd, arg);
40903
40904- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40905- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40906-
40907- if (fn != NULL)
40908+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40909+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40910 ret = (*fn) (filp, cmd, arg);
40911- else
40912+ } else
40913 ret = drm_ioctl(filp, cmd, arg);
40914
40915 return ret;
40916diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40917index 1b071b8..de8601a 100644
40918--- a/drivers/gpu/drm/mga/mga_irq.c
40919+++ b/drivers/gpu/drm/mga/mga_irq.c
40920@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40921 if (crtc != 0)
40922 return 0;
40923
40924- return atomic_read(&dev_priv->vbl_received);
40925+ return atomic_read_unchecked(&dev_priv->vbl_received);
40926 }
40927
40928
40929@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40930 /* VBLANK interrupt */
40931 if (status & MGA_VLINEPEN) {
40932 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40933- atomic_inc(&dev_priv->vbl_received);
40934+ atomic_inc_unchecked(&dev_priv->vbl_received);
40935 drm_handle_vblank(dev, 0);
40936 handled = 1;
40937 }
40938@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40939 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40940 MGA_WRITE(MGA_PRIMEND, prim_end);
40941
40942- atomic_inc(&dev_priv->last_fence_retired);
40943+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40944 wake_up(&dev_priv->fence_queue);
40945 handled = 1;
40946 }
40947@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40948 * using fences.
40949 */
40950 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40951- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40952+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40953 - *sequence) <= (1 << 23)));
40954
40955 *sequence = cur_fence;
40956diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40957index 0190b69..60c3eaf 100644
40958--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40959+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40960@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40961 struct bit_table {
40962 const char id;
40963 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40964-};
40965+} __no_const;
40966
40967 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40968
40969diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40970index fc68f09..0511d71 100644
40971--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40972+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40973@@ -121,7 +121,6 @@ struct nouveau_drm {
40974 struct drm_global_reference mem_global_ref;
40975 struct ttm_bo_global_ref bo_global_ref;
40976 struct ttm_bo_device bdev;
40977- atomic_t validate_sequence;
40978 int (*move)(struct nouveau_channel *,
40979 struct ttm_buffer_object *,
40980 struct ttm_mem_reg *, struct ttm_mem_reg *);
40981diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40982index 462679a..88e32a7 100644
40983--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40984+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40985@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40986 unsigned long arg)
40987 {
40988 unsigned int nr = DRM_IOCTL_NR(cmd);
40989- drm_ioctl_compat_t *fn = NULL;
40990+ drm_ioctl_compat_t fn = NULL;
40991 int ret;
40992
40993 if (nr < DRM_COMMAND_BASE)
40994diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40995index 273e501..3b6c0a2 100644
40996--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40997+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40998@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40999 }
41000
41001 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41002- nouveau_vram_manager_init,
41003- nouveau_vram_manager_fini,
41004- nouveau_vram_manager_new,
41005- nouveau_vram_manager_del,
41006- nouveau_vram_manager_debug
41007+ .init = nouveau_vram_manager_init,
41008+ .takedown = nouveau_vram_manager_fini,
41009+ .get_node = nouveau_vram_manager_new,
41010+ .put_node = nouveau_vram_manager_del,
41011+ .debug = nouveau_vram_manager_debug
41012 };
41013
41014 static int
41015@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41016 }
41017
41018 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41019- nouveau_gart_manager_init,
41020- nouveau_gart_manager_fini,
41021- nouveau_gart_manager_new,
41022- nouveau_gart_manager_del,
41023- nouveau_gart_manager_debug
41024+ .init = nouveau_gart_manager_init,
41025+ .takedown = nouveau_gart_manager_fini,
41026+ .get_node = nouveau_gart_manager_new,
41027+ .put_node = nouveau_gart_manager_del,
41028+ .debug = nouveau_gart_manager_debug
41029 };
41030
41031 /*XXX*/
41032@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41033 }
41034
41035 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41036- nv04_gart_manager_init,
41037- nv04_gart_manager_fini,
41038- nv04_gart_manager_new,
41039- nv04_gart_manager_del,
41040- nv04_gart_manager_debug
41041+ .init = nv04_gart_manager_init,
41042+ .takedown = nv04_gart_manager_fini,
41043+ .get_node = nv04_gart_manager_new,
41044+ .put_node = nv04_gart_manager_del,
41045+ .debug = nv04_gart_manager_debug
41046 };
41047
41048 int
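
The three nouveau TTM manager tables are rewritten from positional to designated initializers. Behaviour is identical; the named form survives reordering or growth of ttm_mem_type_manager_func, documents which callback fills which slot, and is friendlier to the constification checks used elsewhere in this patch. The same idea standalone, with illustrative names:

    #include <stdio.h>

    struct manager_func {
        int  (*init)(void);
        void (*takedown)(void);
        void (*debug)(const char *prefix);
    };

    static int  vram_init(void)                { puts("init"); return 0; }
    static void vram_takedown(void)            { puts("fini"); }
    static void vram_debug(const char *prefix) { printf("%s: ok\n", prefix); }

    /* Designated form: each callback lands on the named slot even if the
     * struct gains or reorders members; unset slots default to NULL. */
    static const struct manager_func vram_manager = {
        .init     = vram_init,
        .takedown = vram_takedown,
        .debug    = vram_debug,
    };

    int main(void)
    {
        vram_manager.init();
        vram_manager.debug("vram");
        vram_manager.takedown();
        return 0;
    }
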
41049diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41050index c7592ec..dd45ebc 100644
41051--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41052+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41053@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41054 * locking inversion with the driver load path. And the access here is
41055 * completely racy anyway. So don't bother with locking for now.
41056 */
41057- return dev->open_count == 0;
41058+ return local_read(&dev->open_count) == 0;
41059 }
41060
41061 static const struct vga_switcheroo_client_ops
41062diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41063index 9782364..89bd954 100644
41064--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41065+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41066@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41067 int ret;
41068
41069 mutex_lock(&qdev->async_io_mutex);
41070- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41071+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41072 if (qdev->last_sent_io_cmd > irq_num) {
41073 if (intr)
41074 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41075- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41076+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41077 else
41078 ret = wait_event_timeout(qdev->io_cmd_event,
41079- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41080+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41081 /* 0 is timeout, just bail the "hw" has gone away */
41082 if (ret <= 0)
41083 goto out;
41084- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41085+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41086 }
41087 outb(val, addr);
41088 qdev->last_sent_io_cmd = irq_num + 1;
41089 if (intr)
41090 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41091- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41092+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41093 else
41094 ret = wait_event_timeout(qdev->io_cmd_event,
41095- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41096+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41097 out:
41098 if (ret > 0)
41099 ret = 0;
41100diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41101index 6911b8c..89d6867 100644
41102--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41103+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41104@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41105 struct drm_info_node *node = (struct drm_info_node *) m->private;
41106 struct qxl_device *qdev = node->minor->dev->dev_private;
41107
41108- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41109- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41110- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41111- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41112+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41113+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41114+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41115+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41116 seq_printf(m, "%d\n", qdev->irq_received_error);
41117 return 0;
41118 }
41119diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41120index 7c6cafe..460f542 100644
41121--- a/drivers/gpu/drm/qxl/qxl_drv.h
41122+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41123@@ -290,10 +290,10 @@ struct qxl_device {
41124 unsigned int last_sent_io_cmd;
41125
41126 /* interrupt handling */
41127- atomic_t irq_received;
41128- atomic_t irq_received_display;
41129- atomic_t irq_received_cursor;
41130- atomic_t irq_received_io_cmd;
41131+ atomic_unchecked_t irq_received;
41132+ atomic_unchecked_t irq_received_display;
41133+ atomic_unchecked_t irq_received_cursor;
41134+ atomic_unchecked_t irq_received_io_cmd;
41135 unsigned irq_received_error;
41136 wait_queue_head_t display_event;
41137 wait_queue_head_t cursor_event;
41138diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41139index b110883..dd06418 100644
41140--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41141+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41142@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41143
41144 /* TODO copy slow path code from i915 */
41145 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41146- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41147+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41148
41149 {
41150 struct qxl_drawable *draw = fb_cmd;
41151@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41152 struct drm_qxl_reloc reloc;
41153
41154 if (copy_from_user(&reloc,
41155- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41156+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41157 sizeof(reloc))) {
41158 ret = -EFAULT;
41159 goto out_free_bos;
41160@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41161
41162 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41163
41164- struct drm_qxl_command *commands =
41165- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41166+ struct drm_qxl_command __user *commands =
41167+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41168
41169- if (copy_from_user(&user_cmd, &commands[cmd_num],
41170+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41171 sizeof(user_cmd)))
41172 return -EFAULT;
41173
41174diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41175index 0bf1e20..42a7310 100644
41176--- a/drivers/gpu/drm/qxl/qxl_irq.c
41177+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41178@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41179 if (!pending)
41180 return IRQ_NONE;
41181
41182- atomic_inc(&qdev->irq_received);
41183+ atomic_inc_unchecked(&qdev->irq_received);
41184
41185 if (pending & QXL_INTERRUPT_DISPLAY) {
41186- atomic_inc(&qdev->irq_received_display);
41187+ atomic_inc_unchecked(&qdev->irq_received_display);
41188 wake_up_all(&qdev->display_event);
41189 qxl_queue_garbage_collect(qdev, false);
41190 }
41191 if (pending & QXL_INTERRUPT_CURSOR) {
41192- atomic_inc(&qdev->irq_received_cursor);
41193+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41194 wake_up_all(&qdev->cursor_event);
41195 }
41196 if (pending & QXL_INTERRUPT_IO_CMD) {
41197- atomic_inc(&qdev->irq_received_io_cmd);
41198+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41199 wake_up_all(&qdev->io_cmd_event);
41200 }
41201 if (pending & QXL_INTERRUPT_ERROR) {
41202@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41203 init_waitqueue_head(&qdev->io_cmd_event);
41204 INIT_WORK(&qdev->client_monitors_config_work,
41205 qxl_client_monitors_config_work_func);
41206- atomic_set(&qdev->irq_received, 0);
41207- atomic_set(&qdev->irq_received_display, 0);
41208- atomic_set(&qdev->irq_received_cursor, 0);
41209- atomic_set(&qdev->irq_received_io_cmd, 0);
41210+ atomic_set_unchecked(&qdev->irq_received, 0);
41211+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41212+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41213+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41214 qdev->irq_received_error = 0;
41215 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41216 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41217diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41218index 0cbc4c9..0e46686 100644
41219--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41220+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41221@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41222 }
41223 }
41224
41225-static struct vm_operations_struct qxl_ttm_vm_ops;
41226+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41227 static const struct vm_operations_struct *ttm_vm_ops;
41228
41229 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41230@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41231 return r;
41232 if (unlikely(ttm_vm_ops == NULL)) {
41233 ttm_vm_ops = vma->vm_ops;
41234+ pax_open_kernel();
41235 qxl_ttm_vm_ops = *ttm_vm_ops;
41236 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41237+ pax_close_kernel();
41238 }
41239 vma->vm_ops = &qxl_ttm_vm_ops;
41240 return 0;
41241@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41242 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41243 {
41244 #if defined(CONFIG_DEBUG_FS)
41245- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41246- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41247- unsigned i;
41248+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41249+ {
41250+ .name = "qxl_mem_mm",
41251+ .show = &qxl_mm_dump_table,
41252+ },
41253+ {
41254+ .name = "qxl_surf_mm",
41255+ .show = &qxl_mm_dump_table,
41256+ }
41257+ };
41258
41259- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41260- if (i == 0)
41261- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41262- else
41263- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41264- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41265- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41266- qxl_mem_types_list[i].driver_features = 0;
41267- if (i == 0)
41268- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41269- else
41270- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41271+ pax_open_kernel();
41272+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41273+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41274+ pax_close_kernel();
41275
41276- }
41277- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41278+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41279 #else
41280 return 0;
41281 #endif
41282diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41283index 2c45ac9..5d740f8 100644
41284--- a/drivers/gpu/drm/r128/r128_cce.c
41285+++ b/drivers/gpu/drm/r128/r128_cce.c
41286@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41287
41288 /* GH: Simple idle check.
41289 */
41290- atomic_set(&dev_priv->idle_count, 0);
41291+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41292
41293 /* We don't support anything other than bus-mastering ring mode,
41294 * but the ring can be in either AGP or PCI space for the ring
41295diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41296index 723e5d6..102dbaf 100644
41297--- a/drivers/gpu/drm/r128/r128_drv.h
41298+++ b/drivers/gpu/drm/r128/r128_drv.h
41299@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41300 int is_pci;
41301 unsigned long cce_buffers_offset;
41302
41303- atomic_t idle_count;
41304+ atomic_unchecked_t idle_count;
41305
41306 int page_flipping;
41307 int current_page;
41308 u32 crtc_offset;
41309 u32 crtc_offset_cntl;
41310
41311- atomic_t vbl_received;
41312+ atomic_unchecked_t vbl_received;
41313
41314 u32 color_fmt;
41315 unsigned int front_offset;
41316diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41317index 663f38c..c689495 100644
41318--- a/drivers/gpu/drm/r128/r128_ioc32.c
41319+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41320@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41321 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41322 }
41323
41324-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41325+drm_ioctl_compat_t r128_compat_ioctls[] = {
41326 [DRM_R128_INIT] = compat_r128_init,
41327 [DRM_R128_DEPTH] = compat_r128_depth,
41328 [DRM_R128_STIPPLE] = compat_r128_stipple,
41329@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41330 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41331 {
41332 unsigned int nr = DRM_IOCTL_NR(cmd);
41333- drm_ioctl_compat_t *fn = NULL;
41334 int ret;
41335
41336 if (nr < DRM_COMMAND_BASE)
41337 return drm_compat_ioctl(filp, cmd, arg);
41338
41339- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41340- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41341-
41342- if (fn != NULL)
41343+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41344+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41345 ret = (*fn) (filp, cmd, arg);
41346- else
41347+ } else
41348 ret = drm_ioctl(filp, cmd, arg);
41349
41350 return ret;
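This hunk only compiles together with a companion change outside this excerpt, which presumably turns the drm_ioctl_compat_t typedef from a plain function type into a const-qualified pointer-to-function type. The table then becomes an array of immutable pointers that can be write-protected, and the dispatcher must initialize its local fn at the point of definition, since a const pointer cannot be assigned afterwards; that is exactly why the older "fn = NULL; ... fn = table[nr];" shape had to go. The presumed companion typedef and the resulting table, as a sketch:

/* Presumed typedef change (in drmP.h, not shown in this excerpt): */
typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
					 unsigned long arg);

/* Each element is now an immutable pointer; the table can live in rodata. */
drm_ioctl_compat_t r128_compat_ioctls[] = {
	[DRM_R128_INIT] = compat_r128_init,
	/* ... */
};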
41351diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41352index c2ae496..30b5993 100644
41353--- a/drivers/gpu/drm/r128/r128_irq.c
41354+++ b/drivers/gpu/drm/r128/r128_irq.c
41355@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41356 if (crtc != 0)
41357 return 0;
41358
41359- return atomic_read(&dev_priv->vbl_received);
41360+ return atomic_read_unchecked(&dev_priv->vbl_received);
41361 }
41362
41363 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41364@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41365 /* VBLANK interrupt */
41366 if (status & R128_CRTC_VBLANK_INT) {
41367 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41368- atomic_inc(&dev_priv->vbl_received);
41369+ atomic_inc_unchecked(&dev_priv->vbl_received);
41370 drm_handle_vblank(dev, 0);
41371 return IRQ_HANDLED;
41372 }
41373diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41374index 8fd2d9f..18c9660 100644
41375--- a/drivers/gpu/drm/r128/r128_state.c
41376+++ b/drivers/gpu/drm/r128/r128_state.c
41377@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41378
41379 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41380 {
41381- if (atomic_read(&dev_priv->idle_count) == 0)
41382+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41383 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41384 else
41385- atomic_set(&dev_priv->idle_count, 0);
41386+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41387 }
41388
41389 #endif
41390diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41391index b928c17..e5d9400 100644
41392--- a/drivers/gpu/drm/radeon/mkregtable.c
41393+++ b/drivers/gpu/drm/radeon/mkregtable.c
41394@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41395 regex_t mask_rex;
41396 regmatch_t match[4];
41397 char buf[1024];
41398- size_t end;
41399+ long end;
41400 int len;
41401 int done = 0;
41402 int r;
41403 unsigned o;
41404 struct offset *offset;
41405 char last_reg_s[10];
41406- int last_reg;
41407+ unsigned long last_reg;
41408
41409 if (regcomp
41410 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41411diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41412index bd7519f..e1c2cd95 100644
41413--- a/drivers/gpu/drm/radeon/radeon_device.c
41414+++ b/drivers/gpu/drm/radeon/radeon_device.c
41415@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41416 * locking inversion with the driver load path. And the access here is
41417 * completely racy anyway. So don't bother with locking for now.
41418 */
41419- return dev->open_count == 0;
41420+ return local_read(&dev->open_count) == 0;
41421 }
41422
41423 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
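Reads of open_count now go through local_read(), which implies the field was converted from a plain int to a local_t in a hunk outside this excerpt. local_t gives read-modify-write updates that are safe against interrupts on the updating CPU without a fully bus-locked atomic, and local_read() is then the required accessor. A minimal illustrative sketch of the accessor family:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);	/* illustrative stand-in for dev->open_count */

static void on_open(void)  { local_inc(&open_count); }
static void on_close(void) { local_dec(&open_count); }
static int  is_idle(void)  { return local_read(&open_count) == 0; }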
41424diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41425index 46bd393..6ae4719 100644
41426--- a/drivers/gpu/drm/radeon/radeon_drv.h
41427+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41428@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41429
41430 /* SW interrupt */
41431 wait_queue_head_t swi_queue;
41432- atomic_t swi_emitted;
41433+ atomic_unchecked_t swi_emitted;
41434 int vblank_crtc;
41435 uint32_t irq_enable_reg;
41436 uint32_t r500_disp_irq_reg;
41437diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41438index 0b98ea1..0881827 100644
41439--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41440+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41441@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41442 request = compat_alloc_user_space(sizeof(*request));
41443 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41444 || __put_user(req32.param, &request->param)
41445- || __put_user((void __user *)(unsigned long)req32.value,
41446+ || __put_user((unsigned long)req32.value,
41447 &request->value))
41448 return -EFAULT;
41449
41450@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41451 #define compat_radeon_cp_setparam NULL
41452 #endif /* X86_64 || IA64 */
41453
41454-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41455+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41456 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41457 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41458 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41459@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41460 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41461 {
41462 unsigned int nr = DRM_IOCTL_NR(cmd);
41463- drm_ioctl_compat_t *fn = NULL;
41464 int ret;
41465
41466 if (nr < DRM_COMMAND_BASE)
41467 return drm_compat_ioctl(filp, cmd, arg);
41468
41469- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41470- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41471-
41472- if (fn != NULL)
41473+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41474+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41475 ret = (*fn) (filp, cmd, arg);
41476- else
41477+ } else
41478 ret = drm_ioctl(filp, cmd, arg);
41479
41480 return ret;
41481diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41482index 244b19b..c19226d 100644
41483--- a/drivers/gpu/drm/radeon/radeon_irq.c
41484+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41485@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41486 unsigned int ret;
41487 RING_LOCALS;
41488
41489- atomic_inc(&dev_priv->swi_emitted);
41490- ret = atomic_read(&dev_priv->swi_emitted);
41491+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41492+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41493
41494 BEGIN_RING(4);
41495 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41496@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41497 drm_radeon_private_t *dev_priv =
41498 (drm_radeon_private_t *) dev->dev_private;
41499
41500- atomic_set(&dev_priv->swi_emitted, 0);
41501+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41502 init_waitqueue_head(&dev_priv->swi_queue);
41503
41504 dev->max_vblank_count = 0x001fffff;
41505diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41506index 15aee72..cda326e 100644
41507--- a/drivers/gpu/drm/radeon/radeon_state.c
41508+++ b/drivers/gpu/drm/radeon/radeon_state.c
41509@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41510 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41511 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41512
41513- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41514+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41515 sarea_priv->nbox * sizeof(depth_boxes[0])))
41516 return -EFAULT;
41517
41518@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41519 {
41520 drm_radeon_private_t *dev_priv = dev->dev_private;
41521 drm_radeon_getparam_t *param = data;
41522- int value;
41523+ int value = 0;
41524
41525 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41526
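Two small hardenings in radeon_state.c. First, the nbox bound is re-tested in the same expression as the copy: sarea_priv lives in memory shared with user space, so the earlier clamp can be undone before the copy_from_user() sizes itself from it; the inline re-test rejects an oversized value instead of trusting the clamp (it narrows the race window rather than provably closing it, since nbox is still read more than once). Second, initializing value to 0 in radeon_cp_getparam() keeps the later copy-out from disclosing uninitialized stack bytes when no switch case sets it. A sketch of the fully closed variant of the first pattern, snapshotting the count once (my_sarea and MAX_BOXES are illustrative):

static int fetch_boxes(struct my_sarea *sarea, struct drm_clip_rect *boxes,
		       const void __user *uboxes)
{
	u32 nbox = READ_ONCE(sarea->nbox);	/* snapshot the shared count once */

	if (nbox > MAX_BOXES ||
	    copy_from_user(boxes, uboxes, nbox * sizeof(*boxes)))
		return -EFAULT;
	return nbox;
}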
41527diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41528index b292aca..4e338b5 100644
41529--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41530+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41531@@ -963,7 +963,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41532 man->size = size >> PAGE_SHIFT;
41533 }
41534
41535-static struct vm_operations_struct radeon_ttm_vm_ops;
41536+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41537 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41538
41539 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41540@@ -1004,8 +1004,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41541 }
41542 if (unlikely(ttm_vm_ops == NULL)) {
41543 ttm_vm_ops = vma->vm_ops;
41544+ pax_open_kernel();
41545 radeon_ttm_vm_ops = *ttm_vm_ops;
41546 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41547+ pax_close_kernel();
41548 }
41549 vma->vm_ops = &radeon_ttm_vm_ops;
41550 return 0;
41551diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41552index 1a52522..8e78043 100644
41553--- a/drivers/gpu/drm/tegra/dc.c
41554+++ b/drivers/gpu/drm/tegra/dc.c
41555@@ -1585,7 +1585,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41556 }
41557
41558 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41559- dc->debugfs_files[i].data = dc;
41560+ *(void **)&dc->debugfs_files[i].data = dc;
41561
41562 err = drm_debugfs_create_files(dc->debugfs_files,
41563 ARRAY_SIZE(debugfs_files),
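dc->debugfs_files is a kmemdup()'d, heap-resident copy of the template table, so once the constify plugin makes struct drm_info_list effectively const, only the type is const: the memory itself stays writable. Casting through *(void **)& is then enough to patch the one runtime field, with no pax_open_kernel() pair; contrast the qxl/radeon vm_ops case earlier, where the object really sits in read-only memory. The shape of it, as a sketch:

struct drm_info_list *copy = kmemdup(template, sizeof(template), GFP_KERNEL);

/* type-level const only: strip it for the one field built at runtime */
*(void **)&copy[i].data = dc;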
41564diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41565index ed970f6..4eeea42 100644
41566--- a/drivers/gpu/drm/tegra/dsi.c
41567+++ b/drivers/gpu/drm/tegra/dsi.c
41568@@ -62,7 +62,7 @@ struct tegra_dsi {
41569 struct clk *clk_lp;
41570 struct clk *clk;
41571
41572- struct drm_info_list *debugfs_files;
41573+ drm_info_list_no_const *debugfs_files;
41574 struct drm_minor *minor;
41575 struct dentry *debugfs;
41576
41577diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41578index 7eaaee74..cc2bc04 100644
41579--- a/drivers/gpu/drm/tegra/hdmi.c
41580+++ b/drivers/gpu/drm/tegra/hdmi.c
41581@@ -64,7 +64,7 @@ struct tegra_hdmi {
41582 bool stereo;
41583 bool dvi;
41584
41585- struct drm_info_list *debugfs_files;
41586+ drm_info_list_no_const *debugfs_files;
41587 struct drm_minor *minor;
41588 struct dentry *debugfs;
41589 };
41590diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41591index aa0bd054..aea6a01 100644
41592--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41593+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41594@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41595 }
41596
41597 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41598- ttm_bo_man_init,
41599- ttm_bo_man_takedown,
41600- ttm_bo_man_get_node,
41601- ttm_bo_man_put_node,
41602- ttm_bo_man_debug
41603+ .init = ttm_bo_man_init,
41604+ .takedown = ttm_bo_man_takedown,
41605+ .get_node = ttm_bo_man_get_node,
41606+ .put_node = ttm_bo_man_put_node,
41607+ .debug = ttm_bo_man_debug
41608 };
41609 EXPORT_SYMBOL(ttm_bo_manager_func);
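Positional initializers assume the declared field order, and grsecurity's structure-layout randomization shuffles structs that consist entirely of function pointers, ttm_mem_type_manager_func included. Designated initializers bind by name and survive any layout, which is why this hunk (and the vmwgfx_gmrid one further down) rewrites the ops tables. A self-contained illustration:

struct my_ops {			/* all function pointers: eligible for layout randomization */
	int  (*init)(void *priv);
	void (*fini)(void *priv);
};

static int  my_init(void *priv) { return 0; }
static void my_fini(void *priv) { }

/* Fragile: positions pair with the wrong fields once the layout moves. */
static const struct my_ops ops_positional = { my_init, my_fini };

/* Robust: names bind regardless of the final field order. */
static const struct my_ops ops_designated = {
	.init = my_init,
	.fini = my_fini,
};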
41610diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41611index a1803fb..c53f6b0 100644
41612--- a/drivers/gpu/drm/ttm/ttm_memory.c
41613+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41614@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41615 zone->glob = glob;
41616 glob->zone_kernel = zone;
41617 ret = kobject_init_and_add(
41618- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41619+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41620 if (unlikely(ret != 0)) {
41621 kobject_put(&zone->kobj);
41622 return ret;
41623@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41624 zone->glob = glob;
41625 glob->zone_dma32 = zone;
41626 ret = kobject_init_and_add(
41627- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41628+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41629 if (unlikely(ret != 0)) {
41630 kobject_put(&zone->kobj);
41631 return ret;
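kobject_init_and_add() takes a printf-style format, and zone->name, while only "kernel" or "dma32" today, is still a variable: passed directly, any '%' sequence it ever came to contain would be interpreted. Routing it through "%s" makes it pure data, the classic format-string (CWE-134) fix. The same contrast in miniature:

pr_info(zone->name);		/* unsafe: the name is parsed as a format string */
pr_info("%s", zone->name);	/* safe: the name is printed verbatim */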
41632diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41633index 025c429..314062f 100644
41634--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41635+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41636@@ -54,7 +54,7 @@
41637
41638 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41639 #define SMALL_ALLOCATION 16
41640-#define FREE_ALL_PAGES (~0U)
41641+#define FREE_ALL_PAGES (~0UL)
41642 /* times are in msecs */
41643 #define PAGE_FREE_INTERVAL 1000
41644
41645@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41646 * @free_all: If set to true will free all pages in pool
41647 * @use_static: Safe to use static buffer
41648 **/
41649-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41650+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41651 bool use_static)
41652 {
41653 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41654 unsigned long irq_flags;
41655 struct page *p;
41656 struct page **pages_to_free;
41657- unsigned freed_pages = 0,
41658- npages_to_free = nr_free;
41659+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41660
41661 if (NUM_PAGES_TO_ALLOC < nr_free)
41662 npages_to_free = NUM_PAGES_TO_ALLOC;
41663@@ -371,7 +370,8 @@ restart:
41664 __list_del(&p->lru, &pool->list);
41665
41666 ttm_pool_update_free_locked(pool, freed_pages);
41667- nr_free -= freed_pages;
41668+ if (likely(nr_free != FREE_ALL_PAGES))
41669+ nr_free -= freed_pages;
41670 }
41671
41672 spin_unlock_irqrestore(&pool->lock, irq_flags);
41673@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41674 unsigned i;
41675 unsigned pool_offset;
41676 struct ttm_page_pool *pool;
41677- int shrink_pages = sc->nr_to_scan;
41678+ unsigned long shrink_pages = sc->nr_to_scan;
41679 unsigned long freed = 0;
41680
41681 if (!mutex_trylock(&lock))
41682@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41683 pool_offset = ++start_pool % NUM_POOLS;
41684 /* select start pool in round robin fashion */
41685 for (i = 0; i < NUM_POOLS; ++i) {
41686- unsigned nr_free = shrink_pages;
41687+ unsigned long nr_free = shrink_pages;
41688 if (shrink_pages == 0)
41689 break;
41690 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41691@@ -673,7 +673,7 @@ out:
41692 }
41693
41694 /* Put all pages in pages list to correct pool to wait for reuse */
41695-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41696+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41697 enum ttm_caching_state cstate)
41698 {
41699 unsigned long irq_flags;
41700@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41701 struct list_head plist;
41702 struct page *p = NULL;
41703 gfp_t gfp_flags = GFP_USER;
41704- unsigned count;
41705+ unsigned long count;
41706 int r;
41707
41708 /* set zero flag for page allocation if required */
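Three coordinated changes make the "free everything" sentinel sound on 64-bit: FREE_ALL_PAGES widens from ~0U to ~0UL, every count that can hold it (nr_free, npages_to_free, shrink_pages, and friends) widens to unsigned long, and the loop no longer decrements the sentinel itself; otherwise, after the first freed batch, the request would silently degrade from "all" to a large finite budget. A compact sketch of the guard:

#define FREE_ALL_PAGES (~0UL)

static unsigned long account_freed(unsigned long nr_free, unsigned long freed)
{
	/* Leave the sentinel intact; only real budgets shrink. */
	if (nr_free != FREE_ALL_PAGES)
		nr_free -= freed;
	return nr_free;
}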
41709diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41710index 01e1d27..aaa018a 100644
41711--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41712+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41713@@ -56,7 +56,7 @@
41714
41715 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41716 #define SMALL_ALLOCATION 4
41717-#define FREE_ALL_PAGES (~0U)
41718+#define FREE_ALL_PAGES (~0UL)
41719 /* times are in msecs */
41720 #define IS_UNDEFINED (0)
41721 #define IS_WC (1<<1)
41722@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41723 * @nr_free: If set to true will free all pages in pool
41724 * @use_static: Safe to use static buffer
41725 **/
41726-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41727+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41728 bool use_static)
41729 {
41730 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41731@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41732 struct dma_page *dma_p, *tmp;
41733 struct page **pages_to_free;
41734 struct list_head d_pages;
41735- unsigned freed_pages = 0,
41736- npages_to_free = nr_free;
41737+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41738
41739 if (NUM_PAGES_TO_ALLOC < nr_free)
41740 npages_to_free = NUM_PAGES_TO_ALLOC;
41741@@ -499,7 +498,8 @@ restart:
41742 /* remove range of pages from the pool */
41743 if (freed_pages) {
41744 ttm_pool_update_free_locked(pool, freed_pages);
41745- nr_free -= freed_pages;
41746+ if (likely(nr_free != FREE_ALL_PAGES))
41747+ nr_free -= freed_pages;
41748 }
41749
41750 spin_unlock_irqrestore(&pool->lock, irq_flags);
41751@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41752 struct dma_page *d_page, *next;
41753 enum pool_type type;
41754 bool is_cached = false;
41755- unsigned count = 0, i, npages = 0;
41756+ unsigned long count = 0, i, npages = 0;
41757 unsigned long irq_flags;
41758
41759 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41760@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41761 static unsigned start_pool;
41762 unsigned idx = 0;
41763 unsigned pool_offset;
41764- unsigned shrink_pages = sc->nr_to_scan;
41765+ unsigned long shrink_pages = sc->nr_to_scan;
41766 struct device_pools *p;
41767 unsigned long freed = 0;
41768
41769@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41770 goto out;
41771 pool_offset = ++start_pool % _manager->npools;
41772 list_for_each_entry(p, &_manager->pools, pools) {
41773- unsigned nr_free;
41774+ unsigned long nr_free;
41775
41776 if (!p->dev)
41777 continue;
41778@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41779 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41780 freed += nr_free - shrink_pages;
41781
41782- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41783+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41784 p->pool->dev_name, p->pool->name, current->pid,
41785 nr_free, shrink_pages);
41786 }
41787diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41788index 5fc16ce..1bd84ec 100644
41789--- a/drivers/gpu/drm/udl/udl_fb.c
41790+++ b/drivers/gpu/drm/udl/udl_fb.c
41791@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41792 fb_deferred_io_cleanup(info);
41793 kfree(info->fbdefio);
41794 info->fbdefio = NULL;
41795- info->fbops->fb_mmap = udl_fb_mmap;
41796 }
41797
41798 pr_warn("released /dev/fb%d user=%d count=%d\n",
41799diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41800index ef8c500..01030c8 100644
41801--- a/drivers/gpu/drm/via/via_drv.h
41802+++ b/drivers/gpu/drm/via/via_drv.h
41803@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41804 typedef uint32_t maskarray_t[5];
41805
41806 typedef struct drm_via_irq {
41807- atomic_t irq_received;
41808+ atomic_unchecked_t irq_received;
41809 uint32_t pending_mask;
41810 uint32_t enable_mask;
41811 wait_queue_head_t irq_queue;
41812@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41813 struct timeval last_vblank;
41814 int last_vblank_valid;
41815 unsigned usec_per_vblank;
41816- atomic_t vbl_received;
41817+ atomic_unchecked_t vbl_received;
41818 drm_via_state_t hc_state;
41819 char pci_buf[VIA_PCI_BUF_SIZE];
41820 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41821diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41822index 1319433..a993b0c 100644
41823--- a/drivers/gpu/drm/via/via_irq.c
41824+++ b/drivers/gpu/drm/via/via_irq.c
41825@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41826 if (crtc != 0)
41827 return 0;
41828
41829- return atomic_read(&dev_priv->vbl_received);
41830+ return atomic_read_unchecked(&dev_priv->vbl_received);
41831 }
41832
41833 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41834@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41835
41836 status = VIA_READ(VIA_REG_INTERRUPT);
41837 if (status & VIA_IRQ_VBLANK_PENDING) {
41838- atomic_inc(&dev_priv->vbl_received);
41839- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41840+ atomic_inc_unchecked(&dev_priv->vbl_received);
41841+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41842 do_gettimeofday(&cur_vblank);
41843 if (dev_priv->last_vblank_valid) {
41844 dev_priv->usec_per_vblank =
41845@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41846 dev_priv->last_vblank = cur_vblank;
41847 dev_priv->last_vblank_valid = 1;
41848 }
41849- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41850+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41851 DRM_DEBUG("US per vblank is: %u\n",
41852 dev_priv->usec_per_vblank);
41853 }
41854@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41855
41856 for (i = 0; i < dev_priv->num_irqs; ++i) {
41857 if (status & cur_irq->pending_mask) {
41858- atomic_inc(&cur_irq->irq_received);
41859+ atomic_inc_unchecked(&cur_irq->irq_received);
41860 wake_up(&cur_irq->irq_queue);
41861 handled = 1;
41862 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41863@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41864 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41865 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41866 masks[irq][4]));
41867- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41868+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41869 } else {
41870 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41871 (((cur_irq_sequence =
41872- atomic_read(&cur_irq->irq_received)) -
41873+ atomic_read_unchecked(&cur_irq->irq_received)) -
41874 *sequence) <= (1 << 23)));
41875 }
41876 *sequence = cur_irq_sequence;
41877@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41878 }
41879
41880 for (i = 0; i < dev_priv->num_irqs; ++i) {
41881- atomic_set(&cur_irq->irq_received, 0);
41882+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41883 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41884 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41885 init_waitqueue_head(&cur_irq->irq_queue);
41886@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41887 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41888 case VIA_IRQ_RELATIVE:
41889 irqwait->request.sequence +=
41890- atomic_read(&cur_irq->irq_received);
41891+ atomic_read_unchecked(&cur_irq->irq_received);
41892 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41893 case VIA_IRQ_ABSOLUTE:
41894 break;
41895diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41896index d26a6da..5fa41ed 100644
41897--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41898+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41899@@ -447,7 +447,7 @@ struct vmw_private {
41900 * Fencing and IRQs.
41901 */
41902
41903- atomic_t marker_seq;
41904+ atomic_unchecked_t marker_seq;
41905 wait_queue_head_t fence_queue;
41906 wait_queue_head_t fifo_queue;
41907 spinlock_t waiter_lock;
41908diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41909index 39f2b03..d1b0a64 100644
41910--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41911+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41912@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41913 (unsigned int) min,
41914 (unsigned int) fifo->capabilities);
41915
41916- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41917+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41918 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41919 vmw_marker_queue_init(&fifo->marker_queue);
41920 return vmw_fifo_send_fence(dev_priv, &dummy);
41921@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41922 if (reserveable)
41923 iowrite32(bytes, fifo_mem +
41924 SVGA_FIFO_RESERVED);
41925- return fifo_mem + (next_cmd >> 2);
41926+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41927 } else {
41928 need_bounce = true;
41929 }
41930@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41931
41932 fm = vmw_fifo_reserve(dev_priv, bytes);
41933 if (unlikely(fm == NULL)) {
41934- *seqno = atomic_read(&dev_priv->marker_seq);
41935+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41936 ret = -ENOMEM;
41937 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41938 false, 3*HZ);
41939@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41940 }
41941
41942 do {
41943- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41944+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41945 } while (*seqno == 0);
41946
41947 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41948diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41949index 170b61b..fec7348 100644
41950--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41951+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41952@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41953 }
41954
41955 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41956- vmw_gmrid_man_init,
41957- vmw_gmrid_man_takedown,
41958- vmw_gmrid_man_get_node,
41959- vmw_gmrid_man_put_node,
41960- vmw_gmrid_man_debug
41961+ .init = vmw_gmrid_man_init,
41962+ .takedown = vmw_gmrid_man_takedown,
41963+ .get_node = vmw_gmrid_man_get_node,
41964+ .put_node = vmw_gmrid_man_put_node,
41965+ .debug = vmw_gmrid_man_debug
41966 };
41967diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41968index 69c8ce2..cacb0ab 100644
41969--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41970+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41971@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41972 int ret;
41973
41974 num_clips = arg->num_clips;
41975- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41976+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41977
41978 if (unlikely(num_clips == 0))
41979 return 0;
41980@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41981 int ret;
41982
41983 num_clips = arg->num_clips;
41984- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41985+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41986
41987 if (unlikely(num_clips == 0))
41988 return 0;
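clips_ptr arrives from user space as a 64-bit integer field, so the pointer reconstructed from it must carry the __user address-space qualifier; otherwise sparse (and the size_overflow instrumentation) would treat the later copy as a kernel-to-kernel access. The round trip in isolation, as a sketch:

struct drm_vmw_rect first;
u64 handle = arg->clips_ptr;		/* opaque integer from the ioctl struct */
struct drm_vmw_rect __user *clips =
	(struct drm_vmw_rect __user *)(unsigned long)handle;

if (copy_from_user(&first, clips, sizeof(first)))
	return -EFAULT;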
41989diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41990index 9fe9827..0aa2fc0 100644
41991--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41992+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41993@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41994 * emitted. Then the fence is stale and signaled.
41995 */
41996
41997- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41998+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41999 > VMW_FENCE_WRAP);
42000
42001 return ret;
42002@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42003
42004 if (fifo_idle)
42005 down_read(&fifo_state->rwsem);
42006- signal_seq = atomic_read(&dev_priv->marker_seq);
42007+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42008 ret = 0;
42009
42010 for (;;) {
42011diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42012index efd1ffd..0ae13ca 100644
42013--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42014+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42015@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42016 while (!vmw_lag_lt(queue, us)) {
42017 spin_lock(&queue->lock);
42018 if (list_empty(&queue->head))
42019- seqno = atomic_read(&dev_priv->marker_seq);
42020+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42021 else {
42022 marker = list_first_entry(&queue->head,
42023 struct vmw_marker, head);
42024diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42025index 37ac7b5..d52a5c9 100644
42026--- a/drivers/gpu/vga/vga_switcheroo.c
42027+++ b/drivers/gpu/vga/vga_switcheroo.c
42028@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42029
42030 /* this version is for the case where the power switch is separate
42031 to the device being powered down. */
42032-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42033+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42034 {
42035 /* copy over all the bus versions */
42036 if (dev->bus && dev->bus->pm) {
42037@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42038 return ret;
42039 }
42040
42041-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42042+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42043 {
42044 /* copy over all the bus versions */
42045 if (dev->bus && dev->bus->pm) {
42046diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42047index 56ce8c2..32ce524 100644
42048--- a/drivers/hid/hid-core.c
42049+++ b/drivers/hid/hid-core.c
42050@@ -2531,7 +2531,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42051
42052 int hid_add_device(struct hid_device *hdev)
42053 {
42054- static atomic_t id = ATOMIC_INIT(0);
42055+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42056 int ret;
42057
42058 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42059@@ -2574,7 +2574,7 @@ int hid_add_device(struct hid_device *hdev)
42060 /* XXX hack, any other cleaner solution after the driver core
42061 * is converted to allow more than 20 bytes as the device name? */
42062 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42063- hdev->vendor, hdev->product, atomic_inc_return(&id));
42064+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42065
42066 hid_debug_register(hdev, dev_name(&hdev->dev));
42067 ret = device_add(&hdev->dev);
42068diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42069index c13fb5b..55a3802 100644
42070--- a/drivers/hid/hid-wiimote-debug.c
42071+++ b/drivers/hid/hid-wiimote-debug.c
42072@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42073 else if (size == 0)
42074 return -EIO;
42075
42076- if (copy_to_user(u, buf, size))
42077+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42078 return -EFAULT;
42079
42080 *off += size;
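wiidebug_eeprom_read() fills a fixed on-stack buf and then copies size bytes out; size derives from the request, so bounding it against sizeof(buf) before copy_to_user() is what stops an oversized read from leaking adjacent kernel stack. The shape of the check:

if (size > sizeof(buf) || copy_to_user(u, buf, size))
	return -EFAULT;	/* reject rather than copy past buf */

This relies on buf being a true array in scope, so that sizeof yields the array size and not a pointer size.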
42081diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42082index 2978f5e..ac3a23c 100644
42083--- a/drivers/hv/channel.c
42084+++ b/drivers/hv/channel.c
42085@@ -367,7 +367,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42086 int ret = 0;
42087
42088 next_gpadl_handle =
42089- (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42090+ (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42091
42092 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42093 if (ret)
42094diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42095index 50e51a5..b0bfd78 100644
42096--- a/drivers/hv/hv.c
42097+++ b/drivers/hv/hv.c
42098@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42099 u64 output_address = (output) ? virt_to_phys(output) : 0;
42100 u32 output_address_hi = output_address >> 32;
42101 u32 output_address_lo = output_address & 0xFFFFFFFF;
42102- void *hypercall_page = hv_context.hypercall_page;
42103+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42104
42105 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42106 "=a"(hv_status_lo) : "d" (control_hi),
42107@@ -164,7 +164,7 @@ int hv_init(void)
42108 /* See if the hypercall page is already set */
42109 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42110
42111- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42112+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42113
42114 if (!virtaddr)
42115 goto cleanup;
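Two W^X-flavored fixes in hv.c. The hypercall page is only ever executed by the guest (the hypervisor populates it after the MSR write), so PAGE_KERNEL_RX drops the write permission that PAGE_KERNEL_EXEC needlessly carried. The ktva_ktla() wrapper is a PaX/KERNEXEC address-translation helper (a no-op without PaX) so the indirect call targets the correct mapping of the page. The mapping contrast:

/* writable and executable at once: an injection-friendly RWX mapping */
virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

/* read + execute only: the guest never writes the page after setup */
virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);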
42116diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42117index ff16938..e60879c 100644
42118--- a/drivers/hv/hv_balloon.c
42119+++ b/drivers/hv/hv_balloon.c
42120@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42121
42122 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42123 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42124-static atomic_t trans_id = ATOMIC_INIT(0);
42125+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42126
42127 static int dm_ring_size = (5 * PAGE_SIZE);
42128
42129@@ -947,7 +947,7 @@ static void hot_add_req(struct work_struct *dummy)
42130 pr_info("Memory hot add failed\n");
42131
42132 dm->state = DM_INITIALIZED;
42133- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42134+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42135 vmbus_sendpacket(dm->dev->channel, &resp,
42136 sizeof(struct dm_hot_add_response),
42137 (unsigned long)NULL,
42138@@ -1028,7 +1028,7 @@ static void post_status(struct hv_dynmem_device *dm)
42139 memset(&status, 0, sizeof(struct dm_status));
42140 status.hdr.type = DM_STATUS_REPORT;
42141 status.hdr.size = sizeof(struct dm_status);
42142- status.hdr.trans_id = atomic_inc_return(&trans_id);
42143+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42144
42145 /*
42146 * The host expects the guest to report free memory.
42147@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
42148 * send the status. This can happen if we were interrupted
42149 * after we picked our transaction ID.
42150 */
42151- if (status.hdr.trans_id != atomic_read(&trans_id))
42152+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42153 return;
42154
42155 /*
42156@@ -1188,7 +1188,7 @@ static void balloon_up(struct work_struct *dummy)
42157 */
42158
42159 do {
42160- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42161+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42162 ret = vmbus_sendpacket(dm_device.dev->channel,
42163 bl_resp,
42164 bl_resp->hdr.size,
42165@@ -1234,7 +1234,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42166
42167 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42168 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42169- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42170+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42171 resp.hdr.size = sizeof(struct dm_unballoon_response);
42172
42173 vmbus_sendpacket(dm_device.dev->channel, &resp,
42174@@ -1295,7 +1295,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42175 memset(&version_req, 0, sizeof(struct dm_version_request));
42176 version_req.hdr.type = DM_VERSION_REQUEST;
42177 version_req.hdr.size = sizeof(struct dm_version_request);
42178- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42179+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42180 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42181 version_req.is_last_attempt = 1;
42182
42183@@ -1468,7 +1468,7 @@ static int balloon_probe(struct hv_device *dev,
42184 memset(&version_req, 0, sizeof(struct dm_version_request));
42185 version_req.hdr.type = DM_VERSION_REQUEST;
42186 version_req.hdr.size = sizeof(struct dm_version_request);
42187- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42188+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42189 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42190 version_req.is_last_attempt = 0;
42191
42192@@ -1499,7 +1499,7 @@ static int balloon_probe(struct hv_device *dev,
42193 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42194 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42195 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42196- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42197+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42198
42199 cap_msg.caps.cap_bits.balloon = 1;
42200 cap_msg.caps.cap_bits.hot_add = 1;
42201diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42202index 44b1c94..6dccc2c 100644
42203--- a/drivers/hv/hyperv_vmbus.h
42204+++ b/drivers/hv/hyperv_vmbus.h
42205@@ -632,7 +632,7 @@ enum vmbus_connect_state {
42206 struct vmbus_connection {
42207 enum vmbus_connect_state conn_state;
42208
42209- atomic_t next_gpadl_handle;
42210+ atomic_unchecked_t next_gpadl_handle;
42211
42212 /*
42213 * Represents channel interrupts. Each bit position represents a
42214diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42215index f518b8d7..4bc0b64 100644
42216--- a/drivers/hv/vmbus_drv.c
42217+++ b/drivers/hv/vmbus_drv.c
42218@@ -840,10 +840,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42219 {
42220 int ret = 0;
42221
42222- static atomic_t device_num = ATOMIC_INIT(0);
42223+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42224
42225 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42226- atomic_inc_return(&device_num));
42227+ atomic_inc_return_unchecked(&device_num));
42228
42229 child_device_obj->device.bus = &hv_bus;
42230 child_device_obj->device.parent = &hv_acpi_dev->dev;
42231diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42232index 579bdf9..0dac21d5 100644
42233--- a/drivers/hwmon/acpi_power_meter.c
42234+++ b/drivers/hwmon/acpi_power_meter.c
42235@@ -116,7 +116,7 @@ struct sensor_template {
42236 struct device_attribute *devattr,
42237 const char *buf, size_t count);
42238 int index;
42239-};
42240+} __do_const;
42241
42242 /* Averaging interval */
42243 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42244@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42245 struct sensor_template *attrs)
42246 {
42247 struct device *dev = &resource->acpi_dev->dev;
42248- struct sensor_device_attribute *sensors =
42249+ sensor_device_attribute_no_const *sensors =
42250 &resource->sensors[resource->num_sensors];
42251 int res = 0;
42252
42253@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42254 return 0;
42255 }
42256
42257-static struct dmi_system_id __initdata pm_dmi_table[] = {
42258+static const struct dmi_system_id __initconst pm_dmi_table[] = {
42259 {
42260 enable_cap_knobs, "IBM Active Energy Manager",
42261 {
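The hwmon hunks that follow all serve the constify plugin: types consisting largely of function pointers are made implicitly const at compile time, __do_const forces that treatment for a struct the plugin would not pick up on its own, and the *_no_const typedefs (device_attribute_no_const, sensor_device_attribute_no_const, and so on) re-expose a mutable variant for the few objects genuinely built at runtime. The presumed shape of the machinery (illustrative; the real attribute names come from the PaX plugin headers):

struct sensor_template {
	const char *label;
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	int index;
} __do_const;			/* plugin: every instance is treated as const */

typedef struct sensor_template __no_const sensor_template_no_const;
				/* opt-out type for runtime-initialized copies */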
42262diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42263index 0af63da..05a183a 100644
42264--- a/drivers/hwmon/applesmc.c
42265+++ b/drivers/hwmon/applesmc.c
42266@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42267 {
42268 struct applesmc_node_group *grp;
42269 struct applesmc_dev_attr *node;
42270- struct attribute *attr;
42271+ attribute_no_const *attr;
42272 int ret, i;
42273
42274 for (grp = groups; grp->format; grp++) {
42275diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42276index cccef87..06ce8ec 100644
42277--- a/drivers/hwmon/asus_atk0110.c
42278+++ b/drivers/hwmon/asus_atk0110.c
42279@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42280 struct atk_sensor_data {
42281 struct list_head list;
42282 struct atk_data *data;
42283- struct device_attribute label_attr;
42284- struct device_attribute input_attr;
42285- struct device_attribute limit1_attr;
42286- struct device_attribute limit2_attr;
42287+ device_attribute_no_const label_attr;
42288+ device_attribute_no_const input_attr;
42289+ device_attribute_no_const limit1_attr;
42290+ device_attribute_no_const limit2_attr;
42291 char label_attr_name[ATTR_NAME_SIZE];
42292 char input_attr_name[ATTR_NAME_SIZE];
42293 char limit1_attr_name[ATTR_NAME_SIZE];
42294@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42295 static struct device_attribute atk_name_attr =
42296 __ATTR(name, 0444, atk_name_show, NULL);
42297
42298-static void atk_init_attribute(struct device_attribute *attr, char *name,
42299+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42300 sysfs_show_func show)
42301 {
42302 sysfs_attr_init(&attr->attr);
42303diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42304index 5b7fec8..05c957a 100644
42305--- a/drivers/hwmon/coretemp.c
42306+++ b/drivers/hwmon/coretemp.c
42307@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42308 return NOTIFY_OK;
42309 }
42310
42311-static struct notifier_block coretemp_cpu_notifier __refdata = {
42312+static struct notifier_block coretemp_cpu_notifier = {
42313 .notifier_call = coretemp_cpu_callback,
42314 };
42315
42316diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42317index 7a8a6fb..015c1fd 100644
42318--- a/drivers/hwmon/ibmaem.c
42319+++ b/drivers/hwmon/ibmaem.c
42320@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42321 struct aem_rw_sensor_template *rw)
42322 {
42323 struct device *dev = &data->pdev->dev;
42324- struct sensor_device_attribute *sensors = data->sensors;
42325+ sensor_device_attribute_no_const *sensors = data->sensors;
42326 int err;
42327
42328 /* Set up read-only sensors */
42329diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42330index 17ae2eb..21b71dd 100644
42331--- a/drivers/hwmon/iio_hwmon.c
42332+++ b/drivers/hwmon/iio_hwmon.c
42333@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42334 {
42335 struct device *dev = &pdev->dev;
42336 struct iio_hwmon_state *st;
42337- struct sensor_device_attribute *a;
42338+ sensor_device_attribute_no_const *a;
42339 int ret, i;
42340 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42341 enum iio_chan_type type;
42342diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42343index f3830db..9f4d6d5 100644
42344--- a/drivers/hwmon/nct6683.c
42345+++ b/drivers/hwmon/nct6683.c
42346@@ -397,11 +397,11 @@ static struct attribute_group *
42347 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42348 int repeat)
42349 {
42350- struct sensor_device_attribute_2 *a2;
42351- struct sensor_device_attribute *a;
42352+ sensor_device_attribute_2_no_const *a2;
42353+ sensor_device_attribute_no_const *a;
42354 struct sensor_device_template **t;
42355 struct sensor_device_attr_u *su;
42356- struct attribute_group *group;
42357+ attribute_group_no_const *group;
42358 struct attribute **attrs;
42359 int i, j, count;
42360
42361diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42362index 1be4117..88ae1e1 100644
42363--- a/drivers/hwmon/nct6775.c
42364+++ b/drivers/hwmon/nct6775.c
42365@@ -952,10 +952,10 @@ static struct attribute_group *
42366 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42367 int repeat)
42368 {
42369- struct attribute_group *group;
42370+ attribute_group_no_const *group;
42371 struct sensor_device_attr_u *su;
42372- struct sensor_device_attribute *a;
42373- struct sensor_device_attribute_2 *a2;
42374+ sensor_device_attribute_no_const *a;
42375+ sensor_device_attribute_2_no_const *a2;
42376 struct attribute **attrs;
42377 struct sensor_device_template **t;
42378 int i, count;
42379diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42380index f2e47c7..45d7941 100644
42381--- a/drivers/hwmon/pmbus/pmbus_core.c
42382+++ b/drivers/hwmon/pmbus/pmbus_core.c
42383@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42384 return 0;
42385 }
42386
42387-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42388+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42389 const char *name,
42390 umode_t mode,
42391 ssize_t (*show)(struct device *dev,
42392@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42393 dev_attr->store = store;
42394 }
42395
42396-static void pmbus_attr_init(struct sensor_device_attribute *a,
42397+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42398 const char *name,
42399 umode_t mode,
42400 ssize_t (*show)(struct device *dev,
42401@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42402 u16 reg, u8 mask)
42403 {
42404 struct pmbus_boolean *boolean;
42405- struct sensor_device_attribute *a;
42406+ sensor_device_attribute_no_const *a;
42407
42408 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42409 if (!boolean)
42410@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42411 bool update, bool readonly)
42412 {
42413 struct pmbus_sensor *sensor;
42414- struct device_attribute *a;
42415+ device_attribute_no_const *a;
42416
42417 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42418 if (!sensor)
42419@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42420 const char *lstring, int index)
42421 {
42422 struct pmbus_label *label;
42423- struct device_attribute *a;
42424+ device_attribute_no_const *a;
42425
42426 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42427 if (!label)
42428diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42429index d4f0935..7420593 100644
42430--- a/drivers/hwmon/sht15.c
42431+++ b/drivers/hwmon/sht15.c
42432@@ -169,7 +169,7 @@ struct sht15_data {
42433 int supply_uv;
42434 bool supply_uv_valid;
42435 struct work_struct update_supply_work;
42436- atomic_t interrupt_handled;
42437+ atomic_unchecked_t interrupt_handled;
42438 };
42439
42440 /**
42441@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42442 ret = gpio_direction_input(data->pdata->gpio_data);
42443 if (ret)
42444 return ret;
42445- atomic_set(&data->interrupt_handled, 0);
42446+ atomic_set_unchecked(&data->interrupt_handled, 0);
42447
42448 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42449 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42450 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42451 /* Only relevant if the interrupt hasn't occurred. */
42452- if (!atomic_read(&data->interrupt_handled))
42453+ if (!atomic_read_unchecked(&data->interrupt_handled))
42454 schedule_work(&data->read_work);
42455 }
42456 ret = wait_event_timeout(data->wait_queue,
42457@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42458
42459 /* First disable the interrupt */
42460 disable_irq_nosync(irq);
42461- atomic_inc(&data->interrupt_handled);
42462+ atomic_inc_unchecked(&data->interrupt_handled);
42463 /* Then schedule a reading work struct */
42464 if (data->state != SHT15_READING_NOTHING)
42465 schedule_work(&data->read_work);
42466@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42467 * If not, then start the interrupt again - care here as could
42468 * have gone low in meantime so verify it hasn't!
42469 */
42470- atomic_set(&data->interrupt_handled, 0);
42471+ atomic_set_unchecked(&data->interrupt_handled, 0);
42472 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42473 /* If still not occurred or another handler was scheduled */
42474 if (gpio_get_value(data->pdata->gpio_data)
42475- || atomic_read(&data->interrupt_handled))
42476+ || atomic_read_unchecked(&data->interrupt_handled))
42477 return;
42478 }
42479
42480diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42481index ac91c07..8e69663 100644
42482--- a/drivers/hwmon/via-cputemp.c
42483+++ b/drivers/hwmon/via-cputemp.c
42484@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42485 return NOTIFY_OK;
42486 }
42487
42488-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42489+static struct notifier_block via_cputemp_cpu_notifier = {
42490 .notifier_call = via_cputemp_cpu_callback,
42491 };
42492
42493diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42494index 65e3240..e6c511d 100644
42495--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42496+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42497@@ -39,7 +39,7 @@
42498 extern struct i2c_adapter amd756_smbus;
42499
42500 static struct i2c_adapter *s4882_adapter;
42501-static struct i2c_algorithm *s4882_algo;
42502+static i2c_algorithm_no_const *s4882_algo;
42503
42504 /* Wrapper access functions for multiplexed SMBus */
42505 static DEFINE_MUTEX(amd756_lock);
42506diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42507index b19a310..d6eece0 100644
42508--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42509+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42510@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42511 /* usb layer */
42512
42513 /* Send command to device, and get response. */
42514-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42515+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42516 {
42517 int ret = 0;
42518 int actual;
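__intentional_overflow(-1) is an annotation consumed by the size_overflow gcc plugin; marking diolan_usb_transfer() tells the plugin that overflow on the flagged value (by convention of the -1 argument, the return value) is expected and must not be instrumented into a trap. Outside plugin builds the macro presumably expands to nothing, along these lines:

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif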
42519diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42520index 88eda09..cf40434 100644
42521--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42522+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42523@@ -37,7 +37,7 @@
42524 extern struct i2c_adapter *nforce2_smbus;
42525
42526 static struct i2c_adapter *s4985_adapter;
42527-static struct i2c_algorithm *s4985_algo;
42528+static i2c_algorithm_no_const *s4985_algo;
42529
42530 /* Wrapper access functions for multiplexed SMBus */
42531 static DEFINE_MUTEX(nforce2_lock);
42532diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42533index 71c7a39..71dd3e0 100644
42534--- a/drivers/i2c/i2c-dev.c
42535+++ b/drivers/i2c/i2c-dev.c
42536@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42537 break;
42538 }
42539
42540- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42541+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42542 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42543 if (IS_ERR(rdwr_pa[i].buf)) {
42544 res = PTR_ERR(rdwr_pa[i].buf);
42545diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42546index 0b510ba..4fbb5085 100644
42547--- a/drivers/ide/ide-cd.c
42548+++ b/drivers/ide/ide-cd.c
42549@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42550 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42551 if ((unsigned long)buf & alignment
42552 || blk_rq_bytes(rq) & q->dma_pad_mask
42553- || object_is_on_stack(buf))
42554+ || object_starts_on_stack(buf))
42555 drive->dma = 0;
42556 }
42557 }
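The DMA-alignment check in cdrom_do_block_pc() switches to object_starts_on_stack(), a grsecurity helper whose name says what it tests: whether the buffer begins on the current task's stack (stack buffers must not be handed to DMA here, so drive->dma is cleared). The mainline helper it replaces is essentially the same range test; a sketch of that test, for orientation only:

static inline int starts_on_stack(const void *obj)
{
	const void *stack = task_stack_page(current);

	return obj >= stack && obj < stack + THREAD_SIZE;
}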
42558diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42559index 4df97f6..c751151 100644
42560--- a/drivers/iio/industrialio-core.c
42561+++ b/drivers/iio/industrialio-core.c
42562@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42563 }
42564
42565 static
42566-int __iio_device_attr_init(struct device_attribute *dev_attr,
42567+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42568 const char *postfix,
42569 struct iio_chan_spec const *chan,
42570 ssize_t (*readfunc)(struct device *dev,
42571diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42572index e28a494..f7c2671 100644
42573--- a/drivers/infiniband/core/cm.c
42574+++ b/drivers/infiniband/core/cm.c
42575@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42576
42577 struct cm_counter_group {
42578 struct kobject obj;
42579- atomic_long_t counter[CM_ATTR_COUNT];
42580+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42581 };
42582
42583 struct cm_counter_attribute {
42584@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42585 struct ib_mad_send_buf *msg = NULL;
42586 int ret;
42587
42588- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42589+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42590 counter[CM_REQ_COUNTER]);
42591
42592 /* Quick state check to discard duplicate REQs. */
42593@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42594 if (!cm_id_priv)
42595 return;
42596
42597- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42598+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42599 counter[CM_REP_COUNTER]);
42600 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42601 if (ret)
42602@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42603 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42604 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42605 spin_unlock_irq(&cm_id_priv->lock);
42606- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42607+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42608 counter[CM_RTU_COUNTER]);
42609 goto out;
42610 }
42611@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42612 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42613 dreq_msg->local_comm_id);
42614 if (!cm_id_priv) {
42615- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42616+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42617 counter[CM_DREQ_COUNTER]);
42618 cm_issue_drep(work->port, work->mad_recv_wc);
42619 return -EINVAL;
42620@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42621 case IB_CM_MRA_REP_RCVD:
42622 break;
42623 case IB_CM_TIMEWAIT:
42624- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42625+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42626 counter[CM_DREQ_COUNTER]);
42627 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42628 goto unlock;
42629@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42630 cm_free_msg(msg);
42631 goto deref;
42632 case IB_CM_DREQ_RCVD:
42633- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42634+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42635 counter[CM_DREQ_COUNTER]);
42636 goto unlock;
42637 default:
42638@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42639 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42640 cm_id_priv->msg, timeout)) {
42641 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42642- atomic_long_inc(&work->port->
42643+ atomic_long_inc_unchecked(&work->port->
42644 counter_group[CM_RECV_DUPLICATES].
42645 counter[CM_MRA_COUNTER]);
42646 goto out;
42647@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42648 break;
42649 case IB_CM_MRA_REQ_RCVD:
42650 case IB_CM_MRA_REP_RCVD:
42651- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42652+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42653 counter[CM_MRA_COUNTER]);
42654 /* fall through */
42655 default:
42656@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42657 case IB_CM_LAP_IDLE:
42658 break;
42659 case IB_CM_MRA_LAP_SENT:
42660- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42661+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42662 counter[CM_LAP_COUNTER]);
42663 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42664 goto unlock;
42665@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42666 cm_free_msg(msg);
42667 goto deref;
42668 case IB_CM_LAP_RCVD:
42669- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42670+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42671 counter[CM_LAP_COUNTER]);
42672 goto unlock;
42673 default:
42674@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42675 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42676 if (cur_cm_id_priv) {
42677 spin_unlock_irq(&cm.lock);
42678- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42679+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42680 counter[CM_SIDR_REQ_COUNTER]);
42681 goto out; /* Duplicate message. */
42682 }
42683@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42684 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42685 msg->retries = 1;
42686
42687- atomic_long_add(1 + msg->retries,
42688+ atomic_long_add_unchecked(1 + msg->retries,
42689 &port->counter_group[CM_XMIT].counter[attr_index]);
42690 if (msg->retries)
42691- atomic_long_add(msg->retries,
42692+ atomic_long_add_unchecked(msg->retries,
42693 &port->counter_group[CM_XMIT_RETRIES].
42694 counter[attr_index]);
42695
42696@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42697 }
42698
42699 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42700- atomic_long_inc(&port->counter_group[CM_RECV].
42701+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42702 counter[attr_id - CM_ATTR_ID_OFFSET]);
42703
42704 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42705@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42706 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42707
42708 return sprintf(buf, "%ld\n",
42709- atomic_long_read(&group->counter[cm_attr->index]));
42710+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42711 }
42712
42713 static const struct sysfs_ops cm_counter_ops = {
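The ib_cm hunks above convert the duplicate-message statistics counters from atomic_t to atomic_unchecked_t, and the same conversion recurs in fmr_pool.c and throughout the driver hunks below. Under PaX/grsecurity REFCOUNT hardening, every ordinary atomic_t operation is instrumented to trap on overflow, since a wrapped reference count is a classic route to use-after-free; counters that never control an object's lifetime may wrap harmlessly, so they are moved to the _unchecked variants, which keep plain machine arithmetic. The fmr_pool req_ser/flush_ser pair below are sequence numbers compared by subtraction, where wrap-around is expected, so the unchecked form is the semantically correct one there. A rough userspace model of the distinction (all names are illustrative, and the real checked variant detects overflow inside the atomic operation itself rather than with a separate load):

    #include <limits.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_model_t;
    typedef struct { int counter; } atomic_unchecked_model_t;

    static void atomic_inc_model(atomic_model_t *v)
    {
            if (__atomic_load_n(&v->counter, __ATOMIC_RELAXED) == INT_MAX)
                    abort();        /* models the REFCOUNT overflow trap */
            __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    static void atomic_inc_unchecked_model(atomic_unchecked_model_t *v)
    {
            /* statistics only: wrap-around is harmless, so no check */
            __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }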
42714diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42715index 9f5ad7c..588cd84 100644
42716--- a/drivers/infiniband/core/fmr_pool.c
42717+++ b/drivers/infiniband/core/fmr_pool.c
42718@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42719
42720 struct task_struct *thread;
42721
42722- atomic_t req_ser;
42723- atomic_t flush_ser;
42724+ atomic_unchecked_t req_ser;
42725+ atomic_unchecked_t flush_ser;
42726
42727 wait_queue_head_t force_wait;
42728 };
42729@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42730 struct ib_fmr_pool *pool = pool_ptr;
42731
42732 do {
42733- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42734+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42735 ib_fmr_batch_release(pool);
42736
42737- atomic_inc(&pool->flush_ser);
42738+ atomic_inc_unchecked(&pool->flush_ser);
42739 wake_up_interruptible(&pool->force_wait);
42740
42741 if (pool->flush_function)
42742@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42743 }
42744
42745 set_current_state(TASK_INTERRUPTIBLE);
42746- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42747+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42748 !kthread_should_stop())
42749 schedule();
42750 __set_current_state(TASK_RUNNING);
42751@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42752 pool->dirty_watermark = params->dirty_watermark;
42753 pool->dirty_len = 0;
42754 spin_lock_init(&pool->pool_lock);
42755- atomic_set(&pool->req_ser, 0);
42756- atomic_set(&pool->flush_ser, 0);
42757+ atomic_set_unchecked(&pool->req_ser, 0);
42758+ atomic_set_unchecked(&pool->flush_ser, 0);
42759 init_waitqueue_head(&pool->force_wait);
42760
42761 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42762@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42763 }
42764 spin_unlock_irq(&pool->pool_lock);
42765
42766- serial = atomic_inc_return(&pool->req_ser);
42767+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42768 wake_up_process(pool->thread);
42769
42770 if (wait_event_interruptible(pool->force_wait,
42771- atomic_read(&pool->flush_ser) - serial >= 0))
42772+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42773 return -EINTR;
42774
42775 return 0;
42776@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42777 } else {
42778 list_add_tail(&fmr->list, &pool->dirty_list);
42779 if (++pool->dirty_len >= pool->dirty_watermark) {
42780- atomic_inc(&pool->req_ser);
42781+ atomic_inc_unchecked(&pool->req_ser);
42782 wake_up_process(pool->thread);
42783 }
42784 }
42785diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
42786index a9f0489..27a161b 100644
42787--- a/drivers/infiniband/core/uverbs_cmd.c
42788+++ b/drivers/infiniband/core/uverbs_cmd.c
42789@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
42790 if (copy_from_user(&cmd, buf, sizeof cmd))
42791 return -EFAULT;
42792
42793+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
42794+ return -EFAULT;
42795+
42796 INIT_UDATA(&udata, buf + sizeof cmd,
42797 (unsigned long) cmd.response + sizeof resp,
42798 in_len - sizeof cmd, out_len - sizeof resp);
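ib_uverbs_reg_mr() receives a user-controlled (start, length) pair describing memory to register; the added access_ok_noprefault() check (a grsecurity variant of access_ok() that skips prefaulting) rejects ranges that wrap or reach outside the user address space before any pinning work begins. The effect of such a range check, modeled in plain C (TASK_SIZE_MODEL is a stand-in for the real user/kernel split, not a kernel constant):

    #include <stdbool.h>
    #include <stdint.h>

    #define TASK_SIZE_MODEL (1ULL << 47)

    static bool user_range_ok(uint64_t start, uint64_t length)
    {
            uint64_t end = start + length;

            if (end < start)                /* length wraps the address space */
                    return false;
            return end <= TASK_SIZE_MODEL;  /* stays within the user segment */
    }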
42799diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42800index 6791fd1..78bdcdf 100644
42801--- a/drivers/infiniband/hw/cxgb4/mem.c
42802+++ b/drivers/infiniband/hw/cxgb4/mem.c
42803@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42804 int err;
42805 struct fw_ri_tpte tpt;
42806 u32 stag_idx;
42807- static atomic_t key;
42808+ static atomic_unchecked_t key;
42809
42810 if (c4iw_fatal_error(rdev))
42811 return -EIO;
42812@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42813 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42814 rdev->stats.stag.max = rdev->stats.stag.cur;
42815 mutex_unlock(&rdev->stats.lock);
42816- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42817+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42818 }
42819 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42820 __func__, stag_state, type, pdid, stag_idx);
42821diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42822index 79b3dbc..96e5fcc 100644
42823--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42824+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42825@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42826 struct ib_atomic_eth *ateth;
42827 struct ipath_ack_entry *e;
42828 u64 vaddr;
42829- atomic64_t *maddr;
42830+ atomic64_unchecked_t *maddr;
42831 u64 sdata;
42832 u32 rkey;
42833 u8 next;
42834@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42835 IB_ACCESS_REMOTE_ATOMIC)))
42836 goto nack_acc_unlck;
42837 /* Perform atomic OP and save result. */
42838- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42839+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42840 sdata = be64_to_cpu(ateth->swap_data);
42841 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42842 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42843- (u64) atomic64_add_return(sdata, maddr) - sdata :
42844+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42845 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42846 be64_to_cpu(ateth->compare_data),
42847 sdata);
42848diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42849index 1f95bba..9530f87 100644
42850--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42851+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42852@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42853 unsigned long flags;
42854 struct ib_wc wc;
42855 u64 sdata;
42856- atomic64_t *maddr;
42857+ atomic64_unchecked_t *maddr;
42858 enum ib_wc_status send_status;
42859
42860 /*
42861@@ -382,11 +382,11 @@ again:
42862 IB_ACCESS_REMOTE_ATOMIC)))
42863 goto acc_err;
42864 /* Perform atomic OP and save result. */
42865- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42866+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42867 sdata = wqe->wr.wr.atomic.compare_add;
42868 *(u64 *) sqp->s_sge.sge.vaddr =
42869 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42870- (u64) atomic64_add_return(sdata, maddr) - sdata :
42871+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42872 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42873 sdata, wqe->wr.wr.atomic.swap);
42874 goto send_comp;
42875diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42876index 5904026..f1c30e5 100644
42877--- a/drivers/infiniband/hw/mlx4/mad.c
42878+++ b/drivers/infiniband/hw/mlx4/mad.c
42879@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42880
42881 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42882 {
42883- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42884+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42885 cpu_to_be64(0xff00000000000000LL);
42886 }
42887
42888diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42889index ed327e6..ca1739e0 100644
42890--- a/drivers/infiniband/hw/mlx4/mcg.c
42891+++ b/drivers/infiniband/hw/mlx4/mcg.c
42892@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42893 {
42894 char name[20];
42895
42896- atomic_set(&ctx->tid, 0);
42897+ atomic_set_unchecked(&ctx->tid, 0);
42898 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42899 ctx->mcg_wq = create_singlethread_workqueue(name);
42900 if (!ctx->mcg_wq)
42901diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42902index f829fd9..1a8d436 100644
42903--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42904+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42905@@ -439,7 +439,7 @@ struct mlx4_ib_demux_ctx {
42906 struct list_head mcg_mgid0_list;
42907 struct workqueue_struct *mcg_wq;
42908 struct mlx4_ib_demux_pv_ctx **tun;
42909- atomic_t tid;
42910+ atomic_unchecked_t tid;
42911 int flushing; /* flushing the work queue */
42912 };
42913
42914diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42915index 9d3e5c1..6f166df 100644
42916--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42917+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42918@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42919 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42920 }
42921
42922-int mthca_QUERY_FW(struct mthca_dev *dev)
42923+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42924 {
42925 struct mthca_mailbox *mailbox;
42926 u32 *outbox;
42927@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42928 CMD_TIME_CLASS_B);
42929 }
42930
42931-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42932+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42933 int num_mtt)
42934 {
42935 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42936@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42937 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42938 }
42939
42940-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42941+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42942 int eq_num)
42943 {
42944 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42945@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42946 CMD_TIME_CLASS_B);
42947 }
42948
42949-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42950+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42951 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42952 void *in_mad, void *response_mad)
42953 {
42954diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42955index ded76c1..0cf0a08 100644
42956--- a/drivers/infiniband/hw/mthca/mthca_main.c
42957+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42958@@ -692,7 +692,7 @@ err_close:
42959 return err;
42960 }
42961
42962-static int mthca_setup_hca(struct mthca_dev *dev)
42963+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42964 {
42965 int err;
42966
42967diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42968index ed9a989..6aa5dc2 100644
42969--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42970+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42971@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42972 * through the bitmaps)
42973 */
42974
42975-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42976+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42977 {
42978 int o;
42979 int m;
42980@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42981 return key;
42982 }
42983
42984-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42985+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42986 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42987 {
42988 struct mthca_mailbox *mailbox;
42989@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42990 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42991 }
42992
42993-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42994+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42995 u64 *buffer_list, int buffer_size_shift,
42996 int list_len, u64 iova, u64 total_size,
42997 u32 access, struct mthca_mr *mr)
42998diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42999index 415f8e1..e34214e 100644
43000--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43001+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43002@@ -764,7 +764,7 @@ unlock:
43003 return 0;
43004 }
43005
43006-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43007+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43008 {
43009 struct mthca_dev *dev = to_mdev(ibcq->device);
43010 struct mthca_cq *cq = to_mcq(ibcq);
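The mthca hunks annotate a set of command helpers with __intentional_overflow(-1). grsecurity's size_overflow GCC plugin instruments integer expressions that feed allocation sizes or copy lengths and kills the task when one overflows; the annotation tells the plugin that overflow in the marked function is intended or benign (-1 conventionally disabling the check for the whole function), so it is left uninstrumented rather than producing false positives. A sketch of how such a marker typically degrades to a no-op when the plugin is absent (the guard macro name here is an assumption, not the patch's exact spelling):

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    /* the marker attaches to the declaration, as in the hunks above */
    static int __intentional_overflow(-1) fw_words_to_bytes(unsigned int words)
    {
            return (int)(words * 4u);  /* may wrap by design; uninstrumented */
    }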
43011diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43012index 3b2a6dc..bce26ff 100644
43013--- a/drivers/infiniband/hw/nes/nes.c
43014+++ b/drivers/infiniband/hw/nes/nes.c
43015@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43016 LIST_HEAD(nes_adapter_list);
43017 static LIST_HEAD(nes_dev_list);
43018
43019-atomic_t qps_destroyed;
43020+atomic_unchecked_t qps_destroyed;
43021
43022 static unsigned int ee_flsh_adapter;
43023 static unsigned int sysfs_nonidx_addr;
43024@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43025 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43026 struct nes_adapter *nesadapter = nesdev->nesadapter;
43027
43028- atomic_inc(&qps_destroyed);
43029+ atomic_inc_unchecked(&qps_destroyed);
43030
43031 /* Free the control structures */
43032
43033diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43034index bd9d132..70d84f4 100644
43035--- a/drivers/infiniband/hw/nes/nes.h
43036+++ b/drivers/infiniband/hw/nes/nes.h
43037@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43038 extern unsigned int wqm_quanta;
43039 extern struct list_head nes_adapter_list;
43040
43041-extern atomic_t cm_connects;
43042-extern atomic_t cm_accepts;
43043-extern atomic_t cm_disconnects;
43044-extern atomic_t cm_closes;
43045-extern atomic_t cm_connecteds;
43046-extern atomic_t cm_connect_reqs;
43047-extern atomic_t cm_rejects;
43048-extern atomic_t mod_qp_timouts;
43049-extern atomic_t qps_created;
43050-extern atomic_t qps_destroyed;
43051-extern atomic_t sw_qps_destroyed;
43052+extern atomic_unchecked_t cm_connects;
43053+extern atomic_unchecked_t cm_accepts;
43054+extern atomic_unchecked_t cm_disconnects;
43055+extern atomic_unchecked_t cm_closes;
43056+extern atomic_unchecked_t cm_connecteds;
43057+extern atomic_unchecked_t cm_connect_reqs;
43058+extern atomic_unchecked_t cm_rejects;
43059+extern atomic_unchecked_t mod_qp_timouts;
43060+extern atomic_unchecked_t qps_created;
43061+extern atomic_unchecked_t qps_destroyed;
43062+extern atomic_unchecked_t sw_qps_destroyed;
43063 extern u32 mh_detected;
43064 extern u32 mh_pauses_sent;
43065 extern u32 cm_packets_sent;
43066@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43067 extern u32 cm_packets_received;
43068 extern u32 cm_packets_dropped;
43069 extern u32 cm_packets_retrans;
43070-extern atomic_t cm_listens_created;
43071-extern atomic_t cm_listens_destroyed;
43072+extern atomic_unchecked_t cm_listens_created;
43073+extern atomic_unchecked_t cm_listens_destroyed;
43074 extern u32 cm_backlog_drops;
43075-extern atomic_t cm_loopbacks;
43076-extern atomic_t cm_nodes_created;
43077-extern atomic_t cm_nodes_destroyed;
43078-extern atomic_t cm_accel_dropped_pkts;
43079-extern atomic_t cm_resets_recvd;
43080-extern atomic_t pau_qps_created;
43081-extern atomic_t pau_qps_destroyed;
43082+extern atomic_unchecked_t cm_loopbacks;
43083+extern atomic_unchecked_t cm_nodes_created;
43084+extern atomic_unchecked_t cm_nodes_destroyed;
43085+extern atomic_unchecked_t cm_accel_dropped_pkts;
43086+extern atomic_unchecked_t cm_resets_recvd;
43087+extern atomic_unchecked_t pau_qps_created;
43088+extern atomic_unchecked_t pau_qps_destroyed;
43089
43090 extern u32 int_mod_timer_init;
43091 extern u32 int_mod_cq_depth_256;
43092diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43093index 6f09a72..cf4399d 100644
43094--- a/drivers/infiniband/hw/nes/nes_cm.c
43095+++ b/drivers/infiniband/hw/nes/nes_cm.c
43096@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43097 u32 cm_packets_retrans;
43098 u32 cm_packets_created;
43099 u32 cm_packets_received;
43100-atomic_t cm_listens_created;
43101-atomic_t cm_listens_destroyed;
43102+atomic_unchecked_t cm_listens_created;
43103+atomic_unchecked_t cm_listens_destroyed;
43104 u32 cm_backlog_drops;
43105-atomic_t cm_loopbacks;
43106-atomic_t cm_nodes_created;
43107-atomic_t cm_nodes_destroyed;
43108-atomic_t cm_accel_dropped_pkts;
43109-atomic_t cm_resets_recvd;
43110+atomic_unchecked_t cm_loopbacks;
43111+atomic_unchecked_t cm_nodes_created;
43112+atomic_unchecked_t cm_nodes_destroyed;
43113+atomic_unchecked_t cm_accel_dropped_pkts;
43114+atomic_unchecked_t cm_resets_recvd;
43115
43116 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43117 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43118@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43119 /* instance of function pointers for client API */
43120 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43121 static struct nes_cm_ops nes_cm_api = {
43122- mini_cm_accelerated,
43123- mini_cm_listen,
43124- mini_cm_del_listen,
43125- mini_cm_connect,
43126- mini_cm_close,
43127- mini_cm_accept,
43128- mini_cm_reject,
43129- mini_cm_recv_pkt,
43130- mini_cm_dealloc_core,
43131- mini_cm_get,
43132- mini_cm_set
43133+ .accelerated = mini_cm_accelerated,
43134+ .listen = mini_cm_listen,
43135+ .stop_listener = mini_cm_del_listen,
43136+ .connect = mini_cm_connect,
43137+ .close = mini_cm_close,
43138+ .accept = mini_cm_accept,
43139+ .reject = mini_cm_reject,
43140+ .recv_pkt = mini_cm_recv_pkt,
43141+ .destroy_cm_core = mini_cm_dealloc_core,
43142+ .get = mini_cm_get,
43143+ .set = mini_cm_set
43144 };
43145
43146 static struct nes_cm_core *g_cm_core;
43147
43148-atomic_t cm_connects;
43149-atomic_t cm_accepts;
43150-atomic_t cm_disconnects;
43151-atomic_t cm_closes;
43152-atomic_t cm_connecteds;
43153-atomic_t cm_connect_reqs;
43154-atomic_t cm_rejects;
43155+atomic_unchecked_t cm_connects;
43156+atomic_unchecked_t cm_accepts;
43157+atomic_unchecked_t cm_disconnects;
43158+atomic_unchecked_t cm_closes;
43159+atomic_unchecked_t cm_connecteds;
43160+atomic_unchecked_t cm_connect_reqs;
43161+atomic_unchecked_t cm_rejects;
43162
43163 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43164 {
43165@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43166 kfree(listener);
43167 listener = NULL;
43168 ret = 0;
43169- atomic_inc(&cm_listens_destroyed);
43170+ atomic_inc_unchecked(&cm_listens_destroyed);
43171 } else {
43172 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43173 }
43174@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43175 cm_node->rem_mac);
43176
43177 add_hte_node(cm_core, cm_node);
43178- atomic_inc(&cm_nodes_created);
43179+ atomic_inc_unchecked(&cm_nodes_created);
43180
43181 return cm_node;
43182 }
43183@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43184 }
43185
43186 atomic_dec(&cm_core->node_cnt);
43187- atomic_inc(&cm_nodes_destroyed);
43188+ atomic_inc_unchecked(&cm_nodes_destroyed);
43189 nesqp = cm_node->nesqp;
43190 if (nesqp) {
43191 nesqp->cm_node = NULL;
43192@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43193
43194 static void drop_packet(struct sk_buff *skb)
43195 {
43196- atomic_inc(&cm_accel_dropped_pkts);
43197+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43198 dev_kfree_skb_any(skb);
43199 }
43200
43201@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43202 {
43203
43204 int reset = 0; /* whether to send reset in case of err.. */
43205- atomic_inc(&cm_resets_recvd);
43206+ atomic_inc_unchecked(&cm_resets_recvd);
43207 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43208 " refcnt=%d\n", cm_node, cm_node->state,
43209 atomic_read(&cm_node->ref_count));
43210@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43211 rem_ref_cm_node(cm_node->cm_core, cm_node);
43212 return NULL;
43213 }
43214- atomic_inc(&cm_loopbacks);
43215+ atomic_inc_unchecked(&cm_loopbacks);
43216 loopbackremotenode->loopbackpartner = cm_node;
43217 loopbackremotenode->tcp_cntxt.rcv_wscale =
43218 NES_CM_DEFAULT_RCV_WND_SCALE;
43219@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43220 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43221 else {
43222 rem_ref_cm_node(cm_core, cm_node);
43223- atomic_inc(&cm_accel_dropped_pkts);
43224+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43225 dev_kfree_skb_any(skb);
43226 }
43227 break;
43228@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43229
43230 if ((cm_id) && (cm_id->event_handler)) {
43231 if (issue_disconn) {
43232- atomic_inc(&cm_disconnects);
43233+ atomic_inc_unchecked(&cm_disconnects);
43234 cm_event.event = IW_CM_EVENT_DISCONNECT;
43235 cm_event.status = disconn_status;
43236 cm_event.local_addr = cm_id->local_addr;
43237@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43238 }
43239
43240 if (issue_close) {
43241- atomic_inc(&cm_closes);
43242+ atomic_inc_unchecked(&cm_closes);
43243 nes_disconnect(nesqp, 1);
43244
43245 cm_id->provider_data = nesqp;
43246@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43247
43248 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43249 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43250- atomic_inc(&cm_accepts);
43251+ atomic_inc_unchecked(&cm_accepts);
43252
43253 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43254 netdev_refcnt_read(nesvnic->netdev));
43255@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43256 struct nes_cm_core *cm_core;
43257 u8 *start_buff;
43258
43259- atomic_inc(&cm_rejects);
43260+ atomic_inc_unchecked(&cm_rejects);
43261 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43262 loopback = cm_node->loopbackpartner;
43263 cm_core = cm_node->cm_core;
43264@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43265 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43266 ntohs(laddr->sin_port));
43267
43268- atomic_inc(&cm_connects);
43269+ atomic_inc_unchecked(&cm_connects);
43270 nesqp->active_conn = 1;
43271
43272 /* cache the cm_id in the qp */
43273@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43274 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43275 return err;
43276 }
43277- atomic_inc(&cm_listens_created);
43278+ atomic_inc_unchecked(&cm_listens_created);
43279 }
43280
43281 cm_id->add_ref(cm_id);
43282@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43283
43284 if (nesqp->destroyed)
43285 return;
43286- atomic_inc(&cm_connecteds);
43287+ atomic_inc_unchecked(&cm_connecteds);
43288 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43289 " local port 0x%04X. jiffies = %lu.\n",
43290 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43291@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43292
43293 cm_id->add_ref(cm_id);
43294 ret = cm_id->event_handler(cm_id, &cm_event);
43295- atomic_inc(&cm_closes);
43296+ atomic_inc_unchecked(&cm_closes);
43297 cm_event.event = IW_CM_EVENT_CLOSE;
43298 cm_event.status = 0;
43299 cm_event.provider_data = cm_id->provider_data;
43300@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43301 return;
43302 cm_id = cm_node->cm_id;
43303
43304- atomic_inc(&cm_connect_reqs);
43305+ atomic_inc_unchecked(&cm_connect_reqs);
43306 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43307 cm_node, cm_id, jiffies);
43308
43309@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43310 return;
43311 cm_id = cm_node->cm_id;
43312
43313- atomic_inc(&cm_connect_reqs);
43314+ atomic_inc_unchecked(&cm_connect_reqs);
43315 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43316 cm_node, cm_id, jiffies);
43317
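Besides the counter conversions, nes_cm.c rewrites the nes_cm_api ops table from positional to designated initializers, a pattern the gigaset and isdn_x25iface hunks below repeat. Constified ops structures are far easier to audit when every function pointer is bound to its field by name: reordering or growing the struct can no longer silently rebind entries, and the compiler rejects initializers for fields that disappear. A minimal model of the rewrite (struct and function names are illustrative):

    struct ops_model {
            int (*accelerated)(void);
            int (*listen)(void);
    };

    static int model_accelerated(void) { return 0; }
    static int model_listen(void)      { return 0; }

    /* after the patch's style: each handler is tied to its slot by name */
    static const struct ops_model ops = {
            .accelerated = model_accelerated,
            .listen      = model_listen,
    };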
43318diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43319index 4166452..fc952c3 100644
43320--- a/drivers/infiniband/hw/nes/nes_mgt.c
43321+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43322@@ -40,8 +40,8 @@
43323 #include "nes.h"
43324 #include "nes_mgt.h"
43325
43326-atomic_t pau_qps_created;
43327-atomic_t pau_qps_destroyed;
43328+atomic_unchecked_t pau_qps_created;
43329+atomic_unchecked_t pau_qps_destroyed;
43330
43331 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43332 {
43333@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43334 {
43335 struct sk_buff *skb;
43336 unsigned long flags;
43337- atomic_inc(&pau_qps_destroyed);
43338+ atomic_inc_unchecked(&pau_qps_destroyed);
43339
43340 /* Free packets that have not yet been forwarded */
43341 /* Lock is acquired by skb_dequeue when removing the skb */
43342@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43343 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43344 skb_queue_head_init(&nesqp->pau_list);
43345 spin_lock_init(&nesqp->pau_lock);
43346- atomic_inc(&pau_qps_created);
43347+ atomic_inc_unchecked(&pau_qps_created);
43348 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43349 }
43350
43351diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43352index 70acda9..a96de9d 100644
43353--- a/drivers/infiniband/hw/nes/nes_nic.c
43354+++ b/drivers/infiniband/hw/nes/nes_nic.c
43355@@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43356 target_stat_values[++index] = mh_detected;
43357 target_stat_values[++index] = mh_pauses_sent;
43358 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43359- target_stat_values[++index] = atomic_read(&cm_connects);
43360- target_stat_values[++index] = atomic_read(&cm_accepts);
43361- target_stat_values[++index] = atomic_read(&cm_disconnects);
43362- target_stat_values[++index] = atomic_read(&cm_connecteds);
43363- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43364- target_stat_values[++index] = atomic_read(&cm_rejects);
43365- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43366- target_stat_values[++index] = atomic_read(&qps_created);
43367- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43368- target_stat_values[++index] = atomic_read(&qps_destroyed);
43369- target_stat_values[++index] = atomic_read(&cm_closes);
43370+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43371+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43372+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43373+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43374+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43375+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43376+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43377+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43378+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43379+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43380+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43381 target_stat_values[++index] = cm_packets_sent;
43382 target_stat_values[++index] = cm_packets_bounced;
43383 target_stat_values[++index] = cm_packets_created;
43384 target_stat_values[++index] = cm_packets_received;
43385 target_stat_values[++index] = cm_packets_dropped;
43386 target_stat_values[++index] = cm_packets_retrans;
43387- target_stat_values[++index] = atomic_read(&cm_listens_created);
43388- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43389+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43390+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43391 target_stat_values[++index] = cm_backlog_drops;
43392- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43393- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43394- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43395- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43396- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43397+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43398+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43399+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43400+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43401+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43402 target_stat_values[++index] = nesadapter->free_4kpbl;
43403 target_stat_values[++index] = nesadapter->free_256pbl;
43404 target_stat_values[++index] = int_mod_timer_init;
43405 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43406 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43407 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43408- target_stat_values[++index] = atomic_read(&pau_qps_created);
43409- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43410+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43411+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43412 }
43413
43414 /**
43415diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43416index c0d0296..3185f57 100644
43417--- a/drivers/infiniband/hw/nes/nes_verbs.c
43418+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43419@@ -46,9 +46,9 @@
43420
43421 #include <rdma/ib_umem.h>
43422
43423-atomic_t mod_qp_timouts;
43424-atomic_t qps_created;
43425-atomic_t sw_qps_destroyed;
43426+atomic_unchecked_t mod_qp_timouts;
43427+atomic_unchecked_t qps_created;
43428+atomic_unchecked_t sw_qps_destroyed;
43429
43430 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43431
43432@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43433 if (init_attr->create_flags)
43434 return ERR_PTR(-EINVAL);
43435
43436- atomic_inc(&qps_created);
43437+ atomic_inc_unchecked(&qps_created);
43438 switch (init_attr->qp_type) {
43439 case IB_QPT_RC:
43440 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43441@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43442 struct iw_cm_event cm_event;
43443 int ret = 0;
43444
43445- atomic_inc(&sw_qps_destroyed);
43446+ atomic_inc_unchecked(&sw_qps_destroyed);
43447 nesqp->destroyed = 1;
43448
43449 /* Blow away the connection if it exists. */
43450diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43451index ffd48bf..83cdb56 100644
43452--- a/drivers/infiniband/hw/qib/qib.h
43453+++ b/drivers/infiniband/hw/qib/qib.h
43454@@ -52,6 +52,7 @@
43455 #include <linux/kref.h>
43456 #include <linux/sched.h>
43457 #include <linux/kthread.h>
43458+#include <linux/slab.h>
43459
43460 #include "qib_common.h"
43461 #include "qib_verbs.h"
43462diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43463index cdc7df4..a2fdfdb 100644
43464--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43465+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43466@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43467 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43468 }
43469
43470-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43471+static struct rtnl_link_ops ipoib_link_ops = {
43472 .kind = "ipoib",
43473 .maxtype = IFLA_IPOIB_MAX,
43474 .policy = ipoib_policy,
43475diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43476index e853a21..56fc5a8 100644
43477--- a/drivers/input/gameport/gameport.c
43478+++ b/drivers/input/gameport/gameport.c
43479@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43480 */
43481 static void gameport_init_port(struct gameport *gameport)
43482 {
43483- static atomic_t gameport_no = ATOMIC_INIT(-1);
43484+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43485
43486 __module_get(THIS_MODULE);
43487
43488 mutex_init(&gameport->drv_mutex);
43489 device_initialize(&gameport->dev);
43490 dev_set_name(&gameport->dev, "gameport%lu",
43491- (unsigned long)atomic_inc_return(&gameport_no));
43492+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43493 gameport->dev.bus = &gameport_bus;
43494 gameport->dev.release = gameport_release_port;
43495 if (gameport->parent)
43496diff --git a/drivers/input/input.c b/drivers/input/input.c
43497index cc357f1..ee42fbc 100644
43498--- a/drivers/input/input.c
43499+++ b/drivers/input/input.c
43500@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
43501 */
43502 struct input_dev *input_allocate_device(void)
43503 {
43504- static atomic_t input_no = ATOMIC_INIT(-1);
43505+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43506 struct input_dev *dev;
43507
43508 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43509@@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
43510 INIT_LIST_HEAD(&dev->node);
43511
43512 dev_set_name(&dev->dev, "input%lu",
43513- (unsigned long)atomic_inc_return(&input_no));
43514+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43515
43516 __module_get(THIS_MODULE);
43517 }
43518diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43519index 4a95b22..874c182 100644
43520--- a/drivers/input/joystick/sidewinder.c
43521+++ b/drivers/input/joystick/sidewinder.c
43522@@ -30,6 +30,7 @@
43523 #include <linux/kernel.h>
43524 #include <linux/module.h>
43525 #include <linux/slab.h>
43526+#include <linux/sched.h>
43527 #include <linux/input.h>
43528 #include <linux/gameport.h>
43529 #include <linux/jiffies.h>
43530diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43531index 3aa2f3f..53c00ea 100644
43532--- a/drivers/input/joystick/xpad.c
43533+++ b/drivers/input/joystick/xpad.c
43534@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43535
43536 static int xpad_led_probe(struct usb_xpad *xpad)
43537 {
43538- static atomic_t led_seq = ATOMIC_INIT(-1);
43539+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43540 unsigned long led_no;
43541 struct xpad_led *led;
43542 struct led_classdev *led_cdev;
43543@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43544 if (!led)
43545 return -ENOMEM;
43546
43547- led_no = atomic_inc_return(&led_seq);
43548+ led_no = atomic_inc_return_unchecked(&led_seq);
43549
43550 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43551 led->xpad = xpad;
43552diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43553index ac1fa5f..5f7502c 100644
43554--- a/drivers/input/misc/ims-pcu.c
43555+++ b/drivers/input/misc/ims-pcu.c
43556@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43557
43558 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43559 {
43560- static atomic_t device_no = ATOMIC_INIT(-1);
43561+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43562
43563 const struct ims_pcu_device_info *info;
43564 int error;
43565@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43566 }
43567
43568 /* Device appears to be operable, complete initialization */
43569- pcu->device_no = atomic_inc_return(&device_no);
43570+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43571
43572 /*
43573 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43574diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43575index d02e1bd..d719719 100644
43576--- a/drivers/input/mouse/psmouse.h
43577+++ b/drivers/input/mouse/psmouse.h
43578@@ -124,7 +124,7 @@ struct psmouse_attribute {
43579 ssize_t (*set)(struct psmouse *psmouse, void *data,
43580 const char *buf, size_t count);
43581 bool protect;
43582-};
43583+} __do_const;
43584 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43585
43586 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43587diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43588index b604564..3f14ae4 100644
43589--- a/drivers/input/mousedev.c
43590+++ b/drivers/input/mousedev.c
43591@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43592
43593 spin_unlock_irq(&client->packet_lock);
43594
43595- if (copy_to_user(buffer, data, count))
43596+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43597 return -EFAULT;
43598
43599 return count;
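mousedev_read() copies from a fixed on-stack packet buffer, so the added count > sizeof(data) guard ensures a caller-supplied length can never walk past it; the b1.c and icn.c hunks further down add the mirror-image guard (left > sizeof buf, count > sizeof msg) before copy_from_user() into stack buffers. The shape of the fix, modeled in userspace with memcpy() standing in for the copy_*_user() helpers:

    #include <errno.h>
    #include <string.h>

    static int copy_bounded(void *dst, const void *src, size_t count)
    {
            char buf[256];                  /* fixed on-stack staging buffer */

            if (count > sizeof(buf))        /* the check the patch adds */
                    return -EFAULT;
            memcpy(buf, src, count);
            memcpy(dst, buf, count);
            return 0;
    }

The guard runs before any bytes move, so a hostile length fails cleanly with -EFAULT instead of corrupting the stack.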
43600diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43601index a05a517..323a2fd 100644
43602--- a/drivers/input/serio/serio.c
43603+++ b/drivers/input/serio/serio.c
43604@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43605 */
43606 static void serio_init_port(struct serio *serio)
43607 {
43608- static atomic_t serio_no = ATOMIC_INIT(-1);
43609+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43610
43611 __module_get(THIS_MODULE);
43612
43613@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43614 mutex_init(&serio->drv_mutex);
43615 device_initialize(&serio->dev);
43616 dev_set_name(&serio->dev, "serio%lu",
43617- (unsigned long)atomic_inc_return(&serio_no));
43618+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43619 serio->dev.bus = &serio_bus;
43620 serio->dev.release = serio_release_port;
43621 serio->dev.groups = serio_device_attr_groups;
43622diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43623index 71ef5d6..93380a9 100644
43624--- a/drivers/input/serio/serio_raw.c
43625+++ b/drivers/input/serio/serio_raw.c
43626@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43627
43628 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43629 {
43630- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43631+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43632 struct serio_raw *serio_raw;
43633 int err;
43634
43635@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43636 }
43637
43638 snprintf(serio_raw->name, sizeof(serio_raw->name),
43639- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43640+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43641 kref_init(&serio_raw->kref);
43642 INIT_LIST_HEAD(&serio_raw->client_list);
43643 init_waitqueue_head(&serio_raw->wait);
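The input-layer hunks (gameport, the input core, xpad, ims-pcu, serio, serio_raw) all touch the same idiom: a static atomic_t seeded with ATOMIC_INIT(-1) that hands out monotonically increasing device numbers for names like "serio%lu". A wrap after roughly 2^32 registrations is harmless for a label, so these counters move to atomic_unchecked_t and stop consuming REFCOUNT instrumentation. A compilable userspace model of the idiom:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long next_id = -1;   /* mirrors ATOMIC_INIT(-1) */

    static long alloc_device_no(void)
    {
            /* first caller gets 0, matching atomic_inc_return() on -1 */
            return atomic_fetch_add(&next_id, 1) + 1;
    }

    int main(void)
    {
            char name[32];

            snprintf(name, sizeof(name), "serio%lu",
                     (unsigned long)alloc_device_no());
            puts(name);   /* "serio0" */
            return 0;
    }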
43644diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
43645index 92e2243..8fd9092 100644
43646--- a/drivers/input/touchscreen/htcpen.c
43647+++ b/drivers/input/touchscreen/htcpen.c
43648@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
43649 }
43650 };
43651
43652-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
43653+static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
43654 {
43655 .ident = "Shift",
43656 .matches = {
43657diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43658index 48882c1..93e0987 100644
43659--- a/drivers/iommu/amd_iommu.c
43660+++ b/drivers/iommu/amd_iommu.c
43661@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43662
43663 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43664 {
43665+ phys_addr_t physaddr;
43666 WARN_ON(address & 0x7ULL);
43667
43668 memset(cmd, 0, sizeof(*cmd));
43669- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43670- cmd->data[1] = upper_32_bits(__pa(address));
43671+
43672+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43673+ if (object_starts_on_stack((void *)address)) {
43674+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43675+ physaddr = __pa((u64)adjbuf);
43676+ } else
43677+#endif
43678+ physaddr = __pa(address);
43679+
43680+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43681+ cmd->data[1] = upper_32_bits(physaddr);
43682 cmd->data[2] = 1;
43683 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43684 }
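build_completion_wait() converts the address of an iommu_cmd completion buffer with __pa(), which is only valid for linearly mapped memory. With GRKERNSEC_KSTACKOVERFLOW, kernel stacks are vmapped and have no linear mapping, so the patch detects a stack-resident buffer and re-derives the physical address through the task's lowmem alias of the stack before handing it to the IOMMU. A simplified model of that translation (struct task_model and the pa callback are illustrative stand-ins for the grsecurity task fields and __pa(), not kernel symbols):

    #include <stdint.h>

    struct task_model {
            char *stack;          /* vmapped stack: no linear mapping */
            char *lowmem_stack;   /* lowmem alias of the same pages */
    };

    static uintptr_t stack_obj_phys(const struct task_model *t,
                                    const void *obj,
                                    uintptr_t (*pa)(const void *))
    {
            uintptr_t off = (const char *)obj - t->stack;

            return pa(t->lowmem_stack + off);  /* translate via the alias */
    }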
43685diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43686index 72e683d..c9db262 100644
43687--- a/drivers/iommu/iommu.c
43688+++ b/drivers/iommu/iommu.c
43689@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43690 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43691 {
43692 int err;
43693- struct notifier_block *nb;
43694+ notifier_block_no_const *nb;
43695 struct iommu_callback_data cb = {
43696 .ops = ops,
43697 };
43698diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43699index 390079e..1da9d6c 100644
43700--- a/drivers/iommu/irq_remapping.c
43701+++ b/drivers/iommu/irq_remapping.c
43702@@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43703 void panic_if_irq_remap(const char *msg)
43704 {
43705 if (irq_remapping_enabled)
43706- panic(msg);
43707+ panic("%s", msg);
43708 }
43709
43710 static void ir_ack_apic_edge(struct irq_data *data)
43711@@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43712
43713 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43714 {
43715- chip->irq_print_chip = ir_print_prefix;
43716- chip->irq_ack = ir_ack_apic_edge;
43717- chip->irq_eoi = ir_ack_apic_level;
43718- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43719+ pax_open_kernel();
43720+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43721+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43722+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43723+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43724+ pax_close_kernel();
43725 }
43726
43727 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
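Two fixes share the irq_remapping.c hunks. panic(msg) becomes panic("%s", msg) so that any '%' in the message is printed verbatim rather than parsed as a conversion specifier, the standard format-string hardening. And irq_remap_modify_chip_defaults() now brackets its updates with pax_open_kernel()/pax_close_kernel(): under constification, irq_chip instances live in read-only memory (the irq_chip_no_const type in the irqchip hunks below marks the few that must stay writable), so the one legitimate writer briefly lifts write protection and stores through *(void **) casts to get past the const qualifier. A userspace model of that open/write/close pattern, with mprotect() playing the role of the kernel's CR0.WP toggle (page must be page-aligned, as with any mprotect() call):

    #include <string.h>
    #include <sys/mman.h>

    static void write_locked_slot(void *page, size_t len,
                                  void **slot, void *fn)
    {
            mprotect(page, len, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
            memcpy(slot, &fn, sizeof(fn));  /* *(void **)&chip->irq_ack = fn */
            mprotect(page, len, PROT_READ);              /* pax_close_kernel() */
    }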
43728diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43729index 471e1cd..b53b870 100644
43730--- a/drivers/irqchip/irq-gic.c
43731+++ b/drivers/irqchip/irq-gic.c
43732@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43733 * Supported arch specific GIC irq extension.
43734 * Default make them NULL.
43735 */
43736-struct irq_chip gic_arch_extn = {
43737+irq_chip_no_const gic_arch_extn = {
43738 .irq_eoi = NULL,
43739 .irq_mask = NULL,
43740 .irq_unmask = NULL,
43741@@ -318,7 +318,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43742 chained_irq_exit(chip, desc);
43743 }
43744
43745-static struct irq_chip gic_chip = {
43746+static irq_chip_no_const gic_chip __read_only = {
43747 .name = "GIC",
43748 .irq_mask = gic_mask_irq,
43749 .irq_unmask = gic_unmask_irq,
43750diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43751index 9a0767b..5e5f86f 100644
43752--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43753+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43754@@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43755 struct intc_irqpin_iomem *i;
43756 struct resource *io[INTC_IRQPIN_REG_NR];
43757 struct resource *irq;
43758- struct irq_chip *irq_chip;
43759+ irq_chip_no_const *irq_chip;
43760 void (*enable_fn)(struct irq_data *d);
43761 void (*disable_fn)(struct irq_data *d);
43762 const char *name = dev_name(dev);
43763diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43764index 384e6ed..7a771b2 100644
43765--- a/drivers/irqchip/irq-renesas-irqc.c
43766+++ b/drivers/irqchip/irq-renesas-irqc.c
43767@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43768 struct irqc_priv *p;
43769 struct resource *io;
43770 struct resource *irq;
43771- struct irq_chip *irq_chip;
43772+ irq_chip_no_const *irq_chip;
43773 const char *name = dev_name(&pdev->dev);
43774 int ret;
43775 int k;
43776diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43777index 6a2df32..dc962f1 100644
43778--- a/drivers/isdn/capi/capi.c
43779+++ b/drivers/isdn/capi/capi.c
43780@@ -81,8 +81,8 @@ struct capiminor {
43781
43782 struct capi20_appl *ap;
43783 u32 ncci;
43784- atomic_t datahandle;
43785- atomic_t msgid;
43786+ atomic_unchecked_t datahandle;
43787+ atomic_unchecked_t msgid;
43788
43789 struct tty_port port;
43790 int ttyinstop;
43791@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43792 capimsg_setu16(s, 2, mp->ap->applid);
43793 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43794 capimsg_setu8 (s, 5, CAPI_RESP);
43795- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43796+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43797 capimsg_setu32(s, 8, mp->ncci);
43798 capimsg_setu16(s, 12, datahandle);
43799 }
43800@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43801 mp->outbytes -= len;
43802 spin_unlock_bh(&mp->outlock);
43803
43804- datahandle = atomic_inc_return(&mp->datahandle);
43805+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43806 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43807 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43808 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43809 capimsg_setu16(skb->data, 2, mp->ap->applid);
43810 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43811 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43812- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43813+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43814 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43815 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43816 capimsg_setu16(skb->data, 16, len); /* Data length */
43817diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43818index aecec6d..11e13c5 100644
43819--- a/drivers/isdn/gigaset/bas-gigaset.c
43820+++ b/drivers/isdn/gigaset/bas-gigaset.c
43821@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43822
43823
43824 static const struct gigaset_ops gigops = {
43825- gigaset_write_cmd,
43826- gigaset_write_room,
43827- gigaset_chars_in_buffer,
43828- gigaset_brkchars,
43829- gigaset_init_bchannel,
43830- gigaset_close_bchannel,
43831- gigaset_initbcshw,
43832- gigaset_freebcshw,
43833- gigaset_reinitbcshw,
43834- gigaset_initcshw,
43835- gigaset_freecshw,
43836- gigaset_set_modem_ctrl,
43837- gigaset_baud_rate,
43838- gigaset_set_line_ctrl,
43839- gigaset_isoc_send_skb,
43840- gigaset_isoc_input,
43841+ .write_cmd = gigaset_write_cmd,
43842+ .write_room = gigaset_write_room,
43843+ .chars_in_buffer = gigaset_chars_in_buffer,
43844+ .brkchars = gigaset_brkchars,
43845+ .init_bchannel = gigaset_init_bchannel,
43846+ .close_bchannel = gigaset_close_bchannel,
43847+ .initbcshw = gigaset_initbcshw,
43848+ .freebcshw = gigaset_freebcshw,
43849+ .reinitbcshw = gigaset_reinitbcshw,
43850+ .initcshw = gigaset_initcshw,
43851+ .freecshw = gigaset_freecshw,
43852+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43853+ .baud_rate = gigaset_baud_rate,
43854+ .set_line_ctrl = gigaset_set_line_ctrl,
43855+ .send_skb = gigaset_isoc_send_skb,
43856+ .handle_input = gigaset_isoc_input,
43857 };
43858
43859 /* bas_gigaset_init
43860diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43861index 600c79b..3752bab 100644
43862--- a/drivers/isdn/gigaset/interface.c
43863+++ b/drivers/isdn/gigaset/interface.c
43864@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43865 }
43866 tty->driver_data = cs;
43867
43868- ++cs->port.count;
43869+ atomic_inc(&cs->port.count);
43870
43871- if (cs->port.count == 1) {
43872+ if (atomic_read(&cs->port.count) == 1) {
43873 tty_port_tty_set(&cs->port, tty);
43874 cs->port.low_latency = 1;
43875 }
43876@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43877
43878 if (!cs->connected)
43879 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43880- else if (!cs->port.count)
43881+ else if (!atomic_read(&cs->port.count))
43882 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43883- else if (!--cs->port.count)
43884+ else if (!atomic_dec_return(&cs->port.count))
43885 tty_port_tty_set(&cs->port, NULL);
43886
43887 mutex_unlock(&cs->mutex);
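The gigaset interface hunks convert cs->port.count from a plain integer to an atomic, and isdn_tty.c below performs the same conversion on its modem ports. The open count is adjusted from the open, close, and hangup paths, so a bare ++/-- is a read-modify-write that can lose updates; the atomic form makes each step indivisible, and because these conversions use the ordinary checked atomic_inc()/atomic_dec_return(), the count also gains REFCOUNT's overflow detection. A small model of the resulting shape:

    #include <stdatomic.h>

    struct port_model {
            atomic_int count;
    };

    static int port_open(struct port_model *p)
    {
            /* nonzero on the first open, like the patched
             * atomic_read(&cs->port.count) == 1 test */
            return atomic_fetch_add(&p->count, 1) == 0;
    }

    static int port_close(struct port_model *p)
    {
            /* nonzero on the last close */
            return atomic_fetch_sub(&p->count, 1) == 1;
    }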
43888diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43889index 8c91fd5..14f13ce 100644
43890--- a/drivers/isdn/gigaset/ser-gigaset.c
43891+++ b/drivers/isdn/gigaset/ser-gigaset.c
43892@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43893 }
43894
43895 static const struct gigaset_ops ops = {
43896- gigaset_write_cmd,
43897- gigaset_write_room,
43898- gigaset_chars_in_buffer,
43899- gigaset_brkchars,
43900- gigaset_init_bchannel,
43901- gigaset_close_bchannel,
43902- gigaset_initbcshw,
43903- gigaset_freebcshw,
43904- gigaset_reinitbcshw,
43905- gigaset_initcshw,
43906- gigaset_freecshw,
43907- gigaset_set_modem_ctrl,
43908- gigaset_baud_rate,
43909- gigaset_set_line_ctrl,
43910- gigaset_m10x_send_skb, /* asyncdata.c */
43911- gigaset_m10x_input, /* asyncdata.c */
43912+ .write_cmd = gigaset_write_cmd,
43913+ .write_room = gigaset_write_room,
43914+ .chars_in_buffer = gigaset_chars_in_buffer,
43915+ .brkchars = gigaset_brkchars,
43916+ .init_bchannel = gigaset_init_bchannel,
43917+ .close_bchannel = gigaset_close_bchannel,
43918+ .initbcshw = gigaset_initbcshw,
43919+ .freebcshw = gigaset_freebcshw,
43920+ .reinitbcshw = gigaset_reinitbcshw,
43921+ .initcshw = gigaset_initcshw,
43922+ .freecshw = gigaset_freecshw,
43923+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43924+ .baud_rate = gigaset_baud_rate,
43925+ .set_line_ctrl = gigaset_set_line_ctrl,
43926+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43927+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43928 };
43929
43930
43931diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43932index 5f306e2..5342f88 100644
43933--- a/drivers/isdn/gigaset/usb-gigaset.c
43934+++ b/drivers/isdn/gigaset/usb-gigaset.c
43935@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43936 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43937 memcpy(cs->hw.usb->bchars, buf, 6);
43938 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43939- 0, 0, &buf, 6, 2000);
43940+ 0, 0, buf, 6, 2000);
43941 }
43942
43943 static void gigaset_freebcshw(struct bc_state *bcs)
43944@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43945 }
43946
43947 static const struct gigaset_ops ops = {
43948- gigaset_write_cmd,
43949- gigaset_write_room,
43950- gigaset_chars_in_buffer,
43951- gigaset_brkchars,
43952- gigaset_init_bchannel,
43953- gigaset_close_bchannel,
43954- gigaset_initbcshw,
43955- gigaset_freebcshw,
43956- gigaset_reinitbcshw,
43957- gigaset_initcshw,
43958- gigaset_freecshw,
43959- gigaset_set_modem_ctrl,
43960- gigaset_baud_rate,
43961- gigaset_set_line_ctrl,
43962- gigaset_m10x_send_skb,
43963- gigaset_m10x_input,
43964+ .write_cmd = gigaset_write_cmd,
43965+ .write_room = gigaset_write_room,
43966+ .chars_in_buffer = gigaset_chars_in_buffer,
43967+ .brkchars = gigaset_brkchars,
43968+ .init_bchannel = gigaset_init_bchannel,
43969+ .close_bchannel = gigaset_close_bchannel,
43970+ .initbcshw = gigaset_initbcshw,
43971+ .freebcshw = gigaset_freebcshw,
43972+ .reinitbcshw = gigaset_reinitbcshw,
43973+ .initcshw = gigaset_initcshw,
43974+ .freecshw = gigaset_freecshw,
43975+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43976+ .baud_rate = gigaset_baud_rate,
43977+ .set_line_ctrl = gigaset_set_line_ctrl,
43978+ .send_skb = gigaset_m10x_send_skb,
43979+ .handle_input = gigaset_m10x_input,
43980 };
43981
43982 /*
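One hunk in usb-gigaset.c is a plain bug fix rather than hardening: gigaset_brkchars() passed &buf to usb_control_msg(), but buf is an array parameter and has therefore already decayed to a pointer, so &buf is the address of that local pointer variable and the device was sent pointer bytes instead of the six break characters. The corrected call passes buf itself. The decay in miniature:

    #include <stdio.h>

    /* for an array *parameter*, &buf is the address of the pointer
     * variable, not of the caller's six data bytes */
    static void send_model(const unsigned char buf[6])
    {
            printf("&buf=%p buf=%p\n", (const void *)&buf, (const void *)buf);
            /* the two differ: passing &buf transmits pointer bytes */
    }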
43983diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43984index 4d9b195..455075c 100644
43985--- a/drivers/isdn/hardware/avm/b1.c
43986+++ b/drivers/isdn/hardware/avm/b1.c
43987@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43988 }
43989 if (left) {
43990 if (t4file->user) {
43991- if (copy_from_user(buf, dp, left))
43992+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43993 return -EFAULT;
43994 } else {
43995 memcpy(buf, dp, left);
43996@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43997 }
43998 if (left) {
43999 if (config->user) {
44000- if (copy_from_user(buf, dp, left))
44001+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44002 return -EFAULT;
44003 } else {
44004 memcpy(buf, dp, left);
44005diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
44006index 9b856e1..fa03c92 100644
44007--- a/drivers/isdn/i4l/isdn_common.c
44008+++ b/drivers/isdn/i4l/isdn_common.c
44009@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
44010 } else
44011 return -EINVAL;
44012 case IIOCDBGVAR:
44013+ if (!capable(CAP_SYS_RAWIO))
44014+ return -EPERM;
44015 if (arg) {
44016 if (copy_to_user(argp, &dev, sizeof(ulong)))
44017 return -EFAULT;
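The IIOCDBGVAR ioctl copies the kernel address of the global dev structure out to userspace, a straight KASLR-defeating address disclosure, so the patch gates it on capable(CAP_SYS_RAWIO) before the existing copy_to_user() runs. A model of the gated shape (capable_model() is a stand-in for the capability test, and the assignment for the copy_to_user of &dev):

    #include <errno.h>

    static long dbgvar_model(int (*capable_model)(void),
                             unsigned long *user_slot,
                             const void *kernel_obj)
    {
            if (!capable_model())
                    return -EPERM;                  /* the added check */
            *user_slot = (unsigned long)kernel_obj; /* address disclosure */
            return 0;
    }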
44018diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
44019index 91d5730..336523e 100644
44020--- a/drivers/isdn/i4l/isdn_concap.c
44021+++ b/drivers/isdn/i4l/isdn_concap.c
44022@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
44023 }
44024
44025 struct concap_device_ops isdn_concap_reliable_dl_dops = {
44026- &isdn_concap_dl_data_req,
44027- &isdn_concap_dl_connect_req,
44028- &isdn_concap_dl_disconn_req
44029+ .data_req = &isdn_concap_dl_data_req,
44030+ .connect_req = &isdn_concap_dl_connect_req,
44031+ .disconn_req = &isdn_concap_dl_disconn_req
44032 };
44033
44034 /* The following should better go into a dedicated source file such that
44035diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
44036index bc91261..2ef7e36 100644
44037--- a/drivers/isdn/i4l/isdn_tty.c
44038+++ b/drivers/isdn/i4l/isdn_tty.c
44039@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
44040
44041 #ifdef ISDN_DEBUG_MODEM_OPEN
44042 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
44043- port->count);
44044+ atomic_read(&port->count));
44045 #endif
44046- port->count++;
44047+ atomic_inc(&port->count);
44048 port->tty = tty;
44049 /*
44050 * Start up serial port
44051@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44052 #endif
44053 return;
44054 }
44055- if ((tty->count == 1) && (port->count != 1)) {
44056+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
44057 /*
44058 * Uh, oh. tty->count is 1, which means that the tty
44059 * structure will be freed. Info->count should always
44060@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44061 * serial port won't be shutdown.
44062 */
44063 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
44064- "info->count is %d\n", port->count);
44065- port->count = 1;
44066+ "info->count is %d\n", atomic_read(&port->count));
44067+ atomic_set(&port->count, 1);
44068 }
44069- if (--port->count < 0) {
44070+ if (atomic_dec_return(&port->count) < 0) {
44071 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
44072- info->line, port->count);
44073- port->count = 0;
44074+ info->line, atomic_read(&port->count));
44075+ atomic_set(&port->count, 0);
44076 }
44077- if (port->count) {
44078+ if (atomic_read(&port->count)) {
44079 #ifdef ISDN_DEBUG_MODEM_OPEN
44080 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
44081 #endif
44082@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
44083 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
44084 return;
44085 isdn_tty_shutdown(info);
44086- port->count = 0;
44087+ atomic_set(&port->count, 0);
44088 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44089 port->tty = NULL;
44090 wake_up_interruptible(&port->open_wait);
44091@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
44092 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
44093 modem_info *info = &dev->mdm.info[i];
44094
44095- if (info->port.count == 0)
44096+ if (atomic_read(&info->port.count) == 0)
44097 continue;
44098 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
44099 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
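
The tty port open count is switched from bare `++`/`--` to atomic_inc()/atomic_dec_return() because the plain read-modify-write sequences can lose updates when open, close and hangup paths race. A userspace model using C11 atomics; port_open()/port_close() are illustrative names, and the clamping mirrors the patched error paths:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;        /* was a plain int */

    static void port_open(void)
    {
        atomic_fetch_add(&port_count, 1);              /* atomic_inc() */
    }

    static int port_close(void)
    {
        int n = atomic_fetch_sub(&port_count, 1) - 1;  /* atomic_dec_return() */
        if (n < 0) {
            atomic_store(&port_count, 0);  /* clamp bad counts, as the patch does */
            return 0;
        }
        return n;
    }

    int main(void)
    {
        port_open();
        port_open();
        printf("remaining: %d\n", port_close());  /* 1 */
        printf("remaining: %d\n", port_close());  /* 0 */
        return 0;
    }
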
44100diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
44101index e2d4e58..40cd045 100644
44102--- a/drivers/isdn/i4l/isdn_x25iface.c
44103+++ b/drivers/isdn/i4l/isdn_x25iface.c
44104@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
44105
44106
44107 static struct concap_proto_ops ix25_pops = {
44108- &isdn_x25iface_proto_new,
44109- &isdn_x25iface_proto_del,
44110- &isdn_x25iface_proto_restart,
44111- &isdn_x25iface_proto_close,
44112- &isdn_x25iface_xmit,
44113- &isdn_x25iface_receive,
44114- &isdn_x25iface_connect_ind,
44115- &isdn_x25iface_disconn_ind
44116+ .proto_new = &isdn_x25iface_proto_new,
44117+ .proto_del = &isdn_x25iface_proto_del,
44118+ .restart = &isdn_x25iface_proto_restart,
44119+ .close = &isdn_x25iface_proto_close,
44120+ .encap_and_xmit = &isdn_x25iface_xmit,
44121+ .data_ind = &isdn_x25iface_receive,
44122+ .connect_ind = &isdn_x25iface_connect_ind,
44123+ .disconn_ind = &isdn_x25iface_disconn_ind
44124 };
44125
44126 /* error message helper function */
44127diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44128index 358a574..b4987ea 100644
44129--- a/drivers/isdn/icn/icn.c
44130+++ b/drivers/isdn/icn/icn.c
44131@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
44132 if (count > len)
44133 count = len;
44134 if (user) {
44135- if (copy_from_user(msg, buf, count))
44136+ if (count > sizeof msg || copy_from_user(msg, buf, count))
44137 return -EFAULT;
44138 } else
44139 memcpy(msg, buf, count);
44140diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
44141index 87f7dff..7300125 100644
44142--- a/drivers/isdn/mISDN/dsp_cmx.c
44143+++ b/drivers/isdn/mISDN/dsp_cmx.c
44144@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
44145 static u16 dsp_count; /* last sample count */
44146 static int dsp_count_valid; /* if we have last sample count */
44147
44148-void
44149+void __intentional_overflow(-1)
44150 dsp_cmx_send(void *arg)
44151 {
44152 struct dsp_conf *conf;
44153diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44154index 0f9ed1e..2715d6f 100644
44155--- a/drivers/leds/leds-clevo-mail.c
44156+++ b/drivers/leds/leds-clevo-mail.c
44157@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44158 * detected as working, but in reality it is not) as low as
44159 * possible.
44160 */
44161-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44162+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44163 {
44164 .callback = clevo_mail_led_dmi_callback,
44165 .ident = "Clevo D410J",
44166diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44167index 046cb70..6b20d39 100644
44168--- a/drivers/leds/leds-ss4200.c
44169+++ b/drivers/leds/leds-ss4200.c
44170@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44171 * detected as working, but in reality it is not) as low as
44172 * possible.
44173 */
44174-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44175+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44176 {
44177 .callback = ss4200_led_dmi_callback,
44178 .ident = "Intel SS4200-E",
44179diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44180index 7dc93aa..8272379 100644
44181--- a/drivers/lguest/core.c
44182+++ b/drivers/lguest/core.c
44183@@ -96,9 +96,17 @@ static __init int map_switcher(void)
44184 * The end address needs +1 because __get_vm_area allocates an
44185 * extra guard page, so we need space for that.
44186 */
44187+
44188+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44189+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44190+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44191+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44192+#else
44193 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44194 VM_ALLOC, switcher_addr, switcher_addr
44195 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44196+#endif
44197+
44198 if (!switcher_vma) {
44199 err = -ENOMEM;
44200 printk("lguest: could not map switcher pages high\n");
44201@@ -121,7 +129,7 @@ static __init int map_switcher(void)
44202 * Now the Switcher is mapped at the right address, we can't fail!
44203 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44204 */
44205- memcpy(switcher_vma->addr, start_switcher_text,
44206+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44207 end_switcher_text - start_switcher_text);
44208
44209 printk(KERN_INFO "lguest: mapped switcher at %p\n",
44210diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44211index e3abebc9..6a35328 100644
44212--- a/drivers/lguest/page_tables.c
44213+++ b/drivers/lguest/page_tables.c
44214@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44215 /*:*/
44216
44217 #ifdef CONFIG_X86_PAE
44218-static void release_pmd(pmd_t *spmd)
44219+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44220 {
44221 /* If the entry's not present, there's nothing to release. */
44222 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44223diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44224index 30f2aef..391c748 100644
44225--- a/drivers/lguest/x86/core.c
44226+++ b/drivers/lguest/x86/core.c
44227@@ -60,7 +60,7 @@ static struct {
44228 /* Offset from where switcher.S was compiled to where we've copied it */
44229 static unsigned long switcher_offset(void)
44230 {
44231- return switcher_addr - (unsigned long)start_switcher_text;
44232+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44233 }
44234
44235 /* This cpu's struct lguest_pages (after the Switcher text page) */
44236@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44237 * These copies are pretty cheap, so we do them unconditionally: */
44238 /* Save the current Host top-level page directory.
44239 */
44240+
44241+#ifdef CONFIG_PAX_PER_CPU_PGD
44242+ pages->state.host_cr3 = read_cr3();
44243+#else
44244 pages->state.host_cr3 = __pa(current->mm->pgd);
44245+#endif
44246+
44247 /*
44248 * Set up the Guest's page tables to see this CPU's pages (and no
44249 * other CPU's pages).
44250@@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
44251 * compiled-in switcher code and the high-mapped copy we just made.
44252 */
44253 for (i = 0; i < IDT_ENTRIES; i++)
44254- default_idt_entries[i] += switcher_offset();
44255+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44256
44257 /*
44258 * Set up the Switcher's per-cpu areas.
44259@@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
44260 * it will be undisturbed when we switch. To change %cs and jump we
44261 * need this structure to feed to Intel's "lcall" instruction.
44262 */
44263- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44264+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44265 lguest_entry.segment = LGUEST_CS;
44266
44267 /*
44268diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44269index 40634b0..4f5855e 100644
44270--- a/drivers/lguest/x86/switcher_32.S
44271+++ b/drivers/lguest/x86/switcher_32.S
44272@@ -87,6 +87,7 @@
44273 #include <asm/page.h>
44274 #include <asm/segment.h>
44275 #include <asm/lguest.h>
44276+#include <asm/processor-flags.h>
44277
44278 // We mark the start of the code to copy
44279 // It's placed in .text tho it's never run here
44280@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44281 // Changes type when we load it: damn Intel!
44282 // For after we switch over our page tables
44283 // That entry will be read-only: we'd crash.
44284+
44285+#ifdef CONFIG_PAX_KERNEXEC
44286+ mov %cr0, %edx
44287+ xor $X86_CR0_WP, %edx
44288+ mov %edx, %cr0
44289+#endif
44290+
44291 movl $(GDT_ENTRY_TSS*8), %edx
44292 ltr %dx
44293
44294@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44295 // Let's clear it again for our return.
44296 // The GDT descriptor of the Host
44297 // Points to the table after two "size" bytes
44298- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44299+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44300 // Clear "used" from type field (byte 5, bit 2)
44301- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44302+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44303+
44304+#ifdef CONFIG_PAX_KERNEXEC
44305+ mov %cr0, %eax
44306+ xor $X86_CR0_WP, %eax
44307+ mov %eax, %cr0
44308+#endif
44309
44310 // Once our page table's switched, the Guest is live!
44311 // The Host fades as we run this final step.
44312@@ -295,13 +309,12 @@ deliver_to_host:
44313 // I consulted gcc, and it gave
44314 // These instructions, which I gladly credit:
44315 leal (%edx,%ebx,8), %eax
44316- movzwl (%eax),%edx
44317- movl 4(%eax), %eax
44318- xorw %ax, %ax
44319- orl %eax, %edx
44320+ movl 4(%eax), %edx
44321+ movw (%eax), %dx
44322 // Now the address of the handler's in %edx
44323 // We call it now: its "iret" drops us home.
44324- jmp *%edx
44325+ ljmp $__KERNEL_CS, $1f
44326+1: jmp *%edx
44327
44328 // Every interrupt can come to us here
44329 // But we must truly tell each apart.
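
The lguest changes are PaX adaptation glue: under KERNEXEC on 32-bit x86 the kernel text runs at a different virtual address than it was linked at, so every place that takes the address of switcher code is rebased through ktla_ktva(); and because kernel text becomes read-only, switcher_32.S briefly clears CR0.WP around writes to otherwise protected descriptor entries. A rough userspace analogue of that write-protect toggle, with mprotect() standing in for flipping X86_CR0_WP (assumes a POSIX system; this is a loose model, not the kernel mechanism):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        /* A read-only region stands in for write-protected kernel text. */
        char *p = mmap(NULL, page, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        mprotect(p, page, PROT_READ | PROT_WRITE);  /* "clear CR0.WP" */
        memcpy(p, "patched", 8);                    /* the protected write */
        mprotect(p, page, PROT_READ);               /* "set CR0.WP" again */

        printf("%s\n", p);
        return 0;
    }
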
44330diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44331index a08e3ee..df8ade2 100644
44332--- a/drivers/md/bcache/closure.h
44333+++ b/drivers/md/bcache/closure.h
44334@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44335 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44336 struct workqueue_struct *wq)
44337 {
44338- BUG_ON(object_is_on_stack(cl));
44339+ BUG_ON(object_starts_on_stack(cl));
44340 closure_set_ip(cl);
44341 cl->fn = fn;
44342 cl->wq = wq;
44343diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44344index 3a57679..c58cdaf 100644
44345--- a/drivers/md/bitmap.c
44346+++ b/drivers/md/bitmap.c
44347@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44348 chunk_kb ? "KB" : "B");
44349 if (bitmap->storage.file) {
44350 seq_printf(seq, ", file: ");
44351- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44352+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44353 }
44354
44355 seq_printf(seq, "\n");
44356diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44357index c8a18e4..0ab43e5 100644
44358--- a/drivers/md/dm-ioctl.c
44359+++ b/drivers/md/dm-ioctl.c
44360@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44361 cmd == DM_LIST_VERSIONS_CMD)
44362 return 0;
44363
44364- if ((cmd == DM_DEV_CREATE_CMD)) {
44365+ if (cmd == DM_DEV_CREATE_CMD) {
44366 if (!*param->name) {
44367 DMWARN("name not supplied when creating device");
44368 return -EINVAL;
44369diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44370index 089d627..ef7352e 100644
44371--- a/drivers/md/dm-raid1.c
44372+++ b/drivers/md/dm-raid1.c
44373@@ -40,7 +40,7 @@ enum dm_raid1_error {
44374
44375 struct mirror {
44376 struct mirror_set *ms;
44377- atomic_t error_count;
44378+ atomic_unchecked_t error_count;
44379 unsigned long error_type;
44380 struct dm_dev *dev;
44381 sector_t offset;
44382@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44383 struct mirror *m;
44384
44385 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44386- if (!atomic_read(&m->error_count))
44387+ if (!atomic_read_unchecked(&m->error_count))
44388 return m;
44389
44390 return NULL;
44391@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44392 * simple way to tell if a device has encountered
44393 * errors.
44394 */
44395- atomic_inc(&m->error_count);
44396+ atomic_inc_unchecked(&m->error_count);
44397
44398 if (test_and_set_bit(error_type, &m->error_type))
44399 return;
44400@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44401 struct mirror *m = get_default_mirror(ms);
44402
44403 do {
44404- if (likely(!atomic_read(&m->error_count)))
44405+ if (likely(!atomic_read_unchecked(&m->error_count)))
44406 return m;
44407
44408 if (m-- == ms->mirror)
44409@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44410 {
44411 struct mirror *default_mirror = get_default_mirror(m->ms);
44412
44413- return !atomic_read(&default_mirror->error_count);
44414+ return !atomic_read_unchecked(&default_mirror->error_count);
44415 }
44416
44417 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44418@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44419 */
44420 if (likely(region_in_sync(ms, region, 1)))
44421 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44422- else if (m && atomic_read(&m->error_count))
44423+ else if (m && atomic_read_unchecked(&m->error_count))
44424 m = NULL;
44425
44426 if (likely(m))
44427@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44428 }
44429
44430 ms->mirror[mirror].ms = ms;
44431- atomic_set(&(ms->mirror[mirror].error_count), 0);
44432+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44433 ms->mirror[mirror].error_type = 0;
44434 ms->mirror[mirror].offset = offset;
44435
44436@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44437 */
44438 static char device_status_char(struct mirror *m)
44439 {
44440- if (!atomic_read(&(m->error_count)))
44441+ if (!atomic_read_unchecked(&(m->error_count)))
44442 return 'A';
44443
44444 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
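
These conversions are the PaX REFCOUNT opt-out: atomic_t gains an overflow trap to stop reference-count exploits, so counters that are pure statistics, like the mirror error_count here and the md/dm counters in the hunks that follow, move to atomic_unchecked_t, which stays atomic but may wrap without tripping the detector. A sketch of the distinction in C11 atomics; checked_inc()/unchecked_inc() are illustrative, not the kernel API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <limits.h>

    /* checked_inc() refuses to wrap, modeling the REFCOUNT-protected
     * atomic_inc(); unchecked_inc() is a plain atomic add, modeling
     * atomic_inc_unchecked() for statistics that may legitimately wrap. */
    static int checked_inc(atomic_int *v)
    {
        int old = atomic_load(v);
        do {
            if (old == INT_MAX)
                return -1;               /* would overflow: refuse */
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));
        return 0;
    }

    static void unchecked_inc(atomic_int *v)
    {
        atomic_fetch_add(v, 1);          /* may wrap; acceptable for stats */
    }

    int main(void)
    {
        atomic_int refs = INT_MAX, stats = INT_MAX;

        printf("checked: %d\n", checked_inc(&refs));   /* -1: refused */
        unchecked_inc(&stats);
        printf("stats wrapped to: %d\n", atomic_load(&stats));
        return 0;
    }
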
44445diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44446index f478a4c..4b8e5ef 100644
44447--- a/drivers/md/dm-stats.c
44448+++ b/drivers/md/dm-stats.c
44449@@ -382,7 +382,7 @@ do_sync_free:
44450 synchronize_rcu_expedited();
44451 dm_stat_free(&s->rcu_head);
44452 } else {
44453- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44454+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44455 call_rcu(&s->rcu_head, dm_stat_free);
44456 }
44457 return 0;
44458@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44459 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44460 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44461 ));
44462- ACCESS_ONCE(last->last_sector) = end_sector;
44463- ACCESS_ONCE(last->last_rw) = bi_rw;
44464+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44465+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44466 }
44467
44468 rcu_read_lock();
44469diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44470index f8b37d4..5c5cafd 100644
44471--- a/drivers/md/dm-stripe.c
44472+++ b/drivers/md/dm-stripe.c
44473@@ -21,7 +21,7 @@ struct stripe {
44474 struct dm_dev *dev;
44475 sector_t physical_start;
44476
44477- atomic_t error_count;
44478+ atomic_unchecked_t error_count;
44479 };
44480
44481 struct stripe_c {
44482@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44483 kfree(sc);
44484 return r;
44485 }
44486- atomic_set(&(sc->stripe[i].error_count), 0);
44487+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44488 }
44489
44490 ti->private = sc;
44491@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44492 DMEMIT("%d ", sc->stripes);
44493 for (i = 0; i < sc->stripes; i++) {
44494 DMEMIT("%s ", sc->stripe[i].dev->name);
44495- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44496+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44497 'D' : 'A';
44498 }
44499 buffer[i] = '\0';
44500@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44501 */
44502 for (i = 0; i < sc->stripes; i++)
44503 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44504- atomic_inc(&(sc->stripe[i].error_count));
44505- if (atomic_read(&(sc->stripe[i].error_count)) <
44506+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44507+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44508 DM_IO_ERROR_THRESHOLD)
44509 schedule_work(&sc->trigger_event);
44510 }
44511diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44512index 6554d91..b0221c2 100644
44513--- a/drivers/md/dm-table.c
44514+++ b/drivers/md/dm-table.c
44515@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44516 if (!dev_size)
44517 return 0;
44518
44519- if ((start >= dev_size) || (start + len > dev_size)) {
44520+ if ((start >= dev_size) || (len > dev_size - start)) {
44521 DMWARN("%s: %s too small for target: "
44522 "start=%llu, len=%llu, dev_size=%llu",
44523 dm_device_name(ti->table->md), bdevname(bdev, b),
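
The rewritten device-area check is an integer-overflow fix: for hostile inputs `start + len` can wrap and slip past the old comparison, while `len > dev_size - start` cannot underflow because `start >= dev_size` has already been rejected. A demonstration with sector_t modeled as uint64_t:

    #include <stdio.h>
    #include <stdint.h>

    static int invalid_old(uint64_t start, uint64_t len, uint64_t size)
    {
        return start >= size || start + len > size;  /* start + len can wrap */
    }

    static int invalid_new(uint64_t start, uint64_t len, uint64_t size)
    {
        return start >= size || len > size - start;  /* cannot underflow here */
    }

    int main(void)
    {
        uint64_t size = 1000, start = 1, len = UINT64_MAX;  /* start+len wraps to 0 */

        printf("old form flags it: %d\n", invalid_old(start, len, size));  /* 0: missed */
        printf("new form flags it: %d\n", invalid_new(start, len, size));  /* 1: caught */
        return 0;
    }
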
44524diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44525index 79f6941..b33b4e0 100644
44526--- a/drivers/md/dm-thin-metadata.c
44527+++ b/drivers/md/dm-thin-metadata.c
44528@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44529 {
44530 pmd->info.tm = pmd->tm;
44531 pmd->info.levels = 2;
44532- pmd->info.value_type.context = pmd->data_sm;
44533+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44534 pmd->info.value_type.size = sizeof(__le64);
44535 pmd->info.value_type.inc = data_block_inc;
44536 pmd->info.value_type.dec = data_block_dec;
44537@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44538
44539 pmd->bl_info.tm = pmd->tm;
44540 pmd->bl_info.levels = 1;
44541- pmd->bl_info.value_type.context = pmd->data_sm;
44542+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44543 pmd->bl_info.value_type.size = sizeof(__le64);
44544 pmd->bl_info.value_type.inc = data_block_inc;
44545 pmd->bl_info.value_type.dec = data_block_dec;
44546diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44547index 8001fe9..abdd0d0 100644
44548--- a/drivers/md/dm.c
44549+++ b/drivers/md/dm.c
44550@@ -188,9 +188,9 @@ struct mapped_device {
44551 /*
44552 * Event handling.
44553 */
44554- atomic_t event_nr;
44555+ atomic_unchecked_t event_nr;
44556 wait_queue_head_t eventq;
44557- atomic_t uevent_seq;
44558+ atomic_unchecked_t uevent_seq;
44559 struct list_head uevent_list;
44560 spinlock_t uevent_lock; /* Protect access to uevent_list */
44561
44562@@ -2163,8 +2163,8 @@ static struct mapped_device *alloc_dev(int minor)
44563 spin_lock_init(&md->deferred_lock);
44564 atomic_set(&md->holders, 1);
44565 atomic_set(&md->open_count, 0);
44566- atomic_set(&md->event_nr, 0);
44567- atomic_set(&md->uevent_seq, 0);
44568+ atomic_set_unchecked(&md->event_nr, 0);
44569+ atomic_set_unchecked(&md->uevent_seq, 0);
44570 INIT_LIST_HEAD(&md->uevent_list);
44571 INIT_LIST_HEAD(&md->table_devices);
44572 spin_lock_init(&md->uevent_lock);
44573@@ -2329,7 +2329,7 @@ static void event_callback(void *context)
44574
44575 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44576
44577- atomic_inc(&md->event_nr);
44578+ atomic_inc_unchecked(&md->event_nr);
44579 wake_up(&md->eventq);
44580 }
44581
44582@@ -3175,18 +3175,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44583
44584 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44585 {
44586- return atomic_add_return(1, &md->uevent_seq);
44587+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44588 }
44589
44590 uint32_t dm_get_event_nr(struct mapped_device *md)
44591 {
44592- return atomic_read(&md->event_nr);
44593+ return atomic_read_unchecked(&md->event_nr);
44594 }
44595
44596 int dm_wait_event(struct mapped_device *md, int event_nr)
44597 {
44598 return wait_event_interruptible(md->eventq,
44599- (event_nr != atomic_read(&md->event_nr)));
44600+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44601 }
44602
44603 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44604diff --git a/drivers/md/md.c b/drivers/md/md.c
44605index 717daad..6dd103f 100644
44606--- a/drivers/md/md.c
44607+++ b/drivers/md/md.c
44608@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44609 * start build, activate spare
44610 */
44611 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44612-static atomic_t md_event_count;
44613+static atomic_unchecked_t md_event_count;
44614 void md_new_event(struct mddev *mddev)
44615 {
44616- atomic_inc(&md_event_count);
44617+ atomic_inc_unchecked(&md_event_count);
44618 wake_up(&md_event_waiters);
44619 }
44620 EXPORT_SYMBOL_GPL(md_new_event);
44621@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44622 */
44623 static void md_new_event_inintr(struct mddev *mddev)
44624 {
44625- atomic_inc(&md_event_count);
44626+ atomic_inc_unchecked(&md_event_count);
44627 wake_up(&md_event_waiters);
44628 }
44629
44630@@ -1438,7 +1438,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44631 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44632 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44633 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44634- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44635+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44636
44637 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44638 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44639@@ -1689,7 +1689,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44640 else
44641 sb->resync_offset = cpu_to_le64(0);
44642
44643- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44644+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44645
44646 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44647 sb->size = cpu_to_le64(mddev->dev_sectors);
44648@@ -2560,7 +2560,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
44649 static ssize_t
44650 errors_show(struct md_rdev *rdev, char *page)
44651 {
44652- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44653+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44654 }
44655
44656 static ssize_t
44657@@ -2569,7 +2569,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44658 char *e;
44659 unsigned long n = simple_strtoul(buf, &e, 10);
44660 if (*buf && (*e == 0 || *e == '\n')) {
44661- atomic_set(&rdev->corrected_errors, n);
44662+ atomic_set_unchecked(&rdev->corrected_errors, n);
44663 return len;
44664 }
44665 return -EINVAL;
44666@@ -3005,8 +3005,8 @@ int md_rdev_init(struct md_rdev *rdev)
44667 rdev->sb_loaded = 0;
44668 rdev->bb_page = NULL;
44669 atomic_set(&rdev->nr_pending, 0);
44670- atomic_set(&rdev->read_errors, 0);
44671- atomic_set(&rdev->corrected_errors, 0);
44672+ atomic_set_unchecked(&rdev->read_errors, 0);
44673+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44674
44675 INIT_LIST_HEAD(&rdev->same_set);
44676 init_waitqueue_head(&rdev->blocked_wait);
44677@@ -7079,7 +7079,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44678
44679 spin_unlock(&pers_lock);
44680 seq_printf(seq, "\n");
44681- seq->poll_event = atomic_read(&md_event_count);
44682+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44683 return 0;
44684 }
44685 if (v == (void*)2) {
44686@@ -7182,7 +7182,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44687 return error;
44688
44689 seq = file->private_data;
44690- seq->poll_event = atomic_read(&md_event_count);
44691+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44692 return error;
44693 }
44694
44695@@ -7199,7 +7199,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44696 /* always allow read */
44697 mask = POLLIN | POLLRDNORM;
44698
44699- if (seq->poll_event != atomic_read(&md_event_count))
44700+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44701 mask |= POLLERR | POLLPRI;
44702 return mask;
44703 }
44704@@ -7246,7 +7246,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44705 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44706 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44707 (int)part_stat_read(&disk->part0, sectors[1]) -
44708- atomic_read(&disk->sync_io);
44709+ atomic_read_unchecked(&disk->sync_io);
44710 /* sync IO will cause sync_io to increase before the disk_stats
44711 * as sync_io is counted when a request starts, and
44712 * disk_stats is counted when it completes.
44713diff --git a/drivers/md/md.h b/drivers/md/md.h
44714index 318ca8f..31e4478 100644
44715--- a/drivers/md/md.h
44716+++ b/drivers/md/md.h
44717@@ -94,13 +94,13 @@ struct md_rdev {
44718 * only maintained for arrays that
44719 * support hot removal
44720 */
44721- atomic_t read_errors; /* number of consecutive read errors that
44722+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44723 * we have tried to ignore.
44724 */
44725 struct timespec last_read_error; /* monotonic time since our
44726 * last read error
44727 */
44728- atomic_t corrected_errors; /* number of corrected read errors,
44729+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44730 * for reporting to userspace and storing
44731 * in superblock.
44732 */
44733@@ -476,7 +476,7 @@ extern void mddev_unlock(struct mddev *mddev);
44734
44735 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44736 {
44737- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44738+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44739 }
44740
44741 struct md_personality
44742diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44743index e8a9042..35bd145 100644
44744--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44745+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44746@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44747 * Flick into a mode where all blocks get allocated in the new area.
44748 */
44749 smm->begin = old_len;
44750- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44751+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44752
44753 /*
44754 * Extend.
44755@@ -714,7 +714,7 @@ out:
44756 /*
44757 * Switch back to normal behaviour.
44758 */
44759- memcpy(sm, &ops, sizeof(*sm));
44760+ memcpy((void *)sm, &ops, sizeof(*sm));
44761 return r;
44762 }
44763
44764diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44765index 3e6d115..ffecdeb 100644
44766--- a/drivers/md/persistent-data/dm-space-map.h
44767+++ b/drivers/md/persistent-data/dm-space-map.h
44768@@ -71,6 +71,7 @@ struct dm_space_map {
44769 dm_sm_threshold_fn fn,
44770 void *context);
44771 };
44772+typedef struct dm_space_map __no_const dm_space_map_no_const;
44773
44774 /*----------------------------------------------------------------*/
44775
44776diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44777index d34e238..34f8d98 100644
44778--- a/drivers/md/raid1.c
44779+++ b/drivers/md/raid1.c
44780@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44781 if (r1_sync_page_io(rdev, sect, s,
44782 bio->bi_io_vec[idx].bv_page,
44783 READ) != 0)
44784- atomic_add(s, &rdev->corrected_errors);
44785+ atomic_add_unchecked(s, &rdev->corrected_errors);
44786 }
44787 sectors -= s;
44788 sect += s;
44789@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44790 !test_bit(Faulty, &rdev->flags)) {
44791 if (r1_sync_page_io(rdev, sect, s,
44792 conf->tmppage, READ)) {
44793- atomic_add(s, &rdev->corrected_errors);
44794+ atomic_add_unchecked(s, &rdev->corrected_errors);
44795 printk(KERN_INFO
44796 "md/raid1:%s: read error corrected "
44797 "(%d sectors at %llu on %s)\n",
44798diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44799index a7196c4..439f012 100644
44800--- a/drivers/md/raid10.c
44801+++ b/drivers/md/raid10.c
44802@@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
44803 /* The write handler will notice the lack of
44804 * R10BIO_Uptodate and record any errors etc
44805 */
44806- atomic_add(r10_bio->sectors,
44807+ atomic_add_unchecked(r10_bio->sectors,
44808 &conf->mirrors[d].rdev->corrected_errors);
44809
44810 /* for reconstruct, we always reschedule after a read.
44811@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44812 {
44813 struct timespec cur_time_mon;
44814 unsigned long hours_since_last;
44815- unsigned int read_errors = atomic_read(&rdev->read_errors);
44816+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44817
44818 ktime_get_ts(&cur_time_mon);
44819
44820@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44821 * overflowing the shift of read_errors by hours_since_last.
44822 */
44823 if (hours_since_last >= 8 * sizeof(read_errors))
44824- atomic_set(&rdev->read_errors, 0);
44825+ atomic_set_unchecked(&rdev->read_errors, 0);
44826 else
44827- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44828+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44829 }
44830
44831 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44832@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44833 return;
44834
44835 check_decay_read_errors(mddev, rdev);
44836- atomic_inc(&rdev->read_errors);
44837- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44838+ atomic_inc_unchecked(&rdev->read_errors);
44839+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44840 char b[BDEVNAME_SIZE];
44841 bdevname(rdev->bdev, b);
44842
44843@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44844 "md/raid10:%s: %s: Raid device exceeded "
44845 "read_error threshold [cur %d:max %d]\n",
44846 mdname(mddev), b,
44847- atomic_read(&rdev->read_errors), max_read_errors);
44848+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44849 printk(KERN_NOTICE
44850 "md/raid10:%s: %s: Failing raid device\n",
44851 mdname(mddev), b);
44852@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44853 sect +
44854 choose_data_offset(r10_bio, rdev)),
44855 bdevname(rdev->bdev, b));
44856- atomic_add(s, &rdev->corrected_errors);
44857+ atomic_add_unchecked(s, &rdev->corrected_errors);
44858 }
44859
44860 rdev_dec_pending(rdev, mddev);
44861diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44862index cd2f96b..3876e63 100644
44863--- a/drivers/md/raid5.c
44864+++ b/drivers/md/raid5.c
44865@@ -947,23 +947,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
44866 struct bio_vec bvl;
44867 struct bvec_iter iter;
44868 struct page *bio_page;
44869- int page_offset;
44870+ s64 page_offset;
44871 struct async_submit_ctl submit;
44872 enum async_tx_flags flags = 0;
44873
44874 if (bio->bi_iter.bi_sector >= sector)
44875- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
44876+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
44877 else
44878- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
44879+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
44880
44881 if (frombio)
44882 flags |= ASYNC_TX_FENCE;
44883 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
44884
44885 bio_for_each_segment(bvl, bio, iter) {
44886- int len = bvl.bv_len;
44887- int clen;
44888- int b_offset = 0;
44889+ s64 len = bvl.bv_len;
44890+ s64 clen;
44891+ s64 b_offset = 0;
44892
44893 if (page_offset < 0) {
44894 b_offset = -page_offset;
44895@@ -1727,6 +1727,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44896 return 1;
44897 }
44898
44899+#ifdef CONFIG_GRKERNSEC_HIDESYM
44900+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44901+#endif
44902+
44903 static int grow_stripes(struct r5conf *conf, int num)
44904 {
44905 struct kmem_cache *sc;
44906@@ -1738,7 +1742,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44907 "raid%d-%s", conf->level, mdname(conf->mddev));
44908 else
44909 sprintf(conf->cache_name[0],
44910+#ifdef CONFIG_GRKERNSEC_HIDESYM
44911+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44912+#else
44913 "raid%d-%p", conf->level, conf->mddev);
44914+#endif
44915 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44916
44917 conf->active_name = 0;
44918@@ -2014,21 +2022,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44919 mdname(conf->mddev), STRIPE_SECTORS,
44920 (unsigned long long)s,
44921 bdevname(rdev->bdev, b));
44922- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44923+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44924 clear_bit(R5_ReadError, &sh->dev[i].flags);
44925 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44926 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44927 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44928
44929- if (atomic_read(&rdev->read_errors))
44930- atomic_set(&rdev->read_errors, 0);
44931+ if (atomic_read_unchecked(&rdev->read_errors))
44932+ atomic_set_unchecked(&rdev->read_errors, 0);
44933 } else {
44934 const char *bdn = bdevname(rdev->bdev, b);
44935 int retry = 0;
44936 int set_bad = 0;
44937
44938 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44939- atomic_inc(&rdev->read_errors);
44940+ atomic_inc_unchecked(&rdev->read_errors);
44941 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44942 printk_ratelimited(
44943 KERN_WARNING
44944@@ -2056,7 +2064,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44945 mdname(conf->mddev),
44946 (unsigned long long)s,
44947 bdn);
44948- } else if (atomic_read(&rdev->read_errors)
44949+ } else if (atomic_read_unchecked(&rdev->read_errors)
44950 > conf->max_nr_stripes)
44951 printk(KERN_WARNING
44952 "md/raid:%s: Too many read errors, failing device %s.\n",
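
Under GRKERNSEC_HIDESYM the raid5 slab cache name would otherwise embed a kernel pointer via "%p" and leak it through /proc/slabinfo, so the patch substitutes a monotonically increasing id, keeping names unique without exposing an address. A sketch of the naming scheme; make_cache_name() is an illustrative stand-in:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    static atomic_ulong cache_id;    /* monotonic id instead of a pointer */

    static void make_cache_name(char *buf, size_t n, int level)
    {
        unsigned long id = atomic_fetch_add(&cache_id, 1) + 1;
        snprintf(buf, n, "raid%d-%08lx", level, id);   /* was "raid%d-%p" */
    }

    int main(void)
    {
        char name[32];

        make_cache_name(name, sizeof(name), 5);
        printf("%s\n", name);
        make_cache_name(name, sizeof(name), 5);
        printf("%s\n", name);        /* unique, but leaks no address */
        return 0;
    }
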
44953diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44954index 983db75..ef9248c 100644
44955--- a/drivers/media/dvb-core/dvbdev.c
44956+++ b/drivers/media/dvb-core/dvbdev.c
44957@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44958 const struct dvb_device *template, void *priv, int type)
44959 {
44960 struct dvb_device *dvbdev;
44961- struct file_operations *dvbdevfops;
44962+ file_operations_no_const *dvbdevfops;
44963 struct device *clsdev;
44964 int minor;
44965 int id;
44966diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44967index 6ad22b6..6e90e2a 100644
44968--- a/drivers/media/dvb-frontends/af9033.h
44969+++ b/drivers/media/dvb-frontends/af9033.h
44970@@ -96,6 +96,6 @@ struct af9033_ops {
44971 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44972 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44973 int onoff);
44974-};
44975+} __no_const;
44976
44977 #endif /* AF9033_H */
44978diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44979index 9b6c3bb..baeb5c7 100644
44980--- a/drivers/media/dvb-frontends/dib3000.h
44981+++ b/drivers/media/dvb-frontends/dib3000.h
44982@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44983 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44984 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44985 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44986-};
44987+} __no_const;
44988
44989 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44990 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44991diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44992index 1fea0e9..321ce8f 100644
44993--- a/drivers/media/dvb-frontends/dib7000p.h
44994+++ b/drivers/media/dvb-frontends/dib7000p.h
44995@@ -64,7 +64,7 @@ struct dib7000p_ops {
44996 int (*get_adc_power)(struct dvb_frontend *fe);
44997 int (*slave_reset)(struct dvb_frontend *fe);
44998 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44999-};
45000+} __no_const;
45001
45002 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
45003 void *dib7000p_attach(struct dib7000p_ops *ops);
45004diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
45005index 84cc103..5780c54 100644
45006--- a/drivers/media/dvb-frontends/dib8000.h
45007+++ b/drivers/media/dvb-frontends/dib8000.h
45008@@ -61,7 +61,7 @@ struct dib8000_ops {
45009 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
45010 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
45011 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
45012-};
45013+} __no_const;
45014
45015 #if IS_ENABLED(CONFIG_DVB_DIB8000)
45016 void *dib8000_attach(struct dib8000_ops *ops);
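
This group serves the constification plugin, which makes ops tables read-only after init: dvbdev.c builds its file_operations at runtime and so uses the writable file_operations_no_const typedef, while the dvb-frontends callback tables above, whose members are assigned late, are annotated __no_const to exempt them. The effect, modeled with plain const (names are illustrative):

    #include <stdio.h>

    struct ops {
        int (*ioctl)(int);
    };

    static int v1(int x) { return x; }
    static int v2(int x) { return x * 2; }

    /* Constified table: read-only after initialization. */
    static const struct ops fixed_ops = { .ioctl = v1 };

    /* "_no_const" equivalent: left writable on purpose because its
     * callbacks are filled in or swapped at runtime. */
    static struct ops runtime_ops;

    int main(void)
    {
        runtime_ops.ioctl = v1;       /* legal: table is writable */
        printf("%d\n", runtime_ops.ioctl(21));
        runtime_ops.ioctl = v2;       /* runtime replacement */
        printf("%d\n", runtime_ops.ioctl(21));
        /* fixed_ops.ioctl = v2; would not compile: the table is const */
        printf("%d\n", fixed_ops.ioctl(21));
        return 0;
    }
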
45017diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
45018index 860c98fc..497fa25 100644
45019--- a/drivers/media/pci/cx88/cx88-video.c
45020+++ b/drivers/media/pci/cx88/cx88-video.c
45021@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
45022
45023 /* ------------------------------------------------------------------ */
45024
45025-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45026-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45027-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45028+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45029+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45030+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45031
45032 module_param_array(video_nr, int, NULL, 0444);
45033 module_param_array(vbi_nr, int, NULL, 0444);
45034diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
45035index 802642d..5534900 100644
45036--- a/drivers/media/pci/ivtv/ivtv-driver.c
45037+++ b/drivers/media/pci/ivtv/ivtv-driver.c
45038@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
45039 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
45040
45041 /* ivtv instance counter */
45042-static atomic_t ivtv_instance = ATOMIC_INIT(0);
45043+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
45044
45045 /* Parameter declarations */
45046 static int cardtype[IVTV_MAX_CARDS];
45047diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
45048index 570d119..ed25830 100644
45049--- a/drivers/media/pci/solo6x10/solo6x10-core.c
45050+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
45051@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
45052
45053 static int solo_sysfs_init(struct solo_dev *solo_dev)
45054 {
45055- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45056+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45057 struct device *dev = &solo_dev->dev;
45058 const char *driver;
45059 int i;
45060diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
45061index 7ddc767..1c24361 100644
45062--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
45063+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
45064@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
45065
45066 int solo_g723_init(struct solo_dev *solo_dev)
45067 {
45068- static struct snd_device_ops ops = { NULL };
45069+ static struct snd_device_ops ops = { };
45070 struct snd_card *card;
45071 struct snd_kcontrol_new kctl;
45072 char name[32];
45073diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45074index 8c84846..27b4f83 100644
45075--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
45076+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45077@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
45078
45079 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
45080 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
45081- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
45082+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
45083 if (p2m_id < 0)
45084 p2m_id = -p2m_id;
45085 }
45086diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
45087index 1ca54b0..7d7cb9a 100644
45088--- a/drivers/media/pci/solo6x10/solo6x10.h
45089+++ b/drivers/media/pci/solo6x10/solo6x10.h
45090@@ -218,7 +218,7 @@ struct solo_dev {
45091
45092 /* P2M DMA Engine */
45093 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
45094- atomic_t p2m_count;
45095+ atomic_unchecked_t p2m_count;
45096 int p2m_jiffies;
45097 unsigned int p2m_timeouts;
45098
45099diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
45100index c135165..dc69499 100644
45101--- a/drivers/media/pci/tw68/tw68-core.c
45102+++ b/drivers/media/pci/tw68/tw68-core.c
45103@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
45104 module_param_array(card, int, NULL, 0444);
45105 MODULE_PARM_DESC(card, "card type");
45106
45107-static atomic_t tw68_instance = ATOMIC_INIT(0);
45108+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
45109
45110 /* ------------------------------------------------------------------ */
45111
45112diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45113index ba2d8f9..1566684 100644
45114--- a/drivers/media/platform/omap/omap_vout.c
45115+++ b/drivers/media/platform/omap/omap_vout.c
45116@@ -63,7 +63,6 @@ enum omap_vout_channels {
45117 OMAP_VIDEO2,
45118 };
45119
45120-static struct videobuf_queue_ops video_vbq_ops;
45121 /* Variables configurable through module params*/
45122 static u32 video1_numbuffers = 3;
45123 static u32 video2_numbuffers = 3;
45124@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
45125 {
45126 struct videobuf_queue *q;
45127 struct omap_vout_device *vout = NULL;
45128+ static struct videobuf_queue_ops video_vbq_ops = {
45129+ .buf_setup = omap_vout_buffer_setup,
45130+ .buf_prepare = omap_vout_buffer_prepare,
45131+ .buf_release = omap_vout_buffer_release,
45132+ .buf_queue = omap_vout_buffer_queue,
45133+ };
45134
45135 vout = video_drvdata(file);
45136 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
45137@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
45138 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
45139
45140 q = &vout->vbq;
45141- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
45142- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
45143- video_vbq_ops.buf_release = omap_vout_buffer_release;
45144- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
45145 spin_lock_init(&vout->vbq_lock);
45146
45147 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
45148diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
45149index fb2acc5..a2fcbdc4 100644
45150--- a/drivers/media/platform/s5p-tv/mixer.h
45151+++ b/drivers/media/platform/s5p-tv/mixer.h
45152@@ -156,7 +156,7 @@ struct mxr_layer {
45153 /** layer index (unique identifier) */
45154 int idx;
45155 /** callbacks for layer methods */
45156- struct mxr_layer_ops ops;
45157+ struct mxr_layer_ops *ops;
45158 /** format array */
45159 const struct mxr_format **fmt_array;
45160 /** size of format array */
45161diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45162index 74344c7..a39e70e 100644
45163--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45164+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45165@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
45166 {
45167 struct mxr_layer *layer;
45168 int ret;
45169- struct mxr_layer_ops ops = {
45170+ static struct mxr_layer_ops ops = {
45171 .release = mxr_graph_layer_release,
45172 .buffer_set = mxr_graph_buffer_set,
45173 .stream_set = mxr_graph_stream_set,
45174diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
45175index b713403..53cb5ad 100644
45176--- a/drivers/media/platform/s5p-tv/mixer_reg.c
45177+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
45178@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
45179 layer->update_buf = next;
45180 }
45181
45182- layer->ops.buffer_set(layer, layer->update_buf);
45183+ layer->ops->buffer_set(layer, layer->update_buf);
45184
45185 if (done && done != layer->shadow_buf)
45186 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
45187diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
45188index 72d4f2e..4b2ea0d 100644
45189--- a/drivers/media/platform/s5p-tv/mixer_video.c
45190+++ b/drivers/media/platform/s5p-tv/mixer_video.c
45191@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
45192 layer->geo.src.height = layer->geo.src.full_height;
45193
45194 mxr_geometry_dump(mdev, &layer->geo);
45195- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45196+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45197 mxr_geometry_dump(mdev, &layer->geo);
45198 }
45199
45200@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
45201 layer->geo.dst.full_width = mbus_fmt.width;
45202 layer->geo.dst.full_height = mbus_fmt.height;
45203 layer->geo.dst.field = mbus_fmt.field;
45204- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45205+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45206
45207 mxr_geometry_dump(mdev, &layer->geo);
45208 }
45209@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
45210 /* set source size to highest accepted value */
45211 geo->src.full_width = max(geo->dst.full_width, pix->width);
45212 geo->src.full_height = max(geo->dst.full_height, pix->height);
45213- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45214+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45215 mxr_geometry_dump(mdev, &layer->geo);
45216 /* set cropping to total visible screen */
45217 geo->src.width = pix->width;
45218@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
45219 geo->src.x_offset = 0;
45220 geo->src.y_offset = 0;
45221 /* assure consistency of geometry */
45222- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45223+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45224 mxr_geometry_dump(mdev, &layer->geo);
45225 /* set full size to lowest possible value */
45226 geo->src.full_width = 0;
45227 geo->src.full_height = 0;
45228- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45229+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45230 mxr_geometry_dump(mdev, &layer->geo);
45231
45232 /* returning results */
45233@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
45234 target->width = s->r.width;
45235 target->height = s->r.height;
45236
45237- layer->ops.fix_geometry(layer, stage, s->flags);
45238+ layer->ops->fix_geometry(layer, stage, s->flags);
45239
45240 /* retrieve update selection rectangle */
45241 res.left = target->x_offset;
45242@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45243 mxr_output_get(mdev);
45244
45245 mxr_layer_update_output(layer);
45246- layer->ops.format_set(layer);
45247+ layer->ops->format_set(layer);
45248 /* enabling layer in hardware */
45249 spin_lock_irqsave(&layer->enq_slock, flags);
45250 layer->state = MXR_LAYER_STREAMING;
45251 spin_unlock_irqrestore(&layer->enq_slock, flags);
45252
45253- layer->ops.stream_set(layer, MXR_ENABLE);
45254+ layer->ops->stream_set(layer, MXR_ENABLE);
45255 mxr_streamer_get(mdev);
45256
45257 return 0;
45258@@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
45259 spin_unlock_irqrestore(&layer->enq_slock, flags);
45260
45261 /* disabling layer in hardware */
45262- layer->ops.stream_set(layer, MXR_DISABLE);
45263+ layer->ops->stream_set(layer, MXR_DISABLE);
45264 /* remove one streamer */
45265 mxr_streamer_put(mdev);
45266 /* allow changes in output configuration */
45267@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45268
45269 void mxr_layer_release(struct mxr_layer *layer)
45270 {
45271- if (layer->ops.release)
45272- layer->ops.release(layer);
45273+ if (layer->ops->release)
45274+ layer->ops->release(layer);
45275 }
45276
45277 void mxr_base_layer_release(struct mxr_layer *layer)
45278@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45279
45280 layer->mdev = mdev;
45281 layer->idx = idx;
45282- layer->ops = *ops;
45283+ layer->ops = ops;
45284
45285 spin_lock_init(&layer->enq_slock);
45286 INIT_LIST_HEAD(&layer->enq_list);
45287diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45288index c9388c4..ce71ece 100644
45289--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45290+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45291@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45292 {
45293 struct mxr_layer *layer;
45294 int ret;
45295- struct mxr_layer_ops ops = {
45296+ static struct mxr_layer_ops ops = {
45297 .release = mxr_vp_layer_release,
45298 .buffer_set = mxr_vp_buffer_set,
45299 .stream_set = mxr_vp_stream_set,
45300diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45301index 82affae..42833ec 100644
45302--- a/drivers/media/radio/radio-cadet.c
45303+++ b/drivers/media/radio/radio-cadet.c
45304@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45305 unsigned char readbuf[RDS_BUFFER];
45306 int i = 0;
45307
45308+ if (count > RDS_BUFFER)
45309+ return -EFAULT;
45310 mutex_lock(&dev->lock);
45311 if (dev->rdsstat == 0)
45312 cadet_start_rds(dev);
45313@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45314 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45315 mutex_unlock(&dev->lock);
45316
45317- if (i && copy_to_user(data, readbuf, i))
45318- return -EFAULT;
45319+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45320+ i = -EFAULT;
45321+
45322 return i;
45323 }
45324
45325diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45326index 5236035..c622c74 100644
45327--- a/drivers/media/radio/radio-maxiradio.c
45328+++ b/drivers/media/radio/radio-maxiradio.c
45329@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45330 /* TEA5757 pin mappings */
45331 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45332
45333-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45334+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45335
45336 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45337 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45338diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45339index 050b3bb..79f62b9 100644
45340--- a/drivers/media/radio/radio-shark.c
45341+++ b/drivers/media/radio/radio-shark.c
45342@@ -79,7 +79,7 @@ struct shark_device {
45343 u32 last_val;
45344 };
45345
45346-static atomic_t shark_instance = ATOMIC_INIT(0);
45347+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45348
45349 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45350 {
45351diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45352index 8654e0d..0608a64 100644
45353--- a/drivers/media/radio/radio-shark2.c
45354+++ b/drivers/media/radio/radio-shark2.c
45355@@ -74,7 +74,7 @@ struct shark_device {
45356 u8 *transfer_buffer;
45357 };
45358
45359-static atomic_t shark_instance = ATOMIC_INIT(0);
45360+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45361
45362 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45363 {
45364diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45365index dccf586..d5db411 100644
45366--- a/drivers/media/radio/radio-si476x.c
45367+++ b/drivers/media/radio/radio-si476x.c
45368@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45369 struct si476x_radio *radio;
45370 struct v4l2_ctrl *ctrl;
45371
45372- static atomic_t instance = ATOMIC_INIT(0);
45373+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45374
45375 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45376 if (!radio)
45377diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45378index 704397f..4d05977 100644
45379--- a/drivers/media/radio/wl128x/fmdrv_common.c
45380+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45381@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45382 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45383
45384 /* Radio Nr */
45385-static u32 radio_nr = -1;
45386+static int radio_nr = -1;
45387 module_param(radio_nr, int, 0444);
45388 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45389
45390diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45391index 9fd1527..8927230 100644
45392--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45393+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45394@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45395
45396 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45397 {
45398- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45399- char result[64];
45400- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45401- sizeof(result), 0);
45402+ char *buf;
45403+ char *result;
45404+ int retval;
45405+
45406+ buf = kmalloc(2, GFP_KERNEL);
45407+ if (buf == NULL)
45408+ return -ENOMEM;
45409+ result = kmalloc(64, GFP_KERNEL);
45410+ if (result == NULL) {
45411+ kfree(buf);
45412+ return -ENOMEM;
45413+ }
45414+
45415+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45416+ buf[1] = enable ? 1 : 0;
45417+
45418+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45419+
45420+ kfree(buf);
45421+ kfree(result);
45422+ return retval;
45423 }
45424
45425 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45426 {
45427- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45428- char state[3];
45429- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45430+ char *buf;
45431+ char *state;
45432+ int retval;
45433+
45434+ buf = kmalloc(2, GFP_KERNEL);
45435+ if (buf == NULL)
45436+ return -ENOMEM;
45437+ state = kmalloc(3, GFP_KERNEL);
45438+ if (state == NULL) {
45439+ kfree(buf);
45440+ return -ENOMEM;
45441+ }
45442+
45443+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
 45444+	buf[1] = enable ? 0 : 1;
45445+
45446+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45447+
45448+ kfree(buf);
45449+ kfree(state);
45450+ return retval;
45451 }
45452
45453 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45454 {
45455- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45456- char state[3];
45457+ char *query;
45458+ char *state;
45459 int ret;
45460+ query = kmalloc(1, GFP_KERNEL);
45461+ if (query == NULL)
45462+ return -ENOMEM;
45463+ state = kmalloc(3, GFP_KERNEL);
45464+ if (state == NULL) {
45465+ kfree(query);
45466+ return -ENOMEM;
45467+ }
45468+
45469+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45470
45471 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45472
45473- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45474- sizeof(state), 0);
45475+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45476 if (ret < 0) {
45477 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45478 "state info\n");
45479@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45480
45481 /* Copy this pointer as we are gonna need it in the release phase */
45482 cinergyt2_usb_device = adap->dev;
45483-
45484+ kfree(query);
45485+ kfree(state);
45486 return 0;
45487 }
45488
45489@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45490 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45491 {
45492 struct cinergyt2_state *st = d->priv;
45493- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45494+ u8 *key, *cmd;
45495 int i;
45496
45497+ cmd = kmalloc(1, GFP_KERNEL);
45498+ if (cmd == NULL)
 45499+		return -ENOMEM;
45500+ key = kzalloc(5, GFP_KERNEL);
45501+ if (key == NULL) {
45502+ kfree(cmd);
 45503+		return -ENOMEM;
45504+ }
45505+
45506+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45507+
45508 *state = REMOTE_NO_KEY_PRESSED;
45509
45510- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45511+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45512 if (key[4] == 0xff) {
45513 /* key repeat */
45514 st->rc_counter++;
45515@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45516 *event = d->last_event;
45517 deb_rc("repeat key, event %x\n",
45518 *event);
45519- return 0;
45520+ goto out;
45521 }
45522 }
45523 deb_rc("repeated key (non repeatable)\n");
45524 }
45525- return 0;
45526+ goto out;
45527 }
45528
45529 /* hack to pass checksum on the custom field */
45530@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45531
45532 deb_rc("key: %*ph\n", 5, key);
45533 }
45534+out:
45535+ kfree(cmd);
45536+ kfree(key);
45537 return 0;
45538 }
45539
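Every rewrite in the cinergyT2-core.c hunk is the same transformation: buffers passed to dvb_usb_generic_rw() move from the stack to kmalloc() memory, with a matching kfree() on every exit path. The USB core may DMA into these buffers, and with grsecurity's vmalloc-backed kernel stacks (GRKERNSEC_KSTACKOVERFLOW) an on-stack buffer is no longer guaranteed to be physically contiguous or DMA-addressable. Reduced to a sketch (send_one_byte is a hypothetical helper, not part of the driver):

	static int send_one_byte(struct dvb_usb_device *d, u8 request)
	{
		u8 *buf;
		int ret;

		/* Heap-allocate even a 1-byte transfer buffer so the
		 * USB core can DMA to it wherever the stack lives. */
		buf = kmalloc(1, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;

		buf[0] = request;
		ret = dvb_usb_generic_write(d, buf, 1);

		kfree(buf);
		return ret;
	}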
45540diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45541index c890fe4..f9b2ae6 100644
45542--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45543+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45544@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45545 fe_status_t *status)
45546 {
45547 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45548- struct dvbt_get_status_msg result;
45549- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45550+ struct dvbt_get_status_msg *result;
45551+ u8 *cmd;
45552 int ret;
45553
45554- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45555- sizeof(result), 0);
45556+ cmd = kmalloc(1, GFP_KERNEL);
45557+ if (cmd == NULL)
45558+ return -ENOMEM;
45559+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45560+ if (result == NULL) {
45561+ kfree(cmd);
45562+ return -ENOMEM;
45563+ }
45564+
45565+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45566+
45567+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45568+ sizeof(*result), 0);
45569 if (ret < 0)
45570- return ret;
45571+ goto out;
45572
45573 *status = 0;
45574
45575- if (0xffff - le16_to_cpu(result.gain) > 30)
45576+ if (0xffff - le16_to_cpu(result->gain) > 30)
45577 *status |= FE_HAS_SIGNAL;
45578- if (result.lock_bits & (1 << 6))
45579+ if (result->lock_bits & (1 << 6))
45580 *status |= FE_HAS_LOCK;
45581- if (result.lock_bits & (1 << 5))
45582+ if (result->lock_bits & (1 << 5))
45583 *status |= FE_HAS_SYNC;
45584- if (result.lock_bits & (1 << 4))
45585+ if (result->lock_bits & (1 << 4))
45586 *status |= FE_HAS_CARRIER;
45587- if (result.lock_bits & (1 << 1))
45588+ if (result->lock_bits & (1 << 1))
45589 *status |= FE_HAS_VITERBI;
45590
45591 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45592 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45593 *status &= ~FE_HAS_LOCK;
45594
45595- return 0;
45596+out:
45597+ kfree(cmd);
45598+ kfree(result);
45599+ return ret;
45600 }
45601
45602 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45603 {
45604 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45605- struct dvbt_get_status_msg status;
45606- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45607+ struct dvbt_get_status_msg *status;
45608+ char *cmd;
45609 int ret;
45610
45611- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45612- sizeof(status), 0);
45613+ cmd = kmalloc(1, GFP_KERNEL);
45614+ if (cmd == NULL)
45615+ return -ENOMEM;
45616+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45617+ if (status == NULL) {
45618+ kfree(cmd);
45619+ return -ENOMEM;
45620+ }
45621+
45622+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45623+
45624+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45625+ sizeof(*status), 0);
45626 if (ret < 0)
45627- return ret;
45628+ goto out;
45629
45630- *ber = le32_to_cpu(status.viterbi_error_rate);
45631+ *ber = le32_to_cpu(status->viterbi_error_rate);
45632+out:
45633+ kfree(cmd);
45634+ kfree(status);
 45635-	return 0;
+	return ret;
45636 }
45637
45638 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45639 {
45640 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45641- struct dvbt_get_status_msg status;
45642- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45643+ struct dvbt_get_status_msg *status;
45644+ u8 *cmd;
45645 int ret;
45646
45647- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45648- sizeof(status), 0);
45649+ cmd = kmalloc(1, GFP_KERNEL);
45650+ if (cmd == NULL)
45651+ return -ENOMEM;
45652+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45653+ if (status == NULL) {
45654+ kfree(cmd);
45655+ return -ENOMEM;
45656+ }
45657+
45658+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45659+
45660+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45661+ sizeof(*status), 0);
45662 if (ret < 0) {
45663 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45664 ret);
45665- return ret;
45666+ goto out;
45667 }
45668- *unc = le32_to_cpu(status.uncorrected_block_count);
45669- return 0;
45670+ *unc = le32_to_cpu(status->uncorrected_block_count);
45671+
45672+out:
45673+ kfree(cmd);
45674+ kfree(status);
45675+ return ret;
45676 }
45677
45678 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45679 u16 *strength)
45680 {
45681 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45682- struct dvbt_get_status_msg status;
45683- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45684+ struct dvbt_get_status_msg *status;
45685+ char *cmd;
45686 int ret;
45687
45688- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45689- sizeof(status), 0);
45690+ cmd = kmalloc(1, GFP_KERNEL);
45691+ if (cmd == NULL)
45692+ return -ENOMEM;
45693+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45694+ if (status == NULL) {
45695+ kfree(cmd);
45696+ return -ENOMEM;
45697+ }
45698+
45699+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45700+
45701+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45702+ sizeof(*status), 0);
45703 if (ret < 0) {
45704 err("cinergyt2_fe_read_signal_strength() Failed!"
45705 " (Error=%d)\n", ret);
45706- return ret;
45707+ goto out;
45708 }
45709- *strength = (0xffff - le16_to_cpu(status.gain));
45710+ *strength = (0xffff - le16_to_cpu(status->gain));
45711+
45712+out:
45713+ kfree(cmd);
45714+ kfree(status);
45715 return 0;
 45716-	return 0;
+	return ret;
45717
45718 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45719 {
45720 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45721- struct dvbt_get_status_msg status;
45722- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45723+ struct dvbt_get_status_msg *status;
45724+ char *cmd;
45725 int ret;
45726
45727- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45728- sizeof(status), 0);
45729+ cmd = kmalloc(1, GFP_KERNEL);
45730+ if (cmd == NULL)
45731+ return -ENOMEM;
45732+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45733+ if (status == NULL) {
45734+ kfree(cmd);
45735+ return -ENOMEM;
45736+ }
45737+
45738+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45739+
45740+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45741+ sizeof(*status), 0);
45742 if (ret < 0) {
45743 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45744- return ret;
45745+ goto out;
45746 }
45747- *snr = (status.snr << 8) | status.snr;
45748- return 0;
45749+ *snr = (status->snr << 8) | status->snr;
45750+
45751+out:
45752+ kfree(cmd);
45753+ kfree(status);
45754+ return ret;
45755 }
45756
45757 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45758@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45759 {
45760 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45761 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45762- struct dvbt_set_parameters_msg param;
45763- char result[2];
45764+ struct dvbt_set_parameters_msg *param;
45765+ char *result;
45766 int err;
45767
45768- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45769- param.tps = cpu_to_le16(compute_tps(fep));
45770- param.freq = cpu_to_le32(fep->frequency / 1000);
45771- param.flags = 0;
45772+ result = kmalloc(2, GFP_KERNEL);
45773+ if (result == NULL)
45774+ return -ENOMEM;
45775+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45776+ if (param == NULL) {
45777+ kfree(result);
45778+ return -ENOMEM;
45779+ }
45780+
45781+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45782+ param->tps = cpu_to_le16(compute_tps(fep));
45783+ param->freq = cpu_to_le32(fep->frequency / 1000);
45784+ param->flags = 0;
45785
45786 switch (fep->bandwidth_hz) {
45787 default:
45788 case 8000000:
45789- param.bandwidth = 8;
45790+ param->bandwidth = 8;
45791 break;
45792 case 7000000:
45793- param.bandwidth = 7;
45794+ param->bandwidth = 7;
45795 break;
45796 case 6000000:
45797- param.bandwidth = 6;
45798+ param->bandwidth = 6;
45799 break;
45800 }
45801
45802 err = dvb_usb_generic_rw(state->d,
45803- (char *)&param, sizeof(param),
45804- result, sizeof(result), 0);
45805+ (char *)param, sizeof(*param),
45806+ result, 2, 0);
45807 if (err < 0)
45808 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45809
45810- return (err < 0) ? err : 0;
45811+ kfree(result);
45812+ kfree(param);
45813+ return err;
45814 }
45815
45816 static void cinergyt2_fe_release(struct dvb_frontend *fe)
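Five of the frontend callbacks above repeat the same allocate/query/free boilerplate around CINERGYT2_EP1_GET_TUNER_STATUS. A natural follow-up cleanup would hoist it into one helper; a sketch under the same DMA constraints (cinergyt2_fe_get_status is a hypothetical name):

	static int cinergyt2_fe_get_status(struct cinergyt2_fe_state *state,
					   struct dvbt_get_status_msg *result)
	{
		u8 *cmd;
		int ret;

		cmd = kmalloc(1, GFP_KERNEL);
		if (cmd == NULL)
			return -ENOMEM;

		cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
		ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
					 sizeof(*result), 0);

		kfree(cmd);
		return ret;
	}

Callers would still have to kmalloc() *result themselves, for the same DMA-safety reason the per-function rewrites do.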
45817diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45818index 733a7ff..f8b52e3 100644
45819--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45820+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45821@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45822
45823 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45824 {
45825- struct hexline hx;
45826- u8 reset;
45827+ struct hexline *hx;
45828+ u8 *reset;
45829 int ret,pos=0;
45830
45831+ reset = kmalloc(1, GFP_KERNEL);
45832+ if (reset == NULL)
45833+ return -ENOMEM;
45834+
45835+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45836+ if (hx == NULL) {
45837+ kfree(reset);
45838+ return -ENOMEM;
45839+ }
45840+
45841 /* stop the CPU */
45842- reset = 1;
45843- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45844+ reset[0] = 1;
45845+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45846 err("could not stop the USB controller CPU.");
45847
45848- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45849- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45850- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45851+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45852+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45853+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45854
45855- if (ret != hx.len) {
45856+ if (ret != hx->len) {
45857 err("error while transferring firmware "
45858 "(transferred size: %d, block size: %d)",
45859- ret,hx.len);
45860+ ret,hx->len);
45861 ret = -EINVAL;
45862 break;
45863 }
45864 }
45865 if (ret < 0) {
45866 err("firmware download failed at %d with %d",pos,ret);
45867+ kfree(reset);
45868+ kfree(hx);
45869 return ret;
45870 }
45871
45872 if (ret == 0) {
45873 /* restart the CPU */
45874- reset = 0;
45875- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45876+ reset[0] = 0;
45877+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45878 err("could not restart the USB controller CPU.");
45879 ret = -EINVAL;
45880 }
45881 } else
45882 ret = -EIO;
45883
45884+ kfree(reset);
45885+ kfree(hx);
45886+
45887 return ret;
45888 }
45889 EXPORT_SYMBOL(usb_cypress_load_firmware);
45890diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45891index 1a3df10..57997a5 100644
45892--- a/drivers/media/usb/dvb-usb/dw2102.c
45893+++ b/drivers/media/usb/dvb-usb/dw2102.c
45894@@ -118,7 +118,7 @@ struct su3000_state {
45895
45896 struct s6x0_state {
45897 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45898-};
45899+} __no_const;
45900
45901 /* debug */
45902 static int dvb_usb_dw2102_debug;
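The dw2102.c change is one line because it is an annotation, not a rework: s6x0_state holds a function pointer that the driver saves and replaces at attach time (old_set_voltage), so the constify gcc plugin must be told to leave the structure writable. __no_const is assumed to expand to a plugin attribute along these lines:

	/* Assumed shape of the definition in grsecurity's compiler.h: */
	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))
	#else
	#define __no_const
	#endif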
45903diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45904index 5801ae7..83f71fa 100644
45905--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45906+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45907@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45908 static int technisat_usb2_i2c_access(struct usb_device *udev,
45909 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45910 {
45911- u8 b[64];
45912- int ret, actual_length;
45913+ u8 *b = kmalloc(64, GFP_KERNEL);
45914+ int ret, actual_length, error = 0;
45915+
45916+ if (b == NULL)
45917+ return -ENOMEM;
45918
45919 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45920 debug_dump(tx, txlen, deb_i2c);
45921@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45922
45923 if (ret < 0) {
45924 err("i2c-error: out failed %02x = %d", device_addr, ret);
45925- return -ENODEV;
45926+ error = -ENODEV;
45927+ goto out;
45928 }
45929
45930 ret = usb_bulk_msg(udev,
45931@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45932 b, 64, &actual_length, 1000);
45933 if (ret < 0) {
45934 err("i2c-error: in failed %02x = %d", device_addr, ret);
45935- return -ENODEV;
45936+ error = -ENODEV;
45937+ goto out;
45938 }
45939
45940 if (b[0] != I2C_STATUS_OK) {
45941@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45942 /* handle tuner-i2c-nak */
45943 if (!(b[0] == I2C_STATUS_NAK &&
45944 device_addr == 0x60
45945- /* && device_is_technisat_usb2 */))
45946- return -ENODEV;
45947+ /* && device_is_technisat_usb2 */)) {
45948+ error = -ENODEV;
45949+ goto out;
45950+ }
45951 }
45952
45953 deb_i2c("status: %d, ", b[0]);
45954@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45955
45956 deb_i2c("\n");
45957
45958- return 0;
45959+out:
45960+ kfree(b);
45961+ return error;
45962 }
45963
45964 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45965@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45966 {
45967 int ret;
45968
45969- u8 led[8] = {
45970- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45971- 0
45972- };
45973+ u8 *led = kzalloc(8, GFP_KERNEL);
45974+
45975+ if (led == NULL)
45976+ return -ENOMEM;
45977
45978 if (disable_led_control && state != TECH_LED_OFF)
 45979-		return 0;
+		{ kfree(led); return 0; }
45980
45981+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45982+
45983 switch (state) {
45984 case TECH_LED_ON:
45985 led[1] = 0x82;
45986@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45987 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45988 USB_TYPE_VENDOR | USB_DIR_OUT,
45989 0, 0,
45990- led, sizeof(led), 500);
45991+ led, 8, 500);
45992
45993 mutex_unlock(&d->i2c_mutex);
45994+
45995+ kfree(led);
45996+
45997 return ret;
45998 }
45999
46000 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46001 {
46002 int ret;
46003- u8 b = 0;
46004+ u8 *b = kzalloc(1, GFP_KERNEL);
46005+
46006+ if (b == NULL)
46007+ return -ENOMEM;
46008
46009 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
 46010-		return -EAGAIN;
+		{ kfree(b); return -EAGAIN; }
46011@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
46012 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
46013 USB_TYPE_VENDOR | USB_DIR_OUT,
46014 (red << 8) | green, 0,
46015- &b, 1, 500);
46016+ b, 1, 500);
46017
46018 mutex_unlock(&d->i2c_mutex);
46019
46020+ kfree(b);
46021+
46022 return ret;
46023 }
46024
46025@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46026 struct dvb_usb_device_description **desc, int *cold)
46027 {
46028 int ret;
46029- u8 version[3];
46030+ u8 *version = kmalloc(3, GFP_KERNEL);
46031
46032 /* first select the interface */
46033 if (usb_set_interface(udev, 0, 1) != 0)
46034@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46035
46036 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
46037
46038+ if (version == NULL)
46039+ return 0;
46040+
46041 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
46042 GET_VERSION_INFO_VENDOR_REQUEST,
46043 USB_TYPE_VENDOR | USB_DIR_IN,
46044 0, 0,
46045- version, sizeof(version), 500);
46046+ version, 3, 500);
46047
46048 if (ret < 0)
46049 *cold = 1;
46050@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46051 *cold = 0;
46052 }
46053
46054+ kfree(version);
46055+
46056 return 0;
46057 }
46058
46059@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
46060
46061 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46062 {
46063- u8 buf[62], *b;
46064+ u8 *buf, *b;
46065 int ret;
46066 struct ir_raw_event ev;
46067
46068+ buf = kmalloc(62, GFP_KERNEL);
46069+
46070+ if (buf == NULL)
46071+ return -ENOMEM;
46072+
46073 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
46074 buf[1] = 0x08;
46075 buf[2] = 0x8f;
46076@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46077 GET_IR_DATA_VENDOR_REQUEST,
46078 USB_TYPE_VENDOR | USB_DIR_IN,
46079 0x8080, 0,
46080- buf, sizeof(buf), 500);
46081+ buf, 62, 500);
46082
46083 unlock:
46084 mutex_unlock(&d->i2c_mutex);
46085
46086- if (ret < 0)
46087+ if (ret < 0) {
46088+ kfree(buf);
46089 return ret;
46090+ }
46091
46092- if (ret == 1)
46093+ if (ret == 1) {
46094+ kfree(buf);
46095 return 0; /* no key pressed */
46096+ }
46097
46098 /* decoding */
46099 b = buf+1;
46100@@ -656,6 +689,8 @@ unlock:
46101
46102 ir_raw_event_handle(d->rc_dev);
46103
46104+ kfree(buf);
46105+
46106 return 1;
46107 }
46108
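technisat_usb2_get_ir() ends up with a kfree() before each of its three returns. The conventional kernel alternative is a single exit label, which this patch itself uses in the cinergyT2 hunks above; a sketch of the same flow in that style (read_ir_sketch is hypothetical and the decode step is elided):

	static int read_ir_sketch(struct dvb_usb_device *d)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(62, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;

		ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
				      GET_IR_DATA_VENDOR_REQUEST,
				      USB_TYPE_VENDOR | USB_DIR_IN,
				      0x8080, 0, buf, 62, 500);
		if (ret < 0)
			goto out;	/* single exit: every path frees buf */
		if (ret == 1) {
			ret = 0;	/* no key pressed */
			goto out;
		}

		/* ... decode buf+1 and feed ir_raw_event_handle() ... */
		ret = 1;
	out:
		kfree(buf);
		return ret;
	}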
46109diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46110index af63543..0436f20 100644
46111--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46112+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46113@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46114 * by passing a very big num_planes value */
46115 uplane = compat_alloc_user_space(num_planes *
46116 sizeof(struct v4l2_plane));
46117- kp->m.planes = (__force struct v4l2_plane *)uplane;
46118+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
46119
46120 while (--num_planes >= 0) {
46121 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46122@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46123 if (num_planes == 0)
46124 return 0;
46125
46126- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
46127+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
46128 if (get_user(p, &up->m.planes))
46129 return -EFAULT;
46130 uplane32 = compat_ptr(p);
46131@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
46132 get_user(kp->flags, &up->flags) ||
46133 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
46134 return -EFAULT;
46135- kp->base = (__force void *)compat_ptr(tmp);
46136+ kp->base = (__force_kernel void *)compat_ptr(tmp);
46137 return 0;
46138 }
46139
46140@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46141 n * sizeof(struct v4l2_ext_control32)))
46142 return -EFAULT;
46143 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
46144- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
46145+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
46146 while (--n >= 0) {
46147 u32 id;
46148
46149@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46150 {
46151 struct v4l2_ext_control32 __user *ucontrols;
46152 struct v4l2_ext_control __user *kcontrols =
46153- (__force struct v4l2_ext_control __user *)kp->controls;
46154+ (struct v4l2_ext_control __force_user *)kp->controls;
46155 int n = kp->count;
46156 compat_caddr_t p;
46157
46158@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
46159 get_user(tmp, &up->edid) ||
46160 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
46161 return -EFAULT;
46162- kp->edid = (__force u8 *)compat_ptr(tmp);
46163+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
46164 return 0;
46165 }
46166
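The cast churn in v4l2-compat-ioctl32.c follows from PaX UDEREF/USERCOPY making sparse's address spaces load-bearing: these compat handlers deliberately stash __user pointers in kernel-side struct fields and later cast them back, and each crossing now has to be spelled out in the right direction. The annotations are assumed to be defined roughly as:

	/* Visible only to sparse (__CHECKER__); plain builds compile
	 * them away entirely. */
	#ifdef __CHECKER__
	# define __force_user	__force __user
	# define __force_kernel	__force __kernel
	#else
	# define __force_user
	# define __force_kernel
	#endif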
46167diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
46168index 015f92a..59e311e 100644
46169--- a/drivers/media/v4l2-core/v4l2-device.c
46170+++ b/drivers/media/v4l2-core/v4l2-device.c
46171@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
46172 EXPORT_SYMBOL_GPL(v4l2_device_put);
46173
46174 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
46175- atomic_t *instance)
46176+ atomic_unchecked_t *instance)
46177 {
46178- int num = atomic_inc_return(instance) - 1;
46179+ int num = atomic_inc_return_unchecked(instance) - 1;
46180 int len = strlen(basename);
46181
46182 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
46183diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
46184index b084072..36706d7 100644
46185--- a/drivers/media/v4l2-core/v4l2-ioctl.c
46186+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
46187@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
46188 struct file *file, void *fh, void *p);
46189 } u;
46190 void (*debug)(const void *arg, bool write_only);
46191-};
46192+} __do_const;
46193+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
46194
46195 /* This control needs a priority check */
46196 #define INFO_FL_PRIO (1 << 0)
46197@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
46198 struct video_device *vfd = video_devdata(file);
46199 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
46200 bool write_only = false;
46201- struct v4l2_ioctl_info default_info;
46202+ v4l2_ioctl_info_no_const default_info;
46203 const struct v4l2_ioctl_info *info;
46204 void *fh = file->private_data;
46205 struct v4l2_fh *vfh = NULL;
46206@@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46207 ret = -EINVAL;
46208 break;
46209 }
46210- *user_ptr = (void __user *)buf->m.planes;
46211+ *user_ptr = (void __force_user *)buf->m.planes;
46212 *kernel_ptr = (void **)&buf->m.planes;
46213 *array_size = sizeof(struct v4l2_plane) * buf->length;
46214 ret = 1;
46215@@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46216 ret = -EINVAL;
46217 break;
46218 }
46219- *user_ptr = (void __user *)edid->edid;
46220+ *user_ptr = (void __force_user *)edid->edid;
46221 *kernel_ptr = (void **)&edid->edid;
46222 *array_size = edid->blocks * 128;
46223 ret = 1;
46224@@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46225 ret = -EINVAL;
46226 break;
46227 }
46228- *user_ptr = (void __user *)ctrls->controls;
46229+ *user_ptr = (void __force_user *)ctrls->controls;
46230 *kernel_ptr = (void **)&ctrls->controls;
46231 *array_size = sizeof(struct v4l2_ext_control)
46232 * ctrls->count;
46233@@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
46234 }
46235
46236 if (has_array_args) {
46237- *kernel_ptr = (void __force *)user_ptr;
46238+ *kernel_ptr = (void __force_kernel *)user_ptr;
46239 if (copy_to_user(user_ptr, mbuf, array_size))
46240 err = -EFAULT;
46241 goto out_array_args;
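The v4l2-ioctl.c hunk pairs the two halves of the constify idiom: __do_const asks the plugin to make every struct v4l2_ioctl_info instance const, and the __no_const typedef then carves out the one legitimate runtime-writable use, the on-stack default_info that __video_do_ioctl() fills in. Generically (attribute definitions as assumed in the earlier sketches):

	struct ops_entry {
		long (*func)(void *arg);
	} __do_const;
	typedef struct ops_entry __no_const ops_entry_no_const;

	static long dispatch(void *arg)
	{
		ops_entry_no_const local = { .func = NULL };

		local.func = NULL;	/* writable; a plain struct
					 * ops_entry local would not be */
		return local.func ? local.func(arg) : -ENOTTY;
	}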
46242diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
46243index 24696f5..3637780 100644
46244--- a/drivers/memory/omap-gpmc.c
46245+++ b/drivers/memory/omap-gpmc.c
46246@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
46247 };
46248
46249 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
46250-static struct irq_chip gpmc_irq_chip;
46251 static int gpmc_irq_start;
46252
46253 static struct resource gpmc_mem_root;
46254@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
46255
46256 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
46257
46258+static struct irq_chip gpmc_irq_chip = {
46259+ .name = "gpmc",
46260+ .irq_startup = gpmc_irq_noop_ret,
46261+ .irq_enable = gpmc_irq_enable,
46262+ .irq_disable = gpmc_irq_disable,
46263+ .irq_shutdown = gpmc_irq_noop,
46264+ .irq_ack = gpmc_irq_noop,
46265+ .irq_mask = gpmc_irq_noop,
46266+ .irq_unmask = gpmc_irq_noop,
46267+};
46268+
46269 static int gpmc_setup_irq(void)
46270 {
46271 int i;
46272@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
46273 return gpmc_irq_start;
46274 }
46275
46276- gpmc_irq_chip.name = "gpmc";
46277- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
46278- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
46279- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
46280- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
46281- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
46282- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46283- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46284-
46285 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46286 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46287
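The omap-gpmc change applies the same policy from the other direction: rather than exempting gpmc_irq_chip from constification, its runtime field assignments are folded into a static designated initializer, so the chip can be read-only from the start. This is the preferable form whenever the handlers are known at compile time:

	/* All handlers resolvable at build time => no writes needed: */
	static struct irq_chip example_chip = {
		.name       = "example",
		.irq_mask   = gpmc_irq_noop,	/* handlers as in the hunk */
		.irq_unmask = gpmc_irq_noop,
	};

The pax_open_kernel() rewrites further down (mmci, omap_hsmmc, sdhci) are the fallback for cases where a handler is only chosen at probe time.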
46288diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46289index 187f836..679544b 100644
46290--- a/drivers/message/fusion/mptbase.c
46291+++ b/drivers/message/fusion/mptbase.c
46292@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46293 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46294 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46295
46296+#ifdef CONFIG_GRKERNSEC_HIDESYM
46297+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46298+#else
46299 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46300 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46301+#endif
46302+
46303 /*
46304 * Rounding UP to nearest 4-kB boundary here...
46305 */
46306@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46307 ioc->facts.GlobalCredits);
46308
46309 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46310+#ifdef CONFIG_GRKERNSEC_HIDESYM
46311+ NULL, NULL);
46312+#else
46313 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46314+#endif
46315 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46316 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46317 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
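GRKERNSEC_HIDESYM blanks the pointers this /proc file would otherwise print, since a raw kernel heap address in procfs is a ready-made information leak against address-space randomization. For comparison only (this is not what the patch does), mainline's later answer to the same problem is %pK, which consults kptr_restrict at runtime instead of an #ifdef:

	seq_printf(m, "  RequestFrames @ %pK (Dma @ %pK)\n",
		   (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);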
46318diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46319index 5bdaae1..eced16f 100644
46320--- a/drivers/message/fusion/mptsas.c
46321+++ b/drivers/message/fusion/mptsas.c
46322@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46323 return 0;
46324 }
46325
46326+static inline void
46327+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46328+{
46329+ if (phy_info->port_details) {
46330+ phy_info->port_details->rphy = rphy;
46331+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46332+ ioc->name, rphy));
46333+ }
46334+
46335+ if (rphy) {
46336+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46337+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46338+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46339+ ioc->name, rphy, rphy->dev.release));
46340+ }
46341+}
46342+
46343 /* no mutex */
46344 static void
46345 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46346@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46347 return NULL;
46348 }
46349
46350-static inline void
46351-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46352-{
46353- if (phy_info->port_details) {
46354- phy_info->port_details->rphy = rphy;
46355- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46356- ioc->name, rphy));
46357- }
46358-
46359- if (rphy) {
46360- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46361- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46362- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46363- ioc->name, rphy, rphy->dev.release));
46364- }
46365-}
46366-
46367 static inline struct sas_port *
46368 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46369 {
46370diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46371index 9a8e185..27ff17d 100644
46372--- a/drivers/mfd/ab8500-debugfs.c
46373+++ b/drivers/mfd/ab8500-debugfs.c
46374@@ -100,7 +100,7 @@ static int irq_last;
46375 static u32 *irq_count;
46376 static int num_irqs;
46377
46378-static struct device_attribute **dev_attr;
46379+static device_attribute_no_const **dev_attr;
46380 static char **event_name;
46381
46382 static u8 avg_sample = SAMPLE_16;
46383diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
46384index 5615522..1eb6f3dc 100644
46385--- a/drivers/mfd/kempld-core.c
46386+++ b/drivers/mfd/kempld-core.c
46387@@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
46388 .remove = kempld_remove,
46389 };
46390
46391-static struct dmi_system_id kempld_dmi_table[] __initdata = {
46392+static const struct dmi_system_id kempld_dmi_table[] __initconst = {
46393 {
46394 .ident = "BHL6",
46395 .matches = {
46396diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46397index c880c89..45a7c68 100644
46398--- a/drivers/mfd/max8925-i2c.c
46399+++ b/drivers/mfd/max8925-i2c.c
46400@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46401 const struct i2c_device_id *id)
46402 {
46403 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46404- static struct max8925_chip *chip;
46405+ struct max8925_chip *chip;
46406 struct device_node *node = client->dev.of_node;
46407
46408 if (node && !pdata) {
46409diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46410index 7612d89..70549c2 100644
46411--- a/drivers/mfd/tps65910.c
46412+++ b/drivers/mfd/tps65910.c
46413@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46414 struct tps65910_platform_data *pdata)
46415 {
46416 int ret = 0;
46417- static struct regmap_irq_chip *tps6591x_irqs_chip;
46418+ struct regmap_irq_chip *tps6591x_irqs_chip;
46419
46420 if (!irq) {
46421 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46422diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46423index 1b772ef..01e77d33 100644
46424--- a/drivers/mfd/twl4030-irq.c
46425+++ b/drivers/mfd/twl4030-irq.c
46426@@ -34,6 +34,7 @@
46427 #include <linux/of.h>
46428 #include <linux/irqdomain.h>
46429 #include <linux/i2c/twl.h>
46430+#include <asm/pgtable.h>
46431
46432 #include "twl-core.h"
46433
46434@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46435 * Install an irq handler for each of the SIH modules;
46436 * clone dummy irq_chip since PIH can't *do* anything
46437 */
46438- twl4030_irq_chip = dummy_irq_chip;
46439- twl4030_irq_chip.name = "twl4030";
46440+ pax_open_kernel();
46441+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46442+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46443
46444- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46445+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46446+ pax_close_kernel();
46447
46448 for (i = irq_base; i < irq_end; i++) {
46449 irq_set_chip_and_handler(i, &twl4030_irq_chip,
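The twl4030-irq.c hunk also explains its new #include <asm/pgtable.h>: that is where pax_open_kernel()/pax_close_kernel() live. The pair brackets deliberate writes to data that constify/KERNEXEC has made read-only, here cloning dummy_irq_chip at init time. On x86 it is assumed to amount to a guarded CR0.WP toggle, roughly:

	/* Sketch only; the real PaX version also handles paravirt,
	 * barriers and other per-arch details. */
	static inline unsigned long pax_open_kernel(void)
	{
		preempt_disable();
		barrier();
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow RO writes */
		return 0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
		write_cr0(read_cr0() | X86_CR0_WP);	/* re-protect */
		barrier();
		preempt_enable();
		return 0;
	}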
46450diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46451index 464419b..64bae8d 100644
46452--- a/drivers/misc/c2port/core.c
46453+++ b/drivers/misc/c2port/core.c
46454@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46455 goto error_idr_alloc;
46456 c2dev->id = ret;
46457
46458- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46459+ pax_open_kernel();
46460+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46461+ pax_close_kernel();
46462
46463 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46464 "c2port%d", c2dev->id);
46465diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46466index 8385177..2f54635 100644
46467--- a/drivers/misc/eeprom/sunxi_sid.c
46468+++ b/drivers/misc/eeprom/sunxi_sid.c
46469@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46470
46471 platform_set_drvdata(pdev, sid_data);
46472
46473- sid_bin_attr.size = sid_data->keysize;
46474+ pax_open_kernel();
46475+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46476+ pax_close_kernel();
46477 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46478 return -ENODEV;
46479
46480diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46481index 36f5d52..32311c3 100644
46482--- a/drivers/misc/kgdbts.c
46483+++ b/drivers/misc/kgdbts.c
46484@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46485 char before[BREAK_INSTR_SIZE];
46486 char after[BREAK_INSTR_SIZE];
46487
46488- probe_kernel_read(before, (char *)kgdbts_break_test,
46489+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46490 BREAK_INSTR_SIZE);
46491 init_simple_test();
46492 ts.tst = plant_and_detach_test;
46493@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46494 /* Activate test with initial breakpoint */
46495 if (!is_early)
46496 kgdb_breakpoint();
46497- probe_kernel_read(after, (char *)kgdbts_break_test,
46498+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46499 BREAK_INSTR_SIZE);
46500 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46501 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
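kgdbts reads kernel text via probe_kernel_read(); under KERNEXEC on i386 the text segment is mapped at a different virtual offset than its link-time address, so symbol addresses must be translated before the read. ktla_ktva ("kernel text linear address to kernel virtual address") is assumed to be a constant-offset macro of this shape:

	/* Assumed sketch for i386 KERNEXEC; the identity elsewhere. */
	#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
	# define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
	#else
	# define ktla_ktva(addr)	(addr)
	#endif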
46502diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46503index 3ef4627..8d00486 100644
46504--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46505+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46506@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46507 * the lid is closed. This leads to interrupts as soon as a little move
46508 * is done.
46509 */
46510- atomic_inc(&lis3->count);
46511+ atomic_inc_unchecked(&lis3->count);
46512
46513 wake_up_interruptible(&lis3->misc_wait);
46514 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46515@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46516 if (lis3->pm_dev)
46517 pm_runtime_get_sync(lis3->pm_dev);
46518
46519- atomic_set(&lis3->count, 0);
46520+ atomic_set_unchecked(&lis3->count, 0);
46521 return 0;
46522 }
46523
46524@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46525 add_wait_queue(&lis3->misc_wait, &wait);
46526 while (true) {
46527 set_current_state(TASK_INTERRUPTIBLE);
46528- data = atomic_xchg(&lis3->count, 0);
46529+ data = atomic_xchg_unchecked(&lis3->count, 0);
46530 if (data)
46531 break;
46532
46533@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46534 struct lis3lv02d, miscdev);
46535
46536 poll_wait(file, &lis3->misc_wait, wait);
46537- if (atomic_read(&lis3->count))
46538+ if (atomic_read_unchecked(&lis3->count))
46539 return POLLIN | POLLRDNORM;
46540 return 0;
46541 }
46542diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46543index c439c82..1f20f57 100644
46544--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46545+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46546@@ -297,7 +297,7 @@ struct lis3lv02d {
46547 struct input_polled_dev *idev; /* input device */
46548 struct platform_device *pdev; /* platform device */
46549 struct regulator_bulk_data regulators[2];
46550- atomic_t count; /* interrupt count after last read */
46551+ atomic_unchecked_t count; /* interrupt count after last read */
46552 union axis_conversion ac; /* hw -> logical axis */
46553 int mapped_btns[3];
46554
46555diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46556index 2f30bad..c4c13d0 100644
46557--- a/drivers/misc/sgi-gru/gruhandles.c
46558+++ b/drivers/misc/sgi-gru/gruhandles.c
46559@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46560 unsigned long nsec;
46561
46562 nsec = CLKS2NSEC(clks);
46563- atomic_long_inc(&mcs_op_statistics[op].count);
46564- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46565+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46566+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46567 if (mcs_op_statistics[op].max < nsec)
46568 mcs_op_statistics[op].max = nsec;
46569 }
46570diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46571index 4f76359..cdfcb2e 100644
46572--- a/drivers/misc/sgi-gru/gruprocfs.c
46573+++ b/drivers/misc/sgi-gru/gruprocfs.c
46574@@ -32,9 +32,9 @@
46575
46576 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46577
46578-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46579+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46580 {
46581- unsigned long val = atomic_long_read(v);
46582+ unsigned long val = atomic_long_read_unchecked(v);
46583
46584 seq_printf(s, "%16lu %s\n", val, id);
46585 }
46586@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46587
46588 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46589 for (op = 0; op < mcsop_last; op++) {
46590- count = atomic_long_read(&mcs_op_statistics[op].count);
46591- total = atomic_long_read(&mcs_op_statistics[op].total);
46592+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46593+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46594 max = mcs_op_statistics[op].max;
46595 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46596 count ? total / count : 0, max);
46597diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46598index 5c3ce24..4915ccb 100644
46599--- a/drivers/misc/sgi-gru/grutables.h
46600+++ b/drivers/misc/sgi-gru/grutables.h
46601@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46602 * GRU statistics.
46603 */
46604 struct gru_stats_s {
46605- atomic_long_t vdata_alloc;
46606- atomic_long_t vdata_free;
46607- atomic_long_t gts_alloc;
46608- atomic_long_t gts_free;
46609- atomic_long_t gms_alloc;
46610- atomic_long_t gms_free;
46611- atomic_long_t gts_double_allocate;
46612- atomic_long_t assign_context;
46613- atomic_long_t assign_context_failed;
46614- atomic_long_t free_context;
46615- atomic_long_t load_user_context;
46616- atomic_long_t load_kernel_context;
46617- atomic_long_t lock_kernel_context;
46618- atomic_long_t unlock_kernel_context;
46619- atomic_long_t steal_user_context;
46620- atomic_long_t steal_kernel_context;
46621- atomic_long_t steal_context_failed;
46622- atomic_long_t nopfn;
46623- atomic_long_t asid_new;
46624- atomic_long_t asid_next;
46625- atomic_long_t asid_wrap;
46626- atomic_long_t asid_reuse;
46627- atomic_long_t intr;
46628- atomic_long_t intr_cbr;
46629- atomic_long_t intr_tfh;
46630- atomic_long_t intr_spurious;
46631- atomic_long_t intr_mm_lock_failed;
46632- atomic_long_t call_os;
46633- atomic_long_t call_os_wait_queue;
46634- atomic_long_t user_flush_tlb;
46635- atomic_long_t user_unload_context;
46636- atomic_long_t user_exception;
46637- atomic_long_t set_context_option;
46638- atomic_long_t check_context_retarget_intr;
46639- atomic_long_t check_context_unload;
46640- atomic_long_t tlb_dropin;
46641- atomic_long_t tlb_preload_page;
46642- atomic_long_t tlb_dropin_fail_no_asid;
46643- atomic_long_t tlb_dropin_fail_upm;
46644- atomic_long_t tlb_dropin_fail_invalid;
46645- atomic_long_t tlb_dropin_fail_range_active;
46646- atomic_long_t tlb_dropin_fail_idle;
46647- atomic_long_t tlb_dropin_fail_fmm;
46648- atomic_long_t tlb_dropin_fail_no_exception;
46649- atomic_long_t tfh_stale_on_fault;
46650- atomic_long_t mmu_invalidate_range;
46651- atomic_long_t mmu_invalidate_page;
46652- atomic_long_t flush_tlb;
46653- atomic_long_t flush_tlb_gru;
46654- atomic_long_t flush_tlb_gru_tgh;
46655- atomic_long_t flush_tlb_gru_zero_asid;
46656+ atomic_long_unchecked_t vdata_alloc;
46657+ atomic_long_unchecked_t vdata_free;
46658+ atomic_long_unchecked_t gts_alloc;
46659+ atomic_long_unchecked_t gts_free;
46660+ atomic_long_unchecked_t gms_alloc;
46661+ atomic_long_unchecked_t gms_free;
46662+ atomic_long_unchecked_t gts_double_allocate;
46663+ atomic_long_unchecked_t assign_context;
46664+ atomic_long_unchecked_t assign_context_failed;
46665+ atomic_long_unchecked_t free_context;
46666+ atomic_long_unchecked_t load_user_context;
46667+ atomic_long_unchecked_t load_kernel_context;
46668+ atomic_long_unchecked_t lock_kernel_context;
46669+ atomic_long_unchecked_t unlock_kernel_context;
46670+ atomic_long_unchecked_t steal_user_context;
46671+ atomic_long_unchecked_t steal_kernel_context;
46672+ atomic_long_unchecked_t steal_context_failed;
46673+ atomic_long_unchecked_t nopfn;
46674+ atomic_long_unchecked_t asid_new;
46675+ atomic_long_unchecked_t asid_next;
46676+ atomic_long_unchecked_t asid_wrap;
46677+ atomic_long_unchecked_t asid_reuse;
46678+ atomic_long_unchecked_t intr;
46679+ atomic_long_unchecked_t intr_cbr;
46680+ atomic_long_unchecked_t intr_tfh;
46681+ atomic_long_unchecked_t intr_spurious;
46682+ atomic_long_unchecked_t intr_mm_lock_failed;
46683+ atomic_long_unchecked_t call_os;
46684+ atomic_long_unchecked_t call_os_wait_queue;
46685+ atomic_long_unchecked_t user_flush_tlb;
46686+ atomic_long_unchecked_t user_unload_context;
46687+ atomic_long_unchecked_t user_exception;
46688+ atomic_long_unchecked_t set_context_option;
46689+ atomic_long_unchecked_t check_context_retarget_intr;
46690+ atomic_long_unchecked_t check_context_unload;
46691+ atomic_long_unchecked_t tlb_dropin;
46692+ atomic_long_unchecked_t tlb_preload_page;
46693+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46694+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46695+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46696+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46697+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46698+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46699+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46700+ atomic_long_unchecked_t tfh_stale_on_fault;
46701+ atomic_long_unchecked_t mmu_invalidate_range;
46702+ atomic_long_unchecked_t mmu_invalidate_page;
46703+ atomic_long_unchecked_t flush_tlb;
46704+ atomic_long_unchecked_t flush_tlb_gru;
46705+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46706+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46707
46708- atomic_long_t copy_gpa;
46709- atomic_long_t read_gpa;
46710+ atomic_long_unchecked_t copy_gpa;
46711+ atomic_long_unchecked_t read_gpa;
46712
46713- atomic_long_t mesq_receive;
46714- atomic_long_t mesq_receive_none;
46715- atomic_long_t mesq_send;
46716- atomic_long_t mesq_send_failed;
46717- atomic_long_t mesq_noop;
46718- atomic_long_t mesq_send_unexpected_error;
46719- atomic_long_t mesq_send_lb_overflow;
46720- atomic_long_t mesq_send_qlimit_reached;
46721- atomic_long_t mesq_send_amo_nacked;
46722- atomic_long_t mesq_send_put_nacked;
46723- atomic_long_t mesq_page_overflow;
46724- atomic_long_t mesq_qf_locked;
46725- atomic_long_t mesq_qf_noop_not_full;
46726- atomic_long_t mesq_qf_switch_head_failed;
46727- atomic_long_t mesq_qf_unexpected_error;
46728- atomic_long_t mesq_noop_unexpected_error;
46729- atomic_long_t mesq_noop_lb_overflow;
46730- atomic_long_t mesq_noop_qlimit_reached;
46731- atomic_long_t mesq_noop_amo_nacked;
46732- atomic_long_t mesq_noop_put_nacked;
46733- atomic_long_t mesq_noop_page_overflow;
46734+ atomic_long_unchecked_t mesq_receive;
46735+ atomic_long_unchecked_t mesq_receive_none;
46736+ atomic_long_unchecked_t mesq_send;
46737+ atomic_long_unchecked_t mesq_send_failed;
46738+ atomic_long_unchecked_t mesq_noop;
46739+ atomic_long_unchecked_t mesq_send_unexpected_error;
46740+ atomic_long_unchecked_t mesq_send_lb_overflow;
46741+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46742+ atomic_long_unchecked_t mesq_send_amo_nacked;
46743+ atomic_long_unchecked_t mesq_send_put_nacked;
46744+ atomic_long_unchecked_t mesq_page_overflow;
46745+ atomic_long_unchecked_t mesq_qf_locked;
46746+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46747+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46748+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46749+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46750+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46751+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46752+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46753+ atomic_long_unchecked_t mesq_noop_put_nacked;
46754+ atomic_long_unchecked_t mesq_noop_page_overflow;
46755
46756 };
46757
46758@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46759 tghop_invalidate, mcsop_last};
46760
46761 struct mcs_op_statistic {
46762- atomic_long_t count;
46763- atomic_long_t total;
46764+ atomic_long_unchecked_t count;
46765+ atomic_long_unchecked_t total;
46766 unsigned long max;
46767 };
46768
46769@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46770
46771 #define STAT(id) do { \
46772 if (gru_options & OPT_STATS) \
46773- atomic_long_inc(&gru_stats.id); \
46774+ atomic_long_inc_unchecked(&gru_stats.id); \
46775 } while (0)
46776
46777 #ifdef CONFIG_SGI_GRU_DEBUG
46778diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46779index c862cd4..0d176fe 100644
46780--- a/drivers/misc/sgi-xp/xp.h
46781+++ b/drivers/misc/sgi-xp/xp.h
46782@@ -288,7 +288,7 @@ struct xpc_interface {
46783 xpc_notify_func, void *);
46784 void (*received) (short, int, void *);
46785 enum xp_retval (*partid_to_nasids) (short, void *);
46786-};
46787+} __no_const;
46788
46789 extern struct xpc_interface xpc_interface;
46790
46791diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46792index 01be66d..e3a0c7e 100644
46793--- a/drivers/misc/sgi-xp/xp_main.c
46794+++ b/drivers/misc/sgi-xp/xp_main.c
46795@@ -78,13 +78,13 @@ xpc_notloaded(void)
46796 }
46797
46798 struct xpc_interface xpc_interface = {
46799- (void (*)(int))xpc_notloaded,
46800- (void (*)(int))xpc_notloaded,
46801- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46802- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46803+ .connect = (void (*)(int))xpc_notloaded,
46804+ .disconnect = (void (*)(int))xpc_notloaded,
46805+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46806+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46807 void *))xpc_notloaded,
46808- (void (*)(short, int, void *))xpc_notloaded,
46809- (enum xp_retval(*)(short, void *))xpc_notloaded
46810+ .received = (void (*)(short, int, void *))xpc_notloaded,
46811+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46812 };
46813 EXPORT_SYMBOL_GPL(xpc_interface);
46814
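Converting xpc_interface to designated initializers is not cosmetic. Once structures are candidates for constification and layout randomization (RANDSTRUCT), positional initializers either bind silently to the wrong members or are rejected outright, while the designated form survives any reordering:

	/* Positional init would break if these two pointers ever swap
	 * places under struct randomization; designated init cannot. */
	struct iface {
		void (*connect)(int partid);
		void (*disconnect)(int partid);
	};

	static struct iface example = {
		.connect    = NULL,	/* placeholders for the sketch */
		.disconnect = NULL,
	};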
46815diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46816index b94d5f7..7f494c5 100644
46817--- a/drivers/misc/sgi-xp/xpc.h
46818+++ b/drivers/misc/sgi-xp/xpc.h
46819@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46820 void (*received_payload) (struct xpc_channel *, void *);
46821 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46822 };
46823+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46824
46825 /* struct xpc_partition act_state values (for XPC HB) */
46826
46827@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46828 /* found in xpc_main.c */
46829 extern struct device *xpc_part;
46830 extern struct device *xpc_chan;
46831-extern struct xpc_arch_operations xpc_arch_ops;
46832+extern xpc_arch_operations_no_const xpc_arch_ops;
46833 extern int xpc_disengage_timelimit;
46834 extern int xpc_disengage_timedout;
46835 extern int xpc_activate_IRQ_rcvd;
46836diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46837index 82dc574..8539ab2 100644
46838--- a/drivers/misc/sgi-xp/xpc_main.c
46839+++ b/drivers/misc/sgi-xp/xpc_main.c
46840@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46841 .notifier_call = xpc_system_die,
46842 };
46843
46844-struct xpc_arch_operations xpc_arch_ops;
46845+xpc_arch_operations_no_const xpc_arch_ops;
46846
46847 /*
46848 * Timer function to enforce the timelimit on the partition disengage.
46849@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46850
46851 if (((die_args->trapnr == X86_TRAP_MF) ||
46852 (die_args->trapnr == X86_TRAP_XF)) &&
46853- !user_mode_vm(die_args->regs))
46854+ !user_mode(die_args->regs))
46855 xpc_die_deactivate();
46856
46857 break;
46858diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46859index c69afb5..5c2d0f5 100644
46860--- a/drivers/mmc/card/block.c
46861+++ b/drivers/mmc/card/block.c
46862@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46863 if (idata->ic.postsleep_min_us)
46864 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46865
46866- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46867+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46868 err = -EFAULT;
46869 goto cmd_rel_host;
46870 }
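The mmc_blk_ioctl_cmd change swaps &(ic_ptr->response) for ic_ptr->response. Both expressions yield the same address, but their types differ (__u32 (*)[4] versus __u32 *), and the copy-size instrumentation in this patch is assumed to type-check copy_to_user() destinations, so the decayed element pointer is the form that matches sizeof(cmd.resp). A standalone illustration:

	#include <linux/bug.h>
	#include <linux/types.h>

	static void __maybe_unused decay_demo(void)
	{
		struct { __u32 response[4]; } e;
		void *whole = &e.response;	/* type: __u32 (*)[4] */
		void *first = e.response;	/* type: __u32 *      */

		/* Same address, different static type: */
		WARN_ON(whole != first);
	}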
46871diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46872index 18c4afe..43be71e 100644
46873--- a/drivers/mmc/host/dw_mmc.h
46874+++ b/drivers/mmc/host/dw_mmc.h
46875@@ -271,5 +271,5 @@ struct dw_mci_drv_data {
46876 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
46877 int (*parse_dt)(struct dw_mci *host);
46878 int (*execute_tuning)(struct dw_mci_slot *slot);
46879-};
46880+} __do_const;
46881 #endif /* _DW_MMC_H_ */
46882diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46883index 7fe1619..ae0781b 100644
46884--- a/drivers/mmc/host/mmci.c
46885+++ b/drivers/mmc/host/mmci.c
46886@@ -1630,7 +1630,9 @@ static int mmci_probe(struct amba_device *dev,
46887 mmc->caps |= MMC_CAP_CMD23;
46888
46889 if (variant->busy_detect) {
46890- mmci_ops.card_busy = mmci_card_busy;
46891+ pax_open_kernel();
46892+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46893+ pax_close_kernel();
46894 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46895 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46896 mmc->max_busy_timeout = 0;
46897diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46898index f84cfb0..aebe5d6 100644
46899--- a/drivers/mmc/host/omap_hsmmc.c
46900+++ b/drivers/mmc/host/omap_hsmmc.c
46901@@ -2054,7 +2054,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46902
46903 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46904 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46905- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46906+ pax_open_kernel();
+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
+ pax_close_kernel();
 }

 pm_runtime_enable(host->dev);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 10ef824..88461a2 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 host->mmc->caps |= MMC_CAP_1_8V_DDR;
 }

- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
- sdhci_esdhc_ops.platform_execute_tuning =
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ pax_open_kernel();
+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
 esdhc_executing_tuning;
+ pax_close_kernel();
+ }

 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index c6d2dd7..81b1ca3 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
 * we can use overriding functions instead of default.
 */
 if (sc->no_divider) {
- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+ pax_open_kernel();
+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+ pax_close_kernel();
 }

 /* It supports additional host capabilities if needed */
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 423666b..81ff5eb 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
 size_t totlen = 0, thislen;
 int ret = 0;
 size_t buflen = 0;
- static char *buffer;
+ char *buffer;

 if (!ECCBUF_SIZE) {
 /* We should fall back to a general writev implementation.
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index f44c606..aa4e804 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
 #include <linux/module.h>
+#include <linux/slab.h>

 #include "denali.h"

diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 33f3c3c..d6bbe6a 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)

 /* first try to map the upper buffer directly */
 if (virt_addr_valid(this->upper_buf) &&
- !object_is_on_stack(this->upper_buf)) {
+ !object_starts_on_stack(this->upper_buf)) {
 sg_init_one(sgl, this->upper_buf, this->upper_len);
 ret = dma_map_sg(this->dev, sgl, 1, dr);
 if (ret == 0)
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index a5dfbfb..8042ab4 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -24,6 +24,7 @@
 #include <asm/errno.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nftl.h>
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index c23184a..4115c41 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
 #define SM_CIS_VENDOR_OFFSET 0x59
 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
 {
- struct attribute_group *attr_group;
+ attribute_group_no_const *attr_group;
 struct attribute **attributes;
 struct sm_sysfs_attribute *vendor_attribute;
 char *vendor;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 7b11243..b3278a3 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -585,7 +585,7 @@ nla_put_failure:
 return -EMSGSIZE;
 }

-struct rtnl_link_ops bond_link_ops __read_mostly = {
+struct rtnl_link_ops bond_link_ops = {
 .kind = "bond",
 .priv_size = sizeof(struct bonding),
 .setup = bond_setup,
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b3b922a..80bba38 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1444,7 +1444,7 @@ err:
 return -ENODEV;
 }

-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
+static struct rtnl_link_ops caif_hsi_link_ops = {
 .kind = "cfhsi",
 .priv_size = sizeof(struct cfhsi),
 .setup = cfhsi_setup,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 58808f65..0bdc7b3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3

 config CAN_FLEXCAN
 tristate "Support for Freescale FLEXCAN based chips"
- depends on ARM || PPC
+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
 ---help---
 Say Y here if you want to support for Freescale FlexCAN.

diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b0f6924..59e9640 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -959,7 +959,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
 return -EOPNOTSUPP;
 }

-static struct rtnl_link_ops can_link_ops __read_mostly = {
+static struct rtnl_link_ops can_link_ops = {
 .kind = "can",
 .maxtype = IFLA_CAN_MAX,
 .policy = can_policy,
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 674f367..ec3a31f 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
 dev->destructor = free_netdev;
 }

-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
+static struct rtnl_link_ops vcan_link_ops = {
 .kind = "vcan",
 .setup = vcan_setup,
 };
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 49adbf1..fff7ff8 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
 return 0;
 }

-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
+static struct rtnl_link_ops dummy_link_ops = {
 .kind = DRV_NAME,
 .setup = dummy_setup,
 .validate = dummy_validate,
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 0443654..4f0aa18 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
 if (ax->plat->reg_offsets)
 ei_local->reg_offset = ax->plat->reg_offsets;
 else {
+ resource_size_t _mem_size = mem_size;
+ do_div(_mem_size, 0x18);
 ei_local->reg_offset = ax->reg_offsets;
 for (ret = 0; ret < 0x18; ret++)
- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
+ ax->reg_offsets[ret] = _mem_size * ret;
 }

 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 6725dc0..163549c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1216,7 +1216,7 @@ static int tse_shutdown(struct net_device *dev)
 return 0;
 }

-static struct net_device_ops altera_tse_netdev_ops = {
+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
 .ndo_open = tse_open,
 .ndo_stop = tse_shutdown,
 .ndo_start_xmit = tse_start_xmit,
@@ -1453,11 +1453,13 @@ static int altera_tse_probe(struct platform_device *pdev)
 ndev->netdev_ops = &altera_tse_netdev_ops;
 altera_tse_set_ethtool_ops(ndev);

+ pax_open_kernel();
 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

 if (priv->hash_filter)
 altera_tse_netdev_ops.ndo_set_rx_mode =
 tse_set_rx_mode_hashfilter;
+ pax_close_kernel();

 /* Scatter/gather IO is not supported,
 * so it is turned off
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 29a0927..5a348e24 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1122,14 +1122,14 @@ do { \
 * operations, everything works on mask values.
 */
 #define XMDIO_READ(_pdata, _mmd, _reg) \
- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))

 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)

 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))

 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 8a50b01..39c1ad0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,

 memcpy(pdata->ets, ets, sizeof(*pdata->ets));

- pdata->hw_if.config_dcb_tc(pdata);
+ pdata->hw_if->config_dcb_tc(pdata);

 return 0;
 }
@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,

 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));

- pdata->hw_if.config_dcb_pfc(pdata);
+ pdata->hw_if->config_dcb_pfc(pdata);

 return 0;
 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index d81fc6b..6f8ab25 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,

 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct xgbe_channel *channel;
 struct xgbe_ring *ring;
 struct xgbe_ring_data *rdata;
@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)

 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct xgbe_channel *channel;
 struct xgbe_ring *ring;
 struct xgbe_ring_desc *rdesc;
@@ -620,17 +620,12 @@ err_out:
 return 0;
 }

-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
-{
- DBGPR("-->xgbe_init_function_ptrs_desc\n");
-
- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
- desc_if->free_ring_resources = xgbe_free_ring_resources;
- desc_if->map_tx_skb = xgbe_map_tx_skb;
- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
- desc_if->unmap_rdata = xgbe_unmap_rdata;
- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
-
- DBGPR("<--xgbe_init_function_ptrs_desc\n");
-}
+const struct xgbe_desc_if default_xgbe_desc_if = {
+ .alloc_ring_resources = xgbe_alloc_ring_resources,
+ .free_ring_resources = xgbe_free_ring_resources,
+ .map_tx_skb = xgbe_map_tx_skb,
+ .map_rx_buffer = xgbe_map_rx_buffer,
+ .unmap_rdata = xgbe_unmap_rdata,
+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
+};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 400757b..d8c53f6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2748,7 +2748,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)

 static int xgbe_init(struct xgbe_prv_data *pdata)
 {
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;
 int ret;

 DBGPR("-->xgbe_init\n");
@@ -2813,108 +2813,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 return 0;
 }

-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
-{
- DBGPR("-->xgbe_init_function_ptrs\n");
-
- hw_if->tx_complete = xgbe_tx_complete;
-
- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
- hw_if->set_mac_address = xgbe_set_mac_address;
-
- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
-
- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
-
- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
-
- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
-
- hw_if->enable_tx = xgbe_enable_tx;
- hw_if->disable_tx = xgbe_disable_tx;
- hw_if->enable_rx = xgbe_enable_rx;
- hw_if->disable_rx = xgbe_disable_rx;
-
- hw_if->powerup_tx = xgbe_powerup_tx;
- hw_if->powerdown_tx = xgbe_powerdown_tx;
- hw_if->powerup_rx = xgbe_powerup_rx;
- hw_if->powerdown_rx = xgbe_powerdown_rx;
-
- hw_if->dev_xmit = xgbe_dev_xmit;
- hw_if->dev_read = xgbe_dev_read;
- hw_if->enable_int = xgbe_enable_int;
- hw_if->disable_int = xgbe_disable_int;
- hw_if->init = xgbe_init;
- hw_if->exit = xgbe_exit;
+const struct xgbe_hw_if default_xgbe_hw_if = {
+ .tx_complete = xgbe_tx_complete,
+
+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
+ .add_mac_addresses = xgbe_add_mac_addresses,
+ .set_mac_address = xgbe_set_mac_address,
+
+ .enable_rx_csum = xgbe_enable_rx_csum,
+ .disable_rx_csum = xgbe_disable_rx_csum,
+
+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
+
+ .read_mmd_regs = xgbe_read_mmd_regs,
+ .write_mmd_regs = xgbe_write_mmd_regs,
+
+ .set_gmii_speed = xgbe_set_gmii_speed,
+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
+ .set_xgmii_speed = xgbe_set_xgmii_speed,
+
+ .enable_tx = xgbe_enable_tx,
+ .disable_tx = xgbe_disable_tx,
+ .enable_rx = xgbe_enable_rx,
+ .disable_rx = xgbe_disable_rx,
+
+ .powerup_tx = xgbe_powerup_tx,
+ .powerdown_tx = xgbe_powerdown_tx,
+ .powerup_rx = xgbe_powerup_rx,
+ .powerdown_rx = xgbe_powerdown_rx,
+
+ .dev_xmit = xgbe_dev_xmit,
+ .dev_read = xgbe_dev_read,
+ .enable_int = xgbe_enable_int,
+ .disable_int = xgbe_disable_int,
+ .init = xgbe_init,
+ .exit = xgbe_exit,

 /* Descriptor related Sequences have to be initialized here */
- hw_if->tx_desc_init = xgbe_tx_desc_init;
- hw_if->rx_desc_init = xgbe_rx_desc_init;
- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
- hw_if->is_last_desc = xgbe_is_last_desc;
- hw_if->is_context_desc = xgbe_is_context_desc;
- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
+ .tx_desc_init = xgbe_tx_desc_init,
+ .rx_desc_init = xgbe_rx_desc_init,
+ .tx_desc_reset = xgbe_tx_desc_reset,
+ .rx_desc_reset = xgbe_rx_desc_reset,
+ .is_last_desc = xgbe_is_last_desc,
+ .is_context_desc = xgbe_is_context_desc,
+ .tx_start_xmit = xgbe_tx_start_xmit,

 /* For FLOW ctrl */
- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+ .config_tx_flow_control = xgbe_config_tx_flow_control,
+ .config_rx_flow_control = xgbe_config_rx_flow_control,

 /* For RX coalescing */
- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+ .config_rx_coalesce = xgbe_config_rx_coalesce,
+ .config_tx_coalesce = xgbe_config_tx_coalesce,
+ .usec_to_riwt = xgbe_usec_to_riwt,
+ .riwt_to_usec = xgbe_riwt_to_usec,

 /* For RX and TX threshold config */
- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+ .config_rx_threshold = xgbe_config_rx_threshold,
+ .config_tx_threshold = xgbe_config_tx_threshold,

 /* For RX and TX Store and Forward Mode config */
- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+ .config_rsf_mode = xgbe_config_rsf_mode,
+ .config_tsf_mode = xgbe_config_tsf_mode,

 /* For TX DMA Operating on Second Frame config */
- hw_if->config_osp_mode = xgbe_config_osp_mode;
+ .config_osp_mode = xgbe_config_osp_mode,

 /* For RX and TX PBL config */
- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
- hw_if->config_pblx8 = xgbe_config_pblx8;
+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
+ .config_pblx8 = xgbe_config_pblx8,

 /* For MMC statistics support */
- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+ .tx_mmc_int = xgbe_tx_mmc_int,
+ .rx_mmc_int = xgbe_rx_mmc_int,
+ .read_mmc_stats = xgbe_read_mmc_stats,

 /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+ .config_tstamp = xgbe_config_tstamp,
+ .update_tstamp_addend = xgbe_update_tstamp_addend,
+ .set_tstamp_time = xgbe_set_tstamp_time,
+ .get_tstamp_time = xgbe_get_tstamp_time,
+ .get_tx_tstamp = xgbe_get_tx_tstamp,

 /* For Data Center Bridging config */
- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+ .config_dcb_tc = xgbe_config_dcb_tc,
+ .config_dcb_pfc = xgbe_config_dcb_pfc,

 /* For Receive Side Scaling */
- hw_if->enable_rss = xgbe_enable_rss;
- hw_if->disable_rss = xgbe_disable_rss;
- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
-
- DBGPR("<--xgbe_init_function_ptrs\n");
-}
+ .enable_rss = xgbe_enable_rss,
+ .disable_rss = xgbe_disable_rss,
+ .set_rss_hash_key = xgbe_set_rss_hash_key,
+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
+};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 885b02b..4b31a4c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -244,7 +244,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
 * support, tell it now
 */
 if (ring->tx.xmit_more)
- pdata->hw_if.tx_start_xmit(channel, ring);
+ pdata->hw_if->tx_start_xmit(channel, ring);

 return NETDEV_TX_BUSY;
 }
@@ -272,7 +272,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)

 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct xgbe_channel *channel;
 enum xgbe_int int_id;
 unsigned int i;
@@ -294,7 +294,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)

 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct xgbe_channel *channel;
 enum xgbe_int int_id;
 unsigned int i;
@@ -317,7 +317,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 static irqreturn_t xgbe_isr(int irq, void *data)
 {
 struct xgbe_prv_data *pdata = data;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct xgbe_channel *channel;
 unsigned int dma_isr, dma_ch_isr;
 unsigned int mac_isr, mac_tssr;
@@ -673,7 +673,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)

 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;

 DBGPR("-->xgbe_init_tx_coalesce\n");

@@ -687,7 +687,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)

 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;

 DBGPR("-->xgbe_init_rx_coalesce\n");

@@ -701,7 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)

 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 {
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;
 struct xgbe_channel *channel;
 struct xgbe_ring *ring;
 struct xgbe_ring_data *rdata;
@@ -726,7 +726,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)

 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 {
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;
 struct xgbe_channel *channel;
 struct xgbe_ring *ring;
 struct xgbe_ring_data *rdata;
@@ -752,7 +752,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 static void xgbe_adjust_link(struct net_device *netdev)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct phy_device *phydev = pdata->phydev;
 int new_state = 0;

@@ -860,7 +860,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 unsigned long flags;

 DBGPR("-->xgbe_powerdown\n");
@@ -898,7 +898,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 unsigned long flags;

 DBGPR("-->xgbe_powerup\n");
@@ -935,7 +935,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)

 static int xgbe_start(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct net_device *netdev = pdata->netdev;
 int ret;

@@ -976,7 +976,7 @@ err_napi:

 static void xgbe_stop(struct xgbe_prv_data *pdata)
 {
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct xgbe_channel *channel;
 struct net_device *netdev = pdata->netdev;
 struct netdev_queue *txq;
@@ -1203,7 +1203,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
 return -ERANGE;
 }

- pdata->hw_if.config_tstamp(pdata, mac_tscr);
+ pdata->hw_if->config_tstamp(pdata, mac_tscr);

 memcpy(&pdata->tstamp_config, &config, sizeof(config));

@@ -1352,7 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;
 int ret;

 DBGPR("-->xgbe_open\n");
@@ -1424,7 +1424,7 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;

 DBGPR("-->xgbe_close\n");

@@ -1452,8 +1452,8 @@ static int xgbe_close(struct net_device *netdev)
 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;
 struct xgbe_channel *channel;
 struct xgbe_ring *ring;
 struct xgbe_packet_data *packet;
@@ -1521,7 +1521,7 @@ tx_netdev_return:
 static void xgbe_set_rx_mode(struct net_device *netdev)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 unsigned int pr_mode, am_mode;

 DBGPR("-->xgbe_set_rx_mode\n");
@@ -1540,7 +1540,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct sockaddr *saddr = addr;

 DBGPR("-->xgbe_set_mac_address\n");
@@ -1607,7 +1607,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,

 DBGPR("-->%s\n", __func__);

- pdata->hw_if.read_mmc_stats(pdata);
+ pdata->hw_if->read_mmc_stats(pdata);

 s->rx_packets = pstats->rxframecount_gb;
 s->rx_bytes = pstats->rxoctetcount_gb;
@@ -1634,7 +1634,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
 u16 vid)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;

 DBGPR("-->%s\n", __func__);

@@ -1650,7 +1650,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
 u16 vid)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;

 DBGPR("-->%s\n", __func__);

@@ -1716,7 +1716,7 @@ static int xgbe_set_features(struct net_device *netdev,
 netdev_features_t features)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
 int ret = 0;

@@ -1781,8 +1781,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
 static void xgbe_rx_refresh(struct xgbe_channel *channel)
 {
 struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;
 struct xgbe_ring *ring = channel->rx_ring;
 struct xgbe_ring_data *rdata;

@@ -1835,8 +1835,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
+ struct xgbe_desc_if *desc_if = pdata->desc_if;
 struct xgbe_ring *ring = channel->tx_ring;
 struct xgbe_ring_data *rdata;
 struct xgbe_ring_desc *rdesc;
@@ -1901,7 +1901,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 {
 struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 struct xgbe_ring *ring = channel->rx_ring;
 struct xgbe_ring_data *rdata;
 struct xgbe_packet_data *packet;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index ebf4893..a8f51c6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,

 DBGPR("-->%s\n", __func__);

- pdata->hw_if.read_mmc_stats(pdata);
+ pdata->hw_if->read_mmc_stats(pdata);
 for (i = 0; i < XGBE_STATS_COUNT; i++) {
 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
 *data++ = *(u64 *)stat;
@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
 struct ethtool_coalesce *ec)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 unsigned int riwt;

 DBGPR("-->xgbe_get_coalesce\n");
@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
 struct ethtool_coalesce *ec)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 unsigned int rx_frames, rx_riwt, rx_usecs;
 unsigned int tx_frames, tx_usecs;

@@ -536,7 +536,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
 const u8 *key, const u8 hfunc)
 {
 struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 unsigned int ret;

 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 32dd651..225cca3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
 DBGPR("<--xgbe_default_config\n");
 }

-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
-{
- xgbe_init_function_ptrs_dev(&pdata->hw_if);
- xgbe_init_function_ptrs_desc(&pdata->desc_if);
-}
-
 #ifdef CONFIG_ACPI
 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
 {
@@ -396,9 +390,8 @@ static int xgbe_probe(struct platform_device *pdev)
 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

 /* Set all the function pointers */
- xgbe_init_all_fptrs(pdata);
- hw_if = &pdata->hw_if;
- desc_if = &pdata->desc_if;
+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
+ desc_if = pdata->desc_if = &default_xgbe_desc_if;

 /* Issue software reset to device */
 hw_if->exit(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 59e267f..0842a88 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -126,7 +126,7 @@
 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
 {
 struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 int mmd_data;

 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
 u16 mmd_val)
 {
 struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_hw_if *hw_if = pdata->hw_if;
 int mmd_data = mmd_val;

 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index f326178..8bd7daf 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
 tstamp_cc);
 u64 nsec;

- nsec = pdata->hw_if.get_tstamp_time(pdata);
+ nsec = pdata->hw_if->get_tstamp_time(pdata);

 return nsec;
 }
@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)

 spin_lock_irqsave(&pdata->tstamp_lock, flags);

- pdata->hw_if.update_tstamp_addend(pdata, addend);
+ pdata->hw_if->update_tstamp_addend(pdata, addend);

 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 13e8f95..1d8beef 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -675,8 +675,8 @@ struct xgbe_prv_data {
 int dev_irq;
 unsigned int per_channel_irq;

- struct xgbe_hw_if hw_if;
- struct xgbe_desc_if desc_if;
+ struct xgbe_hw_if *hw_if;
+ struct xgbe_desc_if *desc_if;

 /* AXI DMA settings */
 unsigned int coherent;
@@ -798,6 +798,9 @@ struct xgbe_prv_data {
 #endif
 };

+extern const struct xgbe_hw_if default_xgbe_hw_if;
+extern const struct xgbe_desc_if default_xgbe_desc_if;
+
 /* Function prototypes*/

 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index adcacda..fa6e0ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
 {
 /* RX_MODE controlling object */
- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+ bnx2x_init_rx_mode_obj(bp);

 /* multicast configuration controlling object */
 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 07cdf9b..b08ecc7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
 return rc;
 }

-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
- struct bnx2x_rx_mode_obj *o)
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
 {
 if (CHIP_IS_E1x(bp)) {
- o->wait_comp = bnx2x_empty_rx_mode_wait;
- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
 } else {
- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
- o->config_rx_mode = bnx2x_set_rx_mode_e2;
+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
 }
 }

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 86baecb..ff3bb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,

 /********************* RX MODE ****************/

-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
- struct bnx2x_rx_mode_obj *o);
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);

 /**
 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 31c9f82..e65e986 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -150,6 +150,7 @@
 #define CHIPREV_ID_5750_A0 0x4000
 #define CHIPREV_ID_5750_A1 0x4001
 #define CHIPREV_ID_5750_A3 0x4003
+#define CHIPREV_ID_5750_C1 0x4201
 #define CHIPREV_ID_5750_C2 0x4202
 #define CHIPREV_ID_5752_A0_HW 0x5000
 #define CHIPREV_ID_5752_A0 0x6000
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 903466e..b285864 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
 }

 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
- bna_cb_ioceth_enable,
- bna_cb_ioceth_disable,
- bna_cb_ioceth_hbfail,
- bna_cb_ioceth_reset
+ .enable_cbfn = bna_cb_ioceth_enable,
+ .disable_cbfn = bna_cb_ioceth_disable,
+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
+ .reset_cbfn = bna_cb_ioceth_reset
 };

 static void bna_attr_init(struct bna_ioceth *ioceth)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 8cffcdf..aadf043 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
 */
 struct l2t_skb_cb {
 arp_failure_handler_func arp_failure_handler;
-};
+} __no_const;

 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d929951..a2c23f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2215,7 +2215,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,

 int i;
 struct adapter *ap = netdev2adap(dev);
- static const unsigned int *reg_ranges;
+ const unsigned int *reg_ranges;
 int arr_size = 0, buf_size = 0;

 if (is_t4(ap->params.chip)) {
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index badff18..e15c4ec 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 for (i=0; i<ETH_ALEN; i++) {
 tmp.addr[i] = dev->dev_addr[i];
 }
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
 break;

 case DE4X5_SET_HWADDR: /* Set the hardware address */
@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 spin_lock_irqsave(&lp->lock, flags);
 memcpy(&statbuf, &lp->pktStats, ioc->len);
 spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
 return -EFAULT;
 break;
 }
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e6b790f..051ba2d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -536,7 +536,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)

 if (wrapped)
 newacc += 65536;
- ACCESS_ONCE(*acc) = newacc;
+ ACCESS_ONCE_RW(*acc) = newacc;
 }

 static void populate_erx_stats(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 6d0c5d5..55be363 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -30,6 +30,8 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
 #include <net/ip.h>

 #include "ftgmac100.h"
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index dce5f7b..2433466 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -31,6 +31,8 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>

 #include "ftmac100.h"

diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index fabcfa1..188fd22 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);

 /* Update the base adjustement value. */
- ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
 smp_mb(); /* Force the above update. */
 }

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 79c00f5..8da39f6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -785,7 +785,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 }

 /* update the base incval used to calculate frequency adjustment */
- ACCESS_ONCE(adapter->base_incval) = incval;
+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
 smp_mb();

 /* need lock to prevent incorrect read while modifying cyclecounter */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 55f9f5c..18cc64b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 wmb();

 /* we want to dirty this cache line once */
- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;

 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 6223930..975033d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
 struct __vxge_hw_fifo *fifo;
 struct vxge_hw_fifo_config *config;
 u32 txdl_size, txdl_per_memblock;
- struct vxge_hw_mempool_cbs fifo_mp_callback;
+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
+ };
+
 struct __vxge_hw_virtualpath *vpath;

 if ((vp == NULL) || (attr == NULL)) {
@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
 goto exit;
 }

- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
 fifo->mempool =
 __vxge_hw_mempool_create(vpath->hldev,
 fifo->config->memblock_size,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 2bb48d5..d1a865d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ pax_open_kernel();
+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ pax_close_kernel();
 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
 max_tx_rings = QLCNIC_MAX_TX_RINGS;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index be7d7a6..a8983f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
 case QLCNIC_NON_PRIV_FUNC:
 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ pax_open_kernel();
+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ pax_close_kernel();
 break;
 case QLCNIC_PRIV_FUNC:
 ahw->op_mode = QLCNIC_PRIV_FUNC;
 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ pax_open_kernel();
+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ pax_close_kernel();
 break;
 case QLCNIC_MGMT_FUNC:
 ahw->op_mode = QLCNIC_MGMT_FUNC;
 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ pax_open_kernel();
+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ pax_close_kernel();
 break;
 default:
 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 332bb8a..e6adcd1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1285,7 +1285,7 @@ flash_temp:
 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 {
 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- static const struct qlcnic_dump_operations *fw_dump_ops;
+ const struct qlcnic_dump_operations *fw_dump_ops;
 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
 u32 entry_offset, dump, no_entries, buf_offset = 0;
 int i, k, ops_cnt, ops_index, dump_size = 0;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c70ab40..00b28e0 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -788,22 +788,22 @@ struct rtl8169_private {
 struct mdio_ops {
 void (*write)(struct rtl8169_private *, int, int);
 int (*read)(struct rtl8169_private *, int);
- } mdio_ops;
+ } __no_const mdio_ops;

 struct pll_power_ops {
 void (*down)(struct rtl8169_private *);
 void (*up)(struct rtl8169_private *);
- } pll_power_ops;
+ } __no_const pll_power_ops;

 struct jumbo_ops {
 void (*enable)(struct rtl8169_private *);
 void (*disable)(struct rtl8169_private *);
- } jumbo_ops;
+ } __no_const jumbo_ops;

 struct csi_ops {
 void (*write)(struct rtl8169_private *, int, int);
 u32 (*read)(struct rtl8169_private *, int);
- } csi_ops;
+ } __no_const csi_ops;

 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 6b861e3..204ac86 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 ptp->start.dma_addr);

 /* Clear flag that signals MC ready */
- ACCESS_ONCE(*start) = 0;
+ ACCESS_ONCE_RW(*start) = 0;
 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
 EFX_BUG_ON_PARANOID(rc);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 10b6173..b605dfd5 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -46,7 +46,7 @@ struct efx_loopback_payload {
 struct iphdr ip;
 struct udphdr udp;
 __be16 iteration;
- const char msg[64];
+ char msg[64];
 } __packed;

 /* Loopback test source MAC address */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 08c483b..2c4a553 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)

 writel(value, ioaddr + MMC_CNTRL);

- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
- MMC_CNTRL, value);
+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+// MMC_CNTRL, value);
 }

 /* To mask all all interrupts.*/
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 17e2766..c332f1e 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2514,7 +2514,7 @@ static struct platform_driver rhine_driver_platform = {
 }
 };

-static struct dmi_system_id rhine_dmi_table[] __initdata = {
+static const struct dmi_system_id rhine_dmi_table[] __initconst = {
 {
 .ident = "EPIA-M",
 .matches = {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 384ca4f..dd7d4f9 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -171,7 +171,7 @@ struct rndis_device {
 enum rndis_device_state state;
 bool link_state;
 bool link_change;
- atomic_t new_req_id;
+ atomic_unchecked_t new_req_id;

 spinlock_t request_lock;
 struct list_head req_list;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7816d98..7890614 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
 * template
 */
 set = &rndis_msg->msg.set_req;
- set->req_id = atomic_inc_return(&dev->new_req_id);
+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);

 /* Add to the request list */
 spin_lock_irqsave(&dev->request_lock, flags);
@@ -918,7 +918,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)

 /* Setup the rndis set */
 halt = &request->request_msg.msg.halt_req;
- halt->req_id = atomic_inc_return(&dev->new_req_id);
+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);

 /* Ignore return since this msg is optional. */
 rndis_filter_send_request(dev, request);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 34f846b..4a0d5b1 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
 return 0;
 }

-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
+static struct rtnl_link_ops ifb_link_ops = {
 .kind = "ifb",
 .priv_size = sizeof(struct ifb_private),
 .setup = ifb_setup,
48237diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48238index 1df38bd..4bc20b0 100644
48239--- a/drivers/net/macvlan.c
48240+++ b/drivers/net/macvlan.c
48241@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48242 free_nskb:
48243 kfree_skb(nskb);
48244 err:
48245- atomic_long_inc(&skb->dev->rx_dropped);
48246+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48247 }
48248
48249 static void macvlan_flush_sources(struct macvlan_port *port,
48250@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48251 int macvlan_link_register(struct rtnl_link_ops *ops)
48252 {
48253 /* common fields */
48254- ops->priv_size = sizeof(struct macvlan_dev);
48255- ops->validate = macvlan_validate;
48256- ops->maxtype = IFLA_MACVLAN_MAX;
48257- ops->policy = macvlan_policy;
48258- ops->changelink = macvlan_changelink;
48259- ops->get_size = macvlan_get_size;
48260- ops->fill_info = macvlan_fill_info;
48261+ pax_open_kernel();
48262+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48263+ *(void **)&ops->validate = macvlan_validate;
48264+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48265+ *(const void **)&ops->policy = macvlan_policy;
48266+ *(void **)&ops->changelink = macvlan_changelink;
48267+ *(void **)&ops->get_size = macvlan_get_size;
48268+ *(void **)&ops->fill_info = macvlan_fill_info;
48269+ pax_close_kernel();
48270
48271 return rtnl_link_register(ops);
48272 };
48273@@ -1551,7 +1553,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48274 return NOTIFY_DONE;
48275 }
48276
48277-static struct notifier_block macvlan_notifier_block __read_mostly = {
48278+static struct notifier_block macvlan_notifier_block = {
48279 .notifier_call = macvlan_device_event,
48280 };
48281
48282diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48283index 27ecc5c..f636328 100644
48284--- a/drivers/net/macvtap.c
48285+++ b/drivers/net/macvtap.c
48286@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48287 dev->tx_queue_len = TUN_READQ_SIZE;
48288 }
48289
48290-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48291+static struct rtnl_link_ops macvtap_link_ops = {
48292 .kind = "macvtap",
48293 .setup = macvtap_setup,
48294 .newlink = macvtap_newlink,
48295@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48296
48297 ret = 0;
48298 u = q->flags;
48299- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48300+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48301 put_user(u, &ifr->ifr_flags))
48302 ret = -EFAULT;
48303 macvtap_put_vlan(vlan);
48304@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48305 return NOTIFY_DONE;
48306 }
48307
48308-static struct notifier_block macvtap_notifier_block __read_mostly = {
48309+static struct notifier_block macvtap_notifier_block = {
48310 .notifier_call = macvtap_device_event,
48311 };
48312
48313diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48314index 34924df..a747360 100644
48315--- a/drivers/net/nlmon.c
48316+++ b/drivers/net/nlmon.c
48317@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48318 return 0;
48319 }
48320
48321-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48322+static struct rtnl_link_ops nlmon_link_ops = {
48323 .kind = "nlmon",
48324 .priv_size = sizeof(struct nlmon),
48325 .setup = nlmon_setup,
48326diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48327index bdfe51f..e7845c7 100644
48328--- a/drivers/net/phy/phy_device.c
48329+++ b/drivers/net/phy/phy_device.c
48330@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48331 * zero on success.
48332 *
48333 */
48334-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48335+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48336 struct phy_c45_device_ids *c45_ids) {
48337 int phy_reg;
48338 int i, reg_addr;
48339@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48340 * its return value is in turn returned.
48341 *
48342 */
48343-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48344+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48345 bool is_c45, struct phy_c45_device_ids *c45_ids)
48346 {
48347 int phy_reg;
48348@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48349 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48350 {
48351 struct phy_c45_device_ids c45_ids = {0};
48352- u32 phy_id = 0;
48353+ int phy_id = 0;
48354 int r;
48355
48356 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48357diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48358index af034db..1611c0b2 100644
48359--- a/drivers/net/ppp/ppp_generic.c
48360+++ b/drivers/net/ppp/ppp_generic.c
48361@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48362 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48363 struct ppp_stats stats;
48364 struct ppp_comp_stats cstats;
48365- char *vers;
48366
48367 switch (cmd) {
48368 case SIOCGPPPSTATS:
48369@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48370 break;
48371
48372 case SIOCGPPPVER:
48373- vers = PPP_VERSION;
48374- if (copy_to_user(addr, vers, strlen(vers) + 1))
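The rndis_wlan hunk drops a tautology: rts_threshold is a u32, so "rts_threshold < 0" can never be true and compilers flag it as a comparison that is always false; only the upper-bound clamp does any work. Illustrated:

#include <stdio.h>

int main(void)
{
    unsigned int rts_threshold = 5000;

    /* 'rts_threshold < 0' is always false for an unsigned type, so only
     * the upper-bound clamp remains. */
    if (rts_threshold > 2347)
        rts_threshold = 2347;
    printf("%u\n", rts_threshold);
    return 0;
}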
48375+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48376 break;
48377 err = 0;
48378 break;
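The SIOCGPPPVER change works because, for a string literal, sizeof already counts the terminating NUL: sizeof(PPP_VERSION) equals strlen(PPP_VERSION) + 1 as a compile-time constant, so the temporary vers pointer becomes unnecessary. A quick check (assuming the usual "2.4.2" definition):

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"

int main(void)
{
    /* sizeof on a string literal counts the NUL, so both are 6 here,
     * but sizeof is a compile-time constant and needs no temporary. */
    printf("%zu %zu\n", sizeof(PPP_VERSION), strlen(PPP_VERSION) + 1);
    return 0;
}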
48379diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48380index 079f7ad..b2a2bfa7 100644
48381--- a/drivers/net/slip/slhc.c
48382+++ b/drivers/net/slip/slhc.c
48383@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48384 register struct tcphdr *thp;
48385 register struct iphdr *ip;
48386 register struct cstate *cs;
48387- int len, hdrlen;
48388+ long len, hdrlen;
48389 unsigned char *cp = icp;
48390
48391 /* We've got a compressed packet; read the change byte */
48392diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48393index 7d39484..d58499d 100644
48394--- a/drivers/net/team/team.c
48395+++ b/drivers/net/team/team.c
48396@@ -2099,7 +2099,7 @@ static unsigned int team_get_num_rx_queues(void)
48397 return TEAM_DEFAULT_NUM_RX_QUEUES;
48398 }
48399
48400-static struct rtnl_link_ops team_link_ops __read_mostly = {
48401+static struct rtnl_link_ops team_link_ops = {
48402 .kind = DRV_NAME,
48403 .priv_size = sizeof(struct team),
48404 .setup = team_setup,
48405@@ -2889,7 +2889,7 @@ static int team_device_event(struct notifier_block *unused,
48406 return NOTIFY_DONE;
48407 }
48408
48409-static struct notifier_block team_notifier_block __read_mostly = {
48410+static struct notifier_block team_notifier_block = {
48411 .notifier_call = team_device_event,
48412 };
48413
48414diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48415index 857dca4..642f532 100644
48416--- a/drivers/net/tun.c
48417+++ b/drivers/net/tun.c
48418@@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48419 return -EINVAL;
48420 }
48421
48422-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48423+static struct rtnl_link_ops tun_link_ops = {
48424 .kind = DRV_NAME,
48425 .priv_size = sizeof(struct tun_struct),
48426 .setup = tun_setup,
48427@@ -1830,7 +1830,7 @@ unlock:
48428 }
48429
48430 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48431- unsigned long arg, int ifreq_len)
48432+ unsigned long arg, size_t ifreq_len)
48433 {
48434 struct tun_file *tfile = file->private_data;
48435 struct tun_struct *tun;
48436@@ -1844,6 +1844,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48437 int le;
48438 int ret;
48439
48440+ if (ifreq_len > sizeof ifr)
48441+ return -EFAULT;
48442+
48443 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48444 if (copy_from_user(&ifr, argp, ifreq_len))
48445 return -EFAULT;
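The tun.c hunk adds an upper-bound check on the caller-supplied ifreq_len before anything is copied in, and widens the parameter to size_t so a negative length cannot slip through as a sign-extended huge value. A userspace sketch with memcpy() standing in for copy_from_user() (the struct and error value are illustrative):

#include <stdio.h>
#include <string.h>

struct ifreq_like { char name[16]; int flags; };

/* Reject an oversized caller length before copying; with size_t there is
 * no negative value that could bypass the comparison. -14 stands in for
 * -EFAULT, matching the hunk. */
static long do_ioctl(const void *argp, size_t ifreq_len)
{
    struct ifreq_like ifr;

    if (ifreq_len > sizeof ifr)
        return -14;
    memcpy(&ifr, argp, ifreq_len);  /* stands in for copy_from_user() */
    return 0;
}

int main(void)
{
    char buf[64] = { 0 };
    printf("%ld %ld\n", do_ioctl(buf, sizeof(struct ifreq_like)),
           do_ioctl(buf, sizeof buf));
    return 0;
}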
48446diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48447index 778e915..58c4d95 100644
48448--- a/drivers/net/usb/hso.c
48449+++ b/drivers/net/usb/hso.c
48450@@ -70,7 +70,7 @@
48451 #include <asm/byteorder.h>
48452 #include <linux/serial_core.h>
48453 #include <linux/serial.h>
48454-
48455+#include <asm/local.h>
48456
48457 #define MOD_AUTHOR "Option Wireless"
48458 #define MOD_DESCRIPTION "USB High Speed Option driver"
48459@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48460 struct urb *urb;
48461
48462 urb = serial->rx_urb[0];
48463- if (serial->port.count > 0) {
48464+ if (atomic_read(&serial->port.count) > 0) {
48465 count = put_rxbuf_data(urb, serial);
48466 if (count == -1)
48467 return;
48468@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48469 DUMP1(urb->transfer_buffer, urb->actual_length);
48470
48471 /* Anyone listening? */
48472- if (serial->port.count == 0)
48473+ if (atomic_read(&serial->port.count) == 0)
48474 return;
48475
48476 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48477@@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48478 tty_port_tty_set(&serial->port, tty);
48479
48480 /* check for port already opened, if not set the termios */
48481- serial->port.count++;
48482- if (serial->port.count == 1) {
48483+ if (atomic_inc_return(&serial->port.count) == 1) {
48484 serial->rx_state = RX_IDLE;
48485 /* Force default termio settings */
48486 _hso_serial_set_termios(tty, NULL);
48487@@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48488 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48489 if (result) {
48490 hso_stop_serial_device(serial->parent);
48491- serial->port.count--;
48492+ atomic_dec(&serial->port.count);
48493 } else {
48494 kref_get(&serial->parent->ref);
48495 }
48496@@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48497
48498 /* reset the rts and dtr */
48499 /* do the actual close */
48500- serial->port.count--;
48501+ atomic_dec(&serial->port.count);
48502
48503- if (serial->port.count <= 0) {
48504- serial->port.count = 0;
48505+ if (atomic_read(&serial->port.count) <= 0) {
48506+ atomic_set(&serial->port.count, 0);
48507 tty_port_tty_set(&serial->port, NULL);
48508 if (!usb_gone)
48509 hso_stop_serial_device(serial->parent);
48510@@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48511
48512 /* the actual setup */
48513 spin_lock_irqsave(&serial->serial_lock, flags);
48514- if (serial->port.count)
48515+ if (atomic_read(&serial->port.count))
48516 _hso_serial_set_termios(tty, old);
48517 else
48518 tty->termios = *old;
48519@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
48520 D1("Pending read interrupt on port %d\n", i);
48521 spin_lock(&serial->serial_lock);
48522 if (serial->rx_state == RX_IDLE &&
48523- serial->port.count > 0) {
48524+ atomic_read(&serial->port.count) > 0) {
48525 /* Setup and send a ctrl req read on
48526 * port i */
48527 if (!serial->rx_urb_filled[0]) {
48528@@ -3053,7 +3052,7 @@ static int hso_resume(struct usb_interface *iface)
48529 /* Start all serial ports */
48530 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48531 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48532- if (dev2ser(serial_table[i])->port.count) {
48533+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48534 result =
48535 hso_start_serial_device(serial_table[i], GFP_NOIO);
48536 hso_kick_transmit(dev2ser(serial_table[i]));
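The hso.c hunks convert the open count to an atomic and fold the increment-and-test into atomic_inc_return(), so the "first opener initializes, last closer tears down" logic no longer races against concurrent opens. A C11 analogue of that accounting:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void port_open(void)
{
    /* atomic_inc_return(&count) == 1  maps onto  fetch_add + 1 == 1 */
    if (atomic_fetch_add(&port_count, 1) + 1 == 1)
        printf("first opener: start hardware\n");
}

static void port_close(void)
{
    if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
        atomic_store(&port_count, 0);
        printf("last closer: stop hardware\n");
    }
}

int main(void)
{
    port_open(); port_open();
    port_close(); port_close();
    return 0;
}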
48537diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48538index 9f7c0ab..1577b4a 100644
48539--- a/drivers/net/usb/r8152.c
48540+++ b/drivers/net/usb/r8152.c
48541@@ -601,7 +601,7 @@ struct r8152 {
48542 void (*unload)(struct r8152 *);
48543 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48544 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48545- } rtl_ops;
48546+ } __no_const rtl_ops;
48547
48548 int intr_interval;
48549 u32 saved_wolopts;
48550diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48551index a2515887..6d13233 100644
48552--- a/drivers/net/usb/sierra_net.c
48553+++ b/drivers/net/usb/sierra_net.c
48554@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48555 /* atomic counter partially included in MAC address to make sure 2 devices
48556 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48557 */
48558-static atomic_t iface_counter = ATOMIC_INIT(0);
48559+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48560
48561 /*
48562 * SYNC Timer Delay definition used to set the expiry time
48563@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48564 dev->net->netdev_ops = &sierra_net_device_ops;
48565
48566 /* change MAC addr to include, ifacenum, and to be unique */
48567- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48568+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48569 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48570
48571 /* we will have to manufacture ethernet headers, prepare template */
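atomic_unchecked_t is grsecurity/PaX's opt-out from REFCOUNT overflow detection, for counters that may legitimately wrap; here the counter only feeds a single MAC-address byte, so wrapping past 255 is harmless by design. A userspace analogue:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint iface_counter;

/* Wrapping past 255 is fine: the value only seeds one MAC byte. */
static unsigned char next_mac_byte(void)
{
    return (unsigned char)(atomic_fetch_add(&iface_counter, 1) + 1);
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("dev_addr[4] = %u\n", next_mac_byte());
    return 0;
}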
48572diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48573index 59b0e97..a6ed579 100644
48574--- a/drivers/net/virtio_net.c
48575+++ b/drivers/net/virtio_net.c
48576@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48577 #define RECEIVE_AVG_WEIGHT 64
48578
48579 /* Minimum alignment for mergeable packet buffers. */
48580-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48581+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48582
48583 #define VIRTNET_DRIVER_VERSION "1.0.0"
48584
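The 256UL change exists because the kernel's max() refuses arguments of different types; when L1_CACHE_BYTES expands to an unsigned long expression, the bare int literal 256 fails that check. A sketch of the type-checking max() idiom (GNU C statement expression, as in the kernel; the pointer comparison is what trips the compiler on mismatched types):

#include <stdio.h>

/* Type-checking max(): the pointer comparison warns when the two
 * arguments have different types, which is why 256 gains the UL. */
#define max(x, y) ({                \
    __typeof__(x) _x = (x);         \
    __typeof__(y) _y = (y);         \
    (void)(&_x == &_y);             \
    _x > _y ? _x : _y; })

int main(void)
{
    unsigned long cache_bytes = 64;           /* stands in for L1_CACHE_BYTES */
    printf("%lu\n", max(cache_bytes, 256UL)); /* types agree: no warning */
    return 0;
}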
48585diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48586index fceb637..37c70fd 100644
48587--- a/drivers/net/vxlan.c
48588+++ b/drivers/net/vxlan.c
48589@@ -2935,7 +2935,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
48590 return vxlan->net;
48591 }
48592
48593-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48594+static struct rtnl_link_ops vxlan_link_ops = {
48595 .kind = "vxlan",
48596 .maxtype = IFLA_VXLAN_MAX,
48597 .policy = vxlan_policy,
48598@@ -2983,7 +2983,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48599 return NOTIFY_DONE;
48600 }
48601
48602-static struct notifier_block vxlan_notifier_block __read_mostly = {
48603+static struct notifier_block vxlan_notifier_block = {
48604 .notifier_call = vxlan_lowerdev_event,
48605 };
48606
48607diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48608index 5920c99..ff2e4a5 100644
48609--- a/drivers/net/wan/lmc/lmc_media.c
48610+++ b/drivers/net/wan/lmc/lmc_media.c
48611@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48612 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48613
48614 lmc_media_t lmc_ds3_media = {
48615- lmc_ds3_init, /* special media init stuff */
48616- lmc_ds3_default, /* reset to default state */
48617- lmc_ds3_set_status, /* reset status to state provided */
48618- lmc_dummy_set_1, /* set clock source */
48619- lmc_dummy_set2_1, /* set line speed */
48620- lmc_ds3_set_100ft, /* set cable length */
48621- lmc_ds3_set_scram, /* set scrambler */
48622- lmc_ds3_get_link_status, /* get link status */
48623- lmc_dummy_set_1, /* set link status */
48624- lmc_ds3_set_crc_length, /* set CRC length */
48625- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48626- lmc_ds3_watchdog
48627+ .init = lmc_ds3_init, /* special media init stuff */
48628+ .defaults = lmc_ds3_default, /* reset to default state */
48629+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48630+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48631+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48632+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48633+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48634+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48635+ .set_link_status = lmc_dummy_set_1, /* set link status */
48636+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48637+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48638+ .watchdog = lmc_ds3_watchdog
48639 };
48640
48641 lmc_media_t lmc_hssi_media = {
48642- lmc_hssi_init, /* special media init stuff */
48643- lmc_hssi_default, /* reset to default state */
48644- lmc_hssi_set_status, /* reset status to state provided */
48645- lmc_hssi_set_clock, /* set clock source */
48646- lmc_dummy_set2_1, /* set line speed */
48647- lmc_dummy_set_1, /* set cable length */
48648- lmc_dummy_set_1, /* set scrambler */
48649- lmc_hssi_get_link_status, /* get link status */
48650- lmc_hssi_set_link_status, /* set link status */
48651- lmc_hssi_set_crc_length, /* set CRC length */
48652- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48653- lmc_hssi_watchdog
48654+ .init = lmc_hssi_init, /* special media init stuff */
48655+ .defaults = lmc_hssi_default, /* reset to default state */
48656+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48657+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48658+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48659+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48660+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48661+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48662+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48663+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48664+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48665+ .watchdog = lmc_hssi_watchdog
48666 };
48667
48668-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48669- lmc_ssi_default, /* reset to default state */
48670- lmc_ssi_set_status, /* reset status to state provided */
48671- lmc_ssi_set_clock, /* set clock source */
48672- lmc_ssi_set_speed, /* set line speed */
48673- lmc_dummy_set_1, /* set cable length */
48674- lmc_dummy_set_1, /* set scrambler */
48675- lmc_ssi_get_link_status, /* get link status */
48676- lmc_ssi_set_link_status, /* set link status */
48677- lmc_ssi_set_crc_length, /* set CRC length */
48678- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48679- lmc_ssi_watchdog
48680+lmc_media_t lmc_ssi_media = {
48681+ .init = lmc_ssi_init, /* special media init stuff */
48682+ .defaults = lmc_ssi_default, /* reset to default state */
48683+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48684+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48685+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48686+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48687+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48688+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48689+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48690+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48691+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48692+ .watchdog = lmc_ssi_watchdog
48693 };
48694
48695 lmc_media_t lmc_t1_media = {
48696- lmc_t1_init, /* special media init stuff */
48697- lmc_t1_default, /* reset to default state */
48698- lmc_t1_set_status, /* reset status to state provided */
48699- lmc_t1_set_clock, /* set clock source */
48700- lmc_dummy_set2_1, /* set line speed */
48701- lmc_dummy_set_1, /* set cable length */
48702- lmc_dummy_set_1, /* set scrambler */
48703- lmc_t1_get_link_status, /* get link status */
48704- lmc_dummy_set_1, /* set link status */
48705- lmc_t1_set_crc_length, /* set CRC length */
48706- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48707- lmc_t1_watchdog
48708+ .init = lmc_t1_init, /* special media init stuff */
48709+ .defaults = lmc_t1_default, /* reset to default state */
48710+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48711+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48712+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48713+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48714+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48715+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48716+ .set_link_status = lmc_dummy_set_1, /* set link status */
48717+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48718+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48719+ .watchdog = lmc_t1_watchdog
48720 };
48721
48722 static void
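The lmc_media.c hunks convert positional initializers to designated ones: each member is named, so the tables no longer depend on the field order of lmc_media_t, and any member left out is zero-initialized. A minimal example of the idiom (names are illustrative):

#include <stdio.h>

struct media_ops {
    void (*init)(void);
    int  (*get_link_status)(void);
};

static void ds3_init(void) { puts("init"); }
static int  ds3_link(void) { return 1; }

/* Named members: order-independent, unnamed members zeroed. */
static struct media_ops ds3_media = {
    .init            = ds3_init,
    .get_link_status = ds3_link,
};

int main(void)
{
    ds3_media.init();
    printf("link=%d\n", ds3_media.get_link_status());
    return 0;
}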
48723diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48724index feacc3b..5bac0de 100644
48725--- a/drivers/net/wan/z85230.c
48726+++ b/drivers/net/wan/z85230.c
48727@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48728
48729 struct z8530_irqhandler z8530_sync =
48730 {
48731- z8530_rx,
48732- z8530_tx,
48733- z8530_status
48734+ .rx = z8530_rx,
48735+ .tx = z8530_tx,
48736+ .status = z8530_status
48737 };
48738
48739 EXPORT_SYMBOL(z8530_sync);
48740@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48741 }
48742
48743 static struct z8530_irqhandler z8530_dma_sync = {
48744- z8530_dma_rx,
48745- z8530_dma_tx,
48746- z8530_dma_status
48747+ .rx = z8530_dma_rx,
48748+ .tx = z8530_dma_tx,
48749+ .status = z8530_dma_status
48750 };
48751
48752 static struct z8530_irqhandler z8530_txdma_sync = {
48753- z8530_rx,
48754- z8530_dma_tx,
48755- z8530_dma_status
48756+ .rx = z8530_rx,
48757+ .tx = z8530_dma_tx,
48758+ .status = z8530_dma_status
48759 };
48760
48761 /**
48762@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48763
48764 struct z8530_irqhandler z8530_nop=
48765 {
48766- z8530_rx_clear,
48767- z8530_tx_clear,
48768- z8530_status_clear
48769+ .rx = z8530_rx_clear,
48770+ .tx = z8530_tx_clear,
48771+ .status = z8530_status_clear
48772 };
48773
48774
48775diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48776index 0b60295..b8bfa5b 100644
48777--- a/drivers/net/wimax/i2400m/rx.c
48778+++ b/drivers/net/wimax/i2400m/rx.c
48779@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48780 if (i2400m->rx_roq == NULL)
48781 goto error_roq_alloc;
48782
48783- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48784+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48785 GFP_KERNEL);
48786 if (rd == NULL) {
48787 result = -ENOMEM;
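kcalloc() multiplies its first two arguments with overflow checking, so the product is the same either way; the swap above appears to serve grsecurity's size_overflow plugin, which keys on where the sizeof() expression sits. The overflow check itself is the important part, sketched here in userspace:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* calloc-style allocation: check n * size for overflow, then allocate. */
static void *checked_alloc(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;                          /* product would overflow */
    return calloc(n, size);
}

int main(void)
{
    void *ok  = checked_alloc(16, 8);         /* fine */
    void *bad = checked_alloc(SIZE_MAX, 8);   /* rejected */
    printf("%s %s\n", ok ? "alloc" : "null", bad ? "alloc" : "null");
    free(ok);
    return 0;
}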
48788diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48789index e71a2ce..2268d61 100644
48790--- a/drivers/net/wireless/airo.c
48791+++ b/drivers/net/wireless/airo.c
48792@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48793 struct airo_info *ai = dev->ml_priv;
48794 int ridcode;
48795 int enabled;
48796- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48797+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48798 unsigned char *iobuf;
48799
48800 /* Only super-user can write RIDs */
48801diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48802index da92bfa..5a9001a 100644
48803--- a/drivers/net/wireless/at76c50x-usb.c
48804+++ b/drivers/net/wireless/at76c50x-usb.c
48805@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48806 }
48807
48808 /* Convert timeout from the DFU status to jiffies */
48809-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48810+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48811 {
48812 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48813 | (s->poll_timeout[1] << 8)
48814diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48815index 2fd9e18..3f55bdd 100644
48816--- a/drivers/net/wireless/ath/ath10k/htc.c
48817+++ b/drivers/net/wireless/ath/ath10k/htc.c
48818@@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48819 /* registered target arrival callback from the HIF layer */
48820 int ath10k_htc_init(struct ath10k *ar)
48821 {
48822- struct ath10k_hif_cb htc_callbacks;
48823+ static struct ath10k_hif_cb htc_callbacks = {
48824+ .rx_completion = ath10k_htc_rx_completion_handler,
48825+ .tx_completion = ath10k_htc_tx_completion_handler,
48826+ };
48827 struct ath10k_htc_ep *ep = NULL;
48828 struct ath10k_htc *htc = &ar->htc;
48829
48830@@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
48831 ath10k_htc_reset_endpoint_states(htc);
48832
48833 /* setup HIF layer callbacks */
48834- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48835- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48836 htc->ar = ar;
48837
48838 /* Get HIF default pipe for HTC message exchange */
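The ath10k hunk replaces a stack struct filled in at runtime with a static table initialized at its definition, which lets constification place the callback pointers in read-only memory. A userspace sketch of the same move (names are hypothetical):

#include <stdio.h>

struct hif_cb {
    int (*rx_completion)(int);
    int (*tx_completion)(int);
};

static int rx_done(int n) { return n; }
static int tx_done(int n) { return -n; }

/* Initialized at definition: can live in read-only memory, unlike a
 * stack object assigned field by field at runtime. */
static const struct hif_cb htc_callbacks = {
    .rx_completion = rx_done,
    .tx_completion = tx_done,
};

int main(void)
{
    printf("%d %d\n", htc_callbacks.rx_completion(3),
           htc_callbacks.tx_completion(3));
    return 0;
}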
48839diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48840index 527179c..a890150 100644
48841--- a/drivers/net/wireless/ath/ath10k/htc.h
48842+++ b/drivers/net/wireless/ath/ath10k/htc.h
48843@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48844
48845 struct ath10k_htc_ops {
48846 void (*target_send_suspend_complete)(struct ath10k *ar);
48847-};
48848+} __no_const;
48849
48850 struct ath10k_htc_ep_ops {
48851 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48852 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48853 void (*ep_tx_credits)(struct ath10k *);
48854-};
48855+} __no_const;
48856
48857 /* service connection information */
48858 struct ath10k_htc_svc_conn_req {
48859diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48860index f816909..e56cd8b 100644
48861--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48862+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48863@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48864 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48865 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48866
48867- ACCESS_ONCE(ads->ds_link) = i->link;
48868- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48869+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48870+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48871
48872 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48873 ctl6 = SM(i->keytype, AR_EncrType);
48874@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48875
48876 if ((i->is_first || i->is_last) &&
48877 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48878- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48879+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48880 | set11nTries(i->rates, 1)
48881 | set11nTries(i->rates, 2)
48882 | set11nTries(i->rates, 3)
48883 | (i->dur_update ? AR_DurUpdateEna : 0)
48884 | SM(0, AR_BurstDur);
48885
48886- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48887+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48888 | set11nRate(i->rates, 1)
48889 | set11nRate(i->rates, 2)
48890 | set11nRate(i->rates, 3);
48891 } else {
48892- ACCESS_ONCE(ads->ds_ctl2) = 0;
48893- ACCESS_ONCE(ads->ds_ctl3) = 0;
48894+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48895+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48896 }
48897
48898 if (!i->is_first) {
48899- ACCESS_ONCE(ads->ds_ctl0) = 0;
48900- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48901- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48902+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48903+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48904+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48905 return;
48906 }
48907
48908@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48909 break;
48910 }
48911
48912- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48913+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48914 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48915 | SM(i->txpower[0], AR_XmitPower0)
48916 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48917@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48918 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48919 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48920
48921- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48922- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48923+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48924+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48925
48926 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48927 return;
48928
48929- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48930+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48931 | set11nPktDurRTSCTS(i->rates, 1);
48932
48933- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48934+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48935 | set11nPktDurRTSCTS(i->rates, 3);
48936
48937- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48938+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48939 | set11nRateFlags(i->rates, 1)
48940 | set11nRateFlags(i->rates, 2)
48941 | set11nRateFlags(i->rates, 3)
48942 | SM(i->rtscts_rate, AR_RTSCTSRate);
48943
48944- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48945- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48946- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48947+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48948+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48949+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48950 }
48951
48952 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
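ACCESS_ONCE() forces a single, untorn load or store by routing the access through a volatile-qualified lvalue; ACCESS_ONCE_RW is the grsecurity variant for sites that legitimately write through it even when the enclosing object is constified. A sketch of the macro and its effect (GNU C __typeof__, as in the kernel):

#include <stdio.h>

#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

struct desc { unsigned int ds_link; };

int main(void)
{
    struct desc d = { 0 };
    ACCESS_ONCE_RW(d.ds_link) = 0xdeadbeefu; /* store must be emitted once */
    printf("0x%x\n", ACCESS_ONCE_RW(d.ds_link));
    return 0;
}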
48953diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48954index da84b70..83e4978 100644
48955--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48956+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48957@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48958 (i->qcu << AR_TxQcuNum_S) | desc_len;
48959
48960 checksum += val;
48961- ACCESS_ONCE(ads->info) = val;
48962+ ACCESS_ONCE_RW(ads->info) = val;
48963
48964 checksum += i->link;
48965- ACCESS_ONCE(ads->link) = i->link;
48966+ ACCESS_ONCE_RW(ads->link) = i->link;
48967
48968 checksum += i->buf_addr[0];
48969- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48970+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48971 checksum += i->buf_addr[1];
48972- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48973+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48974 checksum += i->buf_addr[2];
48975- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48976+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48977 checksum += i->buf_addr[3];
48978- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48979+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48980
48981 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48982- ACCESS_ONCE(ads->ctl3) = val;
48983+ ACCESS_ONCE_RW(ads->ctl3) = val;
48984 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
48985- ACCESS_ONCE(ads->ctl5) = val;
48986+ ACCESS_ONCE_RW(ads->ctl5) = val;
48987 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
48988- ACCESS_ONCE(ads->ctl7) = val;
48989+ ACCESS_ONCE_RW(ads->ctl7) = val;
48990 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
48991- ACCESS_ONCE(ads->ctl9) = val;
48992+ ACCESS_ONCE_RW(ads->ctl9) = val;
48993
48994 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
48995- ACCESS_ONCE(ads->ctl10) = checksum;
48996+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
48997
48998 if (i->is_first || i->is_last) {
48999- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49000+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49001 | set11nTries(i->rates, 1)
49002 | set11nTries(i->rates, 2)
49003 | set11nTries(i->rates, 3)
49004 | (i->dur_update ? AR_DurUpdateEna : 0)
49005 | SM(0, AR_BurstDur);
49006
49007- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49008+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49009 | set11nRate(i->rates, 1)
49010 | set11nRate(i->rates, 2)
49011 | set11nRate(i->rates, 3);
49012 } else {
49013- ACCESS_ONCE(ads->ctl13) = 0;
49014- ACCESS_ONCE(ads->ctl14) = 0;
49015+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49016+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49017 }
49018
49019 ads->ctl20 = 0;
49020@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49021
49022 ctl17 = SM(i->keytype, AR_EncrType);
49023 if (!i->is_first) {
49024- ACCESS_ONCE(ads->ctl11) = 0;
49025- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49026- ACCESS_ONCE(ads->ctl15) = 0;
49027- ACCESS_ONCE(ads->ctl16) = 0;
49028- ACCESS_ONCE(ads->ctl17) = ctl17;
49029- ACCESS_ONCE(ads->ctl18) = 0;
49030- ACCESS_ONCE(ads->ctl19) = 0;
49031+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49032+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49033+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49034+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49035+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49036+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49037+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49038 return;
49039 }
49040
49041- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49042+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49043 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49044 | SM(i->txpower[0], AR_XmitPower0)
49045 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49046@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49047 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49048 ctl12 |= SM(val, AR_PAPRDChainMask);
49049
49050- ACCESS_ONCE(ads->ctl12) = ctl12;
49051- ACCESS_ONCE(ads->ctl17) = ctl17;
49052+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49053+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49054
49055- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49056+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49057 | set11nPktDurRTSCTS(i->rates, 1);
49058
49059- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49060+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49061 | set11nPktDurRTSCTS(i->rates, 3);
49062
49063- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49064+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49065 | set11nRateFlags(i->rates, 1)
49066 | set11nRateFlags(i->rates, 2)
49067 | set11nRateFlags(i->rates, 3)
49068 | SM(i->rtscts_rate, AR_RTSCTSRate);
49069
49070- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49071+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49072
49073- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49074- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49075- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49076+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49077+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49078+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49079 }
49080
49081 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49082diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49083index e82e570..8c3cf90 100644
49084--- a/drivers/net/wireless/ath/ath9k/hw.h
49085+++ b/drivers/net/wireless/ath/ath9k/hw.h
49086@@ -646,7 +646,7 @@ struct ath_hw_private_ops {
49087
49088 /* ANI */
49089 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49090-};
49091+} __no_const;
49092
49093 /**
49094 * struct ath_spec_scan - parameters for Atheros spectral scan
49095@@ -722,7 +722,7 @@ struct ath_hw_ops {
49096 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49097 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49098 #endif
49099-};
49100+} __no_const;
49101
49102 struct ath_nf_limits {
49103 s16 max;
49104diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49105index 9ede991..a8f08fb 100644
49106--- a/drivers/net/wireless/ath/ath9k/main.c
49107+++ b/drivers/net/wireless/ath/ath9k/main.c
49108@@ -2537,16 +2537,18 @@ void ath9k_fill_chanctx_ops(void)
49109 if (!ath9k_is_chanctx_enabled())
49110 return;
49111
49112- ath9k_ops.hw_scan = ath9k_hw_scan;
49113- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49114- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49115- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49116- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49117- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49118- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49119- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49120- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49121- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49122+ pax_open_kernel();
49123+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49124+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49125+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49126+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49127+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49128+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49129+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49130+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49131+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49132+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49133+ pax_close_kernel();
49134 }
49135
49136 #endif
49137diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49138index 058a9f2..d5cb1ba 100644
49139--- a/drivers/net/wireless/b43/phy_lp.c
49140+++ b/drivers/net/wireless/b43/phy_lp.c
49141@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49142 {
49143 struct ssb_bus *bus = dev->dev->sdev->bus;
49144
49145- static const struct b206x_channel *chandata = NULL;
49146+ const struct b206x_channel *chandata = NULL;
49147 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49148 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49149 u16 old_comm15, scale;
49150diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49151index e566580..2c218ca 100644
49152--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49153+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49154@@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49155 */
49156 if (il3945_mod_params.disable_hw_scan) {
49157 D_INFO("Disabling hw_scan\n");
49158- il3945_mac_ops.hw_scan = NULL;
49159+ pax_open_kernel();
49160+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49161+ pax_close_kernel();
49162 }
49163
49164 D_INFO("*** LOAD DRIVER ***\n");
49165diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49166index 0ffb6ff..c0b7f0e 100644
49167--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49168+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49169@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49170 {
49171 struct iwl_priv *priv = file->private_data;
49172 char buf[64];
49173- int buf_size;
49174+ size_t buf_size;
49175 u32 offset, len;
49176
49177 memset(buf, 0, sizeof(buf));
49178@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49179 struct iwl_priv *priv = file->private_data;
49180
49181 char buf[8];
49182- int buf_size;
49183+ size_t buf_size;
49184 u32 reset_flag;
49185
49186 memset(buf, 0, sizeof(buf));
49187@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49188 {
49189 struct iwl_priv *priv = file->private_data;
49190 char buf[8];
49191- int buf_size;
49192+ size_t buf_size;
49193 int ht40;
49194
49195 memset(buf, 0, sizeof(buf));
49196@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49197 {
49198 struct iwl_priv *priv = file->private_data;
49199 char buf[8];
49200- int buf_size;
49201+ size_t buf_size;
49202 int value;
49203
49204 memset(buf, 0, sizeof(buf));
49205@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49206 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49207 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49208
49209-static const char *fmt_value = " %-30s %10u\n";
49210-static const char *fmt_hex = " %-30s 0x%02X\n";
49211-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49212-static const char *fmt_header =
49213+static const char fmt_value[] = " %-30s %10u\n";
49214+static const char fmt_hex[] = " %-30s 0x%02X\n";
49215+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49216+static const char fmt_header[] =
49217 "%-32s current cumulative delta max\n";
49218
49219 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49220@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49221 {
49222 struct iwl_priv *priv = file->private_data;
49223 char buf[8];
49224- int buf_size;
49225+ size_t buf_size;
49226 int clear;
49227
49228 memset(buf, 0, sizeof(buf));
49229@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49230 {
49231 struct iwl_priv *priv = file->private_data;
49232 char buf[8];
49233- int buf_size;
49234+ size_t buf_size;
49235 int trace;
49236
49237 memset(buf, 0, sizeof(buf));
49238@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49239 {
49240 struct iwl_priv *priv = file->private_data;
49241 char buf[8];
49242- int buf_size;
49243+ size_t buf_size;
49244 int missed;
49245
49246 memset(buf, 0, sizeof(buf));
49247@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49248
49249 struct iwl_priv *priv = file->private_data;
49250 char buf[8];
49251- int buf_size;
49252+ size_t buf_size;
49253 int plcp;
49254
49255 memset(buf, 0, sizeof(buf));
49256@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49257
49258 struct iwl_priv *priv = file->private_data;
49259 char buf[8];
49260- int buf_size;
49261+ size_t buf_size;
49262 int flush;
49263
49264 memset(buf, 0, sizeof(buf));
49265@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49266
49267 struct iwl_priv *priv = file->private_data;
49268 char buf[8];
49269- int buf_size;
49270+ size_t buf_size;
49271 int rts;
49272
49273 if (!priv->cfg->ht_params)
49274@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49275 {
49276 struct iwl_priv *priv = file->private_data;
49277 char buf[8];
49278- int buf_size;
49279+ size_t buf_size;
49280
49281 memset(buf, 0, sizeof(buf));
49282 buf_size = min(count, sizeof(buf) - 1);
49283@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49284 struct iwl_priv *priv = file->private_data;
49285 u32 event_log_flag;
49286 char buf[8];
49287- int buf_size;
49288+ size_t buf_size;
49289
49290 /* check that the interface is up */
49291 if (!iwl_is_ready(priv))
49292@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49293 struct iwl_priv *priv = file->private_data;
49294 char buf[8];
49295 u32 calib_disabled;
49296- int buf_size;
49297+ size_t buf_size;
49298
49299 memset(buf, 0, sizeof(buf));
49300 buf_size = min(count, sizeof(buf) - 1);
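All of these debugfs hunks change buf_size from int to size_t: count arrives from userspace as size_t, and keeping the clamp in the same unsigned type makes min(count, sizeof(buf) - 1) well-typed end to end, with no sign or truncation step in between. A sketch of the clamp:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static size_t clamp_len(size_t count, size_t bufsz)
{
    return count < bufsz - 1 ? count : bufsz - 1;
}

int main(void)
{
    char buf[8];
    size_t n = clamp_len(SIZE_MAX, sizeof(buf)); /* absurdly large request */
    memset(buf, 0, sizeof(buf));
    printf("copy at most %zu bytes\n", n);       /* 7, never negative */
    return 0;
}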
49301diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49302index 69935aa..c1ca128 100644
49303--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49304+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49305@@ -1836,7 +1836,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49306 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49307
49308 char buf[8];
49309- int buf_size;
49310+ size_t buf_size;
49311 u32 reset_flag;
49312
49313 memset(buf, 0, sizeof(buf));
49314@@ -1857,7 +1857,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49315 {
49316 struct iwl_trans *trans = file->private_data;
49317 char buf[8];
49318- int buf_size;
49319+ size_t buf_size;
49320 int csr;
49321
49322 memset(buf, 0, sizeof(buf));
49323diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49324index 8908be6..fe97ddd 100644
49325--- a/drivers/net/wireless/mac80211_hwsim.c
49326+++ b/drivers/net/wireless/mac80211_hwsim.c
49327@@ -3070,20 +3070,20 @@ static int __init init_mac80211_hwsim(void)
49328 if (channels < 1)
49329 return -EINVAL;
49330
49331- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49332- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49333- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49334- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49335- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49336- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49337- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49338- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49339- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49340- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49341- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49342- mac80211_hwsim_assign_vif_chanctx;
49343- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49344- mac80211_hwsim_unassign_vif_chanctx;
49345+ pax_open_kernel();
49346+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49347+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49348+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49349+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49350+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49351+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49352+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49353+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49354+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49355+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49356+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49357+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49358+ pax_close_kernel();
49359
49360 spin_lock_init(&hwsim_radio_lock);
49361 INIT_LIST_HEAD(&hwsim_radios);
49362diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49363index 60d44ce..884dd1c 100644
49364--- a/drivers/net/wireless/rndis_wlan.c
49365+++ b/drivers/net/wireless/rndis_wlan.c
49366@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49367
49368 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49369
49370- if (rts_threshold < 0 || rts_threshold > 2347)
49371+ if (rts_threshold > 2347)
49372 rts_threshold = 2347;
49373
49374 tmp = cpu_to_le32(rts_threshold);
49375diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49376index 9bb398b..b0cc047 100644
49377--- a/drivers/net/wireless/rt2x00/rt2x00.h
49378+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49379@@ -375,7 +375,7 @@ struct rt2x00_intf {
49380 * for hardware which doesn't support hardware
49381 * sequence counting.
49382 */
49383- atomic_t seqno;
49384+ atomic_unchecked_t seqno;
49385 };
49386
49387 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49388diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49389index 68b620b..92ecd9e 100644
49390--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49391+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49392@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49393 * sequence counter given by mac80211.
49394 */
49395 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49396- seqno = atomic_add_return(0x10, &intf->seqno);
49397+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49398 else
49399- seqno = atomic_read(&intf->seqno);
49400+ seqno = atomic_read_unchecked(&intf->seqno);
49401
49402 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49403 hdr->seq_ctrl |= cpu_to_le16(seqno);
49404diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49405index b661f896..ddf7d2b 100644
49406--- a/drivers/net/wireless/ti/wl1251/sdio.c
49407+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49408@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49409
49410 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49411
49412- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49413- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49414+ pax_open_kernel();
49415+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49416+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49417+ pax_close_kernel();
49418
49419 wl1251_info("using dedicated interrupt line");
49420 } else {
49421- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49422- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49423+ pax_open_kernel();
49424+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49425+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49426+ pax_close_kernel();
49427
49428 wl1251_info("using SDIO interrupt");
49429 }
49430diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49431index 144d1f8..7030936 100644
49432--- a/drivers/net/wireless/ti/wl12xx/main.c
49433+++ b/drivers/net/wireless/ti/wl12xx/main.c
49434@@ -657,7 +657,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49435 sizeof(wl->conf.mem));
49436
49437 /* read data preparation is only needed by wl127x */
49438- wl->ops->prepare_read = wl127x_prepare_read;
49439+ pax_open_kernel();
49440+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49441+ pax_close_kernel();
49442
49443 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49444 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49445@@ -682,7 +684,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49446 sizeof(wl->conf.mem));
49447
49448 /* read data preparation is only needed by wl127x */
49449- wl->ops->prepare_read = wl127x_prepare_read;
49450+ pax_open_kernel();
49451+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49452+ pax_close_kernel();
49453
49454 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49455 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49456diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49457index 717c4f5..a813aeb 100644
49458--- a/drivers/net/wireless/ti/wl18xx/main.c
49459+++ b/drivers/net/wireless/ti/wl18xx/main.c
49460@@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49461 }
49462
49463 if (!checksum_param) {
49464- wl18xx_ops.set_rx_csum = NULL;
49465- wl18xx_ops.init_vif = NULL;
49466+ pax_open_kernel();
49467+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49468+ *(void **)&wl18xx_ops.init_vif = NULL;
49469+ pax_close_kernel();
49470 }
49471
49472 /* Enable 11a Band only if we have 5G antennas */
49473diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49474index a912dc0..a8225ba 100644
49475--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49476+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49477@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49478 {
49479 struct zd_usb *usb = urb->context;
49480 struct zd_usb_interrupt *intr = &usb->intr;
49481- int len;
49482+ unsigned int len;
49483 u16 int_num;
49484
49485 ZD_ASSERT(in_interrupt());
49486diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49487index ce2e2cf..f81e500 100644
49488--- a/drivers/nfc/nfcwilink.c
49489+++ b/drivers/nfc/nfcwilink.c
49490@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49491
49492 static int nfcwilink_probe(struct platform_device *pdev)
49493 {
49494- static struct nfcwilink *drv;
49495+ struct nfcwilink *drv;
49496 int rc;
49497 __u32 protocols;
49498
49499diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49500index 24d3d24..ff70d28 100644
49501--- a/drivers/nfc/st21nfca/st21nfca.c
49502+++ b/drivers/nfc/st21nfca/st21nfca.c
49503@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49504 goto exit;
49505 }
49506
49507- gate = uid_skb->data;
49508+ memcpy(gate, uid_skb->data, uid_skb->len);
49509 *len = uid_skb->len;
49510 exit:
49511 kfree_skb(uid_skb);
49512diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49513index 3a896c9..ac7b1c8 100644
49514--- a/drivers/of/fdt.c
49515+++ b/drivers/of/fdt.c
49516@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49517 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49518 return 0;
49519 }
49520- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49521+ pax_open_kernel();
49522+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49523+ pax_close_kernel();
49524 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49525 }
49526 late_initcall(of_fdt_raw_init);
49527diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49528index d93b2b6..ae50401 100644
49529--- a/drivers/oprofile/buffer_sync.c
49530+++ b/drivers/oprofile/buffer_sync.c
49531@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49532 if (cookie == NO_COOKIE)
49533 offset = pc;
49534 if (cookie == INVALID_COOKIE) {
49535- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49536+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49537 offset = pc;
49538 }
49539 if (cookie != last_cookie) {
49540@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49541 /* add userspace sample */
49542
49543 if (!mm) {
49544- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49545+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49546 return 0;
49547 }
49548
49549 cookie = lookup_dcookie(mm, s->eip, &offset);
49550
49551 if (cookie == INVALID_COOKIE) {
49552- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49553+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49554 return 0;
49555 }
49556
49557@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49558 /* ignore backtraces if failed to add a sample */
49559 if (state == sb_bt_start) {
49560 state = sb_bt_ignore;
49561- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49562+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49563 }
49564 }
49565 release_mm(mm);
49566diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49567index c0cc4e7..44d4e54 100644
49568--- a/drivers/oprofile/event_buffer.c
49569+++ b/drivers/oprofile/event_buffer.c
49570@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49571 }
49572
49573 if (buffer_pos == buffer_size) {
49574- atomic_inc(&oprofile_stats.event_lost_overflow);
49575+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49576 return;
49577 }
49578
49579diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49580index ed2c3ec..deda85a 100644
49581--- a/drivers/oprofile/oprof.c
49582+++ b/drivers/oprofile/oprof.c
49583@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49584 if (oprofile_ops.switch_events())
49585 return;
49586
49587- atomic_inc(&oprofile_stats.multiplex_counter);
49588+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49589 start_switch_worker();
49590 }
49591
49592diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49593index ee2cfce..7f8f699 100644
49594--- a/drivers/oprofile/oprofile_files.c
49595+++ b/drivers/oprofile/oprofile_files.c
49596@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49597
49598 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49599
49600-static ssize_t timeout_read(struct file *file, char __user *buf,
49601+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49602 size_t count, loff_t *offset)
49603 {
49604 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49605diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49606index 59659ce..6c860a0 100644
49607--- a/drivers/oprofile/oprofile_stats.c
49608+++ b/drivers/oprofile/oprofile_stats.c
49609@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49610 cpu_buf->sample_invalid_eip = 0;
49611 }
49612
49613- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49614- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49615- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49616- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49617- atomic_set(&oprofile_stats.multiplex_counter, 0);
49618+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49619+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49620+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49621+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49622+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49623 }
49624
49625
49626diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49627index 1fc622b..8c48fc3 100644
49628--- a/drivers/oprofile/oprofile_stats.h
49629+++ b/drivers/oprofile/oprofile_stats.h
49630@@ -13,11 +13,11 @@
49631 #include <linux/atomic.h>
49632
49633 struct oprofile_stat_struct {
49634- atomic_t sample_lost_no_mm;
49635- atomic_t sample_lost_no_mapping;
49636- atomic_t bt_lost_no_mapping;
49637- atomic_t event_lost_overflow;
49638- atomic_t multiplex_counter;
49639+ atomic_unchecked_t sample_lost_no_mm;
49640+ atomic_unchecked_t sample_lost_no_mapping;
49641+ atomic_unchecked_t bt_lost_no_mapping;
49642+ atomic_unchecked_t event_lost_overflow;
49643+ atomic_unchecked_t multiplex_counter;
49644 };
49645
49646 extern struct oprofile_stat_struct oprofile_stats;
49647diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49648index 3f49345..c750d0b 100644
49649--- a/drivers/oprofile/oprofilefs.c
49650+++ b/drivers/oprofile/oprofilefs.c
49651@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49652
49653 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49654 {
49655- atomic_t *val = file->private_data;
49656- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49657+ atomic_unchecked_t *val = file->private_data;
49658+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49659 }
49660
49661
49662@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49663
49664
49665 int oprofilefs_create_ro_atomic(struct dentry *root,
49666- char const *name, atomic_t *val)
49667+ char const *name, atomic_unchecked_t *val)
49668 {
49669 return __oprofilefs_create_file(root, name,
49670 &atomic_ro_fops, 0444, val);
49671diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49672index bdef916..88c7dee 100644
49673--- a/drivers/oprofile/timer_int.c
49674+++ b/drivers/oprofile/timer_int.c
49675@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49676 return NOTIFY_OK;
49677 }
49678
49679-static struct notifier_block __refdata oprofile_cpu_notifier = {
49680+static struct notifier_block oprofile_cpu_notifier = {
49681 .notifier_call = oprofile_cpu_notify,
49682 };
49683
49684diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49685index 3b47080..6cd05dd 100644
49686--- a/drivers/parport/procfs.c
49687+++ b/drivers/parport/procfs.c
49688@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49689
49690 *ppos += len;
49691
49692- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49693+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49694 }
49695
49696 #ifdef CONFIG_PARPORT_1284
49697@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49698
49699 *ppos += len;
49700
49701- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49702+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49703 }
49704 #endif /* IEEE1284.3 support. */
49705
49706diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
49707index ba46e58..90cfc24 100644
49708--- a/drivers/pci/host/pci-host-generic.c
49709+++ b/drivers/pci/host/pci-host-generic.c
49710@@ -26,9 +26,9 @@
49711 #include <linux/platform_device.h>
49712
49713 struct gen_pci_cfg_bus_ops {
49714+ struct pci_ops ops;
49715 u32 bus_shift;
49716- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
49717-};
49718+} __do_const;
49719
49720 struct gen_pci_cfg_windows {
49721 struct resource res;
49722@@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
49723 }
49724
49725 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
49726+ .ops = {
49727+ .map_bus = gen_pci_map_cfg_bus_cam,
49728+ .read = pci_generic_config_read,
49729+ .write = pci_generic_config_write,
49730+ },
49731 .bus_shift = 16,
49732- .map_bus = gen_pci_map_cfg_bus_cam,
49733 };
49734
49735 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
49736@@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
49737 }
49738
49739 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
49740+ .ops = {
49741+ .map_bus = gen_pci_map_cfg_bus_ecam,
49742+ .read = pci_generic_config_read,
49743+ .write = pci_generic_config_write,
49744+ },
49745 .bus_shift = 20,
49746- .map_bus = gen_pci_map_cfg_bus_ecam,
49747-};
49748-
49749-static struct pci_ops gen_pci_ops = {
49750- .read = pci_generic_config_read,
49751- .write = pci_generic_config_write,
49752 };
49753
49754 static const struct of_device_id gen_pci_of_match[] = {
49755@@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
49756 .private_data = (void **)&pci,
49757 .setup = gen_pci_setup,
49758 .map_irq = of_irq_parse_and_map_pci,
49759- .ops = &gen_pci_ops,
49760 };
49761
49762 if (!pci)
49763@@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
49764
49765 of_id = of_match_node(gen_pci_of_match, np);
49766 pci->cfg.ops = of_id->data;
49767- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
49768+ hw.ops = &pci->cfg.ops->ops;
49769 pci->host.dev.parent = dev;
49770 INIT_LIST_HEAD(&pci->host.windows);
49771 INIT_LIST_HEAD(&pci->resources);
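
The pci-host-generic rework removes the one writable gen_pci_ops whose map_bus pointer was patched at probe time. Each config-access variant now embeds a fully populated struct pci_ops, and __do_const (an attribute consumed by grsecurity's constify GCC plugin) pushes the whole object into read-only memory; probe degenerates to pointer selection. The shape, condensed:

struct cfg_bus_ops {
	struct pci_ops ops;	/* complete at build time, never patched */
	u32 bus_shift;
} __do_const;

static struct cfg_bus_ops cam_bus_ops = {	/* illustrative name */
	.ops = {
		.map_bus = map_cfg_bus_cam,
		.read    = pci_generic_config_read,
		.write   = pci_generic_config_write,
	},
	.bus_shift = 16,
};

/* probe: no write to any ops object, only selection */
hw.ops = &cam_bus_ops.ops;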
49772diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49773index 6ca2399..68d866b 100644
49774--- a/drivers/pci/hotplug/acpiphp_ibm.c
49775+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49776@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49777 goto init_cleanup;
49778 }
49779
49780- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49781+ pax_open_kernel();
49782+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49783+ pax_close_kernel();
49784 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49785
49786 return retval;
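
ibm_apci_table_attr is constified by the plugin, so its one legitimate runtime write, the table size discovered from ACPI, goes through the pax_open_kernel()/pax_close_kernel() window (on x86 this briefly clears CR0.WP); the cast through a plain pointer is what gets past the forced const qualifier. The idiom recurs throughout the rest of this section:

pax_open_kernel();			/* lift kernel write protection */
*(size_t *)&ro_attr.size = probed_size;	/* illustrative names */
pax_close_kernel();			/* restore it immediately */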
49787diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49788index 66b7bbe..26bee78 100644
49789--- a/drivers/pci/hotplug/cpcihp_generic.c
49790+++ b/drivers/pci/hotplug/cpcihp_generic.c
49791@@ -73,7 +73,6 @@ static u16 port;
49792 static unsigned int enum_bit;
49793 static u8 enum_mask;
49794
49795-static struct cpci_hp_controller_ops generic_hpc_ops;
49796 static struct cpci_hp_controller generic_hpc;
49797
49798 static int __init validate_parameters(void)
49799@@ -139,6 +138,10 @@ static int query_enum(void)
49800 return ((value & enum_mask) == enum_mask);
49801 }
49802
49803+static struct cpci_hp_controller_ops generic_hpc_ops = {
49804+ .query_enum = query_enum,
49805+};
49806+
49807 static int __init cpcihp_generic_init(void)
49808 {
49809 int status;
49810@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49811 pci_dev_put(dev);
49812
49813 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49814- generic_hpc_ops.query_enum = query_enum;
49815 generic_hpc.ops = &generic_hpc_ops;
49816
49817 status = cpci_hp_register_controller(&generic_hpc);
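
cpcihp_generic is the template for a transformation repeated many times below: hoist function-pointer assignments out of init code into a static designated initializer, so the ops object is complete before boot and never needs a runtime write, which is what lets the constify plugin place it in .rodata:

/* was: static struct cpci_hp_controller_ops generic_hpc_ops;
 *      ...
 *      generic_hpc_ops.query_enum = query_enum;   (runtime patch) */
static struct cpci_hp_controller_ops generic_hpc_ops = {
	.query_enum = query_enum,	/* bound at compile time */
};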
49818diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49819index 7ecf34e..effed62 100644
49820--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49821+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49822@@ -59,7 +59,6 @@
49823 /* local variables */
49824 static bool debug;
49825 static bool poll;
49826-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49827 static struct cpci_hp_controller zt5550_hpc;
49828
49829 /* Primary cPCI bus bridge device */
49830@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49831 return 0;
49832 }
49833
49834+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49835+ .query_enum = zt5550_hc_query_enum,
49836+};
49837+
49838 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49839 {
49840 int status;
49841@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49842 dbg("returned from zt5550_hc_config");
49843
49844 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49845- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49846 zt5550_hpc.ops = &zt5550_hpc_ops;
49847 if (!poll) {
49848 zt5550_hpc.irq = hc_dev->irq;
49849 zt5550_hpc.irq_flags = IRQF_SHARED;
49850 zt5550_hpc.dev_id = hc_dev;
49851
49852- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49853- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49854- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49855+ pax_open_kernel();
49856+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49857+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49858+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49859+ pax_close_kernel();
49860 } else {
49861 info("using ENUM# polling mode");
49862 }
49863diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49864index 1e08ff8c..3cd145f 100644
49865--- a/drivers/pci/hotplug/cpqphp_nvram.c
49866+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49867@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49868
49869 void compaq_nvram_init (void __iomem *rom_start)
49870 {
49871+#ifndef CONFIG_PAX_KERNEXEC
49872 if (rom_start)
49873 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49874+#endif
49875
49876 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49877
49878diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49879index 56d8486..f26113f 100644
49880--- a/drivers/pci/hotplug/pci_hotplug_core.c
49881+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49882@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49883 return -EINVAL;
49884 }
49885
49886- slot->ops->owner = owner;
49887- slot->ops->mod_name = mod_name;
49888+ pax_open_kernel();
49889+ *(struct module **)&slot->ops->owner = owner;
49890+ *(const char **)&slot->ops->mod_name = mod_name;
49891+ pax_close_kernel();
49892
49893 mutex_lock(&pci_hp_mutex);
49894 /*
49895diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49896index 07aa722..84514b4 100644
49897--- a/drivers/pci/hotplug/pciehp_core.c
49898+++ b/drivers/pci/hotplug/pciehp_core.c
49899@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49900 struct slot *slot = ctrl->slot;
49901 struct hotplug_slot *hotplug = NULL;
49902 struct hotplug_slot_info *info = NULL;
49903- struct hotplug_slot_ops *ops = NULL;
49904+ hotplug_slot_ops_no_const *ops = NULL;
49905 char name[SLOT_NAME_SIZE];
49906 int retval = -ENOMEM;
49907
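
hotplug_slot_ops_no_const is the escape hatch that pairs with constification: when the plugin turns every instance of a struct const, code that genuinely must build one at runtime (here, per-slot ops filled in by init_slot()) declares it through a _no_const typedef of the same layout. device_attribute_no_const, attribute_group_no_const and bin_attribute_no_const below are the same mechanism. Schematically:

struct foo_ops {
	void (*op)(void);
} __do_const;		/* plugin: every plain instance becomes const */

typedef struct foo_ops __no_const foo_ops_no_const;	/* exempt alias */

static foo_ops_no_const runtime_ops;	/* legitimately written at init */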
49908diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49909index c3e7dfc..cbd9625 100644
49910--- a/drivers/pci/msi.c
49911+++ b/drivers/pci/msi.c
49912@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49913 {
49914 struct attribute **msi_attrs;
49915 struct attribute *msi_attr;
49916- struct device_attribute *msi_dev_attr;
49917- struct attribute_group *msi_irq_group;
49918+ device_attribute_no_const *msi_dev_attr;
49919+ attribute_group_no_const *msi_irq_group;
49920 const struct attribute_group **msi_irq_groups;
49921 struct msi_desc *entry;
49922 int ret = -ENOMEM;
49923@@ -573,7 +573,7 @@ error_attrs:
49924 count = 0;
49925 msi_attr = msi_attrs[count];
49926 while (msi_attr) {
49927- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49928+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49929 kfree(msi_attr->name);
49930 kfree(msi_dev_attr);
49931 ++count;
49932diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49933index 312f23a..d21181c 100644
49934--- a/drivers/pci/pci-sysfs.c
49935+++ b/drivers/pci/pci-sysfs.c
49936@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49937 {
49938 /* allocate attribute structure, piggyback attribute name */
49939 int name_len = write_combine ? 13 : 10;
49940- struct bin_attribute *res_attr;
49941+ bin_attribute_no_const *res_attr;
49942 int retval;
49943
49944 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49945@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49946 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49947 {
49948 int retval;
49949- struct bin_attribute *attr;
49950+ bin_attribute_no_const *attr;
49951
49952 /* If the device has VPD, try to expose it in sysfs. */
49953 if (dev->vpd) {
49954@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49955 {
49956 int retval;
49957 int rom_size = 0;
49958- struct bin_attribute *attr;
49959+ bin_attribute_no_const *attr;
49960
49961 if (!sysfs_initialized)
49962 return -EACCES;
49963diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49964index 4091f82..7d98eef 100644
49965--- a/drivers/pci/pci.h
49966+++ b/drivers/pci/pci.h
49967@@ -99,7 +99,7 @@ struct pci_vpd_ops {
49968 struct pci_vpd {
49969 unsigned int len;
49970 const struct pci_vpd_ops *ops;
49971- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49972+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49973 };
49974
49975 int pci_vpd_pci22_init(struct pci_dev *dev);
49976diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49977index 820740a..8b1c673 100644
49978--- a/drivers/pci/pcie/aspm.c
49979+++ b/drivers/pci/pcie/aspm.c
49980@@ -27,9 +27,9 @@
49981 #define MODULE_PARAM_PREFIX "pcie_aspm."
49982
49983 /* Note: those are not register definitions */
49984-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49985-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49986-#define ASPM_STATE_L1 (4) /* L1 state */
49987+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49988+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49989+#define ASPM_STATE_L1 (4U) /* L1 state */
49990 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49991 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49992
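
The ASPM mask macros become explicitly unsigned so every combination and negation of them stays in unsigned arithmetic, matching the unsigned fields they are tested against; plausibly this also keeps grsecurity's size_overflow instrumentation from flagging signed intermediates. The general hazard that unsigned flag macros avoid, in two lines:

#define FLAG_TOP (1U << 31)	/* 1 << 31 on a 32-bit int is undefined */
u32 mask = ~FLAG_TOP;		/* ~ on unsigned is well defined, no sign bit */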
49993diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
49994index be35da2..ec16cdb 100644
49995--- a/drivers/pci/pcie/portdrv_pci.c
49996+++ b/drivers/pci/pcie/portdrv_pci.c
49997@@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
49998 return 0;
49999 }
50000
50001-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
50002+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
50003 /*
50004 * Boxes that should not use MSI for PCIe PME signaling.
50005 */
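
This is the first of roughly a dozen identical DMI-table conversions in this section (chromeos, compal, hdaps, ibm_rtl, oaktrail, msi, samsung, sony and pnpbios all follow): the table is walked once during init and never written, so it gains const and moves from .init.data (__initdata) to .init.rodata (__initconst), both discarded after boot. The annotated shape, with an illustrative entry:

static const struct dmi_system_id quirk_table[] __initconst = {
	{
		.callback = quirk_cb,		/* runs when the match hits */
		.ident    = "Example board",
		.matches  = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		},
	},
	{ }	/* terminator */
};

Note that __initconst is only legal together with const, which is why the two changes always ride together.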
50006diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50007index 8d2f400..c97cc91 100644
50008--- a/drivers/pci/probe.c
50009+++ b/drivers/pci/probe.c
50010@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50011 u16 orig_cmd;
50012 struct pci_bus_region region, inverted_region;
50013
50014- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50015+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
50016
50017 /* No printks while decoding is disabled! */
50018 if (!dev->mmio_always_on) {
50019diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50020index 3f155e7..0f4b1f0 100644
50021--- a/drivers/pci/proc.c
50022+++ b/drivers/pci/proc.c
50023@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50024 static int __init pci_proc_init(void)
50025 {
50026 struct pci_dev *dev = NULL;
50027+
50028+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50029+#ifdef CONFIG_GRKERNSEC_PROC_USER
50030+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50031+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50032+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50033+#endif
50034+#else
50035 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50036+#endif
50037 proc_create("devices", 0, proc_bus_pci_dir,
50038 &proc_bus_pci_dev_operations);
50039 proc_initialized = 1;
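
Under GRKERNSEC_PROC the /proc/bus/pci directory is created with a restricted mode instead of the default world-readable one: root only for PROC_USER, root plus the configured group for PROC_USERGROUP. proc_mkdir_mode() is the stock kernel API; only the mode choice is grsecurity's:

/* 0500: owner (root) may list and traverse, everyone else sees nothing */
proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);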
50040diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50041index b84fdd6..b89d829 100644
50042--- a/drivers/platform/chrome/chromeos_laptop.c
50043+++ b/drivers/platform/chrome/chromeos_laptop.c
50044@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50045 .callback = chromeos_laptop_dmi_matched, \
50046 .driver_data = (void *)&board_
50047
50048-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50049+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50050 {
50051 .ident = "Samsung Series 5 550",
50052 .matches = {
50053diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
50054index 3474920..acc9581 100644
50055--- a/drivers/platform/chrome/chromeos_pstore.c
50056+++ b/drivers/platform/chrome/chromeos_pstore.c
50057@@ -13,7 +13,7 @@
50058 #include <linux/platform_device.h>
50059 #include <linux/pstore_ram.h>
50060
50061-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
50062+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
50063 {
50064 /*
50065 * Today all Chromebooks/boxes ship with Google_* as version and
50066diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50067index 1e1e594..8fe59c5 100644
50068--- a/drivers/platform/x86/alienware-wmi.c
50069+++ b/drivers/platform/x86/alienware-wmi.c
50070@@ -150,7 +150,7 @@ struct wmax_led_args {
50071 } __packed;
50072
50073 static struct platform_device *platform_device;
50074-static struct device_attribute *zone_dev_attrs;
50075+static device_attribute_no_const *zone_dev_attrs;
50076 static struct attribute **zone_attrs;
50077 static struct platform_zone *zone_data;
50078
50079@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
50080 }
50081 };
50082
50083-static struct attribute_group zone_attribute_group = {
50084+static attribute_group_no_const zone_attribute_group = {
50085 .name = "rgb_zones",
50086 };
50087
50088diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50089index 7543a56..367ca8ed 100644
50090--- a/drivers/platform/x86/asus-wmi.c
50091+++ b/drivers/platform/x86/asus-wmi.c
50092@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
50093 int err;
50094 u32 retval = -1;
50095
50096+#ifdef CONFIG_GRKERNSEC_KMEM
50097+ return -EPERM;
50098+#endif
50099+
50100 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50101
50102 if (err < 0)
50103@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
50104 int err;
50105 u32 retval = -1;
50106
50107+#ifdef CONFIG_GRKERNSEC_KMEM
50108+ return -EPERM;
50109+#endif
50110+
50111 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
50112 &retval);
50113
50114@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
50115 union acpi_object *obj;
50116 acpi_status status;
50117
50118+#ifdef CONFIG_GRKERNSEC_KMEM
50119+ return -EPERM;
50120+#endif
50121+
50122 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
50123 1, asus->debug.method_id,
50124 &input, &output);
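
The three asus-wmi debugfs handlers let userspace evaluate arbitrary WMI/ACPI methods on the platform; under GRKERNSEC_KMEM, which also shuts the /dev/mem style introspection paths, they refuse before touching hardware. The guard is a bare early return:

static int show_call(struct seq_file *m, void *data)
{
#ifdef CONFIG_GRKERNSEC_KMEM
	return -EPERM;		/* poke/probe interfaces disabled outright */
#endif
	/* ... original WMI evaluation and seq_printf() output ... */
	return 0;
}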
50125diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
50126index 15c0fab..f674006 100644
50127--- a/drivers/platform/x86/compal-laptop.c
50128+++ b/drivers/platform/x86/compal-laptop.c
50129@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
50130 return 1;
50131 }
50132
50133-static struct dmi_system_id __initdata compal_dmi_table[] = {
50134+static const struct dmi_system_id __initconst compal_dmi_table[] = {
50135 {
50136 .ident = "FL90/IFL90",
50137 .matches = {
50138diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
50139index 458e6c9..089aee7 100644
50140--- a/drivers/platform/x86/hdaps.c
50141+++ b/drivers/platform/x86/hdaps.c
50142@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
50143 "ThinkPad T42p", so the order of the entries matters.
50144 If your ThinkPad is not recognized, please update to latest
50145 BIOS. This is especially the case for some R52 ThinkPads. */
50146-static struct dmi_system_id __initdata hdaps_whitelist[] = {
50147+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
50148 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
50149 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
50150 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
50151diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
50152index 97c2be1..2ee50ce 100644
50153--- a/drivers/platform/x86/ibm_rtl.c
50154+++ b/drivers/platform/x86/ibm_rtl.c
50155@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
50156 }
50157
50158
50159-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
50160+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
50161 { \
50162 .matches = { \
50163 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
50164diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
50165index a4a4258..a58a04c 100644
50166--- a/drivers/platform/x86/intel_oaktrail.c
50167+++ b/drivers/platform/x86/intel_oaktrail.c
50168@@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
50169 return 0;
50170 }
50171
50172-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
50173+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
50174 {
50175 .ident = "OakTrail platform",
50176 .matches = {
50177diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
50178index 0859877..59d596d 100644
50179--- a/drivers/platform/x86/msi-laptop.c
50180+++ b/drivers/platform/x86/msi-laptop.c
50181@@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
50182 return 1;
50183 }
50184
50185-static struct dmi_system_id __initdata msi_dmi_table[] = {
50186+static const struct dmi_system_id __initconst msi_dmi_table[] = {
50187 {
50188 .ident = "MSI S270",
50189 .matches = {
50190@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
50191
50192 if (!quirks->ec_read_only) {
50193 /* allow userland write sysfs file */
50194- dev_attr_bluetooth.store = store_bluetooth;
50195- dev_attr_wlan.store = store_wlan;
50196- dev_attr_threeg.store = store_threeg;
50197- dev_attr_bluetooth.attr.mode |= S_IWUSR;
50198- dev_attr_wlan.attr.mode |= S_IWUSR;
50199- dev_attr_threeg.attr.mode |= S_IWUSR;
50200+ pax_open_kernel();
50201+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
50202+ *(void **)&dev_attr_wlan.store = store_wlan;
50203+ *(void **)&dev_attr_threeg.store = store_threeg;
50204+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
50205+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
50206+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
50207+ pax_close_kernel();
50208 }
50209
50210 /* disable hardware control by fn key */
50211diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
50212index 6d2bac0..ec2b029 100644
50213--- a/drivers/platform/x86/msi-wmi.c
50214+++ b/drivers/platform/x86/msi-wmi.c
50215@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
50216 static void msi_wmi_notify(u32 value, void *context)
50217 {
50218 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
50219- static struct key_entry *key;
50220+ struct key_entry *key;
50221 union acpi_object *obj;
50222 acpi_status status;
50223
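
The msi-wmi one-liner drops a stray `static` from a local in the WMI notify callback: a static local is one shared slot, so overlapping notifications could overwrite each other's keymap lookup. Per-invocation state belongs on the stack (sketch; input_dev stands in for the driver's input device):

static void notify_handler(u32 value, void *context)
{
	struct key_entry *key;	/* auto storage: one per invocation */

	key = sparse_keymap_entry_from_scancode(input_dev, value);
	if (key)
		sparse_keymap_report_entry(input_dev, key, 1, true);
}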
50224diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
50225index 9e701b2..c68a7b5 100644
50226--- a/drivers/platform/x86/samsung-laptop.c
50227+++ b/drivers/platform/x86/samsung-laptop.c
50228@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
50229 return 0;
50230 }
50231
50232-static struct dmi_system_id __initdata samsung_dmi_table[] = {
50233+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
50234 {
50235 .matches = {
50236 DMI_MATCH(DMI_SYS_VENDOR,
50237diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
50238index e6aac72..e11ff24 100644
50239--- a/drivers/platform/x86/samsung-q10.c
50240+++ b/drivers/platform/x86/samsung-q10.c
50241@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
50242 return 1;
50243 }
50244
50245-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
50246+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
50247 {
50248 .ident = "Samsung Q10",
50249 .matches = {
50250diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
50251index e51c1e7..71bb385 100644
50252--- a/drivers/platform/x86/sony-laptop.c
50253+++ b/drivers/platform/x86/sony-laptop.c
50254@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
50255 }
50256
50257 /* High speed charging function */
50258-static struct device_attribute *hsc_handle;
50259+static device_attribute_no_const *hsc_handle;
50260
50261 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
50262 struct device_attribute *attr,
50263@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
50264 }
50265
50266 /* low battery function */
50267-static struct device_attribute *lowbatt_handle;
50268+static device_attribute_no_const *lowbatt_handle;
50269
50270 static ssize_t sony_nc_lowbatt_store(struct device *dev,
50271 struct device_attribute *attr,
50272@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
50273 }
50274
50275 /* fan speed function */
50276-static struct device_attribute *fan_handle, *hsf_handle;
50277+static device_attribute_no_const *fan_handle, *hsf_handle;
50278
50279 static ssize_t sony_nc_hsfan_store(struct device *dev,
50280 struct device_attribute *attr,
50281@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50282 }
50283
50284 /* USB charge function */
50285-static struct device_attribute *uc_handle;
50286+static device_attribute_no_const *uc_handle;
50287
50288 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50289 struct device_attribute *attr,
50290@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50291 }
50292
50293 /* Panel ID function */
50294-static struct device_attribute *panel_handle;
50295+static device_attribute_no_const *panel_handle;
50296
50297 static ssize_t sony_nc_panelid_show(struct device *dev,
50298 struct device_attribute *attr, char *buffer)
50299@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50300 }
50301
50302 /* smart connect function */
50303-static struct device_attribute *sc_handle;
50304+static device_attribute_no_const *sc_handle;
50305
50306 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50307 struct device_attribute *attr,
50308@@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
50309 .drv.pm = &sony_pic_pm,
50310 };
50311
50312-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
50313+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
50314 {
50315 .ident = "Sony Vaio",
50316 .matches = {
50317diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50318index 3b8ceee..e18652c 100644
50319--- a/drivers/platform/x86/thinkpad_acpi.c
50320+++ b/drivers/platform/x86/thinkpad_acpi.c
50321@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
50322 return 0;
50323 }
50324
50325-void static hotkey_mask_warn_incomplete_mask(void)
50326+static void hotkey_mask_warn_incomplete_mask(void)
50327 {
50328 /* log only what the user can fix... */
50329 const u32 wantedmask = hotkey_driver_mask &
50330@@ -2437,10 +2437,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50331 && !tp_features.bright_unkfw)
50332 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50333 }
50334+}
50335
50336 #undef TPACPI_COMPARE_KEY
50337 #undef TPACPI_MAY_SEND_KEY
50338-}
50339
50340 /*
50341 * Polling driver
50342diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50343index 438d4c7..ca8a2fb 100644
50344--- a/drivers/pnp/pnpbios/bioscalls.c
50345+++ b/drivers/pnp/pnpbios/bioscalls.c
50346@@ -59,7 +59,7 @@ do { \
50347 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50348 } while(0)
50349
50350-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50351+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50352 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50353
50354 /*
50355@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50356
50357 cpu = get_cpu();
50358 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50359+
50360+ pax_open_kernel();
50361 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50362+ pax_close_kernel();
50363
50364 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50365 spin_lock_irqsave(&pnp_bios_lock, flags);
50366@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50367 :"memory");
50368 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50369
50370+ pax_open_kernel();
50371 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50372+ pax_close_kernel();
50373+
50374 put_cpu();
50375
50376 /* If we get here and this is set then the PnP BIOS faulted on us. */
50377@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50378 return status;
50379 }
50380
50381-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50382+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50383 {
50384 int i;
50385
50386@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50387 pnp_bios_callpoint.offset = header->fields.pm16offset;
50388 pnp_bios_callpoint.segment = PNP_CS16;
50389
50390+ pax_open_kernel();
50391+
50392 for_each_possible_cpu(i) {
50393 struct desc_struct *gdt = get_cpu_gdt_table(i);
50394 if (!gdt)
50395@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50396 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50397 (unsigned long)__va(header->fields.pm16dseg));
50398 }
50399+
50400+ pax_close_kernel();
50401 }
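
The PnP BIOS changes all follow from a write-protected GDT under KERNEXEC. Explicit descriptor updates are bracketed by the open/close window, and bad_bios_desc's access byte changes from 0x92 to 0x93: as I read the encoding, 0x93 is present, DPL 0, writable data, with the accessed bit already set, so the CPU never needs to write the accessed bit back into a read-only GDT page on first load:

/* access 0x93 = P | DPL0 | data,writable | accessed; 0x4 flags = 32-bit.
 * Pre-setting "accessed" avoids a CPU-initiated write to the RO GDT. */
static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
	(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);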
50402diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
50403index facd43b..b291260 100644
50404--- a/drivers/pnp/pnpbios/core.c
50405+++ b/drivers/pnp/pnpbios/core.c
50406@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
50407 return 0;
50408 }
50409
50410-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
50411+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
50412 { /* PnPBIOS GPF on boot */
50413 .callback = exploding_pnp_bios,
50414 .ident = "Higraded P14H",
50415diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50416index 0c52e2a..3421ab7 100644
50417--- a/drivers/power/pda_power.c
50418+++ b/drivers/power/pda_power.c
50419@@ -37,7 +37,11 @@ static int polling;
50420
50421 #if IS_ENABLED(CONFIG_USB_PHY)
50422 static struct usb_phy *transceiver;
50423-static struct notifier_block otg_nb;
50424+static int otg_handle_notification(struct notifier_block *nb,
50425+ unsigned long event, void *unused);
50426+static struct notifier_block otg_nb = {
50427+ .notifier_call = otg_handle_notification
50428+};
50429 #endif
50430
50431 static struct regulator *ac_draw;
50432@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50433
50434 #if IS_ENABLED(CONFIG_USB_PHY)
50435 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50436- otg_nb.notifier_call = otg_handle_notification;
50437 ret = usb_register_notifier(transceiver, &otg_nb);
50438 if (ret) {
50439 dev_err(dev, "failure to register otg notifier\n");
50440diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50441index cc439fd..8fa30df 100644
50442--- a/drivers/power/power_supply.h
50443+++ b/drivers/power/power_supply.h
50444@@ -16,12 +16,12 @@ struct power_supply;
50445
50446 #ifdef CONFIG_SYSFS
50447
50448-extern void power_supply_init_attrs(struct device_type *dev_type);
50449+extern void power_supply_init_attrs(void);
50450 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50451
50452 #else
50453
50454-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50455+static inline void power_supply_init_attrs(void) {}
50456 #define power_supply_uevent NULL
50457
50458 #endif /* CONFIG_SYSFS */
50459diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50460index 694e8cd..9f03483 100644
50461--- a/drivers/power/power_supply_core.c
50462+++ b/drivers/power/power_supply_core.c
50463@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50464 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50465 EXPORT_SYMBOL_GPL(power_supply_notifier);
50466
50467-static struct device_type power_supply_dev_type;
50468+extern const struct attribute_group *power_supply_attr_groups[];
50469+static struct device_type power_supply_dev_type = {
50470+ .groups = power_supply_attr_groups,
50471+};
50472
50473 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50474 struct power_supply *supply)
50475@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50476 return PTR_ERR(power_supply_class);
50477
50478 power_supply_class->dev_uevent = power_supply_uevent;
50479- power_supply_init_attrs(&power_supply_dev_type);
50480+ power_supply_init_attrs();
50481
50482 return 0;
50483 }
50484diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50485index 62653f5..d0bb485 100644
50486--- a/drivers/power/power_supply_sysfs.c
50487+++ b/drivers/power/power_supply_sysfs.c
50488@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50489 .is_visible = power_supply_attr_is_visible,
50490 };
50491
50492-static const struct attribute_group *power_supply_attr_groups[] = {
50493+const struct attribute_group *power_supply_attr_groups[] = {
50494 &power_supply_attr_group,
50495 NULL,
50496 };
50497
50498-void power_supply_init_attrs(struct device_type *dev_type)
50499+void power_supply_init_attrs(void)
50500 {
50501 int i;
50502
50503- dev_type->groups = power_supply_attr_groups;
50504-
50505 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50506 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50507 }
50508diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50509index 84419af..268ede8 100644
50510--- a/drivers/powercap/powercap_sys.c
50511+++ b/drivers/powercap/powercap_sys.c
50512@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50513 struct device_attribute name_attr;
50514 };
50515
50516+static ssize_t show_constraint_name(struct device *dev,
50517+ struct device_attribute *dev_attr,
50518+ char *buf);
50519+
50520 static struct powercap_constraint_attr
50521- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50522+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50523+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50524+ .power_limit_attr = {
50525+ .attr = {
50526+ .name = NULL,
50527+ .mode = S_IWUSR | S_IRUGO
50528+ },
50529+ .show = show_constraint_power_limit_uw,
50530+ .store = store_constraint_power_limit_uw
50531+ },
50532+
50533+ .time_window_attr = {
50534+ .attr = {
50535+ .name = NULL,
50536+ .mode = S_IWUSR | S_IRUGO
50537+ },
50538+ .show = show_constraint_time_window_us,
50539+ .store = store_constraint_time_window_us
50540+ },
50541+
50542+ .max_power_attr = {
50543+ .attr = {
50544+ .name = NULL,
50545+ .mode = S_IRUGO
50546+ },
50547+ .show = show_constraint_max_power_uw,
50548+ .store = NULL
50549+ },
50550+
50551+ .min_power_attr = {
50552+ .attr = {
50553+ .name = NULL,
50554+ .mode = S_IRUGO
50555+ },
50556+ .show = show_constraint_min_power_uw,
50557+ .store = NULL
50558+ },
50559+
50560+ .max_time_window_attr = {
50561+ .attr = {
50562+ .name = NULL,
50563+ .mode = S_IRUGO
50564+ },
50565+ .show = show_constraint_max_time_window_us,
50566+ .store = NULL
50567+ },
50568+
50569+ .min_time_window_attr = {
50570+ .attr = {
50571+ .name = NULL,
50572+ .mode = S_IRUGO
50573+ },
50574+ .show = show_constraint_min_time_window_us,
50575+ .store = NULL
50576+ },
50577+
50578+ .name_attr = {
50579+ .attr = {
50580+ .name = NULL,
50581+ .mode = S_IRUGO
50582+ },
50583+ .show = show_constraint_name,
50584+ .store = NULL
50585+ }
50586+ }
50587+};
50588
50589 /* A list of powercap control_types */
50590 static LIST_HEAD(powercap_cntrl_list);
50591@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50592 }
50593
50594 static int create_constraint_attribute(int id, const char *name,
50595- int mode,
50596- struct device_attribute *dev_attr,
50597- ssize_t (*show)(struct device *,
50598- struct device_attribute *, char *),
50599- ssize_t (*store)(struct device *,
50600- struct device_attribute *,
50601- const char *, size_t)
50602- )
50603+ struct device_attribute *dev_attr)
50604 {
50605+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50606
50607- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50608- id, name);
50609- if (!dev_attr->attr.name)
50610+ if (!name)
50611 return -ENOMEM;
50612- dev_attr->attr.mode = mode;
50613- dev_attr->show = show;
50614- dev_attr->store = store;
50615+
50616+ pax_open_kernel();
50617+ *(const char **)&dev_attr->attr.name = name;
50618+ pax_close_kernel();
50619
50620 return 0;
50621 }
50622@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50623
50624 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50625 ret = create_constraint_attribute(i, "power_limit_uw",
50626- S_IWUSR | S_IRUGO,
50627- &constraint_attrs[i].power_limit_attr,
50628- show_constraint_power_limit_uw,
50629- store_constraint_power_limit_uw);
50630+ &constraint_attrs[i].power_limit_attr);
50631 if (ret)
50632 goto err_alloc;
50633 ret = create_constraint_attribute(i, "time_window_us",
50634- S_IWUSR | S_IRUGO,
50635- &constraint_attrs[i].time_window_attr,
50636- show_constraint_time_window_us,
50637- store_constraint_time_window_us);
50638+ &constraint_attrs[i].time_window_attr);
50639 if (ret)
50640 goto err_alloc;
50641- ret = create_constraint_attribute(i, "name", S_IRUGO,
50642- &constraint_attrs[i].name_attr,
50643- show_constraint_name,
50644- NULL);
50645+ ret = create_constraint_attribute(i, "name",
50646+ &constraint_attrs[i].name_attr);
50647 if (ret)
50648 goto err_alloc;
50649- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50650- &constraint_attrs[i].max_power_attr,
50651- show_constraint_max_power_uw,
50652- NULL);
50653+ ret = create_constraint_attribute(i, "max_power_uw",
50654+ &constraint_attrs[i].max_power_attr);
50655 if (ret)
50656 goto err_alloc;
50657- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50658- &constraint_attrs[i].min_power_attr,
50659- show_constraint_min_power_uw,
50660- NULL);
50661+ ret = create_constraint_attribute(i, "min_power_uw",
50662+ &constraint_attrs[i].min_power_attr);
50663 if (ret)
50664 goto err_alloc;
50665 ret = create_constraint_attribute(i, "max_time_window_us",
50666- S_IRUGO,
50667- &constraint_attrs[i].max_time_window_attr,
50668- show_constraint_max_time_window_us,
50669- NULL);
50670+ &constraint_attrs[i].max_time_window_attr);
50671 if (ret)
50672 goto err_alloc;
50673 ret = create_constraint_attribute(i, "min_time_window_us",
50674- S_IRUGO,
50675- &constraint_attrs[i].min_time_window_attr,
50676- show_constraint_min_time_window_us,
50677- NULL);
50678+ &constraint_attrs[i].min_time_window_attr);
50679 if (ret)
50680 goto err_alloc;
50681
50682@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50683 power_zone->zone_dev_attrs[count++] =
50684 &dev_attr_max_energy_range_uj.attr;
50685 if (power_zone->ops->get_energy_uj) {
50686+ pax_open_kernel();
50687 if (power_zone->ops->reset_energy_uj)
50688- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50689+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50690 else
50691- dev_attr_energy_uj.attr.mode = S_IRUGO;
50692+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50693+ pax_close_kernel();
50694 power_zone->zone_dev_attrs[count++] =
50695 &dev_attr_energy_uj.attr;
50696 }
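
The big powercap block trades per-attribute runtime setup for one static table built with GCC's range-designator extension, so every constraint_attrs element starts fully populated and create_constraint_attribute() only fills in the kasprintf'd name, inside the usual open/close window. The extension in isolation (names illustrative):

#define N_SLOTS 8
static struct device_attribute slots[N_SLOTS] = {
	/* GCC extension: every index in [0, N_SLOTS-1] gets this value */
	[0 ... N_SLOTS - 1] = {
		.attr = { .name = NULL, .mode = S_IRUGO },
		.show = show_common_value,
	},
};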
50697diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50698index 9c5d414..c7900ce 100644
50699--- a/drivers/ptp/ptp_private.h
50700+++ b/drivers/ptp/ptp_private.h
50701@@ -51,7 +51,7 @@ struct ptp_clock {
50702 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50703 wait_queue_head_t tsev_wq;
50704 int defunct; /* tells readers to go away when clock is being removed */
50705- struct device_attribute *pin_dev_attr;
50706+ device_attribute_no_const *pin_dev_attr;
50707 struct attribute **pin_attr;
50708 struct attribute_group pin_attr_group;
50709 };
50710diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50711index 302e626..12579af 100644
50712--- a/drivers/ptp/ptp_sysfs.c
50713+++ b/drivers/ptp/ptp_sysfs.c
50714@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50715 goto no_pin_attr;
50716
50717 for (i = 0; i < n_pins; i++) {
50718- struct device_attribute *da = &ptp->pin_dev_attr[i];
50719+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50720 sysfs_attr_init(&da->attr);
50721 da->attr.name = info->pin_config[i].name;
50722 da->attr.mode = 0644;
50723diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50724index a4a8a6d..a3456f4 100644
50725--- a/drivers/regulator/core.c
50726+++ b/drivers/regulator/core.c
50727@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50728 const struct regulation_constraints *constraints = NULL;
50729 const struct regulator_init_data *init_data;
50730 struct regulator_config *config = NULL;
50731- static atomic_t regulator_no = ATOMIC_INIT(-1);
50732+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
50733 struct regulator_dev *rdev;
50734 struct device *dev;
50735 int ret, i;
50736@@ -3613,7 +3613,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50737 rdev->dev.class = &regulator_class;
50738 rdev->dev.parent = dev;
50739 dev_set_name(&rdev->dev, "regulator.%lu",
50740- (unsigned long) atomic_inc_return(&regulator_no));
50741+ (unsigned long) atomic_inc_return_unchecked(&regulator_no));
50742 ret = device_register(&rdev->dev);
50743 if (ret != 0) {
50744 put_device(&rdev->dev);
50745diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50746index 7eee2ca..4024513 100644
50747--- a/drivers/regulator/max8660.c
50748+++ b/drivers/regulator/max8660.c
50749@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50750 max8660->shadow_regs[MAX8660_OVER1] = 5;
50751 } else {
50752 /* Otherwise devices can be toggled via software */
50753- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50754- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50755+ pax_open_kernel();
50756+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50757+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50758+ pax_close_kernel();
50759 }
50760
50761 /*
50762diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50763index c3d55c2..0dddfe6 100644
50764--- a/drivers/regulator/max8973-regulator.c
50765+++ b/drivers/regulator/max8973-regulator.c
50766@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50767 if (!pdata || !pdata->enable_ext_control) {
50768 max->desc.enable_reg = MAX8973_VOUT;
50769 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50770- max->ops.enable = regulator_enable_regmap;
50771- max->ops.disable = regulator_disable_regmap;
50772- max->ops.is_enabled = regulator_is_enabled_regmap;
50773+ pax_open_kernel();
50774+ *(void **)&max->ops.enable = regulator_enable_regmap;
50775+ *(void **)&max->ops.disable = regulator_disable_regmap;
50776+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50777+ pax_close_kernel();
50778 }
50779
50780 if (pdata) {
50781diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50782index 0d17c92..a29f627 100644
50783--- a/drivers/regulator/mc13892-regulator.c
50784+++ b/drivers/regulator/mc13892-regulator.c
50785@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50786 mc13xxx_unlock(mc13892);
50787
50788 /* update mc13892_vcam ops */
50789- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50790+ pax_open_kernel();
50791+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50792 sizeof(struct regulator_ops));
50793- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50794- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50795+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50796+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50797+ pax_close_kernel();
50798 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50799
50800 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50801diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50802index 5b2e761..c8c8a4a 100644
50803--- a/drivers/rtc/rtc-cmos.c
50804+++ b/drivers/rtc/rtc-cmos.c
50805@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50806 hpet_rtc_timer_init();
50807
50808 /* export at least the first block of NVRAM */
50809- nvram.size = address_space - NVRAM_OFFSET;
50810+ pax_open_kernel();
50811+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50812+ pax_close_kernel();
50813 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50814 if (retval < 0) {
50815 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50816diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50817index 799c34b..8e9786a 100644
50818--- a/drivers/rtc/rtc-dev.c
50819+++ b/drivers/rtc/rtc-dev.c
50820@@ -16,6 +16,7 @@
50821 #include <linux/module.h>
50822 #include <linux/rtc.h>
50823 #include <linux/sched.h>
50824+#include <linux/grsecurity.h>
50825 #include "rtc-core.h"
50826
50827 static dev_t rtc_devt;
50828@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50829 if (copy_from_user(&tm, uarg, sizeof(tm)))
50830 return -EFAULT;
50831
50832+ gr_log_timechange();
50833+
50834 return rtc_set_time(rtc, &tm);
50835
50836 case RTC_PIE_ON:
50837diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50838index 4ffabb3..1f87fca 100644
50839--- a/drivers/rtc/rtc-ds1307.c
50840+++ b/drivers/rtc/rtc-ds1307.c
50841@@ -107,7 +107,7 @@ struct ds1307 {
50842 u8 offset; /* register's offset */
50843 u8 regs[11];
50844 u16 nvram_offset;
50845- struct bin_attribute *nvram;
50846+ bin_attribute_no_const *nvram;
50847 enum ds_type type;
50848 unsigned long flags;
50849 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50850diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50851index 90abb5b..e0bf6dd 100644
50852--- a/drivers/rtc/rtc-m48t59.c
50853+++ b/drivers/rtc/rtc-m48t59.c
50854@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50855 if (IS_ERR(m48t59->rtc))
50856 return PTR_ERR(m48t59->rtc);
50857
50858- m48t59_nvram_attr.size = pdata->offset;
50859+ pax_open_kernel();
50860+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50861+ pax_close_kernel();
50862
50863 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50864 if (ret)
50865diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50866index e693af6..2e525b6 100644
50867--- a/drivers/scsi/bfa/bfa_fcpim.h
50868+++ b/drivers/scsi/bfa/bfa_fcpim.h
50869@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50870
50871 struct bfa_itn_s {
50872 bfa_isr_func_t isr;
50873-};
50874+} __no_const;
50875
50876 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50877 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50878diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50879index 0f19455..ef7adb5 100644
50880--- a/drivers/scsi/bfa/bfa_fcs.c
50881+++ b/drivers/scsi/bfa/bfa_fcs.c
50882@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50883 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50884
50885 static struct bfa_fcs_mod_s fcs_modules[] = {
50886- { bfa_fcs_port_attach, NULL, NULL },
50887- { bfa_fcs_uf_attach, NULL, NULL },
50888- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50889- bfa_fcs_fabric_modexit },
50890+ {
50891+ .attach = bfa_fcs_port_attach,
50892+ .modinit = NULL,
50893+ .modexit = NULL
50894+ },
50895+ {
50896+ .attach = bfa_fcs_uf_attach,
50897+ .modinit = NULL,
50898+ .modexit = NULL
50899+ },
50900+ {
50901+ .attach = bfa_fcs_fabric_attach,
50902+ .modinit = bfa_fcs_fabric_modinit,
50903+ .modexit = bfa_fcs_fabric_modexit
50904+ },
50905 };
50906
50907 /*
50908diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50909index ff75ef8..2dfe00a 100644
50910--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50911+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50912@@ -89,15 +89,26 @@ static struct {
50913 void (*offline) (struct bfa_fcs_lport_s *port);
50914 } __port_action[] = {
50915 {
50916- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50917- bfa_fcs_lport_unknown_offline}, {
50918- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50919- bfa_fcs_lport_fab_offline}, {
50920- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50921- bfa_fcs_lport_n2n_offline}, {
50922- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50923- bfa_fcs_lport_loop_offline},
50924- };
50925+ .init = bfa_fcs_lport_unknown_init,
50926+ .online = bfa_fcs_lport_unknown_online,
50927+ .offline = bfa_fcs_lport_unknown_offline
50928+ },
50929+ {
50930+ .init = bfa_fcs_lport_fab_init,
50931+ .online = bfa_fcs_lport_fab_online,
50932+ .offline = bfa_fcs_lport_fab_offline
50933+ },
50934+ {
50935+ .init = bfa_fcs_lport_n2n_init,
50936+ .online = bfa_fcs_lport_n2n_online,
50937+ .offline = bfa_fcs_lport_n2n_offline
50938+ },
50939+ {
50940+ .init = bfa_fcs_lport_loop_init,
50941+ .online = bfa_fcs_lport_loop_online,
50942+ .offline = bfa_fcs_lport_loop_offline
50943+ },
50944+};
50945
50946 /*
50947 * fcs_port_sm FCS logical port state machine
50948diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50949index a38aafa0..fe8f03b 100644
50950--- a/drivers/scsi/bfa/bfa_ioc.h
50951+++ b/drivers/scsi/bfa/bfa_ioc.h
50952@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50953 bfa_ioc_disable_cbfn_t disable_cbfn;
50954 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50955 bfa_ioc_reset_cbfn_t reset_cbfn;
50956-};
50957+} __no_const;
50958
50959 /*
50960 * IOC event notification mechanism.
50961@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50962 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50963 enum bfi_ioc_state fwstate);
50964 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50965-};
50966+} __no_const;
50967
50968 /*
50969 * Queue element to wait for room in request queue. FIFO order is
50970diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50971index a14c784..6de6790 100644
50972--- a/drivers/scsi/bfa/bfa_modules.h
50973+++ b/drivers/scsi/bfa/bfa_modules.h
50974@@ -78,12 +78,12 @@ enum {
50975 \
50976 extern struct bfa_module_s hal_mod_ ## __mod; \
50977 struct bfa_module_s hal_mod_ ## __mod = { \
50978- bfa_ ## __mod ## _meminfo, \
50979- bfa_ ## __mod ## _attach, \
50980- bfa_ ## __mod ## _detach, \
50981- bfa_ ## __mod ## _start, \
50982- bfa_ ## __mod ## _stop, \
50983- bfa_ ## __mod ## _iocdisable, \
50984+ .meminfo = bfa_ ## __mod ## _meminfo, \
50985+ .attach = bfa_ ## __mod ## _attach, \
50986+ .detach = bfa_ ## __mod ## _detach, \
50987+ .start = bfa_ ## __mod ## _start, \
50988+ .stop = bfa_ ## __mod ## _stop, \
50989+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50990 }
50991
50992 #define BFA_CACHELINE_SZ (256)
50993diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50994index 045c4e1..13de803 100644
50995--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50996+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50997@@ -33,8 +33,8 @@
50998 */
50999 #include "libfcoe.h"
51000
51001-static atomic_t ctlr_num;
51002-static atomic_t fcf_num;
51003+static atomic_unchecked_t ctlr_num;
51004+static atomic_unchecked_t fcf_num;
51005
51006 /*
51007 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51008@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51009 if (!ctlr)
51010 goto out;
51011
51012- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51013+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51014 ctlr->f = f;
51015 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51016 INIT_LIST_HEAD(&ctlr->fcfs);
51017@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51018 fcf->dev.parent = &ctlr->dev;
51019 fcf->dev.bus = &fcoe_bus_type;
51020 fcf->dev.type = &fcoe_fcf_device_type;
51021- fcf->id = atomic_inc_return(&fcf_num) - 1;
51022+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51023 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51024
51025 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51026@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51027 {
51028 int error;
51029
51030- atomic_set(&ctlr_num, 0);
51031- atomic_set(&fcf_num, 0);
51032+ atomic_set_unchecked(&ctlr_num, 0);
51033+ atomic_set_unchecked(&fcf_num, 0);
51034
51035 error = bus_register(&fcoe_bus_type);
51036 if (error)
51037diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
51038index 8bb173e..20236b4 100644
51039--- a/drivers/scsi/hosts.c
51040+++ b/drivers/scsi/hosts.c
51041@@ -42,7 +42,7 @@
51042 #include "scsi_logging.h"
51043
51044
51045-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51046+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51047
51048
51049 static void scsi_host_cls_release(struct device *dev)
51050@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
51051 * subtract one because we increment first then return, but we need to
51052 * know what the next host number was before increment
51053 */
51054- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
51055+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
51056 shost->dma_channel = 0xff;
51057
51058 /* These three are default values which can be overridden */
51059diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
51060index a1cfbd3..d7f8ebc 100644
51061--- a/drivers/scsi/hpsa.c
51062+++ b/drivers/scsi/hpsa.c
51063@@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
51064 struct reply_queue_buffer *rq = &h->reply_queue[q];
51065
51066 if (h->transMethod & CFGTBL_Trans_io_accel1)
51067- return h->access.command_completed(h, q);
51068+ return h->access->command_completed(h, q);
51069
51070 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
51071- return h->access.command_completed(h, q);
51072+ return h->access->command_completed(h, q);
51073
51074 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
51075 a = rq->head[rq->current_entry];
51076@@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
51077 break;
51078 default:
51079 set_performant_mode(h, c);
51080- h->access.submit_command(h, c);
51081+ h->access->submit_command(h, c);
51082 }
51083 }
51084
51085@@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
51086
51087 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
51088 {
51089- return h->access.command_completed(h, q);
51090+ return h->access->command_completed(h, q);
51091 }
51092
51093 static inline bool interrupt_pending(struct ctlr_info *h)
51094 {
51095- return h->access.intr_pending(h);
51096+ return h->access->intr_pending(h);
51097 }
51098
51099 static inline long interrupt_not_for_us(struct ctlr_info *h)
51100 {
51101- return (h->access.intr_pending(h) == 0) ||
51102+ return (h->access->intr_pending(h) == 0) ||
51103 (h->interrupts_enabled == 0);
51104 }
51105
51106@@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
51107 if (prod_index < 0)
51108 return prod_index;
51109 h->product_name = products[prod_index].product_name;
51110- h->access = *(products[prod_index].access);
51111+ h->access = products[prod_index].access;
51112
51113 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
51114 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
51115@@ -6649,7 +6649,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
51116 unsigned long flags;
51117 u32 lockup_detected;
51118
51119- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51120+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51121 spin_lock_irqsave(&h->lock, flags);
51122 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
51123 if (!lockup_detected) {
51124@@ -6924,7 +6924,7 @@ reinit_after_soft_reset:
51125 }
51126
51127 /* make sure the board interrupts are off */
51128- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51129+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51130
51131 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
51132 goto clean2;
51133@@ -6960,7 +6960,7 @@ reinit_after_soft_reset:
51134 * fake ones to scoop up any residual completions.
51135 */
51136 spin_lock_irqsave(&h->lock, flags);
51137- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51138+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51139 spin_unlock_irqrestore(&h->lock, flags);
51140 hpsa_free_irqs(h);
51141 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
51142@@ -6979,9 +6979,9 @@ reinit_after_soft_reset:
51143 dev_info(&h->pdev->dev, "Board READY.\n");
51144 dev_info(&h->pdev->dev,
51145 "Waiting for stale completions to drain.\n");
51146- h->access.set_intr_mask(h, HPSA_INTR_ON);
51147+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51148 msleep(10000);
51149- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51150+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51151
51152 rc = controller_reset_failed(h->cfgtable);
51153 if (rc)
51154@@ -7006,7 +7006,7 @@ reinit_after_soft_reset:
51155
51156
51157 /* Turn the interrupts on so we can service requests */
51158- h->access.set_intr_mask(h, HPSA_INTR_ON);
51159+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51160
51161 hpsa_hba_inquiry(h);
51162 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
51163@@ -7079,7 +7079,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
51164 * To write all data in the battery backed cache to disks
51165 */
51166 hpsa_flush_cache(h);
51167- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51168+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51169 hpsa_free_irqs_and_disable_msix(h);
51170 }
51171
51172@@ -7200,7 +7200,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51173 CFGTBL_Trans_enable_directed_msix |
51174 (trans_support & (CFGTBL_Trans_io_accel1 |
51175 CFGTBL_Trans_io_accel2));
51176- struct access_method access = SA5_performant_access;
51177+ struct access_method *access = &SA5_performant_access;
51178
51179 /* This is a bit complicated. There are 8 registers on
51180 * the controller which we write to to tell it 8 different
51181@@ -7242,7 +7242,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51182 * perform the superfluous readl() after each command submission.
51183 */
51184 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
51185- access = SA5_performant_access_no_read;
51186+ access = &SA5_performant_access_no_read;
51187
51188 /* Controller spec: zero out this buffer. */
51189 for (i = 0; i < h->nreply_queues; i++)
51190@@ -7272,12 +7272,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51191 * enable outbound interrupt coalescing in accelerator mode;
51192 */
51193 if (trans_support & CFGTBL_Trans_io_accel1) {
51194- access = SA5_ioaccel_mode1_access;
51195+ access = &SA5_ioaccel_mode1_access;
51196 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51197 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51198 } else {
51199 if (trans_support & CFGTBL_Trans_io_accel2) {
51200- access = SA5_ioaccel_mode2_access;
51201+ access = &SA5_ioaccel_mode2_access;
51202 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51203 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51204 }
51205diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
51206index 6577130..955f9a4 100644
51207--- a/drivers/scsi/hpsa.h
51208+++ b/drivers/scsi/hpsa.h
51209@@ -143,7 +143,7 @@ struct ctlr_info {
51210 unsigned int msix_vector;
51211 unsigned int msi_vector;
51212 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
51213- struct access_method access;
51214+ struct access_method *access;
51215 char hba_mode_enabled;
51216
51217 /* queue and queue Info */
51218@@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
51219 }
51220
51221 static struct access_method SA5_access = {
51222- SA5_submit_command,
51223- SA5_intr_mask,
51224- SA5_intr_pending,
51225- SA5_completed,
51226+ .submit_command = SA5_submit_command,
51227+ .set_intr_mask = SA5_intr_mask,
51228+ .intr_pending = SA5_intr_pending,
51229+ .command_completed = SA5_completed,
51230 };
51231
51232 static struct access_method SA5_ioaccel_mode1_access = {
51233- SA5_submit_command,
51234- SA5_performant_intr_mask,
51235- SA5_ioaccel_mode1_intr_pending,
51236- SA5_ioaccel_mode1_completed,
51237+ .submit_command = SA5_submit_command,
51238+ .set_intr_mask = SA5_performant_intr_mask,
51239+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
51240+ .command_completed = SA5_ioaccel_mode1_completed,
51241 };
51242
51243 static struct access_method SA5_ioaccel_mode2_access = {
51244- SA5_submit_command_ioaccel2,
51245- SA5_performant_intr_mask,
51246- SA5_performant_intr_pending,
51247- SA5_performant_completed,
51248+ .submit_command = SA5_submit_command_ioaccel2,
51249+ .set_intr_mask = SA5_performant_intr_mask,
51250+ .intr_pending = SA5_performant_intr_pending,
51251+ .command_completed = SA5_performant_completed,
51252 };
51253
51254 static struct access_method SA5_performant_access = {
51255- SA5_submit_command,
51256- SA5_performant_intr_mask,
51257- SA5_performant_intr_pending,
51258- SA5_performant_completed,
51259+ .submit_command = SA5_submit_command,
51260+ .set_intr_mask = SA5_performant_intr_mask,
51261+ .intr_pending = SA5_performant_intr_pending,
51262+ .command_completed = SA5_performant_completed,
51263 };
51264
51265 static struct access_method SA5_performant_access_no_read = {
51266- SA5_submit_command_no_read,
51267- SA5_performant_intr_mask,
51268- SA5_performant_intr_pending,
51269- SA5_performant_completed,
51270+ .submit_command = SA5_submit_command_no_read,
51271+ .set_intr_mask = SA5_performant_intr_mask,
51272+ .intr_pending = SA5_performant_intr_pending,
51273+ .command_completed = SA5_performant_completed,
51274 };
51275
51276 struct board_type {
51277diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
51278index 1b3a094..068e683 100644
51279--- a/drivers/scsi/libfc/fc_exch.c
51280+++ b/drivers/scsi/libfc/fc_exch.c
51281@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51282 u16 pool_max_index;
51283
51284 struct {
51285- atomic_t no_free_exch;
51286- atomic_t no_free_exch_xid;
51287- atomic_t xid_not_found;
51288- atomic_t xid_busy;
51289- atomic_t seq_not_found;
51290- atomic_t non_bls_resp;
51291+ atomic_unchecked_t no_free_exch;
51292+ atomic_unchecked_t no_free_exch_xid;
51293+ atomic_unchecked_t xid_not_found;
51294+ atomic_unchecked_t xid_busy;
51295+ atomic_unchecked_t seq_not_found;
51296+ atomic_unchecked_t non_bls_resp;
51297 } stats;
51298 };
51299
51300@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51301 /* allocate memory for exchange */
51302 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51303 if (!ep) {
51304- atomic_inc(&mp->stats.no_free_exch);
51305+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51306 goto out;
51307 }
51308 memset(ep, 0, sizeof(*ep));
51309@@ -874,7 +874,7 @@ out:
51310 return ep;
51311 err:
51312 spin_unlock_bh(&pool->lock);
51313- atomic_inc(&mp->stats.no_free_exch_xid);
51314+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51315 mempool_free(ep, mp->ep_pool);
51316 return NULL;
51317 }
51318@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51319 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51320 ep = fc_exch_find(mp, xid);
51321 if (!ep) {
51322- atomic_inc(&mp->stats.xid_not_found);
51323+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51324 reject = FC_RJT_OX_ID;
51325 goto out;
51326 }
51327@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51328 ep = fc_exch_find(mp, xid);
51329 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51330 if (ep) {
51331- atomic_inc(&mp->stats.xid_busy);
51332+ atomic_inc_unchecked(&mp->stats.xid_busy);
51333 reject = FC_RJT_RX_ID;
51334 goto rel;
51335 }
51336@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51337 }
51338 xid = ep->xid; /* get our XID */
51339 } else if (!ep) {
51340- atomic_inc(&mp->stats.xid_not_found);
51341+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51342 reject = FC_RJT_RX_ID; /* XID not found */
51343 goto out;
51344 }
51345@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51346 } else {
51347 sp = &ep->seq;
51348 if (sp->id != fh->fh_seq_id) {
51349- atomic_inc(&mp->stats.seq_not_found);
51350+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51351 if (f_ctl & FC_FC_END_SEQ) {
51352 /*
51353 * Update sequence_id based on incoming last
51354@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51355
51356 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51357 if (!ep) {
51358- atomic_inc(&mp->stats.xid_not_found);
51359+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51360 goto out;
51361 }
51362 if (ep->esb_stat & ESB_ST_COMPLETE) {
51363- atomic_inc(&mp->stats.xid_not_found);
51364+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51365 goto rel;
51366 }
51367 if (ep->rxid == FC_XID_UNKNOWN)
51368 ep->rxid = ntohs(fh->fh_rx_id);
51369 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51370- atomic_inc(&mp->stats.xid_not_found);
51371+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51372 goto rel;
51373 }
51374 if (ep->did != ntoh24(fh->fh_s_id) &&
51375 ep->did != FC_FID_FLOGI) {
51376- atomic_inc(&mp->stats.xid_not_found);
51377+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51378 goto rel;
51379 }
51380 sof = fr_sof(fp);
51381@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51382 sp->ssb_stat |= SSB_ST_RESP;
51383 sp->id = fh->fh_seq_id;
51384 } else if (sp->id != fh->fh_seq_id) {
51385- atomic_inc(&mp->stats.seq_not_found);
51386+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51387 goto rel;
51388 }
51389
51390@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51391 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51392
51393 if (!sp)
51394- atomic_inc(&mp->stats.xid_not_found);
51395+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51396 else
51397- atomic_inc(&mp->stats.non_bls_resp);
51398+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51399
51400 fc_frame_free(fp);
51401 }
51402@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51403
51404 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51405 mp = ema->mp;
51406- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51407+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51408 st->fc_no_free_exch_xid +=
51409- atomic_read(&mp->stats.no_free_exch_xid);
51410- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51411- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51412- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51413- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51414+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51415+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51416+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51417+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51418+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51419 }
51420 }
51421 EXPORT_SYMBOL(fc_exch_update_stats);
51422diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51423index 9c706d8..d3e3ed2 100644
51424--- a/drivers/scsi/libsas/sas_ata.c
51425+++ b/drivers/scsi/libsas/sas_ata.c
51426@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51427 .postreset = ata_std_postreset,
51428 .error_handler = ata_std_error_handler,
51429 .post_internal_cmd = sas_ata_post_internal,
51430- .qc_defer = ata_std_qc_defer,
51431+ .qc_defer = ata_std_qc_defer,
51432 .qc_prep = ata_noop_qc_prep,
51433 .qc_issue = sas_ata_qc_issue,
51434 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51435diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51436index 434e903..5a4a79b 100644
51437--- a/drivers/scsi/lpfc/lpfc.h
51438+++ b/drivers/scsi/lpfc/lpfc.h
51439@@ -430,7 +430,7 @@ struct lpfc_vport {
51440 struct dentry *debug_nodelist;
51441 struct dentry *vport_debugfs_root;
51442 struct lpfc_debugfs_trc *disc_trc;
51443- atomic_t disc_trc_cnt;
51444+ atomic_unchecked_t disc_trc_cnt;
51445 #endif
51446 uint8_t stat_data_enabled;
51447 uint8_t stat_data_blocked;
51448@@ -880,8 +880,8 @@ struct lpfc_hba {
51449 struct timer_list fabric_block_timer;
51450 unsigned long bit_flags;
51451 #define FABRIC_COMANDS_BLOCKED 0
51452- atomic_t num_rsrc_err;
51453- atomic_t num_cmd_success;
51454+ atomic_unchecked_t num_rsrc_err;
51455+ atomic_unchecked_t num_cmd_success;
51456 unsigned long last_rsrc_error_time;
51457 unsigned long last_ramp_down_time;
51458 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51459@@ -916,7 +916,7 @@ struct lpfc_hba {
51460
51461 struct dentry *debug_slow_ring_trc;
51462 struct lpfc_debugfs_trc *slow_ring_trc;
51463- atomic_t slow_ring_trc_cnt;
51464+ atomic_unchecked_t slow_ring_trc_cnt;
51465 /* iDiag debugfs sub-directory */
51466 struct dentry *idiag_root;
51467 struct dentry *idiag_pci_cfg;
51468diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51469index 5633e7d..8272114 100644
51470--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51471+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51472@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51473
51474 #include <linux/debugfs.h>
51475
51476-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51477+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51478 static unsigned long lpfc_debugfs_start_time = 0L;
51479
51480 /* iDiag */
51481@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51482 lpfc_debugfs_enable = 0;
51483
51484 len = 0;
51485- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51486+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51487 (lpfc_debugfs_max_disc_trc - 1);
51488 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51489 dtp = vport->disc_trc + i;
51490@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51491 lpfc_debugfs_enable = 0;
51492
51493 len = 0;
51494- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51495+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51496 (lpfc_debugfs_max_slow_ring_trc - 1);
51497 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51498 dtp = phba->slow_ring_trc + i;
51499@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51500 !vport || !vport->disc_trc)
51501 return;
51502
51503- index = atomic_inc_return(&vport->disc_trc_cnt) &
51504+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51505 (lpfc_debugfs_max_disc_trc - 1);
51506 dtp = vport->disc_trc + index;
51507 dtp->fmt = fmt;
51508 dtp->data1 = data1;
51509 dtp->data2 = data2;
51510 dtp->data3 = data3;
51511- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51512+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51513 dtp->jif = jiffies;
51514 #endif
51515 return;
51516@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51517 !phba || !phba->slow_ring_trc)
51518 return;
51519
51520- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51521+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51522 (lpfc_debugfs_max_slow_ring_trc - 1);
51523 dtp = phba->slow_ring_trc + index;
51524 dtp->fmt = fmt;
51525 dtp->data1 = data1;
51526 dtp->data2 = data2;
51527 dtp->data3 = data3;
51528- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51529+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51530 dtp->jif = jiffies;
51531 #endif
51532 return;
51533@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51534 "slow_ring buffer\n");
51535 goto debug_failed;
51536 }
51537- atomic_set(&phba->slow_ring_trc_cnt, 0);
51538+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51539 memset(phba->slow_ring_trc, 0,
51540 (sizeof(struct lpfc_debugfs_trc) *
51541 lpfc_debugfs_max_slow_ring_trc));
51542@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51543 "buffer\n");
51544 goto debug_failed;
51545 }
51546- atomic_set(&vport->disc_trc_cnt, 0);
51547+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51548
51549 snprintf(name, sizeof(name), "discovery_trace");
51550 vport->debug_disc_trc =
51551diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51552index 0b2c53a..aec2b45 100644
51553--- a/drivers/scsi/lpfc/lpfc_init.c
51554+++ b/drivers/scsi/lpfc/lpfc_init.c
51555@@ -11290,8 +11290,10 @@ lpfc_init(void)
51556 "misc_register returned with status %d", error);
51557
51558 if (lpfc_enable_npiv) {
51559- lpfc_transport_functions.vport_create = lpfc_vport_create;
51560- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51561+ pax_open_kernel();
51562+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51563+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51564+ pax_close_kernel();
51565 }
51566 lpfc_transport_template =
51567 fc_attach_transport(&lpfc_transport_functions);
51568diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51569index 4f9222e..f1850e3 100644
51570--- a/drivers/scsi/lpfc/lpfc_scsi.c
51571+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51572@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51573 unsigned long expires;
51574
51575 spin_lock_irqsave(&phba->hbalock, flags);
51576- atomic_inc(&phba->num_rsrc_err);
51577+ atomic_inc_unchecked(&phba->num_rsrc_err);
51578 phba->last_rsrc_error_time = jiffies;
51579
51580 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51581@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51582 unsigned long num_rsrc_err, num_cmd_success;
51583 int i;
51584
51585- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51586- num_cmd_success = atomic_read(&phba->num_cmd_success);
51587+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51588+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51589
51590 /*
51591 * The error and success command counters are global per
51592@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51593 }
51594 }
51595 lpfc_destroy_vport_work_array(phba, vports);
51596- atomic_set(&phba->num_rsrc_err, 0);
51597- atomic_set(&phba->num_cmd_success, 0);
51598+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51599+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51600 }
51601
51602 /**
51603diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51604index 3f26147..ee8efd1 100644
51605--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51606+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51607@@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
51608 {
51609 struct scsi_device *sdev = to_scsi_device(dev);
51610 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51611- static struct _raid_device *raid_device;
51612+ struct _raid_device *raid_device;
51613 unsigned long flags;
51614 Mpi2RaidVolPage0_t vol_pg0;
51615 Mpi2ConfigReply_t mpi_reply;
51616@@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
51617 {
51618 struct scsi_device *sdev = to_scsi_device(dev);
51619 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51620- static struct _raid_device *raid_device;
51621+ struct _raid_device *raid_device;
51622 unsigned long flags;
51623 Mpi2RaidVolPage0_t vol_pg0;
51624 Mpi2ConfigReply_t mpi_reply;
51625@@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51626 Mpi2EventDataIrOperationStatus_t *event_data =
51627 (Mpi2EventDataIrOperationStatus_t *)
51628 fw_event->event_data;
51629- static struct _raid_device *raid_device;
51630+ struct _raid_device *raid_device;
51631 unsigned long flags;
51632 u16 handle;
51633
51634@@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51635 u64 sas_address;
51636 struct _sas_device *sas_device;
51637 struct _sas_node *expander_device;
51638- static struct _raid_device *raid_device;
51639+ struct _raid_device *raid_device;
51640 u8 retry_count;
51641 unsigned long flags;
51642
51643diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51644index ed31d8c..ab856b3 100644
51645--- a/drivers/scsi/pmcraid.c
51646+++ b/drivers/scsi/pmcraid.c
51647@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51648 res->scsi_dev = scsi_dev;
51649 scsi_dev->hostdata = res;
51650 res->change_detected = 0;
51651- atomic_set(&res->read_failures, 0);
51652- atomic_set(&res->write_failures, 0);
51653+ atomic_set_unchecked(&res->read_failures, 0);
51654+ atomic_set_unchecked(&res->write_failures, 0);
51655 rc = 0;
51656 }
51657 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51658@@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51659
51660 /* If this was a SCSI read/write command keep count of errors */
51661 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51662- atomic_inc(&res->read_failures);
51663+ atomic_inc_unchecked(&res->read_failures);
51664 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51665- atomic_inc(&res->write_failures);
51666+ atomic_inc_unchecked(&res->write_failures);
51667
51668 if (!RES_IS_GSCSI(res->cfg_entry) &&
51669 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51670@@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
51671 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51672 * hrrq_id assigned here in queuecommand
51673 */
51674- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51675+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51676 pinstance->num_hrrq;
51677 cmd->cmd_done = pmcraid_io_done;
51678
51679@@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
51680 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51681 * hrrq_id assigned here in queuecommand
51682 */
51683- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51684+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51685 pinstance->num_hrrq;
51686
51687 if (request_size) {
51688@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51689
51690 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51691 /* add resources only after host is added into system */
51692- if (!atomic_read(&pinstance->expose_resources))
51693+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51694 return;
51695
51696 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51697@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51698 init_waitqueue_head(&pinstance->reset_wait_q);
51699
51700 atomic_set(&pinstance->outstanding_cmds, 0);
51701- atomic_set(&pinstance->last_message_id, 0);
51702- atomic_set(&pinstance->expose_resources, 0);
51703+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51704+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51705
51706 INIT_LIST_HEAD(&pinstance->free_res_q);
51707 INIT_LIST_HEAD(&pinstance->used_res_q);
51708@@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51709 /* Schedule worker thread to handle CCN and take care of adding and
51710 * removing devices to OS
51711 */
51712- atomic_set(&pinstance->expose_resources, 1);
51713+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51714 schedule_work(&pinstance->worker_q);
51715 return rc;
51716
51717diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51718index e1d150f..6c6df44 100644
51719--- a/drivers/scsi/pmcraid.h
51720+++ b/drivers/scsi/pmcraid.h
51721@@ -748,7 +748,7 @@ struct pmcraid_instance {
51722 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51723
51724 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51725- atomic_t last_message_id;
51726+ atomic_unchecked_t last_message_id;
51727
51728 /* configuration table */
51729 struct pmcraid_config_table *cfg_table;
51730@@ -777,7 +777,7 @@ struct pmcraid_instance {
51731 atomic_t outstanding_cmds;
51732
51733 /* should add/delete resources to mid-layer now ?*/
51734- atomic_t expose_resources;
51735+ atomic_unchecked_t expose_resources;
51736
51737
51738
51739@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51740 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51741 };
51742 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51743- atomic_t read_failures; /* count of failed READ commands */
51744- atomic_t write_failures; /* count of failed WRITE commands */
51745+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51746+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51747
51748 /* To indicate add/delete/modify during CCN */
51749 u8 change_detected;
51750diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51751index 82b92c4..3178171 100644
51752--- a/drivers/scsi/qla2xxx/qla_attr.c
51753+++ b/drivers/scsi/qla2xxx/qla_attr.c
51754@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51755 return 0;
51756 }
51757
51758-struct fc_function_template qla2xxx_transport_functions = {
51759+fc_function_template_no_const qla2xxx_transport_functions = {
51760
51761 .show_host_node_name = 1,
51762 .show_host_port_name = 1,
51763@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51764 .bsg_timeout = qla24xx_bsg_timeout,
51765 };
51766
51767-struct fc_function_template qla2xxx_transport_vport_functions = {
51768+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51769
51770 .show_host_node_name = 1,
51771 .show_host_port_name = 1,
51772diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51773index 7686bfe..4710893 100644
51774--- a/drivers/scsi/qla2xxx/qla_gbl.h
51775+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51776@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51777 struct device_attribute;
51778 extern struct device_attribute *qla2x00_host_attrs[];
51779 struct fc_function_template;
51780-extern struct fc_function_template qla2xxx_transport_functions;
51781-extern struct fc_function_template qla2xxx_transport_vport_functions;
51782+extern fc_function_template_no_const qla2xxx_transport_functions;
51783+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51784 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51785 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51786 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51787diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51788index cce1cbc..5b9f0fe 100644
51789--- a/drivers/scsi/qla2xxx/qla_os.c
51790+++ b/drivers/scsi/qla2xxx/qla_os.c
51791@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51792 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51793 /* Ok, a 64bit DMA mask is applicable. */
51794 ha->flags.enable_64bit_addressing = 1;
51795- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51796- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51797+ pax_open_kernel();
51798+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51799+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51800+ pax_close_kernel();
51801 return;
51802 }
51803 }
51804diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51805index 8f6d0fb..1b21097 100644
51806--- a/drivers/scsi/qla4xxx/ql4_def.h
51807+++ b/drivers/scsi/qla4xxx/ql4_def.h
51808@@ -305,7 +305,7 @@ struct ddb_entry {
51809 * (4000 only) */
51810 atomic_t relogin_timer; /* Max Time to wait for
51811 * relogin to complete */
51812- atomic_t relogin_retry_count; /* Num of times relogin has been
51813+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51814 * retried */
51815 uint32_t default_time2wait; /* Default Min time between
51816 * relogins (+aens) */
51817diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51818index 6d25879..3031a9f 100644
51819--- a/drivers/scsi/qla4xxx/ql4_os.c
51820+++ b/drivers/scsi/qla4xxx/ql4_os.c
51821@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51822 */
51823 if (!iscsi_is_session_online(cls_sess)) {
51824 /* Reset retry relogin timer */
51825- atomic_inc(&ddb_entry->relogin_retry_count);
51826+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51827 DEBUG2(ql4_printk(KERN_INFO, ha,
51828 "%s: index[%d] relogin timed out-retrying"
51829 " relogin (%d), retry (%d)\n", __func__,
51830 ddb_entry->fw_ddb_index,
51831- atomic_read(&ddb_entry->relogin_retry_count),
51832+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51833 ddb_entry->default_time2wait + 4));
51834 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51835 atomic_set(&ddb_entry->retry_relogin_timer,
51836@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51837
51838 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51839 atomic_set(&ddb_entry->relogin_timer, 0);
51840- atomic_set(&ddb_entry->relogin_retry_count, 0);
51841+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51842 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51843 ddb_entry->default_relogin_timeout =
51844 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51845diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51846index b1a2631..5bcd9c8 100644
51847--- a/drivers/scsi/scsi_lib.c
51848+++ b/drivers/scsi/scsi_lib.c
51849@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51850 shost = sdev->host;
51851 scsi_init_cmd_errh(cmd);
51852 cmd->result = DID_NO_CONNECT << 16;
51853- atomic_inc(&cmd->device->iorequest_cnt);
51854+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51855
51856 /*
51857 * SCSI request completion path will do scsi_device_unbusy(),
51858@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
51859
51860 INIT_LIST_HEAD(&cmd->eh_entry);
51861
51862- atomic_inc(&cmd->device->iodone_cnt);
51863+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51864 if (cmd->result)
51865- atomic_inc(&cmd->device->ioerr_cnt);
51866+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51867
51868 disposition = scsi_decide_disposition(cmd);
51869 if (disposition != SUCCESS &&
51870@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51871 struct Scsi_Host *host = cmd->device->host;
51872 int rtn = 0;
51873
51874- atomic_inc(&cmd->device->iorequest_cnt);
51875+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51876
51877 /* check if the device is still usable */
51878 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51879diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51880index 1ac38e7..6acc656 100644
51881--- a/drivers/scsi/scsi_sysfs.c
51882+++ b/drivers/scsi/scsi_sysfs.c
51883@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51884 char *buf) \
51885 { \
51886 struct scsi_device *sdev = to_scsi_device(dev); \
51887- unsigned long long count = atomic_read(&sdev->field); \
51888+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51889 return snprintf(buf, 20, "0x%llx\n", count); \
51890 } \
51891 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51892diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51893index 5d6f348..18778a6b 100644
51894--- a/drivers/scsi/scsi_transport_fc.c
51895+++ b/drivers/scsi/scsi_transport_fc.c
51896@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51897 * Netlink Infrastructure
51898 */
51899
51900-static atomic_t fc_event_seq;
51901+static atomic_unchecked_t fc_event_seq;
51902
51903 /**
51904 * fc_get_event_number - Obtain the next sequential FC event number
51905@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51906 u32
51907 fc_get_event_number(void)
51908 {
51909- return atomic_add_return(1, &fc_event_seq);
51910+ return atomic_add_return_unchecked(1, &fc_event_seq);
51911 }
51912 EXPORT_SYMBOL(fc_get_event_number);
51913
51914@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51915 {
51916 int error;
51917
51918- atomic_set(&fc_event_seq, 0);
51919+ atomic_set_unchecked(&fc_event_seq, 0);
51920
51921 error = transport_class_register(&fc_host_class);
51922 if (error)
51923@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51924 char *cp;
51925
51926 *val = simple_strtoul(buf, &cp, 0);
51927- if ((*cp && (*cp != '\n')) || (*val < 0))
51928+ if (*cp && (*cp != '\n'))
51929 return -EINVAL;
51930 /*
51931 * Check for overflow; dev_loss_tmo is u32
51932diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51933index 67d43e3..8cee73c 100644
51934--- a/drivers/scsi/scsi_transport_iscsi.c
51935+++ b/drivers/scsi/scsi_transport_iscsi.c
51936@@ -79,7 +79,7 @@ struct iscsi_internal {
51937 struct transport_container session_cont;
51938 };
51939
51940-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51941+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51942 static struct workqueue_struct *iscsi_eh_timer_workq;
51943
51944 static DEFINE_IDA(iscsi_sess_ida);
51945@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51946 int err;
51947
51948 ihost = shost->shost_data;
51949- session->sid = atomic_add_return(1, &iscsi_session_nr);
51950+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51951
51952 if (target_id == ISCSI_MAX_TARGET) {
51953 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51954@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51955 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51956 ISCSI_TRANSPORT_VERSION);
51957
51958- atomic_set(&iscsi_session_nr, 0);
51959+ atomic_set_unchecked(&iscsi_session_nr, 0);
51960
51961 err = class_register(&iscsi_transport_class);
51962 if (err)
51963diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51964index ae45bd9..c32a586 100644
51965--- a/drivers/scsi/scsi_transport_srp.c
51966+++ b/drivers/scsi/scsi_transport_srp.c
51967@@ -35,7 +35,7 @@
51968 #include "scsi_priv.h"
51969
51970 struct srp_host_attrs {
51971- atomic_t next_port_id;
51972+ atomic_unchecked_t next_port_id;
51973 };
51974 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51975
51976@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51977 struct Scsi_Host *shost = dev_to_shost(dev);
51978 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51979
51980- atomic_set(&srp_host->next_port_id, 0);
51981+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51982 return 0;
51983 }
51984
51985@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51986 rport_fast_io_fail_timedout);
51987 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51988
51989- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51990+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51991 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51992
51993 transport_setup_device(&rport->dev);
51994diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51995index 6b78476..d40476f 100644
51996--- a/drivers/scsi/sd.c
51997+++ b/drivers/scsi/sd.c
51998@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51999 sdkp->disk = gd;
52000 sdkp->index = index;
52001 atomic_set(&sdkp->openers, 0);
52002- atomic_set(&sdkp->device->ioerr_cnt, 0);
52003+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52004
52005 if (!sdp->request_queue->rq_timeout) {
52006 if (sdp->type != TYPE_MOD)
52007diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52008index 2270bd5..98408a5 100644
52009--- a/drivers/scsi/sg.c
52010+++ b/drivers/scsi/sg.c
52011@@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52012 sdp->disk->disk_name,
52013 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
52014 NULL,
52015- (char *)arg);
52016+ (char __user *)arg);
52017 case BLKTRACESTART:
52018 return blk_trace_startstop(sdp->device->request_queue, 1);
52019 case BLKTRACESTOP:
52020diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
52021index c0d660f..24a5854 100644
52022--- a/drivers/soc/tegra/fuse/fuse-tegra.c
52023+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
52024@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
52025 return i;
52026 }
52027
52028-static struct bin_attribute fuse_bin_attr = {
52029+static bin_attribute_no_const fuse_bin_attr = {
52030 .attr = { .name = "fuse", .mode = S_IRUGO, },
52031 .read = fuse_read,
52032 };
52033diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52034index 57a1950..ae54e21 100644
52035--- a/drivers/spi/spi.c
52036+++ b/drivers/spi/spi.c
52037@@ -2307,7 +2307,7 @@ int spi_bus_unlock(struct spi_master *master)
52038 EXPORT_SYMBOL_GPL(spi_bus_unlock);
52039
52040 /* portable code must never pass more than 32 bytes */
52041-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
52042+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
52043
52044 static u8 *buf;
52045
52046diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
52047index b41429f..2de5373 100644
52048--- a/drivers/staging/android/timed_output.c
52049+++ b/drivers/staging/android/timed_output.c
52050@@ -25,7 +25,7 @@
52051 #include "timed_output.h"
52052
52053 static struct class *timed_output_class;
52054-static atomic_t device_count;
52055+static atomic_unchecked_t device_count;
52056
52057 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
52058 char *buf)
52059@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
52060 timed_output_class = class_create(THIS_MODULE, "timed_output");
52061 if (IS_ERR(timed_output_class))
52062 return PTR_ERR(timed_output_class);
52063- atomic_set(&device_count, 0);
52064+ atomic_set_unchecked(&device_count, 0);
52065 timed_output_class->dev_groups = timed_output_groups;
52066 }
52067
52068@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
52069 if (ret < 0)
52070 return ret;
52071
52072- tdev->index = atomic_inc_return(&device_count);
52073+ tdev->index = atomic_inc_return_unchecked(&device_count);
52074 tdev->dev = device_create(timed_output_class, NULL,
52075 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
52076 if (IS_ERR(tdev->dev))
52077diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
52078index 727640e..55bf61c 100644
52079--- a/drivers/staging/comedi/comedi_fops.c
52080+++ b/drivers/staging/comedi/comedi_fops.c
52081@@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
52082 }
52083 cfp->last_attached = dev->attached;
52084 cfp->last_detach_count = dev->detach_count;
52085- ACCESS_ONCE(cfp->read_subdev) = read_s;
52086- ACCESS_ONCE(cfp->write_subdev) = write_s;
52087+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
52088+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
52089 }
52090
52091 static void comedi_file_check(struct file *file)
52092@@ -1924,7 +1924,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52093 !(s_old->async->cmd.flags & CMDF_WRITE))
52094 return -EBUSY;
52095
52096- ACCESS_ONCE(cfp->read_subdev) = s_new;
52097+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
52098 return 0;
52099 }
52100
52101@@ -1966,7 +1966,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52102 (s_old->async->cmd.flags & CMDF_WRITE))
52103 return -EBUSY;
52104
52105- ACCESS_ONCE(cfp->write_subdev) = s_new;
52106+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
52107 return 0;
52108 }
52109
52110diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
52111index 37dcf7e..f3c2016 100644
52112--- a/drivers/staging/fbtft/fbtft-core.c
52113+++ b/drivers/staging/fbtft/fbtft-core.c
52114@@ -689,7 +689,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
52115 {
52116 struct fb_info *info;
52117 struct fbtft_par *par;
52118- struct fb_ops *fbops = NULL;
52119+ fb_ops_no_const *fbops = NULL;
52120 struct fb_deferred_io *fbdefio = NULL;
52121 struct fbtft_platform_data *pdata = dev->platform_data;
52122 u8 *vmem = NULL;
52123diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
52124index 0dbf3f9..fed0063 100644
52125--- a/drivers/staging/fbtft/fbtft.h
52126+++ b/drivers/staging/fbtft/fbtft.h
52127@@ -106,7 +106,7 @@ struct fbtft_ops {
52128
52129 int (*set_var)(struct fbtft_par *par);
52130 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
52131-};
52132+} __no_const;
52133
52134 /**
52135 * struct fbtft_display - Describes the display properties
52136diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
52137index 001348c..cfaac8a 100644
52138--- a/drivers/staging/gdm724x/gdm_tty.c
52139+++ b/drivers/staging/gdm724x/gdm_tty.c
52140@@ -44,7 +44,7 @@
52141 #define gdm_tty_send_control(n, r, v, d, l) (\
52142 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
52143
52144-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
52145+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
52146
52147 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
52148 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
52149diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
52150index d23c3c2..eb63c81 100644
52151--- a/drivers/staging/i2o/i2o.h
52152+++ b/drivers/staging/i2o/i2o.h
52153@@ -565,7 +565,7 @@ struct i2o_controller {
52154 struct i2o_device *exec; /* Executive */
52155 #if BITS_PER_LONG == 64
52156 spinlock_t context_list_lock; /* lock for context_list */
52157- atomic_t context_list_counter; /* needed for unique contexts */
52158+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
52159 struct list_head context_list; /* list of context id's
52160 and pointers */
52161 #endif
52162diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
52163index ad84f33..c5bdf65 100644
52164--- a/drivers/staging/i2o/i2o_proc.c
52165+++ b/drivers/staging/i2o/i2o_proc.c
52166@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
52167 "Array Controller Device"
52168 };
52169
52170-static char *chtostr(char *tmp, u8 *chars, int n)
52171-{
52172- tmp[0] = 0;
52173- return strncat(tmp, (char *)chars, n);
52174-}
52175-
52176 static int i2o_report_query_status(struct seq_file *seq, int block_status,
52177 char *group)
52178 {
52179@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
52180 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
52181 {
52182 struct i2o_controller *c = (struct i2o_controller *)seq->private;
52183- static u32 work32[5];
52184- static u8 *work8 = (u8 *) work32;
52185- static u16 *work16 = (u16 *) work32;
52186+ u32 work32[5];
52187+ u8 *work8 = (u8 *) work32;
52188+ u16 *work16 = (u16 *) work32;
52189 int token;
52190 u32 hwcap;
52191
52192@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
52193 } *result;
52194
52195 i2o_exec_execute_ddm_table ddm_table;
52196- char tmp[28 + 1];
52197
52198 result = kmalloc(sizeof(*result), GFP_KERNEL);
52199 if (!result)
52200@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
52201
52202 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
52203 seq_printf(seq, "%-#8x", ddm_table.module_id);
52204- seq_printf(seq, "%-29s",
52205- chtostr(tmp, ddm_table.module_name_version, 28));
52206+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
52207 seq_printf(seq, "%9d ", ddm_table.data_size);
52208 seq_printf(seq, "%8d", ddm_table.code_size);
52209
52210@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
52211
52212 i2o_driver_result_table *result;
52213 i2o_driver_store_table *dst;
52214- char tmp[28 + 1];
52215
52216 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
52217 if (result == NULL)
52218@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
52219
52220 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
52221 seq_printf(seq, "%-#8x", dst->module_id);
52222- seq_printf(seq, "%-29s",
52223- chtostr(tmp, dst->module_name_version, 28));
52224- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
52225+ seq_printf(seq, "%-.28s", dst->module_name_version);
52226+ seq_printf(seq, "%-.8s", dst->date);
52227 seq_printf(seq, "%8d ", dst->module_size);
52228 seq_printf(seq, "%8d ", dst->mpb_size);
52229 seq_printf(seq, "0x%04x", dst->module_flags);
52230@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
52231 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
52232 {
52233 struct i2o_device *d = (struct i2o_device *)seq->private;
52234- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
52235+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
52236 // == (allow) 512d bytes (max)
52237- static u16 *work16 = (u16 *) work32;
52238+ u16 *work16 = (u16 *) work32;
52239 int token;
52240- char tmp[16 + 1];
52241
52242 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
52243
52244@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
52245 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
52246 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
52247 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
52248- seq_printf(seq, "Vendor info : %s\n",
52249- chtostr(tmp, (u8 *) (work32 + 2), 16));
52250- seq_printf(seq, "Product info : %s\n",
52251- chtostr(tmp, (u8 *) (work32 + 6), 16));
52252- seq_printf(seq, "Description : %s\n",
52253- chtostr(tmp, (u8 *) (work32 + 10), 16));
52254- seq_printf(seq, "Product rev. : %s\n",
52255- chtostr(tmp, (u8 *) (work32 + 14), 8));
52256+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
52257+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
52258+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
52259+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
52260
52261 seq_printf(seq, "Serial number : ");
52262 print_serial_number(seq, (u8 *) (work32 + 16),
52263@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
52264 u8 pad[256]; // allow up to 256 byte (max) serial number
52265 } result;
52266
52267- char tmp[24 + 1];
52268-
52269 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
52270
52271 if (token < 0) {
52272@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
52273 }
52274
52275 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
52276- seq_printf(seq, "Module name : %s\n",
52277- chtostr(tmp, result.module_name, 24));
52278- seq_printf(seq, "Module revision : %s\n",
52279- chtostr(tmp, result.module_rev, 8));
52280+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
52281+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
52282
52283 seq_printf(seq, "Serial number : ");
52284 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
52285@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
52286 u8 instance_number[4];
52287 } result;
52288
52289- char tmp[64 + 1];
52290-
52291 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
52292
52293 if (token < 0) {
52294@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
52295 return 0;
52296 }
52297
52298- seq_printf(seq, "Device name : %s\n",
52299- chtostr(tmp, result.device_name, 64));
52300- seq_printf(seq, "Service name : %s\n",
52301- chtostr(tmp, result.service_name, 64));
52302- seq_printf(seq, "Physical name : %s\n",
52303- chtostr(tmp, result.physical_location, 64));
52304- seq_printf(seq, "Instance number : %s\n",
52305- chtostr(tmp, result.instance_number, 4));
52306+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
52307+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
52308+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
52309+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
52310
52311 return 0;
52312 }
52313@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
52314 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
52315 {
52316 struct i2o_device *d = (struct i2o_device *)seq->private;
52317- static u32 work32[12];
52318- static u16 *work16 = (u16 *) work32;
52319- static u8 *work8 = (u8 *) work32;
52320+ u32 work32[12];
52321+ u16 *work16 = (u16 *) work32;
52322+ u8 *work8 = (u8 *) work32;
52323 int token;
52324
52325 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
52326diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
52327index 52334fc..d7f40b3 100644
52328--- a/drivers/staging/i2o/iop.c
52329+++ b/drivers/staging/i2o/iop.c
52330@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
52331
52332 spin_lock_irqsave(&c->context_list_lock, flags);
52333
52334- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
52335- atomic_inc(&c->context_list_counter);
52336+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
52337+ atomic_inc_unchecked(&c->context_list_counter);
52338
52339- entry->context = atomic_read(&c->context_list_counter);
52340+ entry->context = atomic_read_unchecked(&c->context_list_counter);
52341
52342 list_add(&entry->list, &c->context_list);
52343
52344@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
52345
52346 #if BITS_PER_LONG == 64
52347 spin_lock_init(&c->context_list_lock);
52348- atomic_set(&c->context_list_counter, 0);
52349+ atomic_set_unchecked(&c->context_list_counter, 0);
52350 INIT_LIST_HEAD(&c->context_list);
52351 #endif
52352
52353diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
52354index 463da07..e791ce9 100644
52355--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
52356+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
52357@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
52358 return 0;
52359 }
52360
52361-sfw_test_client_ops_t brw_test_client;
52362-void brw_init_test_client(void)
52363-{
52364- brw_test_client.tso_init = brw_client_init;
52365- brw_test_client.tso_fini = brw_client_fini;
52366- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
52367- brw_test_client.tso_done_rpc = brw_client_done_rpc;
52368+sfw_test_client_ops_t brw_test_client = {
52369+ .tso_init = brw_client_init,
52370+ .tso_fini = brw_client_fini,
52371+ .tso_prep_rpc = brw_client_prep_rpc,
52372+ .tso_done_rpc = brw_client_done_rpc,
52373 };
52374
52375 srpc_service_t brw_test_service;
52376diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52377index 5709148..ccd9e0d 100644
52378--- a/drivers/staging/lustre/lnet/selftest/framework.c
52379+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52380@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52381
52382 extern sfw_test_client_ops_t ping_test_client;
52383 extern srpc_service_t ping_test_service;
52384-extern void ping_init_test_client(void);
52385 extern void ping_init_test_service(void);
52386
52387 extern sfw_test_client_ops_t brw_test_client;
52388 extern srpc_service_t brw_test_service;
52389-extern void brw_init_test_client(void);
52390 extern void brw_init_test_service(void);
52391
52392
52393@@ -1675,12 +1673,10 @@ sfw_startup (void)
52394 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52395 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52396
52397- brw_init_test_client();
52398 brw_init_test_service();
52399 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52400 LASSERT (rc == 0);
52401
52402- ping_init_test_client();
52403 ping_init_test_service();
52404 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52405 LASSERT (rc == 0);
52406diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52407index d8c0df6..5041cbb 100644
52408--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52409+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52410@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52411 return 0;
52412 }
52413
52414-sfw_test_client_ops_t ping_test_client;
52415-void ping_init_test_client(void)
52416-{
52417- ping_test_client.tso_init = ping_client_init;
52418- ping_test_client.tso_fini = ping_client_fini;
52419- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52420- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52421-}
52422+sfw_test_client_ops_t ping_test_client = {
52423+ .tso_init = ping_client_init,
52424+ .tso_fini = ping_client_fini,
52425+ .tso_prep_rpc = ping_client_prep_rpc,
52426+ .tso_done_rpc = ping_client_done_rpc,
52427+};
52428
52429 srpc_service_t ping_test_service;
52430 void ping_init_test_service(void)
52431diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52432index 83bc0a9..12ba00a 100644
52433--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52434+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52435@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52436 ldlm_completion_callback lcs_completion;
52437 ldlm_blocking_callback lcs_blocking;
52438 ldlm_glimpse_callback lcs_glimpse;
52439-};
52440+} __no_const;
52441
52442 /* ldlm_lockd.c */
52443 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52444diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52445index 2a88b80..62e7e5f 100644
52446--- a/drivers/staging/lustre/lustre/include/obd.h
52447+++ b/drivers/staging/lustre/lustre/include/obd.h
52448@@ -1362,7 +1362,7 @@ struct md_ops {
52449 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52450 * wrapper function in include/linux/obd_class.h.
52451 */
52452-};
52453+} __no_const;
52454
52455 struct lsm_operations {
52456 void (*lsm_free)(struct lov_stripe_md *);
52457diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52458index a4c252f..b21acac 100644
52459--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52460+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52461@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52462 int added = (mode == LCK_NL);
52463 int overlaps = 0;
52464 int splitted = 0;
52465- const struct ldlm_callback_suite null_cbs = { NULL };
52466+ const struct ldlm_callback_suite null_cbs = { };
52467
52468 CDEBUG(D_DLMTRACE,
52469 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52470diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52471index c539e37..743b213 100644
52472--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52473+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52474@@ -237,7 +237,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
52475 loff_t *ppos)
52476 {
52477 int rc, max_delay_cs;
52478- struct ctl_table dummy = *table;
52479+ ctl_table_no_const dummy = *table;
52480 long d;
52481
52482 dummy.data = &max_delay_cs;
52483@@ -270,7 +270,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
52484 loff_t *ppos)
52485 {
52486 int rc, min_delay_cs;
52487- struct ctl_table dummy = *table;
52488+ ctl_table_no_const dummy = *table;
52489 long d;
52490
52491 dummy.data = &min_delay_cs;
52492@@ -302,7 +302,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
52493 void __user *buffer, size_t *lenp, loff_t *ppos)
52494 {
52495 int rc, backoff;
52496- struct ctl_table dummy = *table;
52497+ ctl_table_no_const dummy = *table;
52498
52499 dummy.data = &backoff;
52500 dummy.proc_handler = &proc_dointvec;
52501diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52502index 7dc77dd..289d03e 100644
52503--- a/drivers/staging/lustre/lustre/libcfs/module.c
52504+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52505@@ -313,11 +313,11 @@ out:
52506
52507
52508 struct cfs_psdev_ops libcfs_psdev_ops = {
52509- libcfs_psdev_open,
52510- libcfs_psdev_release,
52511- NULL,
52512- NULL,
52513- libcfs_ioctl
52514+ .p_open = libcfs_psdev_open,
52515+ .p_close = libcfs_psdev_release,
52516+ .p_read = NULL,
52517+ .p_write = NULL,
52518+ .p_ioctl = libcfs_ioctl
52519 };
52520
52521 extern int insert_proc(void);
52522diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52523index 22667db..8b703b6 100644
52524--- a/drivers/staging/octeon/ethernet-rx.c
52525+++ b/drivers/staging/octeon/ethernet-rx.c
52526@@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52527 /* Increment RX stats for virtual ports */
52528 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52529 #ifdef CONFIG_64BIT
52530- atomic64_add(1,
52531+ atomic64_add_unchecked(1,
52532 (atomic64_t *)&priv->stats.rx_packets);
52533- atomic64_add(skb->len,
52534+ atomic64_add_unchecked(skb->len,
52535 (atomic64_t *)&priv->stats.rx_bytes);
52536 #else
52537- atomic_add(1,
52538+ atomic_add_unchecked(1,
52539 (atomic_t *)&priv->stats.rx_packets);
52540- atomic_add(skb->len,
52541+ atomic_add_unchecked(skb->len,
52542 (atomic_t *)&priv->stats.rx_bytes);
52543 #endif
52544 }
52545@@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52546 dev->name);
52547 */
52548 #ifdef CONFIG_64BIT
52549- atomic64_add(1,
52550+ atomic64_add_unchecked(1,
52551 (atomic64_t *)&priv->stats.rx_dropped);
52552 #else
52553- atomic_add(1,
52554+ atomic_add_unchecked(1,
52555 (atomic_t *)&priv->stats.rx_dropped);
52556 #endif
52557 dev_kfree_skb_irq(skb);
52558diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52559index 460e854..f926452 100644
52560--- a/drivers/staging/octeon/ethernet.c
52561+++ b/drivers/staging/octeon/ethernet.c
52562@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52563 * since the RX tasklet also increments it.
52564 */
52565 #ifdef CONFIG_64BIT
52566- atomic64_add(rx_status.dropped_packets,
52567- (atomic64_t *)&priv->stats.rx_dropped);
52568+ atomic64_add_unchecked(rx_status.dropped_packets,
52569+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52570 #else
52571- atomic_add(rx_status.dropped_packets,
52572- (atomic_t *)&priv->stats.rx_dropped);
52573+ atomic_add_unchecked(rx_status.dropped_packets,
52574+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52575 #endif
52576 }
52577
52578diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52579index 3b476d8..f522d68 100644
52580--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52581+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52582@@ -225,7 +225,7 @@ struct hal_ops {
52583
52584 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52585 void (*hal_reset_security_engine)(struct adapter *adapter);
52586-};
52587+} __no_const;
52588
52589 enum rt_eeprom_type {
52590 EEPROM_93C46,
52591diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52592index 070cc03..6806e37 100644
52593--- a/drivers/staging/rtl8712/rtl871x_io.h
52594+++ b/drivers/staging/rtl8712/rtl871x_io.h
52595@@ -108,7 +108,7 @@ struct _io_ops {
52596 u8 *pmem);
52597 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52598 u8 *pmem);
52599-};
52600+} __no_const;
52601
52602 struct io_req {
52603 struct list_head list;
52604diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52605index 98f3ba4..c6a7fce 100644
52606--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52607+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52608@@ -171,7 +171,7 @@ struct visorchipset_busdev_notifiers {
52609 void (*device_resume)(ulong bus_no, ulong dev_no);
52610 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52611 ulong *max_size);
52612-};
52613+} __no_const;
52614
52615 /* These functions live inside visorchipset, and will be called to indicate
52616 * responses to specific events (by code outside of visorchipset).
52617@@ -186,7 +186,7 @@ struct visorchipset_busdev_responders {
52618 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52619 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52620 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52621-};
52622+} __no_const;
52623
52624 /** Register functions (in the bus driver) to get called by visorchipset
52625 * whenever a bus or device appears for which this service partition is
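The __no_const annotations in the three staging headers above exist for the grsecurity constify gcc plugin: structure types consisting of function pointers are made const by default and placed in read-only memory, and a type whose members must be assigned at runtime, such as per-chip hal_ops filled in at probe time, opts out with __no_const. A sketch of how the annotation is typically defined and used; the attribute spelling is modeled on the grsecurity compiler.h and expands to nothing without the plugin:

/* Without the plugin the attribute must vanish, so ordinary builds
 * compile the same source unchanged. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct hal_hooks {
	void (*notch_filter)(int enable);
	void (*reset_engine)(void);
} __no_const;				/* must stay writable at probe time */

static void chip_notch_filter(int enable) { (void)enable; }

static struct hal_hooks hooks;		/* writable only because of __no_const */

static void probe(void)
{
	hooks.notch_filter = chip_notch_filter;	/* rejected on a constified type */
}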
52626diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52627index 9512af6..045bf5a 100644
52628--- a/drivers/target/sbp/sbp_target.c
52629+++ b/drivers/target/sbp/sbp_target.c
52630@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52631
52632 #define SESSION_MAINTENANCE_INTERVAL HZ
52633
52634-static atomic_t login_id = ATOMIC_INIT(0);
52635+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52636
52637 static void session_maintenance_work(struct work_struct *);
52638 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52639@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52640 login->lun = se_lun;
52641 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52642 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52643- login->login_id = atomic_inc_return(&login_id);
52644+ login->login_id = atomic_inc_return_unchecked(&login_id);
52645
52646 login->tgt_agt = sbp_target_agent_register(login);
52647 if (IS_ERR(login->tgt_agt)) {
52648diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52649index 7faa6ae..ae6c410 100644
52650--- a/drivers/target/target_core_device.c
52651+++ b/drivers/target/target_core_device.c
52652@@ -1495,7 +1495,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52653 spin_lock_init(&dev->se_tmr_lock);
52654 spin_lock_init(&dev->qf_cmd_lock);
52655 sema_init(&dev->caw_sem, 1);
52656- atomic_set(&dev->dev_ordered_id, 0);
52657+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52658 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52659 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52660 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52661diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52662index ac3cbab..f0d1dd2 100644
52663--- a/drivers/target/target_core_transport.c
52664+++ b/drivers/target/target_core_transport.c
52665@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52666 * Used to determine when ORDERED commands should go from
52667 * Dormant to Active status.
52668 */
52669- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52670+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52671 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52672 cmd->se_ordered_id, cmd->sam_task_attr,
52673 dev->transport->name);
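The sbp_target and target_core conversions above follow the same rule as the statistics counters: login_id and dev_ordered_id are free-running ID generators rather than object reference counts, so a wraparound cannot cause a use-after-free and the overflow-checked atomic_t would only produce false-positive traps. A small C11 model of the pattern, with names approximating the kernel API:

#include <stdatomic.h>
#include <stdint.h>

/* Free-running ID allocator: uniqueness within a window is all that is
 * needed, so wrapping after 2^32 allocations is acceptable and the
 * counter stays on the plain-wrapping atomic flavour. */
static atomic_uint next_login_id;

static uint32_t alloc_login_id(void)
{
	/* models login->login_id = atomic_inc_return_unchecked(&login_id) */
	return atomic_fetch_add(&next_login_id, 1) + 1;
}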
52674diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52675index 031018e..90981a1 100644
52676--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52677+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52678@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52679 platform_set_drvdata(pdev, priv);
52680
52681 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52682- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52683- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52684+ pax_open_kernel();
52685+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52686+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52687+ pax_close_kernel();
52688 }
52689 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52690 priv, &int3400_thermal_ops,
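The pax_open_kernel()/pax_close_kernel() bracket above is needed because, with constify and KERNEXEC active, int3400_thermal_ops sits in read-only memory: the *(void **)& cast defeats the const qualification at the language level, and the bracket briefly permits the store at the hardware level (on x86 by toggling CR0.WP). The same discipline recurs in of-thermal.c, pty.c, kgdb_nmi.c and tty_io.c below. A userspace analogue of the open-write-close window built on mprotect; the kernel primitives do not use mprotect, this only models the discipline:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*hook_t)(void);

static void patched_hook(void) { puts("patched hook called"); }

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	hook_t *slot = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (slot == MAP_FAILED)
		return 1;
	*slot = NULL;
	mprotect(slot, psz, PROT_READ);              /* ops table is now "const" */

	mprotect(slot, psz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
	*slot = patched_hook;                        /* *(void **)&ops->fn = ... */
	mprotect(slot, psz, PROT_READ);              /* pax_close_kernel() */

	(*slot)();                                   /* reads stay legal */
	return 0;
}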
52691diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52692index 668fb1b..2737bbe 100644
52693--- a/drivers/thermal/of-thermal.c
52694+++ b/drivers/thermal/of-thermal.c
52695@@ -31,6 +31,7 @@
52696 #include <linux/export.h>
52697 #include <linux/string.h>
52698 #include <linux/thermal.h>
52699+#include <linux/mm.h>
52700
52701 #include "thermal_core.h"
52702
52703@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52704 tz->ops = ops;
52705 tz->sensor_data = data;
52706
52707- tzd->ops->get_temp = of_thermal_get_temp;
52708- tzd->ops->get_trend = of_thermal_get_trend;
52709- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52710+ pax_open_kernel();
52711+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52712+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52713+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52714+ pax_close_kernel();
52715 mutex_unlock(&tzd->lock);
52716
52717 return tzd;
52718@@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52719 return;
52720
52721 mutex_lock(&tzd->lock);
52722- tzd->ops->get_temp = NULL;
52723- tzd->ops->get_trend = NULL;
52724- tzd->ops->set_emul_temp = NULL;
52725+ pax_open_kernel();
52726+ *(void **)&tzd->ops->get_temp = NULL;
52727+ *(void **)&tzd->ops->get_trend = NULL;
52728+ *(void **)&tzd->ops->set_emul_temp = NULL;
52729+ pax_close_kernel();
52730
52731 tz->ops = NULL;
52732 tz->sensor_data = NULL;
52733diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
52734index 9ea3d9d..53e8792 100644
52735--- a/drivers/thermal/x86_pkg_temp_thermal.c
52736+++ b/drivers/thermal/x86_pkg_temp_thermal.c
52737@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
52738 return NOTIFY_OK;
52739 }
52740
52741-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
52742+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
52743 .notifier_call = pkg_temp_thermal_cpu_callback,
52744 };
52745
52746diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52747index fd66f57..48e6376 100644
52748--- a/drivers/tty/cyclades.c
52749+++ b/drivers/tty/cyclades.c
52750@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52751 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52752 info->port.count);
52753 #endif
52754- info->port.count++;
52755+ atomic_inc(&info->port.count);
52756 #ifdef CY_DEBUG_COUNT
52757 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52758- current->pid, info->port.count);
52759+ current->pid, atomic_read(&info->port.count));
52760 #endif
52761
52762 /*
52763@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52764 for (j = 0; j < cy_card[i].nports; j++) {
52765 info = &cy_card[i].ports[j];
52766
52767- if (info->port.count) {
52768+ if (atomic_read(&info->port.count)) {
52769 /* XXX is the ldisc num worth this? */
52770 struct tty_struct *tty;
52771 struct tty_ldisc *ld;
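Starting with cyclades.c, a long run of tty drivers (hvc, hvcs, hvsi, ipwireless, moxa, n_gsm, rocket, serial_core, the synclink family and tty_port itself) converts tty_port.count from a plain int into an atomic_t, so reads outside the port lock cannot tear and the classic open/close idioms translate mechanically: port.count++ == 0 becomes atomic_inc_return(&port.count) == 1, and --port.count == 0 becomes atomic_dec_return(&port.count) == 0. A C11 sketch of the two idioms:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int port_count;

/* was: if (port->count++ == 0) ...	first opener brings up hardware */
static bool port_open_first(void)
{
	return atomic_fetch_add(&port_count, 1) == 0;
}

/* was: if (--port->count == 0) ...	last closer tears it down */
static bool port_close_last(void)
{
	return atomic_fetch_sub(&port_count, 1) == 1;
}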
52772diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52773index 4fcec1d..5a036f7 100644
52774--- a/drivers/tty/hvc/hvc_console.c
52775+++ b/drivers/tty/hvc/hvc_console.c
52776@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52777
52778 spin_lock_irqsave(&hp->port.lock, flags);
52779 /* Check and then increment for fast path open. */
52780- if (hp->port.count++ > 0) {
52781+ if (atomic_inc_return(&hp->port.count) > 1) {
52782 spin_unlock_irqrestore(&hp->port.lock, flags);
52783 hvc_kick();
52784 return 0;
52785@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52786
52787 spin_lock_irqsave(&hp->port.lock, flags);
52788
52789- if (--hp->port.count == 0) {
52790+ if (atomic_dec_return(&hp->port.count) == 0) {
52791 spin_unlock_irqrestore(&hp->port.lock, flags);
52792 /* We are done with the tty pointer now. */
52793 tty_port_tty_set(&hp->port, NULL);
52794@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52795 */
52796 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52797 } else {
52798- if (hp->port.count < 0)
52799+ if (atomic_read(&hp->port.count) < 0)
52800 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52801- hp->vtermno, hp->port.count);
52802+ hp->vtermno, atomic_read(&hp->port.count));
52803 spin_unlock_irqrestore(&hp->port.lock, flags);
52804 }
52805 }
52806@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52807 * open->hangup case this can be called after the final close so prevent
52808 * that from happening for now.
52809 */
52810- if (hp->port.count <= 0) {
52811+ if (atomic_read(&hp->port.count) <= 0) {
52812 spin_unlock_irqrestore(&hp->port.lock, flags);
52813 return;
52814 }
52815
52816- hp->port.count = 0;
52817+ atomic_set(&hp->port.count, 0);
52818 spin_unlock_irqrestore(&hp->port.lock, flags);
52819 tty_port_tty_set(&hp->port, NULL);
52820
52821@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52822 return -EPIPE;
52823
52824 /* FIXME what's this (unprotected) check for? */
52825- if (hp->port.count <= 0)
52826+ if (atomic_read(&hp->port.count) <= 0)
52827 return -EIO;
52828
52829 spin_lock_irqsave(&hp->lock, flags);
52830diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52831index 81ff7e1..dfb7b71 100644
52832--- a/drivers/tty/hvc/hvcs.c
52833+++ b/drivers/tty/hvc/hvcs.c
52834@@ -83,6 +83,7 @@
52835 #include <asm/hvcserver.h>
52836 #include <asm/uaccess.h>
52837 #include <asm/vio.h>
52838+#include <asm/local.h>
52839
52840 /*
52841 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52842@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52843
52844 spin_lock_irqsave(&hvcsd->lock, flags);
52845
52846- if (hvcsd->port.count > 0) {
52847+ if (atomic_read(&hvcsd->port.count) > 0) {
52848 spin_unlock_irqrestore(&hvcsd->lock, flags);
52849 printk(KERN_INFO "HVCS: vterm state unchanged. "
52850 "The hvcs device node is still in use.\n");
52851@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52852 }
52853 }
52854
52855- hvcsd->port.count = 0;
52856+ atomic_set(&hvcsd->port.count, 0);
52857 hvcsd->port.tty = tty;
52858 tty->driver_data = hvcsd;
52859
52860@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52861 unsigned long flags;
52862
52863 spin_lock_irqsave(&hvcsd->lock, flags);
52864- hvcsd->port.count++;
52865+ atomic_inc(&hvcsd->port.count);
52866 hvcsd->todo_mask |= HVCS_SCHED_READ;
52867 spin_unlock_irqrestore(&hvcsd->lock, flags);
52868
52869@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52870 hvcsd = tty->driver_data;
52871
52872 spin_lock_irqsave(&hvcsd->lock, flags);
52873- if (--hvcsd->port.count == 0) {
52874+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52875
52876 vio_disable_interrupts(hvcsd->vdev);
52877
52878@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52879
52880 free_irq(irq, hvcsd);
52881 return;
52882- } else if (hvcsd->port.count < 0) {
52883+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52884 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52885 " is missmanaged.\n",
52886- hvcsd->vdev->unit_address, hvcsd->port.count);
52887+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52888 }
52889
52890 spin_unlock_irqrestore(&hvcsd->lock, flags);
52891@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52892
52893 spin_lock_irqsave(&hvcsd->lock, flags);
52894 /* Preserve this so that we know how many kref refs to put */
52895- temp_open_count = hvcsd->port.count;
52896+ temp_open_count = atomic_read(&hvcsd->port.count);
52897
52898 /*
52899 * Don't kref put inside the spinlock because the destruction
52900@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52901 tty->driver_data = NULL;
52902 hvcsd->port.tty = NULL;
52903
52904- hvcsd->port.count = 0;
52905+ atomic_set(&hvcsd->port.count, 0);
52906
52907 /* This will drop any buffered data on the floor which is OK in a hangup
52908 * scenario. */
52909@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52910 * the middle of a write operation? This is a crummy place to do this
52911 * but we want to keep it all in the spinlock.
52912 */
52913- if (hvcsd->port.count <= 0) {
52914+ if (atomic_read(&hvcsd->port.count) <= 0) {
52915 spin_unlock_irqrestore(&hvcsd->lock, flags);
52916 return -ENODEV;
52917 }
52918@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52919 {
52920 struct hvcs_struct *hvcsd = tty->driver_data;
52921
52922- if (!hvcsd || hvcsd->port.count <= 0)
52923+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52924 return 0;
52925
52926 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52927diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52928index 4190199..06d5bfa 100644
52929--- a/drivers/tty/hvc/hvsi.c
52930+++ b/drivers/tty/hvc/hvsi.c
52931@@ -85,7 +85,7 @@ struct hvsi_struct {
52932 int n_outbuf;
52933 uint32_t vtermno;
52934 uint32_t virq;
52935- atomic_t seqno; /* HVSI packet sequence number */
52936+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52937 uint16_t mctrl;
52938 uint8_t state; /* HVSI protocol state */
52939 uint8_t flags;
52940@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52941
52942 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52943 packet.hdr.len = sizeof(struct hvsi_query_response);
52944- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52945+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52946 packet.verb = VSV_SEND_VERSION_NUMBER;
52947 packet.u.version = HVSI_VERSION;
52948 packet.query_seqno = query_seqno+1;
52949@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52950
52951 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52952 packet.hdr.len = sizeof(struct hvsi_query);
52953- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52954+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52955 packet.verb = verb;
52956
52957 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52958@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52959 int wrote;
52960
52961 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52962- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52963+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52964 packet.hdr.len = sizeof(struct hvsi_control);
52965 packet.verb = VSV_SET_MODEM_CTL;
52966 packet.mask = HVSI_TSDTR;
52967@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52968 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52969
52970 packet.hdr.type = VS_DATA_PACKET_HEADER;
52971- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52972+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52973 packet.hdr.len = count + sizeof(struct hvsi_header);
52974 memcpy(&packet.data, buf, count);
52975
52976@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52977 struct hvsi_control packet __ALIGNED__;
52978
52979 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52980- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52981+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52982 packet.hdr.len = 6;
52983 packet.verb = VSV_CLOSE_PROTOCOL;
52984
52985@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52986
52987 tty_port_tty_set(&hp->port, tty);
52988 spin_lock_irqsave(&hp->lock, flags);
52989- hp->port.count++;
52990+ atomic_inc(&hp->port.count);
52991-	atomic_set(&hp->seqno, 0);
52991+	atomic_set_unchecked(&hp->seqno, 0);
52992 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52993 spin_unlock_irqrestore(&hp->lock, flags);
52994@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52995
52996 spin_lock_irqsave(&hp->lock, flags);
52997
52998- if (--hp->port.count == 0) {
52999+ if (atomic_dec_return(&hp->port.count) == 0) {
53000 tty_port_tty_set(&hp->port, NULL);
53001 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53002
53003@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53004
53005 spin_lock_irqsave(&hp->lock, flags);
53006 }
53007- } else if (hp->port.count < 0)
53008+ } else if (atomic_read(&hp->port.count) < 0)
53009 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53010- hp - hvsi_ports, hp->port.count);
53011+ hp - hvsi_ports, atomic_read(&hp->port.count));
53012
53013 spin_unlock_irqrestore(&hp->lock, flags);
53014 }
53015@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53016 tty_port_tty_set(&hp->port, NULL);
53017
53018 spin_lock_irqsave(&hp->lock, flags);
53019- hp->port.count = 0;
53020+ atomic_set(&hp->port.count, 0);
53021 hp->n_outbuf = 0;
53022 spin_unlock_irqrestore(&hp->lock, flags);
53023 }
53024diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53025index a270f04..7c77b5d 100644
53026--- a/drivers/tty/hvc/hvsi_lib.c
53027+++ b/drivers/tty/hvc/hvsi_lib.c
53028@@ -8,7 +8,7 @@
53029
53030 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53031 {
53032- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53033+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53034
53035 /* Assumes that always succeeds, works in practice */
53036 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53037@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53038
53039 /* Reset state */
53040 pv->established = 0;
53041- atomic_set(&pv->seqno, 0);
53042+ atomic_set_unchecked(&pv->seqno, 0);
53043
53044 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53045
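The hvsi and hvsi_lib seqno fields converted above are HVSI protocol sequence numbers stamped into each packet header; the 16-bit wire field is expected to wrap, so like the other free-running counters they move to the unchecked atomic flavour. A sketch, assuming the 16-bit header field and omitting the cpu_to_be16 byte swap as orthogonal to the counter itself:

#include <stdatomic.h>
#include <stdint.h>

/* Per-connection packet sequencing: the header carries a 16-bit value,
 * so wrapping is part of the wire format, not an error to trap on. */
static atomic_uint seqno;

static uint16_t next_seqno(void)
{
	/* models atomic_inc_return_unchecked(&pv->seqno) */
	return (uint16_t)(atomic_fetch_add(&seqno, 1) + 1);
}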
53046diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53047index 345cebb..d5a1e9e 100644
53048--- a/drivers/tty/ipwireless/tty.c
53049+++ b/drivers/tty/ipwireless/tty.c
53050@@ -28,6 +28,7 @@
53051 #include <linux/tty_driver.h>
53052 #include <linux/tty_flip.h>
53053 #include <linux/uaccess.h>
53054+#include <asm/local.h>
53055
53056 #include "tty.h"
53057 #include "network.h"
53058@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53059 return -ENODEV;
53060
53061 mutex_lock(&tty->ipw_tty_mutex);
53062- if (tty->port.count == 0)
53063+ if (atomic_read(&tty->port.count) == 0)
53064 tty->tx_bytes_queued = 0;
53065
53066- tty->port.count++;
53067+ atomic_inc(&tty->port.count);
53068
53069 tty->port.tty = linux_tty;
53070 linux_tty->driver_data = tty;
53071@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53072
53073 static void do_ipw_close(struct ipw_tty *tty)
53074 {
53075- tty->port.count--;
53076-
53077- if (tty->port.count == 0) {
53078+ if (atomic_dec_return(&tty->port.count) == 0) {
53079 struct tty_struct *linux_tty = tty->port.tty;
53080
53081 if (linux_tty != NULL) {
53082@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53083 return;
53084
53085 mutex_lock(&tty->ipw_tty_mutex);
53086- if (tty->port.count == 0) {
53087+ if (atomic_read(&tty->port.count) == 0) {
53088 mutex_unlock(&tty->ipw_tty_mutex);
53089 return;
53090 }
53091@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53092
53093 mutex_lock(&tty->ipw_tty_mutex);
53094
53095- if (!tty->port.count) {
53096+ if (!atomic_read(&tty->port.count)) {
53097 mutex_unlock(&tty->ipw_tty_mutex);
53098 return;
53099 }
53100@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53101 return -ENODEV;
53102
53103 mutex_lock(&tty->ipw_tty_mutex);
53104- if (!tty->port.count) {
53105+ if (!atomic_read(&tty->port.count)) {
53106 mutex_unlock(&tty->ipw_tty_mutex);
53107 return -EINVAL;
53108 }
53109@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
53110 if (!tty)
53111 return -ENODEV;
53112
53113- if (!tty->port.count)
53114+ if (!atomic_read(&tty->port.count))
53115 return -EINVAL;
53116
53117 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
53118@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
53119 if (!tty)
53120 return 0;
53121
53122- if (!tty->port.count)
53123+ if (!atomic_read(&tty->port.count))
53124 return 0;
53125
53126 return tty->tx_bytes_queued;
53127@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
53128 if (!tty)
53129 return -ENODEV;
53130
53131- if (!tty->port.count)
53132+ if (!atomic_read(&tty->port.count))
53133 return -EINVAL;
53134
53135 return get_control_lines(tty);
53136@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
53137 if (!tty)
53138 return -ENODEV;
53139
53140- if (!tty->port.count)
53141+ if (!atomic_read(&tty->port.count))
53142 return -EINVAL;
53143
53144 return set_control_lines(tty, set, clear);
53145@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
53146 if (!tty)
53147 return -ENODEV;
53148
53149- if (!tty->port.count)
53150+ if (!atomic_read(&tty->port.count))
53151 return -EINVAL;
53152
53153 /* FIXME: Exactly how is the tty object locked here .. */
53154@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
53155 * are gone */
53156 mutex_lock(&ttyj->ipw_tty_mutex);
53157 }
53158- while (ttyj->port.count)
53159+ while (atomic_read(&ttyj->port.count))
53160 do_ipw_close(ttyj);
53161 ipwireless_disassociate_network_ttys(network,
53162 ttyj->channel_idx);
53163diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
53164index 14c54e0..1efd4f2 100644
53165--- a/drivers/tty/moxa.c
53166+++ b/drivers/tty/moxa.c
53167@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
53168 }
53169
53170 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
53171- ch->port.count++;
53172+ atomic_inc(&ch->port.count);
53173 tty->driver_data = ch;
53174 tty_port_tty_set(&ch->port, tty);
53175 mutex_lock(&ch->port.mutex);
53176diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
53177index c434376..114ce13 100644
53178--- a/drivers/tty/n_gsm.c
53179+++ b/drivers/tty/n_gsm.c
53180@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
53181 spin_lock_init(&dlci->lock);
53182 mutex_init(&dlci->mutex);
53183 dlci->fifo = &dlci->_fifo;
53184- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
53185+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
53186 kfree(dlci);
53187 return NULL;
53188 }
53189@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
53190 struct gsm_dlci *dlci = tty->driver_data;
53191 struct tty_port *port = &dlci->port;
53192
53193- port->count++;
53194+ atomic_inc(&port->count);
53195 tty_port_tty_set(port, tty);
53196
53197 dlci->modem_rx = 0;
53198diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
53199index cf6e0f2..4283167 100644
53200--- a/drivers/tty/n_tty.c
53201+++ b/drivers/tty/n_tty.c
53202@@ -116,7 +116,7 @@ struct n_tty_data {
53203 int minimum_to_wake;
53204
53205 /* consumer-published */
53206- size_t read_tail;
53207+ size_t read_tail __intentional_overflow(-1);
53208 size_t line_start;
53209
53210 /* protected by output lock */
53211@@ -2547,6 +2547,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
53212 {
53213 *ops = tty_ldisc_N_TTY;
53214 ops->owner = NULL;
53215- ops->refcount = ops->flags = 0;
53216+ atomic_set(&ops->refcount, 0);
53217+ ops->flags = 0;
53218 }
53219 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
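The __intentional_overflow(-1) marker on read_tail above addresses the size_overflow gcc plugin: read_tail is a free-running ring index whose arithmetic wraps by design, and the annotation tells the plugin not to instrument expressions involving it. A sketch of how such a marker is commonly defined so that builds without the plugin see plain C; the macro spelling is modeled on the grsecurity headers:

/* Expands to a plugin-visible attribute when the size_overflow plugin
 * is active, and to nothing otherwise. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

struct ring_idx {
	unsigned long head;
	unsigned long tail __intentional_overflow(-1);	/* wraps by design */
};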
53220diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
53221index e72ee62..d977ad9 100644
53222--- a/drivers/tty/pty.c
53223+++ b/drivers/tty/pty.c
53224@@ -848,8 +848,10 @@ static void __init unix98_pty_init(void)
53225 panic("Couldn't register Unix98 pts driver");
53226
53227 /* Now create the /dev/ptmx special device */
53228+ pax_open_kernel();
53229 tty_default_fops(&ptmx_fops);
53230- ptmx_fops.open = ptmx_open;
53231+ *(void **)&ptmx_fops.open = ptmx_open;
53232+ pax_close_kernel();
53233
53234 cdev_init(&ptmx_cdev, &ptmx_fops);
53235 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
53236diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53237index c8dd8dc..dca6cfd 100644
53238--- a/drivers/tty/rocket.c
53239+++ b/drivers/tty/rocket.c
53240@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53241 tty->driver_data = info;
53242 tty_port_tty_set(port, tty);
53243
53244- if (port->count++ == 0) {
53245+ if (atomic_inc_return(&port->count) == 1) {
53246 atomic_inc(&rp_num_ports_open);
53247
53248 #ifdef ROCKET_DEBUG_OPEN
53249@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53250 #endif
53251 }
53252 #ifdef ROCKET_DEBUG_OPEN
53253- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53254+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
53255 #endif
53256
53257 /*
53258@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
53259 spin_unlock_irqrestore(&info->port.lock, flags);
53260 return;
53261 }
53262- if (info->port.count)
53263+ if (atomic_read(&info->port.count))
53264 atomic_dec(&rp_num_ports_open);
53265 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
53266 spin_unlock_irqrestore(&info->port.lock, flags);
53267diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
53268index aa28209..e08fb85 100644
53269--- a/drivers/tty/serial/ioc4_serial.c
53270+++ b/drivers/tty/serial/ioc4_serial.c
53271@@ -437,7 +437,7 @@ struct ioc4_soft {
53272 } is_intr_info[MAX_IOC4_INTR_ENTS];
53273
53274 /* Number of entries active in the above array */
53275- atomic_t is_num_intrs;
53276+ atomic_unchecked_t is_num_intrs;
53277 } is_intr_type[IOC4_NUM_INTR_TYPES];
53278
53279 /* is_ir_lock must be held while
53280@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
53281 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
53282 || (type == IOC4_OTHER_INTR_TYPE)));
53283
53284- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
53285+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
53286 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
53287
53288 /* Save off the lower level interrupt handler */
53289@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
53290
53291 soft = arg;
53292 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
53293- num_intrs = (int)atomic_read(
53294+ num_intrs = (int)atomic_read_unchecked(
53295 &soft->is_intr_type[intr_type].is_num_intrs);
53296
53297 this_mir = this_ir = pending_intrs(soft, intr_type);
53298diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
53299index 129dc5b..1da5bb8 100644
53300--- a/drivers/tty/serial/kgdb_nmi.c
53301+++ b/drivers/tty/serial/kgdb_nmi.c
53302@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
53303 * I/O utilities that messages sent to the console will automatically
53304 * be displayed on the dbg_io.
53305 */
53306- dbg_io_ops->is_console = true;
53307+ pax_open_kernel();
53308+ *(int *)&dbg_io_ops->is_console = true;
53309+ pax_close_kernel();
53310
53311 return 0;
53312 }
53313diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
53314index a260cde..6b2b5ce 100644
53315--- a/drivers/tty/serial/kgdboc.c
53316+++ b/drivers/tty/serial/kgdboc.c
53317@@ -24,8 +24,9 @@
53318 #define MAX_CONFIG_LEN 40
53319
53320 static struct kgdb_io kgdboc_io_ops;
53321+static struct kgdb_io kgdboc_io_ops_console;
53322
53323-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
53324+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
53325 static int configured = -1;
53326
53327 static char config[MAX_CONFIG_LEN];
53328@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
53329 kgdboc_unregister_kbd();
53330 if (configured == 1)
53331 kgdb_unregister_io_module(&kgdboc_io_ops);
53332+ else if (configured == 2)
53333+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
53334 }
53335
53336 static int configure_kgdboc(void)
53337@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
53338 int err;
53339 char *cptr = config;
53340 struct console *cons;
53341+ int is_console = 0;
53342
53343 err = kgdboc_option_setup(config);
53344 if (err || !strlen(config) || isspace(config[0]))
53345 goto noconfig;
53346
53347 err = -ENODEV;
53348- kgdboc_io_ops.is_console = 0;
53349 kgdb_tty_driver = NULL;
53350
53351 kgdboc_use_kms = 0;
53352@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
53353 int idx;
53354 if (cons->device && cons->device(cons, &idx) == p &&
53355 idx == tty_line) {
53356- kgdboc_io_ops.is_console = 1;
53357+ is_console = 1;
53358 break;
53359 }
53360 cons = cons->next;
53361@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
53362 kgdb_tty_line = tty_line;
53363
53364 do_register:
53365- err = kgdb_register_io_module(&kgdboc_io_ops);
53366+ if (is_console) {
53367+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
53368+ configured = 2;
53369+ } else {
53370+ err = kgdb_register_io_module(&kgdboc_io_ops);
53371+ configured = 1;
53372+ }
53373 if (err)
53374 goto noconfig;
53375
53376@@ -205,8 +214,6 @@ do_register:
53377 if (err)
53378 goto nmi_con_failed;
53379
53380- configured = 1;
53381-
53382 return 0;
53383
53384 nmi_con_failed:
53385@@ -223,7 +230,7 @@ noconfig:
53386 static int __init init_kgdboc(void)
53387 {
53388 /* Already configured? */
53389- if (configured == 1)
53390+ if (configured >= 1)
53391 return 0;
53392
53393 return configure_kgdboc();
53394@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53395 if (config[len - 1] == '\n')
53396 config[len - 1] = '\0';
53397
53398- if (configured == 1)
53399+ if (configured >= 1)
53400 cleanup_kgdboc();
53401
53402 /* Go and configure with the new params. */
53403@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53404 .post_exception = kgdboc_post_exp_handler,
53405 };
53406
53407+static struct kgdb_io kgdboc_io_ops_console = {
53408+ .name = "kgdboc",
53409+ .read_char = kgdboc_get_char,
53410+ .write_char = kgdboc_put_char,
53411+ .pre_exception = kgdboc_pre_exp_handler,
53412+ .post_exception = kgdboc_post_exp_handler,
53413+ .is_console = 1
53414+};
53415+
53416 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53417 /* This is only available if kgdboc is a built in for early debugging */
53418 static int __init kgdboc_early_init(char *opt)
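The kgdboc rework above is the heaviest constify adaptation in this section: the driver used to flip kgdboc_io_ops.is_console at configure time, which is impossible once kgdb_io tables are read-only, so the patch builds two pre-initialized instances and records which one was registered by widening the configured flag to 1 or 2. A userspace sketch of the two-instance pattern:

#include <stdio.h>

struct io_ops {
	const char *name;
	int is_console;
};

/* Two read-only instances replace one writable is_console flag. */
static const struct io_ops plain_ops   = { "kgdboc", 0 };
static const struct io_ops console_ops = { "kgdboc", 1 };

static void register_io(const struct io_ops *ops)
{
	printf("registered %s (console=%d)\n", ops->name, ops->is_console);
}

int main(void)
{
	int is_console = 1;	/* decided once, at configure time */
	register_io(is_console ? &console_ops : &plain_ops);
	return 0;
}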
53419diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53420index b73889c..9f74f0a 100644
53421--- a/drivers/tty/serial/msm_serial.c
53422+++ b/drivers/tty/serial/msm_serial.c
53423@@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
53424 .cons = MSM_CONSOLE,
53425 };
53426
53427-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53428+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53429
53430 static const struct of_device_id msm_uartdm_table[] = {
53431 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53432@@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53433 line = pdev->id;
53434
53435 if (line < 0)
53436- line = atomic_inc_return(&msm_uart_next_id) - 1;
53437+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53438
53439 if (unlikely(line < 0 || line >= UART_NR))
53440 return -ENXIO;
53441diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53442index cf08876..711e0bf 100644
53443--- a/drivers/tty/serial/samsung.c
53444+++ b/drivers/tty/serial/samsung.c
53445@@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53446 ourport->tx_in_progress = 0;
53447 }
53448
53449+static int s3c64xx_serial_startup(struct uart_port *port);
53450 static int s3c24xx_serial_startup(struct uart_port *port)
53451 {
53452 struct s3c24xx_uart_port *ourport = to_ourport(port);
53453 int ret;
53454
53455+ /* Startup sequence is different for s3c64xx and higher SoC's */
53456+ if (s3c24xx_serial_has_interrupt_mask(port))
53457+ return s3c64xx_serial_startup(port);
53458+
53459 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53460 port, (unsigned long long)port->mapbase, port->membase);
53461
53462@@ -1697,10 +1702,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53463 /* setup info for port */
53464 port->dev = &platdev->dev;
53465
53466- /* Startup sequence is different for s3c64xx and higher SoC's */
53467- if (s3c24xx_serial_has_interrupt_mask(port))
53468- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53469-
53470 port->uartclk = 1;
53471
53472 if (cfg->uart_flags & UPF_CONS_FLOW) {
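The samsung.c change above removes another init-time write into a const ops table: instead of retargeting s3c24xx_serial_ops.startup to s3c64xx_serial_startup, the generic startup now dispatches on the same predicate at call time. A sketch of the dispatch-instead-of-patch shape, where the flag stands in for s3c24xx_serial_has_interrupt_mask():

/* Dispatch on a runtime predicate instead of patching a const ops
 * table: the generic entry point forwards to the SoC-specific one. */
static int has_interrupt_mask;	/* stand-in for s3c24xx_serial_has_interrupt_mask() */

static int s3c64xx_startup(void)
{
	return 64;		/* newer-SoC path */
}

static int s3c24xx_startup(void)
{
	if (has_interrupt_mask)	/* s3c64xx and later take the new path */
		return s3c64xx_startup();
	return 24;		/* legacy path */
}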
53473diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53474index 6a1055a..5ca9ad9 100644
53475--- a/drivers/tty/serial/serial_core.c
53476+++ b/drivers/tty/serial/serial_core.c
53477@@ -1377,7 +1377,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53478 state = drv->state + tty->index;
53479 port = &state->port;
53480 spin_lock_irq(&port->lock);
53481- --port->count;
53482+ atomic_dec(&port->count);
53483 spin_unlock_irq(&port->lock);
53484 return;
53485 }
53486@@ -1387,7 +1387,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53487
53488 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53489
53490- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53491+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53492 return;
53493
53494 /*
53495@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
53496 uart_flush_buffer(tty);
53497 uart_shutdown(tty, state);
53498 spin_lock_irqsave(&port->lock, flags);
53499- port->count = 0;
53500+ atomic_set(&port->count, 0);
53501 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53502 spin_unlock_irqrestore(&port->lock, flags);
53503 tty_port_tty_set(port, NULL);
53504@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53505 pr_debug("uart_open(%d) called\n", line);
53506
53507 spin_lock_irq(&port->lock);
53508- ++port->count;
53509+ atomic_inc(&port->count);
53510 spin_unlock_irq(&port->lock);
53511
53512 /*
53513diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53514index b799170..87dafd5 100644
53515--- a/drivers/tty/synclink.c
53516+++ b/drivers/tty/synclink.c
53517@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53518
53519 if (debug_level >= DEBUG_LEVEL_INFO)
53520 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53521- __FILE__,__LINE__, info->device_name, info->port.count);
53522+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53523
53524 if (tty_port_close_start(&info->port, tty, filp) == 0)
53525 goto cleanup;
53526@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53527 cleanup:
53528 if (debug_level >= DEBUG_LEVEL_INFO)
53529 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53530- tty->driver->name, info->port.count);
53531+ tty->driver->name, atomic_read(&info->port.count));
53532
53533 } /* end of mgsl_close() */
53534
53535@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53536
53537 mgsl_flush_buffer(tty);
53538 shutdown(info);
53539-
53540- info->port.count = 0;
53541+
53542+ atomic_set(&info->port.count, 0);
53543 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53544 info->port.tty = NULL;
53545
53546@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53547
53548 if (debug_level >= DEBUG_LEVEL_INFO)
53549 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53550- __FILE__,__LINE__, tty->driver->name, port->count );
53551+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53552
53553 spin_lock_irqsave(&info->irq_spinlock, flags);
53554- port->count--;
53555+ atomic_dec(&port->count);
53556 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53557 port->blocked_open++;
53558
53559@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53560
53561 if (debug_level >= DEBUG_LEVEL_INFO)
53562 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53563- __FILE__,__LINE__, tty->driver->name, port->count );
53564+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53565
53566 tty_unlock(tty);
53567 schedule();
53568@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53569
53570 /* FIXME: Racy on hangup during close wait */
53571 if (!tty_hung_up_p(filp))
53572- port->count++;
53573+ atomic_inc(&port->count);
53574 port->blocked_open--;
53575
53576 if (debug_level >= DEBUG_LEVEL_INFO)
53577 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53578- __FILE__,__LINE__, tty->driver->name, port->count );
53579+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53580
53581 if (!retval)
53582 port->flags |= ASYNC_NORMAL_ACTIVE;
53583@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53584
53585 if (debug_level >= DEBUG_LEVEL_INFO)
53586 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53587- __FILE__,__LINE__,tty->driver->name, info->port.count);
53588+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53589
53590 /* If port is closing, signal caller to try again */
53591 if (info->port.flags & ASYNC_CLOSING){
53592@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53593 spin_unlock_irqrestore(&info->netlock, flags);
53594 goto cleanup;
53595 }
53596- info->port.count++;
53597+ atomic_inc(&info->port.count);
53598 spin_unlock_irqrestore(&info->netlock, flags);
53599
53600- if (info->port.count == 1) {
53601+ if (atomic_read(&info->port.count) == 1) {
53602 /* 1st open on this device, init hardware */
53603 retval = startup(info);
53604 if (retval < 0)
53605@@ -3442,8 +3442,8 @@ cleanup:
53606 if (retval) {
53607 if (tty->count == 1)
53608 info->port.tty = NULL; /* tty layer will release tty struct */
53609- if(info->port.count)
53610- info->port.count--;
53611+ if (atomic_read(&info->port.count))
53612+ atomic_dec(&info->port.count);
53613 }
53614
53615 return retval;
53616@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53617 unsigned short new_crctype;
53618
53619 /* return error if TTY interface open */
53620- if (info->port.count)
53621+ if (atomic_read(&info->port.count))
53622 return -EBUSY;
53623
53624 switch (encoding)
53625@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53626
53627 /* arbitrate between network and tty opens */
53628 spin_lock_irqsave(&info->netlock, flags);
53629- if (info->port.count != 0 || info->netcount != 0) {
53630+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53631 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53632 spin_unlock_irqrestore(&info->netlock, flags);
53633 return -EBUSY;
53634@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53635 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53636
53637 /* return error if TTY interface open */
53638- if (info->port.count)
53639+ if (atomic_read(&info->port.count))
53640 return -EBUSY;
53641
53642 if (cmd != SIOCWANDEV)
53643diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53644index 0e8c39b..e0cb171 100644
53645--- a/drivers/tty/synclink_gt.c
53646+++ b/drivers/tty/synclink_gt.c
53647@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53648 tty->driver_data = info;
53649 info->port.tty = tty;
53650
53651- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53652+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53653
53654 /* If port is closing, signal caller to try again */
53655 if (info->port.flags & ASYNC_CLOSING){
53656@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53657 mutex_unlock(&info->port.mutex);
53658 goto cleanup;
53659 }
53660- info->port.count++;
53661+ atomic_inc(&info->port.count);
53662 spin_unlock_irqrestore(&info->netlock, flags);
53663
53664- if (info->port.count == 1) {
53665+ if (atomic_read(&info->port.count) == 1) {
53666 /* 1st open on this device, init hardware */
53667 retval = startup(info);
53668 if (retval < 0) {
53669@@ -715,8 +715,8 @@ cleanup:
53670 if (retval) {
53671 if (tty->count == 1)
53672 info->port.tty = NULL; /* tty layer will release tty struct */
53673- if(info->port.count)
53674- info->port.count--;
53675+ if(atomic_read(&info->port.count))
53676+ atomic_dec(&info->port.count);
53677 }
53678
53679 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53680@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53681
53682 if (sanity_check(info, tty->name, "close"))
53683 return;
53684- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53685+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53686
53687 if (tty_port_close_start(&info->port, tty, filp) == 0)
53688 goto cleanup;
53689@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53690 tty_port_close_end(&info->port, tty);
53691 info->port.tty = NULL;
53692 cleanup:
53693- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53694+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53695 }
53696
53697 static void hangup(struct tty_struct *tty)
53698@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53699 shutdown(info);
53700
53701 spin_lock_irqsave(&info->port.lock, flags);
53702- info->port.count = 0;
53703+ atomic_set(&info->port.count, 0);
53704 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53705 info->port.tty = NULL;
53706 spin_unlock_irqrestore(&info->port.lock, flags);
53707@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53708 unsigned short new_crctype;
53709
53710 /* return error if TTY interface open */
53711- if (info->port.count)
53712+ if (atomic_read(&info->port.count))
53713 return -EBUSY;
53714
53715 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53716@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53717
53718 /* arbitrate between network and tty opens */
53719 spin_lock_irqsave(&info->netlock, flags);
53720- if (info->port.count != 0 || info->netcount != 0) {
53721+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53722 DBGINFO(("%s hdlc_open busy\n", dev->name));
53723 spin_unlock_irqrestore(&info->netlock, flags);
53724 return -EBUSY;
53725@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53726 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53727
53728 /* return error if TTY interface open */
53729- if (info->port.count)
53730+ if (atomic_read(&info->port.count))
53731 return -EBUSY;
53732
53733 if (cmd != SIOCWANDEV)
53734@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53735 if (port == NULL)
53736 continue;
53737 spin_lock(&port->lock);
53738- if ((port->port.count || port->netcount) &&
53739+ if ((atomic_read(&port->port.count) || port->netcount) &&
53740 port->pending_bh && !port->bh_running &&
53741 !port->bh_requested) {
53742 DBGISR(("%s bh queued\n", port->device_name));
53743@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53744 add_wait_queue(&port->open_wait, &wait);
53745
53746 spin_lock_irqsave(&info->lock, flags);
53747- port->count--;
53748+ atomic_dec(&port->count);
53749 spin_unlock_irqrestore(&info->lock, flags);
53750 port->blocked_open++;
53751
53752@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53753 remove_wait_queue(&port->open_wait, &wait);
53754
53755 if (!tty_hung_up_p(filp))
53756- port->count++;
53757+ atomic_inc(&port->count);
53758 port->blocked_open--;
53759
53760 if (!retval)
53761diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53762index c3f9091..abe4601 100644
53763--- a/drivers/tty/synclinkmp.c
53764+++ b/drivers/tty/synclinkmp.c
53765@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53766
53767 if (debug_level >= DEBUG_LEVEL_INFO)
53768 printk("%s(%d):%s open(), old ref count = %d\n",
53769- __FILE__,__LINE__,tty->driver->name, info->port.count);
53770+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53771
53772 /* If port is closing, signal caller to try again */
53773 if (info->port.flags & ASYNC_CLOSING){
53774@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53775 spin_unlock_irqrestore(&info->netlock, flags);
53776 goto cleanup;
53777 }
53778- info->port.count++;
53779+ atomic_inc(&info->port.count);
53780 spin_unlock_irqrestore(&info->netlock, flags);
53781
53782- if (info->port.count == 1) {
53783+ if (atomic_read(&info->port.count) == 1) {
53784 /* 1st open on this device, init hardware */
53785 retval = startup(info);
53786 if (retval < 0)
53787@@ -796,8 +796,8 @@ cleanup:
53788 if (retval) {
53789 if (tty->count == 1)
53790 info->port.tty = NULL; /* tty layer will release tty struct */
53791- if(info->port.count)
53792- info->port.count--;
53793+ if(atomic_read(&info->port.count))
53794+ atomic_dec(&info->port.count);
53795 }
53796
53797 return retval;
53798@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53799
53800 if (debug_level >= DEBUG_LEVEL_INFO)
53801 printk("%s(%d):%s close() entry, count=%d\n",
53802- __FILE__,__LINE__, info->device_name, info->port.count);
53803+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53804
53805 if (tty_port_close_start(&info->port, tty, filp) == 0)
53806 goto cleanup;
53807@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53808 cleanup:
53809 if (debug_level >= DEBUG_LEVEL_INFO)
53810 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53811- tty->driver->name, info->port.count);
53812+ tty->driver->name, atomic_read(&info->port.count));
53813 }
53814
53815 /* Called by tty_hangup() when a hangup is signaled.
53816@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53817 shutdown(info);
53818
53819 spin_lock_irqsave(&info->port.lock, flags);
53820- info->port.count = 0;
53821+ atomic_set(&info->port.count, 0);
53822 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53823 info->port.tty = NULL;
53824 spin_unlock_irqrestore(&info->port.lock, flags);
53825@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53826 unsigned short new_crctype;
53827
53828 /* return error if TTY interface open */
53829- if (info->port.count)
53830+ if (atomic_read(&info->port.count))
53831 return -EBUSY;
53832
53833 switch (encoding)
53834@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53835
53836 /* arbitrate between network and tty opens */
53837 spin_lock_irqsave(&info->netlock, flags);
53838- if (info->port.count != 0 || info->netcount != 0) {
53839+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53840 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53841 spin_unlock_irqrestore(&info->netlock, flags);
53842 return -EBUSY;
53843@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53844 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53845
53846 /* return error if TTY interface open */
53847- if (info->port.count)
53848+ if (atomic_read(&info->port.count))
53849 return -EBUSY;
53850
53851 if (cmd != SIOCWANDEV)
53852@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53853 * do not request bottom half processing if the
53854 * device is not open in a normal mode.
53855 */
53856- if ( port && (port->port.count || port->netcount) &&
53857+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53858 port->pending_bh && !port->bh_running &&
53859 !port->bh_requested ) {
53860 if ( debug_level >= DEBUG_LEVEL_ISR )
53861@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53862
53863 if (debug_level >= DEBUG_LEVEL_INFO)
53864 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53865- __FILE__,__LINE__, tty->driver->name, port->count );
53866+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53867
53868 spin_lock_irqsave(&info->lock, flags);
53869- port->count--;
53870+ atomic_dec(&port->count);
53871 spin_unlock_irqrestore(&info->lock, flags);
53872 port->blocked_open++;
53873
53874@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53875
53876 if (debug_level >= DEBUG_LEVEL_INFO)
53877 printk("%s(%d):%s block_til_ready() count=%d\n",
53878- __FILE__,__LINE__, tty->driver->name, port->count );
53879+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53880
53881 tty_unlock(tty);
53882 schedule();
53883@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53884 set_current_state(TASK_RUNNING);
53885 remove_wait_queue(&port->open_wait, &wait);
53886 if (!tty_hung_up_p(filp))
53887- port->count++;
53888+ atomic_inc(&port->count);
53889 port->blocked_open--;
53890
53891 if (debug_level >= DEBUG_LEVEL_INFO)
53892 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53893- __FILE__,__LINE__, tty->driver->name, port->count );
53894+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53895
53896 if (!retval)
53897 port->flags |= ASYNC_NORMAL_ACTIVE;
53898diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53899index 259a4d5..9b0c9e7 100644
53900--- a/drivers/tty/sysrq.c
53901+++ b/drivers/tty/sysrq.c
53902@@ -1085,7 +1085,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53903 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53904 size_t count, loff_t *ppos)
53905 {
53906- if (count) {
53907+ if (count && capable(CAP_SYS_ADMIN)) {
53908 char c;
53909
53910 if (get_user(c, buf))
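The sysrq hunk above gates /proc/sysrq-trigger behind capable(CAP_SYS_ADMIN), so a leaked or inherited writable descriptor is no longer enough to fire sysrq handlers; judging by the diff the write path otherwise proceeds as before, so unprivileged writes appear to succeed without acting. A userspace sketch of the gate, with geteuid() standing in for the capability check:

#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

static bool capable_sys_admin(void)
{
	return geteuid() == 0;	/* crude stand-in for capable(CAP_SYS_ADMIN) */
}

/* Models the hardened handler: act only for privileged writers, but
 * consume the write either way so the policy is not probeable. */
static long sysrq_trigger_write(const char *buf, size_t count)
{
	if (count && capable_sys_admin()) {
		char c = buf[0];
		(void)c;	/* __handle_sysrq(c) would run here */
	}
	return (long)count;
}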
53911diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53912index 2bb4dfc..a7f6e86 100644
53913--- a/drivers/tty/tty_io.c
53914+++ b/drivers/tty/tty_io.c
53915@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53916
53917 void tty_default_fops(struct file_operations *fops)
53918 {
53919- *fops = tty_fops;
53920+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53921 }
53922
53923 /*
53924diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53925index 3737f55..7cef448 100644
53926--- a/drivers/tty/tty_ldisc.c
53927+++ b/drivers/tty/tty_ldisc.c
53928@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53929 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53930 tty_ldiscs[disc] = new_ldisc;
53931 new_ldisc->num = disc;
53932- new_ldisc->refcount = 0;
53933+ atomic_set(&new_ldisc->refcount, 0);
53934 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53935
53936 return ret;
53937@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53938 return -EINVAL;
53939
53940 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53941- if (tty_ldiscs[disc]->refcount)
53942+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53943 ret = -EBUSY;
53944 else
53945 tty_ldiscs[disc] = NULL;
53946@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53947 if (ldops) {
53948 ret = ERR_PTR(-EAGAIN);
53949 if (try_module_get(ldops->owner)) {
53950- ldops->refcount++;
53951+ atomic_inc(&ldops->refcount);
53952 ret = ldops;
53953 }
53954 }
53955@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53956 unsigned long flags;
53957
53958 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53959- ldops->refcount--;
53960+ atomic_dec(&ldops->refcount);
53961 module_put(ldops->owner);
53962 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53963 }
53964diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53965index 40b31835..94d92ae 100644
53966--- a/drivers/tty/tty_port.c
53967+++ b/drivers/tty/tty_port.c
53968@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53969 unsigned long flags;
53970
53971 spin_lock_irqsave(&port->lock, flags);
53972- port->count = 0;
53973+ atomic_set(&port->count, 0);
53974 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53975 tty = port->tty;
53976 if (tty)
53977@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53978
53979 /* The port lock protects the port counts */
53980 spin_lock_irqsave(&port->lock, flags);
53981- port->count--;
53982+ atomic_dec(&port->count);
53983 port->blocked_open++;
53984 spin_unlock_irqrestore(&port->lock, flags);
53985
53986@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53987 we must not mess that up further */
53988 spin_lock_irqsave(&port->lock, flags);
53989 if (!tty_hung_up_p(filp))
53990- port->count++;
53991+ atomic_inc(&port->count);
53992 port->blocked_open--;
53993 if (retval == 0)
53994 port->flags |= ASYNC_NORMAL_ACTIVE;
53995@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53996 return 0;
53997
53998 spin_lock_irqsave(&port->lock, flags);
53999- if (tty->count == 1 && port->count != 1) {
54000+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54001 printk(KERN_WARNING
54002 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54003- port->count);
54004- port->count = 1;
54005+ atomic_read(&port->count));
54006+ atomic_set(&port->count, 1);
54007 }
54008- if (--port->count < 0) {
54009+ if (atomic_dec_return(&port->count) < 0) {
54010 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54011- port->count);
54012- port->count = 0;
54013+ atomic_read(&port->count));
54014+ atomic_set(&port->count, 0);
54015 }
54016
54017- if (port->count) {
54018+ if (atomic_read(&port->count)) {
54019 spin_unlock_irqrestore(&port->lock, flags);
54020 return 0;
54021 }
54022@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54023 struct file *filp)
54024 {
54025 spin_lock_irq(&port->lock);
54026- ++port->count;
54027+ atomic_inc(&port->count);
54028 spin_unlock_irq(&port->lock);
54029 tty_port_tty_set(port, tty);
54030
54031diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54032index 8a89f6e..50b32af 100644
54033--- a/drivers/tty/vt/keyboard.c
54034+++ b/drivers/tty/vt/keyboard.c
54035@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54036 kbd->kbdmode == VC_OFF) &&
54037 value != KVAL(K_SAK))
54038 return; /* SAK is allowed even in raw mode */
54039+
54040+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54041+ {
54042+ void *func = fn_handler[value];
54043+ if (func == fn_show_state || func == fn_show_ptregs ||
54044+ func == fn_show_mem)
54045+ return;
54046+ }
54047+#endif
54048+
54049 fn_handler[value](vc);
54050 }
54051
54052@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54053 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54054 return -EFAULT;
54055
54056- if (!capable(CAP_SYS_TTY_CONFIG))
54057- perm = 0;
54058-
54059 switch (cmd) {
54060 case KDGKBENT:
54061 /* Ensure another thread doesn't free it under us */
54062@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54063 spin_unlock_irqrestore(&kbd_event_lock, flags);
54064 return put_user(val, &user_kbe->kb_value);
54065 case KDSKBENT:
54066+ if (!capable(CAP_SYS_TTY_CONFIG))
54067+ perm = 0;
54068+
54069 if (!perm)
54070 return -EPERM;
54071 if (!i && v == K_NOSUCHMAP) {
54072@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54073 int i, j, k;
54074 int ret;
54075
54076- if (!capable(CAP_SYS_TTY_CONFIG))
54077- perm = 0;
54078-
54079 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54080 if (!kbs) {
54081 ret = -ENOMEM;
54082@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54083 kfree(kbs);
54084 return ((p && *p) ? -EOVERFLOW : 0);
54085 case KDSKBSENT:
54086+ if (!capable(CAP_SYS_TTY_CONFIG))
54087+ perm = 0;
54088+
54089 if (!perm) {
54090 ret = -EPERM;
54091 goto reterr;
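
Two separate changes ride together in keyboard.c: the GRKERNSEC_PROC guard filters out the fn_show_state/fn_show_ptregs/fn_show_mem console handlers (they dump task, register and memory information that the /proc restrictions are trying to hide), and the CAP_SYS_TTY_CONFIG test moves from the top of the ioctl helpers down into the KDSKBENT/KDSKBSENT cases, so only the mutating subcommands demand the capability while the KDGKB* readers keep working unprivileged. The gating pattern in miniature (dispatch(), do_get() and do_set() are invented for illustration):

    #include <errno.h>

    static int do_get(void) { return 0; }   /* read-only subcommand */
    static int do_set(void) { return 0; }   /* mutating subcommand */

    int dispatch(int cmd, int has_cap_sys_tty_config)
    {
            switch (cmd) {
            case 0:                          /* like KDGKBENT: no capability check */
                    return do_get();
            case 1:                          /* like KDSKBENT: checked only here */
                    if (!has_cap_sys_tty_config)
                            return -EPERM;
                    return do_set();
            }
            return -EINVAL;
    }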
54092diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54093index 6276f13..84f2449 100644
54094--- a/drivers/uio/uio.c
54095+++ b/drivers/uio/uio.c
54096@@ -25,6 +25,7 @@
54097 #include <linux/kobject.h>
54098 #include <linux/cdev.h>
54099 #include <linux/uio_driver.h>
54100+#include <asm/local.h>
54101
54102 #define UIO_MAX_DEVICES (1U << MINORBITS)
54103
54104@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
54105 struct device_attribute *attr, char *buf)
54106 {
54107 struct uio_device *idev = dev_get_drvdata(dev);
54108- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
54109+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
54110 }
54111 static DEVICE_ATTR_RO(event);
54112
54113@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
54114 {
54115 struct uio_device *idev = info->uio_dev;
54116
54117- atomic_inc(&idev->event);
54118+ atomic_inc_unchecked(&idev->event);
54119 wake_up_interruptible(&idev->wait);
54120 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
54121 }
54122@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
54123 }
54124
54125 listener->dev = idev;
54126- listener->event_count = atomic_read(&idev->event);
54127+ listener->event_count = atomic_read_unchecked(&idev->event);
54128 filep->private_data = listener;
54129
54130 if (idev->info->open) {
54131@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
54132 return -EIO;
54133
54134 poll_wait(filep, &idev->wait, wait);
54135- if (listener->event_count != atomic_read(&idev->event))
54136+ if (listener->event_count != atomic_read_unchecked(&idev->event))
54137 return POLLIN | POLLRDNORM;
54138 return 0;
54139 }
54140@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
54141 do {
54142 set_current_state(TASK_INTERRUPTIBLE);
54143
54144- event_count = atomic_read(&idev->event);
54145+ event_count = atomic_read_unchecked(&idev->event);
54146 if (event_count != listener->event_count) {
54147 if (copy_to_user(buf, &event_count, count))
54148 retval = -EFAULT;
54149@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
54150 static int uio_find_mem_index(struct vm_area_struct *vma)
54151 {
54152 struct uio_device *idev = vma->vm_private_data;
54153+ unsigned long size;
54154
54155 if (vma->vm_pgoff < MAX_UIO_MAPS) {
54156- if (idev->info->mem[vma->vm_pgoff].size == 0)
54157+ size = idev->info->mem[vma->vm_pgoff].size;
54158+ if (size == 0)
54159+ return -1;
54160+ if (vma->vm_end - vma->vm_start > size)
54161 return -1;
54162 return (int)vma->vm_pgoff;
54163 }
54164@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
54165 idev->owner = owner;
54166 idev->info = info;
54167 init_waitqueue_head(&idev->wait);
54168- atomic_set(&idev->event, 0);
54169+ atomic_set_unchecked(&idev->event, 0);
54170
54171 ret = uio_get_minor(idev);
54172 if (ret)
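
Besides moving the event counter to the unchecked atomics (see the note after the usbatm.c diff below), the uio.c hunk hardens uio_find_mem_index(): the requested mapping length is now compared against the backing region, so userspace can no longer mmap() past the end of a UIO memory window. The shape of the added check, with illustrative names:

    /* Returns nonzero when the mmap request fits; all names are illustrative. */
    static int mmap_request_ok(unsigned long vm_start, unsigned long vm_end,
                               unsigned long region_size)
    {
            if (region_size == 0)
                    return 0;                /* unused map slot */
            if (vm_end - vm_start > region_size)
                    return 0;                /* would map past the region */
            return 1;
    }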
54173diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
54174index 813d4d3..a71934f 100644
54175--- a/drivers/usb/atm/cxacru.c
54176+++ b/drivers/usb/atm/cxacru.c
54177@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
54178 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
54179 if (ret < 2)
54180 return -EINVAL;
54181- if (index < 0 || index > 0x7f)
54182+ if (index > 0x7f)
54183 return -EINVAL;
54184 pos += tmp;
54185
54186diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
54187index dada014..1d0d517 100644
54188--- a/drivers/usb/atm/usbatm.c
54189+++ b/drivers/usb/atm/usbatm.c
54190@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54191 if (printk_ratelimit())
54192 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
54193 __func__, vpi, vci);
54194- atomic_inc(&vcc->stats->rx_err);
54195+ atomic_inc_unchecked(&vcc->stats->rx_err);
54196 return;
54197 }
54198
54199@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54200 if (length > ATM_MAX_AAL5_PDU) {
54201 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
54202 __func__, length, vcc);
54203- atomic_inc(&vcc->stats->rx_err);
54204+ atomic_inc_unchecked(&vcc->stats->rx_err);
54205 goto out;
54206 }
54207
54208@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54209 if (sarb->len < pdu_length) {
54210 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
54211 __func__, pdu_length, sarb->len, vcc);
54212- atomic_inc(&vcc->stats->rx_err);
54213+ atomic_inc_unchecked(&vcc->stats->rx_err);
54214 goto out;
54215 }
54216
54217 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
54218 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
54219 __func__, vcc);
54220- atomic_inc(&vcc->stats->rx_err);
54221+ atomic_inc_unchecked(&vcc->stats->rx_err);
54222 goto out;
54223 }
54224
54225@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54226 if (printk_ratelimit())
54227 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
54228 __func__, length);
54229- atomic_inc(&vcc->stats->rx_drop);
54230+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54231 goto out;
54232 }
54233
54234@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54235
54236 vcc->push(vcc, skb);
54237
54238- atomic_inc(&vcc->stats->rx);
54239+ atomic_inc_unchecked(&vcc->stats->rx);
54240 out:
54241 skb_trim(sarb, 0);
54242 }
54243@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54244 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54245
54246 usbatm_pop(vcc, skb);
54247- atomic_inc(&vcc->stats->tx);
54248+ atomic_inc_unchecked(&vcc->stats->tx);
54249
54250 skb = skb_dequeue(&instance->sndqueue);
54251 }
54252@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
54253 if (!left--)
54254 return sprintf(page,
54255 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
54256- atomic_read(&atm_dev->stats.aal5.tx),
54257- atomic_read(&atm_dev->stats.aal5.tx_err),
54258- atomic_read(&atm_dev->stats.aal5.rx),
54259- atomic_read(&atm_dev->stats.aal5.rx_err),
54260- atomic_read(&atm_dev->stats.aal5.rx_drop));
54261+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
54262+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
54263+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
54264+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
54265+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
54266
54267 if (!left--) {
54268 if (instance->disconnected)
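
The atomic_*_unchecked() conversions that dominate this part of the patch are the flip side of PAX_REFCOUNT: with that option, the ordinary atomic_t operations trap on signed overflow so a leaked reference can never wrap a refcount back to zero, and counters that are pure statistics and may legitimately wrap (the ATM rx/tx/err counters here, the uio event counter above, urbnum and the seqnum counters below) are moved to atomic_unchecked_t so they keep the old modular behavior. Roughly what the checked increment looks like on x86, simplified from the PaX implementation (the real one also carries an exception-table entry for the trapping instruction):

    /* Checked: undo the increment and raise the overflow trap (vector 4)
     * if the signed result overflowed. */
    static inline void atomic_inc_checked(int *counter)
    {
            asm volatile("lock incl %0\n\t"
                         "jno 0f\n\t"
                         "lock decl %0\n\t"
                         "int $4\n"
                         "0:"
                         : "+m" (*counter));
    }

    /* Unchecked: the plain wrapping increment, as before the patch. */
    static inline void atomic_inc_unchecked_sketch(int *counter)
    {
            asm volatile("lock incl %0" : "+m" (*counter));
    }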
54269diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
54270index 2a3bbdf..91d72cf 100644
54271--- a/drivers/usb/core/devices.c
54272+++ b/drivers/usb/core/devices.c
54273@@ -126,7 +126,7 @@ static const char format_endpt[] =
54274 * time it gets called.
54275 */
54276 static struct device_connect_event {
54277- atomic_t count;
54278+ atomic_unchecked_t count;
54279 wait_queue_head_t wait;
54280 } device_event = {
54281 .count = ATOMIC_INIT(1),
54282@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
54283
54284 void usbfs_conn_disc_event(void)
54285 {
54286- atomic_add(2, &device_event.count);
54287+ atomic_add_unchecked(2, &device_event.count);
54288 wake_up(&device_event.wait);
54289 }
54290
54291@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
54292
54293 poll_wait(file, &device_event.wait, wait);
54294
54295- event_count = atomic_read(&device_event.count);
54296+ event_count = atomic_read_unchecked(&device_event.count);
54297 if (file->f_version != event_count) {
54298 file->f_version = event_count;
54299 return POLLIN | POLLRDNORM;
54300diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
54301index 1163553..f292679 100644
54302--- a/drivers/usb/core/devio.c
54303+++ b/drivers/usb/core/devio.c
54304@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54305 struct usb_dev_state *ps = file->private_data;
54306 struct usb_device *dev = ps->dev;
54307 ssize_t ret = 0;
54308- unsigned len;
54309+ size_t len;
54310 loff_t pos;
54311 int i;
54312
54313@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54314 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
54315 struct usb_config_descriptor *config =
54316 (struct usb_config_descriptor *)dev->rawdescriptors[i];
54317- unsigned int length = le16_to_cpu(config->wTotalLength);
54318+ size_t length = le16_to_cpu(config->wTotalLength);
54319
54320 if (*ppos < pos + length) {
54321
54322 /* The descriptor may claim to be longer than it
54323 * really is. Here is the actual allocated length. */
54324- unsigned alloclen =
54325+ size_t alloclen =
54326 le16_to_cpu(dev->config[i].desc.wTotalLength);
54327
54328- len = length - (*ppos - pos);
54329+ len = length + pos - *ppos;
54330 if (len > nbytes)
54331 len = nbytes;
54332
54333 /* Simply don't write (skip over) unallocated parts */
54334 if (alloclen > (*ppos - pos)) {
54335- alloclen -= (*ppos - pos);
54336+ alloclen = alloclen + pos - *ppos;
54337 if (copy_to_user(buf,
54338 dev->rawdescriptors[i] + (*ppos - pos),
54339 min(len, alloclen))) {
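
The usbdev_read() hunk widens len/length/alloclen from unsigned int to size_t and rewrites length - (*ppos - pos) as length + pos - *ppos. Under the guard *ppos < pos + length the two expressions are algebraically identical; the reordering (add before subtracting) appears intended to satisfy the size_overflow GCC plugin, which instruments subtractions on user-influenced lengths — that motivation is an inference, not stated in the patch. A quick exhaustive check of the equivalence under the guard:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
            for (size_t pos = 0; pos < 64; pos += 8)
                    for (size_t length = 1; length <= 64; length++)
                            for (size_t ppos = pos; ppos < pos + length; ppos++)
                                    assert(length - (ppos - pos) ==
                                           length + pos - ppos);
            return 0;
    }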
54340diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
54341index 45a915c..09f9735 100644
54342--- a/drivers/usb/core/hcd.c
54343+++ b/drivers/usb/core/hcd.c
54344@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54345 */
54346 usb_get_urb(urb);
54347 atomic_inc(&urb->use_count);
54348- atomic_inc(&urb->dev->urbnum);
54349+ atomic_inc_unchecked(&urb->dev->urbnum);
54350 usbmon_urb_submit(&hcd->self, urb);
54351
54352 /* NOTE requirements on root-hub callers (usbfs and the hub
54353@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54354 urb->hcpriv = NULL;
54355 INIT_LIST_HEAD(&urb->urb_list);
54356 atomic_dec(&urb->use_count);
54357- atomic_dec(&urb->dev->urbnum);
54358+ atomic_dec_unchecked(&urb->dev->urbnum);
54359 if (atomic_read(&urb->reject))
54360 wake_up(&usb_kill_urb_queue);
54361 usb_put_urb(urb);
54362diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54363index d7c3d5a..2f87607 100644
54364--- a/drivers/usb/core/hub.c
54365+++ b/drivers/usb/core/hub.c
54366@@ -26,6 +26,7 @@
54367 #include <linux/mutex.h>
54368 #include <linux/random.h>
54369 #include <linux/pm_qos.h>
54370+#include <linux/grsecurity.h>
54371
54372 #include <asm/uaccess.h>
54373 #include <asm/byteorder.h>
54374@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54375 goto done;
54376 return;
54377 }
54378+
54379+ if (gr_handle_new_usb())
54380+ goto done;
54381+
54382 if (hub_is_superspeed(hub->hdev))
54383 unit_load = 150;
54384 else
54385diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54386index f368d20..0c30ac5 100644
54387--- a/drivers/usb/core/message.c
54388+++ b/drivers/usb/core/message.c
54389@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54390 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54391 * error number.
54392 */
54393-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54394+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54395 __u8 requesttype, __u16 value, __u16 index, void *data,
54396 __u16 size, int timeout)
54397 {
54398@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54399 * If successful, 0. Otherwise a negative error number. The number of actual
54400 * bytes transferred will be stored in the @actual_length parameter.
54401 */
54402-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54403+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54404 void *data, int len, int *actual_length, int timeout)
54405 {
54406 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54407@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54408 * bytes transferred will be stored in the @actual_length parameter.
54409 *
54410 */
54411-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54412+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54413 void *data, int len, int *actual_length, int timeout)
54414 {
54415 struct urb *urb;
54416diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54417index d269738..7340cd7 100644
54418--- a/drivers/usb/core/sysfs.c
54419+++ b/drivers/usb/core/sysfs.c
54420@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54421 struct usb_device *udev;
54422
54423 udev = to_usb_device(dev);
54424- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54425+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54426 }
54427 static DEVICE_ATTR_RO(urbnum);
54428
54429diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54430index b1fb9ae..4224885 100644
54431--- a/drivers/usb/core/usb.c
54432+++ b/drivers/usb/core/usb.c
54433@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54434 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54435 dev->state = USB_STATE_ATTACHED;
54436 dev->lpm_disable_count = 1;
54437- atomic_set(&dev->urbnum, 0);
54438+ atomic_set_unchecked(&dev->urbnum, 0);
54439
54440 INIT_LIST_HEAD(&dev->ep0.urb_list);
54441 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54442diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54443index 8cfc319..4868255 100644
54444--- a/drivers/usb/early/ehci-dbgp.c
54445+++ b/drivers/usb/early/ehci-dbgp.c
54446@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54447
54448 #ifdef CONFIG_KGDB
54449 static struct kgdb_io kgdbdbgp_io_ops;
54450-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54451+static struct kgdb_io kgdbdbgp_io_ops_console;
54452+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54453 #else
54454 #define dbgp_kgdb_mode (0)
54455 #endif
54456@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54457 .write_char = kgdbdbgp_write_char,
54458 };
54459
54460+static struct kgdb_io kgdbdbgp_io_ops_console = {
54461+ .name = "kgdbdbgp",
54462+ .read_char = kgdbdbgp_read_char,
54463+ .write_char = kgdbdbgp_write_char,
54464+ .is_console = 1
54465+};
54466+
54467 static int kgdbdbgp_wait_time;
54468
54469 static int __init kgdbdbgp_parse_config(char *str)
54470@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54471 ptr++;
54472 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54473 }
54474- kgdb_register_io_module(&kgdbdbgp_io_ops);
54475- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54476+ if (early_dbgp_console.index != -1)
54477+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54478+ else
54479+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54480
54481 return 0;
54482 }
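
The ehci-dbgp change removes a post-registration write: instead of registering kgdbdbgp_io_ops and then flipping its .is_console flag, the code now picks between two fully initialized kgdb_io structures up front. Keeping the ops structure write-free after setup is what lets the constify/KERNEXEC machinery elsewhere in this patch place such structures in read-only memory. The pattern in miniature (register_io() and the ops variables are illustrative):

    struct io_ops { const char *name; int is_console; };

    static struct io_ops ops         = { "dbgp", 0 };
    static struct io_ops ops_console = { "dbgp", 1 };  /* flag baked in */

    static void register_io(struct io_ops *io) { (void)io; }

    static void setup(int have_console)
    {
            /* choose the right variant instead of mutating ops.is_console */
            register_io(have_console ? &ops_console : &ops);
    }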
54483diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54484index 9719abf..789d5d9 100644
54485--- a/drivers/usb/gadget/function/f_uac1.c
54486+++ b/drivers/usb/gadget/function/f_uac1.c
54487@@ -14,6 +14,7 @@
54488 #include <linux/module.h>
54489 #include <linux/device.h>
54490 #include <linux/atomic.h>
54491+#include <linux/module.h>
54492
54493 #include "u_uac1.h"
54494
54495diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54496index 491082a..dfd7d17 100644
54497--- a/drivers/usb/gadget/function/u_serial.c
54498+++ b/drivers/usb/gadget/function/u_serial.c
54499@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54500 spin_lock_irq(&port->port_lock);
54501
54502 /* already open? Great. */
54503- if (port->port.count) {
54504+ if (atomic_read(&port->port.count)) {
54505 status = 0;
54506- port->port.count++;
54507+ atomic_inc(&port->port.count);
54508
54509 /* currently opening/closing? wait ... */
54510 } else if (port->openclose) {
54511@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54512 tty->driver_data = port;
54513 port->port.tty = tty;
54514
54515- port->port.count = 1;
54516+ atomic_set(&port->port.count, 1);
54517 port->openclose = false;
54518
54519 /* if connected, start the I/O stream */
54520@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54521
54522 spin_lock_irq(&port->port_lock);
54523
54524- if (port->port.count != 1) {
54525- if (port->port.count == 0)
54526+ if (atomic_read(&port->port.count) != 1) {
54527+ if (atomic_read(&port->port.count) == 0)
54528 WARN_ON(1);
54529 else
54530- --port->port.count;
54531+ atomic_dec(&port->port.count);
54532 goto exit;
54533 }
54534
54535@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54536 * and sleep if necessary
54537 */
54538 port->openclose = true;
54539- port->port.count = 0;
54540+ atomic_set(&port->port.count, 0);
54541
54542 gser = port->port_usb;
54543 if (gser && gser->disconnect)
54544@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54545 int cond;
54546
54547 spin_lock_irq(&port->port_lock);
54548- cond = (port->port.count == 0) && !port->openclose;
54549+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54550 spin_unlock_irq(&port->port_lock);
54551 return cond;
54552 }
54553@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54554 /* if it's already open, start I/O ... and notify the serial
54555 * protocol about open/close status (connect/disconnect).
54556 */
54557- if (port->port.count) {
54558+ if (atomic_read(&port->port.count)) {
54559 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54560 gs_start_io(port);
54561 if (gser->connect)
54562@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54563
54564 port->port_usb = NULL;
54565 gser->ioport = NULL;
54566- if (port->port.count > 0 || port->openclose) {
54567+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54568 wake_up_interruptible(&port->drain_wait);
54569 if (port->port.tty)
54570 tty_hangup(port->port.tty);
54571@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54572
54573 /* finally, free any unused/unusable I/O buffers */
54574 spin_lock_irqsave(&port->port_lock, flags);
54575- if (port->port.count == 0 && !port->openclose)
54576+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54577 gs_buf_free(&port->port_write_buf);
54578 gs_free_requests(gser->out, &port->read_pool, NULL);
54579 gs_free_requests(gser->out, &port->read_queue, NULL);
54580diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54581index c78c841..48fd281 100644
54582--- a/drivers/usb/gadget/function/u_uac1.c
54583+++ b/drivers/usb/gadget/function/u_uac1.c
54584@@ -17,6 +17,7 @@
54585 #include <linux/ctype.h>
54586 #include <linux/random.h>
54587 #include <linux/syscalls.h>
54588+#include <linux/module.h>
54589
54590 #include "u_uac1.h"
54591
54592diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54593index 87cf86f..3de9809 100644
54594--- a/drivers/usb/host/ehci-hub.c
54595+++ b/drivers/usb/host/ehci-hub.c
54596@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54597 urb->transfer_flags = URB_DIR_IN;
54598 usb_get_urb(urb);
54599 atomic_inc(&urb->use_count);
54600- atomic_inc(&urb->dev->urbnum);
54601+ atomic_inc_unchecked(&urb->dev->urbnum);
54602 urb->setup_dma = dma_map_single(
54603 hcd->self.controller,
54604 urb->setup_packet,
54605@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54606 urb->status = -EINPROGRESS;
54607 usb_get_urb(urb);
54608 atomic_inc(&urb->use_count);
54609- atomic_inc(&urb->dev->urbnum);
54610+ atomic_inc_unchecked(&urb->dev->urbnum);
54611 retval = submit_single_step_set_feature(hcd, urb, 0);
54612 if (!retval && !wait_for_completion_timeout(&done,
54613 msecs_to_jiffies(2000))) {
54614diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54615index 1db0626..4948782 100644
54616--- a/drivers/usb/host/hwa-hc.c
54617+++ b/drivers/usb/host/hwa-hc.c
54618@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54619 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54620 struct wahc *wa = &hwahc->wa;
54621 struct device *dev = &wa->usb_iface->dev;
54622- u8 mas_le[UWB_NUM_MAS/8];
54623+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54624+
54625+ if (mas_le == NULL)
54626+ return -ENOMEM;
54627
54628 /* Set the stream index */
54629 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54630@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54631 WUSB_REQ_SET_WUSB_MAS,
54632 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54633 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54634- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54635+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54636 if (result < 0)
54637 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54638 out:
54639+ kfree(mas_le);
54640+
54641 return result;
54642 }
54643
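
In __hwahc_op_bwa_set() the 32-byte mas_le array moves from the stack to kmalloc(): it is handed to usb_control_msg() as a transfer buffer, and USB transfer buffers must be heap memory the host controller can DMA-map, never on-stack storage. The hunk also replaces the magic length 32 with UWB_NUM_MAS/8 and frees the buffer on the shared out: path. The general shape of the fix, as a kernel-context sketch (not standalone-buildable; BUF_LEN, REQ, REQTYPE, usb_dev, pipe and fill() are placeholders):

    #include <linux/slab.h>
    #include <linux/usb.h>

    int send_cmd(void)
    {
            int ret;
            u8 *buf = kmalloc(BUF_LEN, GFP_KERNEL); /* heap, not a stack array */

            if (!buf)
                    return -ENOMEM;
            fill(buf);                              /* build the payload */
            ret = usb_control_msg(usb_dev, pipe, REQ, REQTYPE, 0, 0,
                                  buf, BUF_LEN, USB_CTRL_SET_TIMEOUT);
            kfree(buf);                             /* freed on every path */
            return ret;
    }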
54644diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54645index b3d245e..99549ed 100644
54646--- a/drivers/usb/misc/appledisplay.c
54647+++ b/drivers/usb/misc/appledisplay.c
54648@@ -84,7 +84,7 @@ struct appledisplay {
54649 struct mutex sysfslock; /* concurrent read and write */
54650 };
54651
54652-static atomic_t count_displays = ATOMIC_INIT(0);
54653+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54654 static struct workqueue_struct *wq;
54655
54656 static void appledisplay_complete(struct urb *urb)
54657@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54658
54659 /* Register backlight device */
54660 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54661- atomic_inc_return(&count_displays) - 1);
54662+ atomic_inc_return_unchecked(&count_displays) - 1);
54663 memset(&props, 0, sizeof(struct backlight_properties));
54664 props.type = BACKLIGHT_RAW;
54665 props.max_brightness = 0xff;
54666diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54667index 3806e70..55c508b 100644
54668--- a/drivers/usb/serial/console.c
54669+++ b/drivers/usb/serial/console.c
54670@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
54671
54672 info->port = port;
54673
54674- ++port->port.count;
54675+ atomic_inc(&port->port.count);
54676 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54677 if (serial->type->set_termios) {
54678 /*
54679@@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
54680 }
54681 /* Now that any required fake tty operations are completed restore
54682 * the tty port count */
54683- --port->port.count;
54684+ atomic_dec(&port->port.count);
54685 /* The console is special in terms of closing the device so
54686 * indicate this port is now acting as a system console. */
54687 port->port.console = 1;
54688@@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
54689 put_tty:
54690 tty_kref_put(tty);
54691 reset_open_count:
54692- port->port.count = 0;
54693+ atomic_set(&port->port.count, 0);
54694 usb_autopm_put_interface(serial->interface);
54695 error_get_interface:
54696 usb_serial_put(serial);
54697@@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
54698 static void usb_console_write(struct console *co,
54699 const char *buf, unsigned count)
54700 {
54701- static struct usbcons_info *info = &usbcons_info;
54702+ struct usbcons_info *info = &usbcons_info;
54703 struct usb_serial_port *port = info->port;
54704 struct usb_serial *serial;
54705 int retval = -ENODEV;
54706diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54707index 307e339..6aa97cb 100644
54708--- a/drivers/usb/storage/usb.h
54709+++ b/drivers/usb/storage/usb.h
54710@@ -63,7 +63,7 @@ struct us_unusual_dev {
54711 __u8 useProtocol;
54712 __u8 useTransport;
54713 int (*initFunction)(struct us_data *);
54714-};
54715+} __do_const;
54716
54717
54718 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54719diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54720index a863a98..d272795 100644
54721--- a/drivers/usb/usbip/vhci.h
54722+++ b/drivers/usb/usbip/vhci.h
54723@@ -83,7 +83,7 @@ struct vhci_hcd {
54724 unsigned resuming:1;
54725 unsigned long re_timeout;
54726
54727- atomic_t seqnum;
54728+ atomic_unchecked_t seqnum;
54729
54730 /*
54731 * NOTE:
54732diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54733index 11f6f61..1087910 100644
54734--- a/drivers/usb/usbip/vhci_hcd.c
54735+++ b/drivers/usb/usbip/vhci_hcd.c
54736@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
54737
54738 spin_lock(&vdev->priv_lock);
54739
54740- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54741+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54742 if (priv->seqnum == 0xffff)
54743 dev_info(&urb->dev->dev, "seqnum max\n");
54744
54745@@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54746 return -ENOMEM;
54747 }
54748
54749- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54750+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54751 if (unlink->seqnum == 0xffff)
54752 pr_info("seqnum max\n");
54753
54754@@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
54755 vdev->rhport = rhport;
54756 }
54757
54758- atomic_set(&vhci->seqnum, 0);
54759+ atomic_set_unchecked(&vhci->seqnum, 0);
54760 spin_lock_init(&vhci->lock);
54761
54762 hcd->power_budget = 0; /* no limit */
54763diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54764index 00e4a54..d676f85 100644
54765--- a/drivers/usb/usbip/vhci_rx.c
54766+++ b/drivers/usb/usbip/vhci_rx.c
54767@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54768 if (!urb) {
54769 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54770 pr_info("max seqnum %d\n",
54771- atomic_read(&the_controller->seqnum));
54772+ atomic_read_unchecked(&the_controller->seqnum));
54773 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54774 return;
54775 }
54776diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54777index edc7267..9f65ce2 100644
54778--- a/drivers/usb/wusbcore/wa-hc.h
54779+++ b/drivers/usb/wusbcore/wa-hc.h
54780@@ -240,7 +240,7 @@ struct wahc {
54781 spinlock_t xfer_list_lock;
54782 struct work_struct xfer_enqueue_work;
54783 struct work_struct xfer_error_work;
54784- atomic_t xfer_id_count;
54785+ atomic_unchecked_t xfer_id_count;
54786
54787 kernel_ulong_t quirks;
54788 };
54789@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54790 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54791 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54792 wa->dto_in_use = 0;
54793- atomic_set(&wa->xfer_id_count, 1);
54794+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54795 /* init the buf in URBs */
54796 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54797 usb_init_urb(&(wa->buf_in_urbs[index]));
54798diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54799index 69af4fd..da390d7 100644
54800--- a/drivers/usb/wusbcore/wa-xfer.c
54801+++ b/drivers/usb/wusbcore/wa-xfer.c
54802@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54803 */
54804 static void wa_xfer_id_init(struct wa_xfer *xfer)
54805 {
54806- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54807+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54808 }
54809
54810 /* Return the xfer's ID. */
54811diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54812index 4cde855..b23d05d 100644
54813--- a/drivers/vfio/vfio.c
54814+++ b/drivers/vfio/vfio.c
54815@@ -518,7 +518,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54816 return 0;
54817
54818 /* TODO Prevent device auto probing */
54819- WARN("Device %s added to live group %d!\n", dev_name(dev),
54820+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54821 iommu_group_id(group->iommu_group));
54822
54823 return 0;
54824diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54825index 3bb02c6..a01ff38 100644
54826--- a/drivers/vhost/vringh.c
54827+++ b/drivers/vhost/vringh.c
54828@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54829 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54830 {
54831 __virtio16 v = 0;
54832- int rc = get_user(v, (__force __virtio16 __user *)p);
54833+ int rc = get_user(v, (__force_user __virtio16 *)p);
54834 *val = vringh16_to_cpu(vrh, v);
54835 return rc;
54836 }
54837@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54838 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54839 {
54840 __virtio16 v = cpu_to_vringh16(vrh, val);
54841- return put_user(v, (__force __virtio16 __user *)p);
54842+ return put_user(v, (__force_user __virtio16 *)p);
54843 }
54844
54845 static inline int copydesc_user(void *dst, const void *src, size_t len)
54846 {
54847- return copy_from_user(dst, (__force void __user *)src, len) ?
54848+ return copy_from_user(dst, (void __force_user *)src, len) ?
54849 -EFAULT : 0;
54850 }
54851
54852@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54853 const struct vring_used_elem *src,
54854 unsigned int num)
54855 {
54856- return copy_to_user((__force void __user *)dst, src,
54857+ return copy_to_user((void __force_user *)dst, src,
54858 sizeof(*dst) * num) ? -EFAULT : 0;
54859 }
54860
54861 static inline int xfer_from_user(void *src, void *dst, size_t len)
54862 {
54863- return copy_from_user(dst, (__force void __user *)src, len) ?
54864+ return copy_from_user(dst, (void __force_user *)src, len) ?
54865 -EFAULT : 0;
54866 }
54867
54868 static inline int xfer_to_user(void *dst, void *src, size_t len)
54869 {
54870- return copy_to_user((__force void __user *)dst, src, len) ?
54871+ return copy_to_user((void __force_user *)dst, src, len) ?
54872 -EFAULT : 0;
54873 }
54874
54875@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54876 vrh->last_used_idx = 0;
54877 vrh->vring.num = num;
54878 /* vring expects kernel addresses, but only used via accessors. */
54879- vrh->vring.desc = (__force struct vring_desc *)desc;
54880- vrh->vring.avail = (__force struct vring_avail *)avail;
54881- vrh->vring.used = (__force struct vring_used *)used;
54882+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54883+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54884+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54885 return 0;
54886 }
54887 EXPORT_SYMBOL(vringh_init_user);
54888@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54889
54890 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54891 {
54892- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54893+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54894 return 0;
54895 }
54896
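
The vringh.c hunks swap the generic __force cast for __force_user/__force_kernel. These are sparse address-space annotations: __user marks pointers that must not be dereferenced directly, and a __force cast is the programmer's assertion that crossing address spaces is intentional; grsecurity splits the cast into direction-specific spellings so a cast meant to add __user cannot silently launder a kernel pointer, and vice versa. How sparse sees the base annotations (simplified; the stock definitions live in include/linux/compiler.h):

    #ifdef __CHECKER__                       /* defined when sparse is running */
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    long copy_in(void *dst, const void __user *src, unsigned long n);

    /* The cast tells sparse the address-space crossing is deliberate: */
    static inline long getu16(unsigned short *val, const unsigned short *p)
    {
            return copy_in(val, (__force const unsigned short __user *)p, 2);
    }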
54897diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54898index 84a110a..96312c3 100644
54899--- a/drivers/video/backlight/kb3886_bl.c
54900+++ b/drivers/video/backlight/kb3886_bl.c
54901@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54902 static unsigned long kb3886bl_flags;
54903 #define KB3886BL_SUSPENDED 0x01
54904
54905-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54906+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54907 {
54908 .ident = "Sahara Touch-iT",
54909 .matches = {
54910diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54911index 1b0b233..6f34c2c 100644
54912--- a/drivers/video/fbdev/arcfb.c
54913+++ b/drivers/video/fbdev/arcfb.c
54914@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54915 return -ENOSPC;
54916
54917 err = 0;
54918- if ((count + p) > fbmemlength) {
54919+ if (count > (fbmemlength - p)) {
54920 count = fbmemlength - p;
54921 err = -ENOSPC;
54922 }
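
The arcfb_write() hunk is the canonical overflow-safe bounds check: count + p > fbmemlength can wrap for a huge count and let the check pass, whereas count > fbmemlength - p cannot underflow because p has already been validated against fbmemlength just above (the -ENOSPC return in the context). A small demonstration of the difference:

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
            unsigned long fbmemlength = 4096, p = 16;
            unsigned long count = ULONG_MAX - 8;  /* absurdly large write */

            assert(!(count + p > fbmemlength));   /* wraps to 7: old check passes */
            assert(count > fbmemlength - p);      /* reordered check catches it */
            return 0;
    }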
54923diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54924index aedf2fb..47c9aca 100644
54925--- a/drivers/video/fbdev/aty/aty128fb.c
54926+++ b/drivers/video/fbdev/aty/aty128fb.c
54927@@ -149,7 +149,7 @@ enum {
54928 };
54929
54930 /* Must match above enum */
54931-static char * const r128_family[] = {
54932+static const char * const r128_family[] = {
54933 "AGP",
54934 "PCI",
54935 "PRO AGP",
54936diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54937index 8789e48..698fe4c 100644
54938--- a/drivers/video/fbdev/aty/atyfb_base.c
54939+++ b/drivers/video/fbdev/aty/atyfb_base.c
54940@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54941 par->accel_flags = var->accel_flags; /* hack */
54942
54943 if (var->accel_flags) {
54944- info->fbops->fb_sync = atyfb_sync;
54945+ pax_open_kernel();
54946+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54947+ pax_close_kernel();
54948 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54949 } else {
54950- info->fbops->fb_sync = NULL;
54951+ pax_open_kernel();
54952+ *(void **)&info->fbops->fb_sync = NULL;
54953+ pax_close_kernel();
54954 info->flags |= FBINFO_HWACCEL_DISABLED;
54955 }
54956
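
From here on, most fbdev hunks apply one idiom: under PaX KERNEXEC/CONSTIFY, ops tables such as fb_ops become const and live in read-only pages so a kernel write primitive cannot redirect their function pointers, and the few legitimate runtime updates are bracketed with pax_open_kernel()/pax_close_kernel(), with the *(void **)& cast peeling off the compiler-added const qualification. The same bracket recurs below in mach64_cursor, fb_defio, nvidia, omap2/dss, s1d13xxx, smscufx, udlfb, uvesafb and vesafb. The essence of the bracket on x86, much simplified from the PaX original (which keeps a per-CPU shadow of CR0 and pairs with the KERNEXEC plugin):

    /* Clear CR0.WP so ring-0 writes may hit read-only pages, then restore.
     * read_cr0()/write_cr0()/X86_CR0_WP are the stock kernel accessors. */
    static inline void pax_open_kernel_sketch(void)
    {
            preempt_disable();
            barrier();
            write_cr0(read_cr0() & ~X86_CR0_WP);
    }

    static inline void pax_close_kernel_sketch(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);
            barrier();
            preempt_enable();
    }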
54957diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54958index 2fa0317..4983f2a 100644
54959--- a/drivers/video/fbdev/aty/mach64_cursor.c
54960+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54961@@ -8,6 +8,7 @@
54962 #include "../core/fb_draw.h"
54963
54964 #include <asm/io.h>
54965+#include <asm/pgtable.h>
54966
54967 #ifdef __sparc__
54968 #include <asm/fbio.h>
54969@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54970 info->sprite.buf_align = 16; /* and 64 lines tall. */
54971 info->sprite.flags = FB_PIXMAP_IO;
54972
54973- info->fbops->fb_cursor = atyfb_cursor;
54974+ pax_open_kernel();
54975+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54976+ pax_close_kernel();
54977
54978 return 0;
54979 }
54980diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54981index d6cab1f..112f680 100644
54982--- a/drivers/video/fbdev/core/fb_defio.c
54983+++ b/drivers/video/fbdev/core/fb_defio.c
54984@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54985
54986 BUG_ON(!fbdefio);
54987 mutex_init(&fbdefio->lock);
54988- info->fbops->fb_mmap = fb_deferred_io_mmap;
54989+ pax_open_kernel();
54990+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54991+ pax_close_kernel();
54992 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54993 INIT_LIST_HEAD(&fbdefio->pagelist);
54994 if (fbdefio->delay == 0) /* set a default of 1 s */
54995@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54996 page->mapping = NULL;
54997 }
54998
54999- info->fbops->fb_mmap = NULL;
55000+ *(void **)&info->fbops->fb_mmap = NULL;
55001 mutex_destroy(&fbdefio->lock);
55002 }
55003 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55004diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55005index 0705d88..d9429bf 100644
55006--- a/drivers/video/fbdev/core/fbmem.c
55007+++ b/drivers/video/fbdev/core/fbmem.c
55008@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55009 __u32 data;
55010 int err;
55011
55012- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55013+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55014
55015 data = (__u32) (unsigned long) fix->smem_start;
55016 err |= put_user(data, &fix32->smem_start);
55017diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55018index 4254336..282567e 100644
55019--- a/drivers/video/fbdev/hyperv_fb.c
55020+++ b/drivers/video/fbdev/hyperv_fb.c
55021@@ -240,7 +240,7 @@ static uint screen_fb_size;
55022 static inline int synthvid_send(struct hv_device *hdev,
55023 struct synthvid_msg *msg)
55024 {
55025- static atomic64_t request_id = ATOMIC64_INIT(0);
55026+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55027 int ret;
55028
55029 msg->pipe_hdr.type = PIPE_MSG_DATA;
55030@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55031
55032 ret = vmbus_sendpacket(hdev->channel, msg,
55033 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55034- atomic64_inc_return(&request_id),
55035+ atomic64_inc_return_unchecked(&request_id),
55036 VM_PKT_DATA_INBAND, 0);
55037
55038 if (ret)
55039diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55040index 7672d2e..b56437f 100644
55041--- a/drivers/video/fbdev/i810/i810_accel.c
55042+++ b/drivers/video/fbdev/i810/i810_accel.c
55043@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55044 }
55045 }
55046 printk("ringbuffer lockup!!!\n");
55047+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55048 i810_report_error(mmio);
55049 par->dev_flags |= LOCKUP;
55050 info->pixmap.scan_align = 1;
55051diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55052index a01147f..5d896f8 100644
55053--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55054+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55055@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55056
55057 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55058 struct matrox_switch matrox_mystique = {
55059- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55060+ .preinit = MGA1064_preinit,
55061+ .reset = MGA1064_reset,
55062+ .init = MGA1064_init,
55063+ .restore = MGA1064_restore,
55064 };
55065 EXPORT_SYMBOL(matrox_mystique);
55066 #endif
55067
55068 #ifdef CONFIG_FB_MATROX_G
55069 struct matrox_switch matrox_G100 = {
55070- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55071+ .preinit = MGAG100_preinit,
55072+ .reset = MGAG100_reset,
55073+ .init = MGAG100_init,
55074+ .restore = MGAG100_restore,
55075 };
55076 EXPORT_SYMBOL(matrox_G100);
55077 #endif
55078diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55079index 195ad7c..09743fc 100644
55080--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55081+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55082@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55083 }
55084
55085 struct matrox_switch matrox_millennium = {
55086- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55087+ .preinit = Ti3026_preinit,
55088+ .reset = Ti3026_reset,
55089+ .init = Ti3026_init,
55090+ .restore = Ti3026_restore
55091 };
55092 EXPORT_SYMBOL(matrox_millennium);
55093 #endif
55094diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55095index fe92eed..106e085 100644
55096--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55097+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55098@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55099 struct mb862xxfb_par *par = info->par;
55100
55101 if (info->var.bits_per_pixel == 32) {
55102- info->fbops->fb_fillrect = cfb_fillrect;
55103- info->fbops->fb_copyarea = cfb_copyarea;
55104- info->fbops->fb_imageblit = cfb_imageblit;
55105+ pax_open_kernel();
55106+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55107+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55108+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55109+ pax_close_kernel();
55110 } else {
55111 outreg(disp, GC_L0EM, 3);
55112- info->fbops->fb_fillrect = mb86290fb_fillrect;
55113- info->fbops->fb_copyarea = mb86290fb_copyarea;
55114- info->fbops->fb_imageblit = mb86290fb_imageblit;
55115+ pax_open_kernel();
55116+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55117+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55118+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55119+ pax_close_kernel();
55120 }
55121 outreg(draw, GDC_REG_DRAW_BASE, 0);
55122 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55123diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
55124index def0412..fed6529 100644
55125--- a/drivers/video/fbdev/nvidia/nvidia.c
55126+++ b/drivers/video/fbdev/nvidia/nvidia.c
55127@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55128 info->fix.line_length = (info->var.xres_virtual *
55129 info->var.bits_per_pixel) >> 3;
55130 if (info->var.accel_flags) {
55131- info->fbops->fb_imageblit = nvidiafb_imageblit;
55132- info->fbops->fb_fillrect = nvidiafb_fillrect;
55133- info->fbops->fb_copyarea = nvidiafb_copyarea;
55134- info->fbops->fb_sync = nvidiafb_sync;
55135+ pax_open_kernel();
55136+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55137+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55138+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55139+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55140+ pax_close_kernel();
55141 info->pixmap.scan_align = 4;
55142 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55143 info->flags |= FBINFO_READS_FAST;
55144 NVResetGraphics(info);
55145 } else {
55146- info->fbops->fb_imageblit = cfb_imageblit;
55147- info->fbops->fb_fillrect = cfb_fillrect;
55148- info->fbops->fb_copyarea = cfb_copyarea;
55149- info->fbops->fb_sync = NULL;
55150+ pax_open_kernel();
55151+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55152+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55153+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55154+ *(void **)&info->fbops->fb_sync = NULL;
55155+ pax_close_kernel();
55156 info->pixmap.scan_align = 1;
55157 info->flags |= FBINFO_HWACCEL_DISABLED;
55158 info->flags &= ~FBINFO_READS_FAST;
55159@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55160 info->pixmap.size = 8 * 1024;
55161 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55162
55163- if (!hwcur)
55164- info->fbops->fb_cursor = NULL;
55165+ if (!hwcur) {
55166+ pax_open_kernel();
55167+ *(void **)&info->fbops->fb_cursor = NULL;
55168+ pax_close_kernel();
55169+ }
55170
55171 info->var.accel_flags = (!noaccel);
55172
55173diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
55174index 2412a0d..294215b 100644
55175--- a/drivers/video/fbdev/omap2/dss/display.c
55176+++ b/drivers/video/fbdev/omap2/dss/display.c
55177@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55178 if (dssdev->name == NULL)
55179 dssdev->name = dssdev->alias;
55180
55181+ pax_open_kernel();
55182 if (drv && drv->get_resolution == NULL)
55183- drv->get_resolution = omapdss_default_get_resolution;
55184+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55185 if (drv && drv->get_recommended_bpp == NULL)
55186- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55187+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55188 if (drv && drv->get_timings == NULL)
55189- drv->get_timings = omapdss_default_get_timings;
55190+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55191+ pax_close_kernel();
55192
55193 mutex_lock(&panel_list_mutex);
55194 list_add_tail(&dssdev->panel_list, &panel_list);
55195diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
55196index 83433cb..71e9b98 100644
55197--- a/drivers/video/fbdev/s1d13xxxfb.c
55198+++ b/drivers/video/fbdev/s1d13xxxfb.c
55199@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55200
55201 switch(prod_id) {
55202 case S1D13506_PROD_ID: /* activate acceleration */
55203- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55204- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55205+ pax_open_kernel();
55206+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55207+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55208+ pax_close_kernel();
55209 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55210 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55211 break;
55212diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55213index d3013cd..95b8285 100644
55214--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55215+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55216@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55217 }
55218
55219 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55220- lcdc_sys_write_index,
55221- lcdc_sys_write_data,
55222- lcdc_sys_read_data,
55223+ .write_index = lcdc_sys_write_index,
55224+ .write_data = lcdc_sys_write_data,
55225+ .read_data = lcdc_sys_read_data,
55226 };
55227
55228 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55229diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55230index 9279e5f..d5f5276 100644
55231--- a/drivers/video/fbdev/smscufx.c
55232+++ b/drivers/video/fbdev/smscufx.c
55233@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55234 fb_deferred_io_cleanup(info);
55235 kfree(info->fbdefio);
55236 info->fbdefio = NULL;
55237- info->fbops->fb_mmap = ufx_ops_mmap;
55238+ pax_open_kernel();
55239+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55240+ pax_close_kernel();
55241 }
55242
55243 pr_debug("released /dev/fb%d user=%d count=%d",
55244diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
55245index ff2b873..626a8d5 100644
55246--- a/drivers/video/fbdev/udlfb.c
55247+++ b/drivers/video/fbdev/udlfb.c
55248@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55249 dlfb_urb_completion(urb);
55250
55251 error:
55252- atomic_add(bytes_sent, &dev->bytes_sent);
55253- atomic_add(bytes_identical, &dev->bytes_identical);
55254- atomic_add(width*height*2, &dev->bytes_rendered);
55255+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55256+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55257+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55258 end_cycles = get_cycles();
55259- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55260+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55261 >> 10)), /* Kcycles */
55262 &dev->cpu_kcycles_used);
55263
55264@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55265 dlfb_urb_completion(urb);
55266
55267 error:
55268- atomic_add(bytes_sent, &dev->bytes_sent);
55269- atomic_add(bytes_identical, &dev->bytes_identical);
55270- atomic_add(bytes_rendered, &dev->bytes_rendered);
55271+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55272+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55273+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55274 end_cycles = get_cycles();
55275- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55276+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55277 >> 10)), /* Kcycles */
55278 &dev->cpu_kcycles_used);
55279 }
55280@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55281 fb_deferred_io_cleanup(info);
55282 kfree(info->fbdefio);
55283 info->fbdefio = NULL;
55284- info->fbops->fb_mmap = dlfb_ops_mmap;
55285+ pax_open_kernel();
55286+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55287+ pax_close_kernel();
55288 }
55289
55290 pr_warn("released /dev/fb%d user=%d count=%d\n",
55291@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55292 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55293 struct dlfb_data *dev = fb_info->par;
55294 return snprintf(buf, PAGE_SIZE, "%u\n",
55295- atomic_read(&dev->bytes_rendered));
55296+ atomic_read_unchecked(&dev->bytes_rendered));
55297 }
55298
55299 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55300@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55301 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55302 struct dlfb_data *dev = fb_info->par;
55303 return snprintf(buf, PAGE_SIZE, "%u\n",
55304- atomic_read(&dev->bytes_identical));
55305+ atomic_read_unchecked(&dev->bytes_identical));
55306 }
55307
55308 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55309@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55310 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55311 struct dlfb_data *dev = fb_info->par;
55312 return snprintf(buf, PAGE_SIZE, "%u\n",
55313- atomic_read(&dev->bytes_sent));
55314+ atomic_read_unchecked(&dev->bytes_sent));
55315 }
55316
55317 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55318@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55319 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55320 struct dlfb_data *dev = fb_info->par;
55321 return snprintf(buf, PAGE_SIZE, "%u\n",
55322- atomic_read(&dev->cpu_kcycles_used));
55323+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55324 }
55325
55326 static ssize_t edid_show(
55327@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55328 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55329 struct dlfb_data *dev = fb_info->par;
55330
55331- atomic_set(&dev->bytes_rendered, 0);
55332- atomic_set(&dev->bytes_identical, 0);
55333- atomic_set(&dev->bytes_sent, 0);
55334- atomic_set(&dev->cpu_kcycles_used, 0);
55335+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55336+ atomic_set_unchecked(&dev->bytes_identical, 0);
55337+ atomic_set_unchecked(&dev->bytes_sent, 0);
55338+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55339
55340 return count;
55341 }
55342diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55343index d32d1c4..46722e6 100644
55344--- a/drivers/video/fbdev/uvesafb.c
55345+++ b/drivers/video/fbdev/uvesafb.c
55346@@ -19,6 +19,7 @@
55347 #include <linux/io.h>
55348 #include <linux/mutex.h>
55349 #include <linux/slab.h>
55350+#include <linux/moduleloader.h>
55351 #include <video/edid.h>
55352 #include <video/uvesafb.h>
55353 #ifdef CONFIG_X86
55354@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55355 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55356 par->pmi_setpal = par->ypan = 0;
55357 } else {
55358+
55359+#ifdef CONFIG_PAX_KERNEXEC
55360+#ifdef CONFIG_MODULES
55361+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55362+#endif
55363+ if (!par->pmi_code) {
55364+ par->pmi_setpal = par->ypan = 0;
55365+ return 0;
55366+ }
55367+#endif
55368+
55369 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55370 + task->t.regs.edi);
55371+
55372+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55373+ pax_open_kernel();
55374+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55375+ pax_close_kernel();
55376+
55377+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55378+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55379+#else
55380 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55381 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55382+#endif
55383+
55384 printk(KERN_INFO "uvesafb: protected mode interface info at "
55385 "%04x:%04x\n",
55386 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55387@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55388 par->ypan = ypan;
55389
55390 if (par->pmi_setpal || par->ypan) {
55391+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55392 if (__supported_pte_mask & _PAGE_NX) {
55393 par->pmi_setpal = par->ypan = 0;
55394 printk(KERN_WARNING "uvesafb: NX protection is active, "
55395 "better not use the PMI.\n");
55396- } else {
55397+ } else
55398+#endif
55399 uvesafb_vbe_getpmi(task, par);
55400- }
55401 }
55402 #else
55403 /* The protected mode interface is not available on non-x86. */
55404@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55405 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55406
55407 /* Disable blanking if the user requested so. */
55408- if (!blank)
55409- info->fbops->fb_blank = NULL;
55410+ if (!blank) {
55411+ pax_open_kernel();
55412+ *(void **)&info->fbops->fb_blank = NULL;
55413+ pax_close_kernel();
55414+ }
55415
55416 /*
55417 * Find out how much IO memory is required for the mode with
55418@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55419 info->flags = FBINFO_FLAG_DEFAULT |
55420 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55421
55422- if (!par->ypan)
55423- info->fbops->fb_pan_display = NULL;
55424+ if (!par->ypan) {
55425+ pax_open_kernel();
55426+ *(void **)&info->fbops->fb_pan_display = NULL;
55427+ pax_close_kernel();
55428+ }
55429 }
55430
55431 static void uvesafb_init_mtrr(struct fb_info *info)
55432@@ -1786,6 +1816,11 @@ out_mode:
55433 out:
55434 kfree(par->vbe_modes);
55435
55436+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55437+ if (par->pmi_code)
55438+ module_memfree_exec(par->pmi_code);
55439+#endif
55440+
55441 framebuffer_release(info);
55442 return err;
55443 }
55444@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55445 kfree(par->vbe_state_orig);
55446 kfree(par->vbe_state_saved);
55447
55448+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55449+ if (par->pmi_code)
55450+ module_memfree_exec(par->pmi_code);
55451+#endif
55452+
55453 framebuffer_release(info);
55454 }
55455 return 0;
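
The uvesafb changes (and the vesafb diff directly below, which follows the same scheme) address KERNEXEC's consequence for the VESA protected-mode interface: the BIOS-provided PMI code blob used for palette and panning calls would normally execute out of ordinary kernel data, which KERNEXEC forbids, so the blob is copied into executable memory from module_alloc_exec(), the entry points are recomputed inside the copy, and ktva_ktla() translates them to the executable alias. Schematically (map_pmi() and offsets[] are hypothetical; offsets[1]/offsets[2] mirror pmi_base[1]/pmi_base[2] in the hunks; kernel-context sketch):

    static int map_pmi(const u8 *blob, size_t blob_size, const u16 *offsets,
                       void (**start)(void), void (**pal)(void))
    {
            u8 *code = module_alloc_exec(blob_size); /* executable allocation */

            if (!code)
                    return -ENOMEM;
            pax_open_kernel();
            memcpy(code, blob, blob_size);           /* relocate the PMI blob */
            pax_close_kernel();
            *start = (void (*)(void))(code + offsets[1]);
            *pal   = (void (*)(void))(code + offsets[2]);
            return 0;
    }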
55456diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55457index d79a0ac..2d0c3d4 100644
55458--- a/drivers/video/fbdev/vesafb.c
55459+++ b/drivers/video/fbdev/vesafb.c
55460@@ -9,6 +9,7 @@
55461 */
55462
55463 #include <linux/module.h>
55464+#include <linux/moduleloader.h>
55465 #include <linux/kernel.h>
55466 #include <linux/errno.h>
55467 #include <linux/string.h>
55468@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55469 static int vram_total; /* Set total amount of memory */
55470 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55471 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55472-static void (*pmi_start)(void) __read_mostly;
55473-static void (*pmi_pal) (void) __read_mostly;
55474+static void (*pmi_start)(void) __read_only;
55475+static void (*pmi_pal) (void) __read_only;
55476 static int depth __read_mostly;
55477 static int vga_compat __read_mostly;
55478 /* --------------------------------------------------------------------- */
55479@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55480 unsigned int size_remap;
55481 unsigned int size_total;
55482 char *option = NULL;
55483+ void *pmi_code = NULL;
55484
55485 /* ignore error return of fb_get_options */
55486 fb_get_options("vesafb", &option);
55487@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55488 size_remap = size_total;
55489 vesafb_fix.smem_len = size_remap;
55490
55491-#ifndef __i386__
55492- screen_info.vesapm_seg = 0;
55493-#endif
55494-
55495 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55496 printk(KERN_WARNING
55497 "vesafb: cannot reserve video memory at 0x%lx\n",
55498@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55499 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55500 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55501
55502+#ifdef __i386__
55503+
55504+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55505+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55506+ if (!pmi_code)
55507+#elif !defined(CONFIG_PAX_KERNEXEC)
55508+ if (0)
55509+#endif
55510+
55511+#endif
55512+ screen_info.vesapm_seg = 0;
55513+
55514 if (screen_info.vesapm_seg) {
55515- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55516- screen_info.vesapm_seg,screen_info.vesapm_off);
55517+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55518+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55519 }
55520
55521 if (screen_info.vesapm_seg < 0xc000)
55522@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55523
55524 if (ypan || pmi_setpal) {
55525 unsigned short *pmi_base;
55526+
55527 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55528- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55529- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55530+
55531+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55532+ pax_open_kernel();
55533+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55534+#else
55535+ pmi_code = pmi_base;
55536+#endif
55537+
55538+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55539+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55540+
55541+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55542+ pmi_start = ktva_ktla(pmi_start);
55543+ pmi_pal = ktva_ktla(pmi_pal);
55544+ pax_close_kernel();
55545+#endif
55546+
55547 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55548 if (pmi_base[3]) {
55549 printk(KERN_INFO "vesafb: pmi: ports = ");
55550@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55551 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55552 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55553
55554- if (!ypan)
55555- info->fbops->fb_pan_display = NULL;
55556+ if (!ypan) {
55557+ pax_open_kernel();
55558+ *(void **)&info->fbops->fb_pan_display = NULL;
55559+ pax_close_kernel();
55560+ }
55561
55562 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55563 err = -ENOMEM;
55564@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55565 fb_info(info, "%s frame buffer device\n", info->fix.id);
55566 return 0;
55567 err:
55568+
55569+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55570+ module_memfree_exec(pmi_code);
55571+#endif
55572+
55573 if (info->screen_base)
55574 iounmap(info->screen_base);
55575 framebuffer_release(info);
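Editor's note on the vesafb changes above: the BIOS places the VBE protected-mode interface (PMI) code in ordinary data pages, which KERNEXEC maps non-executable, and the patch also retypes pmi_start/pmi_pal from __read_mostly to __read_only. The probe path therefore copies the PMI blob into an executable allocation and rebases the two entry points into the copy. The preprocessor construct around "screen_info.vesapm_seg = 0" keeps every configuration working: with MODULES+KERNEXEC on i386 the PMI is disabled only if module_alloc_exec() fails, without KERNEXEC the "if (0)" head preserves the original behaviour, and on non-i386 the assignment runs unconditionally, exactly as the deleted "#ifndef __i386__" block did. A sketch of the relocation, assuming the PaX helpers named in this patch (module_alloc_exec, ktva_ktla):

	/* Sketch only: pmi_base[1] and pmi_base[2] are the VBE offsets of
	 * the "set display start" and "set palette" entries inside the
	 * PMI blob; ktva_ktla() translates the stored pointers between
	 * the kernel-text mapping and its alias so they remain callable. */
	void *code = module_alloc_exec(screen_info.vesapm_size);
	if (code) {
		pax_open_kernel();	/* the region is write-protected otherwise */
		memcpy(code, pmi_base, screen_info.vesapm_size);
		pmi_start = ktva_ktla((void *)((char *)code + pmi_base[1]));
		pmi_pal   = ktva_ktla((void *)((char *)code + pmi_base[2]));
		pax_close_kernel();
	}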
55576diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55577index 88714ae..16c2e11 100644
55578--- a/drivers/video/fbdev/via/via_clock.h
55579+++ b/drivers/video/fbdev/via/via_clock.h
55580@@ -56,7 +56,7 @@ struct via_clock {
55581
55582 void (*set_engine_pll_state)(u8 state);
55583 void (*set_engine_pll)(struct via_pll_config config);
55584-};
55585+} __no_const;
55586
55587
55588 static inline u32 get_pll_internal_frequency(u32 ref_freq,
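Editor's note: __no_const is the opt-out for the constify plugin, which otherwise makes structures consisting only of function pointers implicitly const. via_clock's ops are assigned at probe time, so the type must stay writable. A minimal sketch of the pattern (hw_clk_ops and its members are hypothetical names, not from the patch):

	/* Sketch only: without __no_const the constify plugin would make
	 * this function-pointer-only struct const, and the runtime
	 * assignment below would fault in the read-only section. */
	struct hw_clk_ops {
		void (*set_rate)(u32 rate);
		void (*enable)(u8 state);
	} __no_const;

	static struct hw_clk_ops clk_ops;

	static void hw_probe(void)
	{
		clk_ops.set_rate = NULL;	/* runtime setup stays legal */
	}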
55589diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55590index 3c14e43..2630570 100644
55591--- a/drivers/video/logo/logo_linux_clut224.ppm
55592+++ b/drivers/video/logo/logo_linux_clut224.ppm
55593@@ -2,1603 +2,1123 @@ P3
55594 # Standard 224-color Linux logo
55595 80 80
55596 255
[ raw PPM pixel rows elided: the remainder of this hunk deletes the pixel block of the stock 80x80, 224-color Linux boot logo and substitutes replacement image data ]
57063-226 170 11 236 178 12 236 178 12 234 174 13
57064-219 162 10 197 138 11 163 110 8 130 83 6
57065- 91 60 6 10 10 10 2 2 6 2 2 6
57066- 18 18 18 38 38 38 38 38 38 38 38 38
57067- 38 38 38 38 38 38 38 38 38 38 38 38
57068- 38 38 38 38 38 38 26 26 26 2 2 6
57069- 2 2 6 6 6 6 70 47 6 137 92 6
57070-175 118 6 200 144 11 219 162 10 230 174 11
57071-234 174 13 230 174 11 219 162 10 210 150 10
57072-192 133 9 163 110 8 124 112 88 82 82 82
57073- 50 50 50 30 30 30 14 14 14 6 6 6
57074- 0 0 0 0 0 0 0 0 0 0 0 0
57075- 0 0 0 0 0 0 0 0 0 0 0 0
57076- 0 0 0 0 0 0 0 0 0 0 0 0
57077- 0 0 0 0 0 0 0 0 0 0 0 0
57078- 0 0 0 0 0 0 0 0 0 0 0 0
57079- 6 6 6 14 14 14 22 22 22 34 34 34
57080- 42 42 42 58 58 58 74 74 74 86 86 86
57081-101 98 89 122 102 70 130 98 46 121 87 25
57082-137 92 6 152 99 6 163 110 8 180 123 7
57083-185 133 11 197 138 11 206 145 10 200 144 11
57084-180 123 7 156 107 11 130 83 6 104 69 6
57085- 50 34 6 54 54 54 110 110 110 101 98 89
57086- 86 86 86 82 82 82 78 78 78 78 78 78
57087- 78 78 78 78 78 78 78 78 78 78 78 78
57088- 78 78 78 82 82 82 86 86 86 94 94 94
57089-106 106 106 101 101 101 86 66 34 124 80 6
57090-156 107 11 180 123 7 192 133 9 200 144 11
57091-206 145 10 200 144 11 192 133 9 175 118 6
57092-139 102 15 109 106 95 70 70 70 42 42 42
57093- 22 22 22 10 10 10 0 0 0 0 0 0
57094- 0 0 0 0 0 0 0 0 0 0 0 0
57095- 0 0 0 0 0 0 0 0 0 0 0 0
57096- 0 0 0 0 0 0 0 0 0 0 0 0
57097- 0 0 0 0 0 0 0 0 0 0 0 0
57098- 0 0 0 0 0 0 0 0 0 0 0 0
57099- 0 0 0 0 0 0 6 6 6 10 10 10
57100- 14 14 14 22 22 22 30 30 30 38 38 38
57101- 50 50 50 62 62 62 74 74 74 90 90 90
57102-101 98 89 112 100 78 121 87 25 124 80 6
57103-137 92 6 152 99 6 152 99 6 152 99 6
57104-138 86 6 124 80 6 98 70 6 86 66 30
57105-101 98 89 82 82 82 58 58 58 46 46 46
57106- 38 38 38 34 34 34 34 34 34 34 34 34
57107- 34 34 34 34 34 34 34 34 34 34 34 34
57108- 34 34 34 34 34 34 38 38 38 42 42 42
57109- 54 54 54 82 82 82 94 86 76 91 60 6
57110-134 86 6 156 107 11 167 114 7 175 118 6
57111-175 118 6 167 114 7 152 99 6 121 87 25
57112-101 98 89 62 62 62 34 34 34 18 18 18
57113- 6 6 6 0 0 0 0 0 0 0 0 0
57114- 0 0 0 0 0 0 0 0 0 0 0 0
57115- 0 0 0 0 0 0 0 0 0 0 0 0
57116- 0 0 0 0 0 0 0 0 0 0 0 0
57117- 0 0 0 0 0 0 0 0 0 0 0 0
57118- 0 0 0 0 0 0 0 0 0 0 0 0
57119- 0 0 0 0 0 0 0 0 0 0 0 0
57120- 0 0 0 6 6 6 6 6 6 10 10 10
57121- 18 18 18 22 22 22 30 30 30 42 42 42
57122- 50 50 50 66 66 66 86 86 86 101 98 89
57123-106 86 58 98 70 6 104 69 6 104 69 6
57124-104 69 6 91 60 6 82 62 34 90 90 90
57125- 62 62 62 38 38 38 22 22 22 14 14 14
57126- 10 10 10 10 10 10 10 10 10 10 10 10
57127- 10 10 10 10 10 10 6 6 6 10 10 10
57128- 10 10 10 10 10 10 10 10 10 14 14 14
57129- 22 22 22 42 42 42 70 70 70 89 81 66
57130- 80 54 7 104 69 6 124 80 6 137 92 6
57131-134 86 6 116 81 8 100 82 52 86 86 86
57132- 58 58 58 30 30 30 14 14 14 6 6 6
57133- 0 0 0 0 0 0 0 0 0 0 0 0
57134- 0 0 0 0 0 0 0 0 0 0 0 0
57135- 0 0 0 0 0 0 0 0 0 0 0 0
57136- 0 0 0 0 0 0 0 0 0 0 0 0
57137- 0 0 0 0 0 0 0 0 0 0 0 0
57138- 0 0 0 0 0 0 0 0 0 0 0 0
57139- 0 0 0 0 0 0 0 0 0 0 0 0
57140- 0 0 0 0 0 0 0 0 0 0 0 0
57141- 0 0 0 6 6 6 10 10 10 14 14 14
57142- 18 18 18 26 26 26 38 38 38 54 54 54
57143- 70 70 70 86 86 86 94 86 76 89 81 66
57144- 89 81 66 86 86 86 74 74 74 50 50 50
57145- 30 30 30 14 14 14 6 6 6 0 0 0
57146- 0 0 0 0 0 0 0 0 0 0 0 0
57147- 0 0 0 0 0 0 0 0 0 0 0 0
57148- 0 0 0 0 0 0 0 0 0 0 0 0
57149- 6 6 6 18 18 18 34 34 34 58 58 58
57150- 82 82 82 89 81 66 89 81 66 89 81 66
57151- 94 86 66 94 86 76 74 74 74 50 50 50
57152- 26 26 26 14 14 14 6 6 6 0 0 0
57153- 0 0 0 0 0 0 0 0 0 0 0 0
57154- 0 0 0 0 0 0 0 0 0 0 0 0
57155- 0 0 0 0 0 0 0 0 0 0 0 0
57156- 0 0 0 0 0 0 0 0 0 0 0 0
57157- 0 0 0 0 0 0 0 0 0 0 0 0
57158- 0 0 0 0 0 0 0 0 0 0 0 0
57159- 0 0 0 0 0 0 0 0 0 0 0 0
57160- 0 0 0 0 0 0 0 0 0 0 0 0
57161- 0 0 0 0 0 0 0 0 0 0 0 0
57162- 6 6 6 6 6 6 14 14 14 18 18 18
57163- 30 30 30 38 38 38 46 46 46 54 54 54
57164- 50 50 50 42 42 42 30 30 30 18 18 18
57165- 10 10 10 0 0 0 0 0 0 0 0 0
57166- 0 0 0 0 0 0 0 0 0 0 0 0
57167- 0 0 0 0 0 0 0 0 0 0 0 0
57168- 0 0 0 0 0 0 0 0 0 0 0 0
57169- 0 0 0 6 6 6 14 14 14 26 26 26
57170- 38 38 38 50 50 50 58 58 58 58 58 58
57171- 54 54 54 42 42 42 30 30 30 18 18 18
57172- 10 10 10 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 0 0 0 0 0 0
57174- 0 0 0 0 0 0 0 0 0 0 0 0
57175- 0 0 0 0 0 0 0 0 0 0 0 0
57176- 0 0 0 0 0 0 0 0 0 0 0 0
57177- 0 0 0 0 0 0 0 0 0 0 0 0
57178- 0 0 0 0 0 0 0 0 0 0 0 0
57179- 0 0 0 0 0 0 0 0 0 0 0 0
57180- 0 0 0 0 0 0 0 0 0 0 0 0
57181- 0 0 0 0 0 0 0 0 0 0 0 0
57182- 0 0 0 0 0 0 0 0 0 6 6 6
57183- 6 6 6 10 10 10 14 14 14 18 18 18
57184- 18 18 18 14 14 14 10 10 10 6 6 6
57185- 0 0 0 0 0 0 0 0 0 0 0 0
57186- 0 0 0 0 0 0 0 0 0 0 0 0
57187- 0 0 0 0 0 0 0 0 0 0 0 0
57188- 0 0 0 0 0 0 0 0 0 0 0 0
57189- 0 0 0 0 0 0 0 0 0 6 6 6
57190- 14 14 14 18 18 18 22 22 22 22 22 22
57191- 18 18 18 14 14 14 10 10 10 6 6 6
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 0 0 0 0 0 0 0 0 0
57194- 0 0 0 0 0 0 0 0 0 0 0 0
57195- 0 0 0 0 0 0 0 0 0 0 0 0
57196- 0 0 0 0 0 0 0 0 0 0 0 0
57197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57210+4 4 4 4 4 4
57211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57224+4 4 4 4 4 4
57225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57238+4 4 4 4 4 4
57239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57252+4 4 4 4 4 4
57253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57266+4 4 4 4 4 4
57267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57280+4 4 4 4 4 4
57281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57285+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
57286+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
57287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57290+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
57291+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57292+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
57293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57294+4 4 4 4 4 4
57295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57299+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
57300+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
57301+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57304+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
57305+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
57306+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
57307+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57308+4 4 4 4 4 4
57309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57313+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
57314+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
57315+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57318+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
57319+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
57320+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
57321+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
57322+4 4 4 4 4 4
57323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57326+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
57327+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
57328+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
57329+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
57330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57331+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57332+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
57333+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
57334+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
57335+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
57336+4 4 4 4 4 4
57337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57340+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
57341+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
57342+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
57343+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
57344+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57345+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
57346+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
57347+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
57348+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
57349+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
57350+4 4 4 4 4 4
57351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57354+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
57355+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
57356+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
57357+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
57358+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
57359+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
57360+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
57361+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
57362+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
57363+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
57364+4 4 4 4 4 4
57365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57367+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
57368+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
57369+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
57370+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
57371+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
57372+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
57373+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
57374+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
57375+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
57376+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
57377+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
57378+4 4 4 4 4 4
57379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57381+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
57382+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
57383+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
57384+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
57385+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
57386+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
57387+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
57388+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
57389+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
57390+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
57391+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
57392+4 4 4 4 4 4
57393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57395+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
57396+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
57397+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
57398+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
57399+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
57400+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
57401+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
57402+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
57403+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
57404+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
57405+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57406+4 4 4 4 4 4
57407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57409+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
57410+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
57411+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
57412+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
57413+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
57414+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
57415+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
57416+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
57417+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
57418+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
57419+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
57420+4 4 4 4 4 4
57421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57422+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
57423+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
57424+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
57425+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
57426+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
57427+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
57428+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
57429+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
57430+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
57431+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
57432+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
57433+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
57434+4 4 4 4 4 4
57435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57436+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
57437+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
57438+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
57439+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57440+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
57441+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
57442+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
57443+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
57444+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
57445+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
57446+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
57447+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
57448+0 0 0 4 4 4
57449+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57450+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
57451+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
57452+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
57453+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
57454+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
57455+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
57456+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
57457+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
57458+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
57459+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
57460+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
57461+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
57462+2 0 0 0 0 0
57463+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
57464+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
57465+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
57466+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
57467+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
57468+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
57469+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
57470+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
57471+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
57472+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
57473+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
57474+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
57475+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
57476+37 38 37 0 0 0
57477+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57478+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
57479+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
57480+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
57481+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
57482+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
57483+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
57484+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
57485+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
57486+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
57487+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
57488+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
57489+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
57490+85 115 134 4 0 0
57491+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
57492+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
57493+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
57494+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
57495+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
57496+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
57497+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
57498+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
57499+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
57500+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
57501+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
57502+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
57503+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
57504+60 73 81 4 0 0
57505+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
57506+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
57507+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
57508+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
57509+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
57510+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
57511+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
57512+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
57513+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
57514+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
57515+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
57516+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
57517+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
57518+16 19 21 4 0 0
57519+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
57520+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
57521+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
57522+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
57523+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
57524+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
57525+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
57526+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
57527+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
57528+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
57529+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
57530+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
57531+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
57532+4 0 0 4 3 3
57533+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
57534+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
57535+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
57536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
57537+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
57538+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
57539+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
57540+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
57541+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
57542+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
57543+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
57544+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
57545+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
57546+3 2 2 4 4 4
57547+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
57548+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
57549+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
57550+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57551+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
57552+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
57553+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
57554+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
57555+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
57556+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
57557+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
57558+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
57559+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
57560+4 4 4 4 4 4
57561+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
57562+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
57563+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
57564+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
57565+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
57566+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
57567+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
57568+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
57569+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
57570+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
57571+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
57572+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
57573+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
57574+4 4 4 4 4 4
57575+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
57576+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
57577+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
57578+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
57579+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
57580+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57581+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
57582+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
57583+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
57584+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
57585+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
57586+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
57587+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
57588+5 5 5 5 5 5
57589+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
57590+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
57591+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
57592+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
57593+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
57594+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57595+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
57596+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
57597+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
57598+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
57599+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
57600+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
57601+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57602+5 5 5 4 4 4
57603+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
57604+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
57605+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
57606+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
57607+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57608+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
57609+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
57610+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
57611+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
57612+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
57613+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
57614+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57616+4 4 4 4 4 4
57617+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
57618+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
57619+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
57620+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
57621+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
57622+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57623+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57624+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
57625+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
57626+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
57627+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
57628+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
57629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57630+4 4 4 4 4 4
57631+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
57632+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
57633+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
57634+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
57635+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57636+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
57637+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
57638+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
57639+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
57640+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
57641+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
57642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57644+4 4 4 4 4 4
57645+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
57646+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
57647+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
57648+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
57649+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57650+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57651+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57652+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
57653+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
57654+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
57655+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
57656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57658+4 4 4 4 4 4
57659+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
57660+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
57661+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
57662+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
57663+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57664+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
57665+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57666+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
57667+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
57668+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
57669+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57672+4 4 4 4 4 4
57673+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
57674+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
57675+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
57676+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
57677+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57678+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
57679+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
57680+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
57681+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
57682+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
57683+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
57684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57686+4 4 4 4 4 4
57687+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
57688+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
57689+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
57690+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
57691+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57692+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
57693+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
57694+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
57695+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
57696+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
57697+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
57698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57700+4 4 4 4 4 4
57701+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
57702+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
57703+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
57704+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57705+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
57706+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
57707+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
57708+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
57709+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
57710+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
57711+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57714+4 4 4 4 4 4
57715+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
57716+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
57717+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
57718+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57719+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57720+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
57721+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
57722+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
57723+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
57724+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
57725+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57728+4 4 4 4 4 4
57729+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
57730+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
57731+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57732+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57733+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57734+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
57735+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
57736+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
57737+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
57738+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
57739+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57742+4 4 4 4 4 4
57743+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
57744+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
57745+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57746+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57747+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57748+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
57749+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
57750+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
57751+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57752+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57753+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57756+4 4 4 4 4 4
57757+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57758+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
57759+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57760+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
57761+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
57762+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
57763+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
57764+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
57765+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57766+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57767+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57770+4 4 4 4 4 4
57771+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57772+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
57773+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57774+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
57775+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57776+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
57777+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
57778+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
57779+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57780+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57781+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57784+4 4 4 4 4 4
57785+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
57786+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
57787+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57788+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
57789+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
57790+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
57791+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
57792+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
57793+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57794+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57795+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57798+4 4 4 4 4 4
57799+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
57800+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
57801+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57802+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
57803+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
57804+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
57805+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
57806+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
57807+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57808+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57809+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57812+4 4 4 4 4 4
57813+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57814+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
57815+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57816+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
57817+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
57818+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
57819+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
57820+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
57821+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57822+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57823+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57826+4 4 4 4 4 4
57827+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
57828+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
57829+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57830+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
57831+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
57832+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
57833+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
57834+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
57835+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
57836+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57837+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57840+4 4 4 4 4 4
57841+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57842+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
57843+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
57844+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
57845+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
57846+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
57847+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
57848+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
57849+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57850+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57851+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57854+4 4 4 4 4 4
57855+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57856+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
57857+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57858+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
57859+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
57860+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
57861+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
57862+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
57863+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57864+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57865+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57868+4 4 4 4 4 4
57869+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57870+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
57871+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
57872+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
57873+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
57874+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
57875+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57876+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
57877+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57878+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57879+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57882+4 4 4 4 4 4
57883+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57884+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
57885+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
57886+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57887+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
57888+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
57889+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57890+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
57891+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57892+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57893+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57896+4 4 4 4 4 4
57897+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57898+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
57899+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
57900+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
57901+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
57902+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
57903+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
57904+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
57905+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
57906+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57907+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57910+4 4 4 4 4 4
57911+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57912+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
57913+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
57914+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
57915+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
57916+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
57917+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
57918+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
57919+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
57920+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57921+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57924+4 4 4 4 4 4
57925+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
57926+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
57927+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
57928+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
57929+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57930+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
57931+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
57932+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
57933+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
57934+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57935+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57938+4 4 4 4 4 4
57939+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57940+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
57941+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
57942+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
57943+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
57944+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
57945+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
57946+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
57947+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
57948+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57949+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57952+4 4 4 4 4 4
57953+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
57954+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
57955+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
57956+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
57957+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
57958+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
57959+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
57960+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
57961+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
57962+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
57963+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57966+4 4 4 4 4 4
57967+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
57968+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57969+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
57970+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
57971+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
57972+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
57973+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
57974+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
57975+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
57976+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
57977+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57980+4 4 4 4 4 4
57981+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
57982+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57983+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
57984+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
57985+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
57986+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
57987+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57988+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
57989+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
57990+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
57991+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57994+4 4 4 4 4 4
57995+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
57996+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
57997+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
57998+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
57999+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58000+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58001+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58002+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58003+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58004+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58005+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58008+4 4 4 4 4 4
58009+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58010+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58011+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58012+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58013+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58014+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58015+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58016+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58017+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58018+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58022+4 4 4 4 4 4
58023+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58024+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58025+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58026+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58027+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58028+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58029+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58030+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58031+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58032+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58036+4 4 4 4 4 4
[ ... image data: several hundred further lines of ASCII PPM pixel triplets ("R G B" values), the payload of a boot-logo replacement hunk; elided here since it carries no reviewable content beyond the logo image itself ... ]
58317diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
58318index 70fba97..8ec7f86 100644
58319--- a/drivers/xen/events/events_base.c
58320+++ b/drivers/xen/events/events_base.c
58321@@ -1563,7 +1563,7 @@ void xen_irq_resume(void)
58322 restore_pirqs();
58323 }
58324
58325-static struct irq_chip xen_dynamic_chip __read_mostly = {
58326+static struct irq_chip xen_dynamic_chip = {
58327 .name = "xen-dyn",
58328
58329 .irq_disable = disable_dynirq,
58330@@ -1577,7 +1577,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
58331 .irq_retrigger = retrigger_dynirq,
58332 };
58333
58334-static struct irq_chip xen_pirq_chip __read_mostly = {
58335+static struct irq_chip xen_pirq_chip = {
58336 .name = "xen-pirq",
58337
58338 .irq_startup = startup_pirq,
58339@@ -1597,7 +1597,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
58340 .irq_retrigger = retrigger_dynirq,
58341 };
58342
58343-static struct irq_chip xen_percpu_chip __read_mostly = {
58344+static struct irq_chip xen_percpu_chip = {
58345 .name = "xen-percpu",
58346
58347 .irq_disable = disable_dynirq,
58348diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
58349index fef20db..d28b1ab 100644
58350--- a/drivers/xen/xenfs/xenstored.c
58351+++ b/drivers/xen/xenfs/xenstored.c
58352@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
58353 static int xsd_kva_open(struct inode *inode, struct file *file)
58354 {
58355 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
58356+#ifdef CONFIG_GRKERNSEC_HIDESYM
58357+ NULL);
58358+#else
58359 xen_store_interface);
58360+#endif
58361+
58362 if (!file->private_data)
58363 return -ENOMEM;
58364 return 0;
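The hunk above is the standard GRKERNSEC_HIDESYM move: the string handed back to userspace keeps its format, but the kernel pointer is swapped for NULL so the mapping address of xen_store_interface is not leaked. A minimal sketch of the same pattern, with a hypothetical function name and a stand-in config symbol:

/* Sketch: hide a kernel address from a userspace-visible string.
 * CONFIG_EXAMPLE_HIDESYM stands in for CONFIG_GRKERNSEC_HIDESYM. */
static char *format_iface_addr(void *iface)
{
#ifdef CONFIG_EXAMPLE_HIDESYM
	return kasprintf(GFP_KERNEL, "0x%p", NULL);	/* constant, leaks nothing */
#else
	return kasprintf(GFP_KERNEL, "0x%p", iface);	/* real kernel address */
#endif
}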
58365diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58366index eb14e05..5156de7 100644
58367--- a/fs/9p/vfs_addr.c
58368+++ b/fs/9p/vfs_addr.c
58369@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58370
58371 retval = v9fs_file_write_internal(inode,
58372 v9inode->writeback_fid,
58373- (__force const char __user *)buffer,
58374+ (const char __force_user *)buffer,
58375 len, &offset, 0);
58376 if (retval > 0)
58377 retval = 0;
58378diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58379index 3662f1d..90558b5 100644
58380--- a/fs/9p/vfs_inode.c
58381+++ b/fs/9p/vfs_inode.c
58382@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58383 void
58384 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58385 {
58386- char *s = nd_get_link(nd);
58387+ const char *s = nd_get_link(nd);
58388
58389 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
58390 dentry, IS_ERR(s) ? "<error>" : s);
58391diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58392index 270c481..0d8a962 100644
58393--- a/fs/Kconfig.binfmt
58394+++ b/fs/Kconfig.binfmt
58395@@ -106,7 +106,7 @@ config HAVE_AOUT
58396
58397 config BINFMT_AOUT
58398 tristate "Kernel support for a.out and ECOFF binaries"
58399- depends on HAVE_AOUT
58400+ depends on HAVE_AOUT && BROKEN
58401 ---help---
58402 A.out (Assembler.OUTput) is a set of formats for libraries and
58403 executables used in the earliest versions of UNIX. Linux used
58404diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58405index 8a1d38e..300a14e 100644
58406--- a/fs/afs/inode.c
58407+++ b/fs/afs/inode.c
58408@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58409 struct afs_vnode *vnode;
58410 struct super_block *sb;
58411 struct inode *inode;
58412- static atomic_t afs_autocell_ino;
58413+ static atomic_unchecked_t afs_autocell_ino;
58414
58415 _enter("{%x:%u},%*.*s,",
58416 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58417@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58418 data.fid.unique = 0;
58419 data.fid.vnode = 0;
58420
58421- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58422+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58423 afs_iget5_autocell_test, afs_iget5_set,
58424 &data);
58425 if (!inode) {
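The atomic_unchecked_t conversion above is the PaX REFCOUNT opt-out. Under REFCOUNT, arithmetic on plain atomic_t traps on overflow to turn reference-count bugs into clean kills; counters that are mere ID generators, where wraparound is harmless, are switched to the unchecked type and accessors so they never trip the detector. The pattern, sketched with a made-up counter name:

/* ID generator: overflow is harmless here, so opt out of REFCOUNT
 * overflow trapping via the _unchecked type and helpers. */
static atomic_unchecked_t example_ino_seq = ATOMIC_INIT(0);

static unsigned long next_example_ino(void)
{
	return atomic_inc_return_unchecked(&example_ino_seq);
}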
58426diff --git a/fs/aio.c b/fs/aio.c
58427index a793f70..46f45af 100644
58428--- a/fs/aio.c
58429+++ b/fs/aio.c
58430@@ -404,7 +404,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58431 size += sizeof(struct io_event) * nr_events;
58432
58433 nr_pages = PFN_UP(size);
58434- if (nr_pages < 0)
58435+ if (nr_pages <= 0)
58436 return -EINVAL;
58437
58438 file = aio_private_file(ctx, nr_pages);
58439diff --git a/fs/attr.c b/fs/attr.c
58440index 6530ced..4a827e2 100644
58441--- a/fs/attr.c
58442+++ b/fs/attr.c
58443@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58444 unsigned long limit;
58445
58446 limit = rlimit(RLIMIT_FSIZE);
58447+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58448 if (limit != RLIM_INFINITY && offset > limit)
58449 goto out_sig;
58450 if (offset > inode->i_sb->s_maxbytes)
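gr_learn_resource() above is grsecurity's learning-mode hook: it records the demanded resource (the write offset, checked against RLIMIT_FSIZE) before the stock limit check runs, so learning mode sees the demand even when enforcement would reject it and can later emit a least-privilege policy. A no-op stub with the hook's shape, as inferred from its call sites in this patch:

/* Stand-in only: the real body lives in grsecurity's learning code
 * and logs (task, resource, wanted, gt) for policy generation. */
static inline void gr_learn_resource(const struct task_struct *task,
				     const int res,
				     const unsigned long wanted,
				     const int gt)
{
}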
58451diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58452index 116fd38..c04182da 100644
58453--- a/fs/autofs4/waitq.c
58454+++ b/fs/autofs4/waitq.c
58455@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58456 {
58457 unsigned long sigpipe, flags;
58458 mm_segment_t fs;
58459- const char *data = (const char *)addr;
58460+ const char __user *data = (const char __force_user *)addr;
58461 ssize_t wr = 0;
58462
58463 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58464@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58465 return 1;
58466 }
58467
58468+#ifdef CONFIG_GRKERNSEC_HIDESYM
58469+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58470+#endif
58471+
58472 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58473 enum autofs_notify notify)
58474 {
58475@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58476
58477 /* If this is a direct mount request create a dummy name */
58478 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58479+#ifdef CONFIG_GRKERNSEC_HIDESYM
58480+ /* this name does get written to userland via autofs4_write() */
58481+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58482+#else
58483 qstr.len = sprintf(name, "%p", dentry);
58484+#endif
58485 else {
58486 qstr.len = autofs4_getpath(sbi, dentry, &name);
58487 if (!qstr.len) {
58488diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58489index 2722387..56059b5 100644
58490--- a/fs/befs/endian.h
58491+++ b/fs/befs/endian.h
58492@@ -11,7 +11,7 @@
58493
58494 #include <asm/byteorder.h>
58495
58496-static inline u64
58497+static inline u64 __intentional_overflow(-1)
58498 fs64_to_cpu(const struct super_block *sb, fs64 n)
58499 {
58500 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58501@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58502 return (__force fs64)cpu_to_be64(n);
58503 }
58504
58505-static inline u32
58506+static inline u32 __intentional_overflow(-1)
58507 fs32_to_cpu(const struct super_block *sb, fs32 n)
58508 {
58509 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58510@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58511 return (__force fs32)cpu_to_be32(n);
58512 }
58513
58514-static inline u16
58515+static inline u16 __intentional_overflow(-1)
58516 fs16_to_cpu(const struct super_block *sb, fs16 n)
58517 {
58518 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
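__intentional_overflow(-1) above is an annotation for the PaX size_overflow gcc plugin: byte-order conversion helpers legitimately produce values the plugin's arithmetic instrumentation would flag, so they are exempted at the declaration. Sketched with a hypothetical helper, including the usual no-op fallback for builds without the plugin:

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* expands to nothing without the plugin */
#endif

static inline u64 __intentional_overflow(-1)
example_fs64_to_cpu(__le64 n)
{
	return le64_to_cpu(n);	/* benign "overflow" from the plugin's view */
}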
58519diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58520index 4c55668..eeae150 100644
58521--- a/fs/binfmt_aout.c
58522+++ b/fs/binfmt_aout.c
58523@@ -16,6 +16,7 @@
58524 #include <linux/string.h>
58525 #include <linux/fs.h>
58526 #include <linux/file.h>
58527+#include <linux/security.h>
58528 #include <linux/stat.h>
58529 #include <linux/fcntl.h>
58530 #include <linux/ptrace.h>
58531@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58532 #endif
58533 # define START_STACK(u) ((void __user *)u.start_stack)
58534
58535+ memset(&dump, 0, sizeof(dump));
58536+
58537 fs = get_fs();
58538 set_fs(KERNEL_DS);
58539 has_dumped = 1;
58540@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58541
58542 /* If the size of the dump file exceeds the rlimit, then see what would happen
58543 if we wrote the stack, but not the data area. */
58544+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58545 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58546 dump.u_dsize = 0;
58547
58548 /* Make sure we have enough room to write the stack and data areas. */
58549+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58550 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58551 dump.u_ssize = 0;
58552
58553@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58554 rlim = rlimit(RLIMIT_DATA);
58555 if (rlim >= RLIM_INFINITY)
58556 rlim = ~0;
58557+
58558+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58559 if (ex.a_data + ex.a_bss > rlim)
58560 return -ENOMEM;
58561
58562@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58563
58564 install_exec_creds(bprm);
58565
58566+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58567+ current->mm->pax_flags = 0UL;
58568+#endif
58569+
58570+#ifdef CONFIG_PAX_PAGEEXEC
58571+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58572+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58573+
58574+#ifdef CONFIG_PAX_EMUTRAMP
58575+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58576+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58577+#endif
58578+
58579+#ifdef CONFIG_PAX_MPROTECT
58580+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58581+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58582+#endif
58583+
58584+ }
58585+#endif
58586+
58587 if (N_MAGIC(ex) == OMAGIC) {
58588 unsigned long text_addr, map_size;
58589 loff_t pos;
58590@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58591 return error;
58592
58593 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58594- PROT_READ | PROT_WRITE | PROT_EXEC,
58595+ PROT_READ | PROT_WRITE,
58596 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58597 fd_offset + ex.a_text);
58598 if (error != N_DATADDR(ex))
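Two hardening moves in the a.out loader above: the binary's N_FLAGS header bits seed the per-process PaX flags (the F_PAX_* bits are this patch's additions to the a.out format), and the data segment is mapped without PROT_EXEC so PAGEEXEC actually applies to it. The flag seeding, condensed from the hunk:

current->mm->pax_flags = 0UL;
if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {	/* on unless the header opts out */
	current->mm->pax_flags |= MF_PAX_PAGEEXEC;
	if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
		current->mm->pax_flags |= MF_PAX_MPROTECT;
}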
58599diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58600index 995986b..dcc4ef2 100644
58601--- a/fs/binfmt_elf.c
58602+++ b/fs/binfmt_elf.c
58603@@ -34,6 +34,7 @@
58604 #include <linux/utsname.h>
58605 #include <linux/coredump.h>
58606 #include <linux/sched.h>
58607+#include <linux/xattr.h>
58608 #include <asm/uaccess.h>
58609 #include <asm/param.h>
58610 #include <asm/page.h>
58611@@ -47,7 +48,7 @@
58612
58613 static int load_elf_binary(struct linux_binprm *bprm);
58614 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58615- int, int, unsigned long);
58616+ int, int, unsigned long) __intentional_overflow(-1);
58617
58618 #ifdef CONFIG_USELIB
58619 static int load_elf_library(struct file *);
58620@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58621 #define elf_core_dump NULL
58622 #endif
58623
58624+#ifdef CONFIG_PAX_MPROTECT
58625+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58626+#endif
58627+
58628+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58629+static void elf_handle_mmap(struct file *file);
58630+#endif
58631+
58632 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58633 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58634 #else
58635@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58636 .load_binary = load_elf_binary,
58637 .load_shlib = load_elf_library,
58638 .core_dump = elf_core_dump,
58639+
58640+#ifdef CONFIG_PAX_MPROTECT
58641+ .handle_mprotect= elf_handle_mprotect,
58642+#endif
58643+
58644+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58645+ .handle_mmap = elf_handle_mmap,
58646+#endif
58647+
58648 .min_coredump = ELF_EXEC_PAGESIZE,
58649 };
58650
58651@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58652
58653 static int set_brk(unsigned long start, unsigned long end)
58654 {
58655+ unsigned long e = end;
58656+
58657 start = ELF_PAGEALIGN(start);
58658 end = ELF_PAGEALIGN(end);
58659 if (end > start) {
58660@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58661 if (BAD_ADDR(addr))
58662 return addr;
58663 }
58664- current->mm->start_brk = current->mm->brk = end;
58665+ current->mm->start_brk = current->mm->brk = e;
58666 return 0;
58667 }
58668
58669@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58670 elf_addr_t __user *u_rand_bytes;
58671 const char *k_platform = ELF_PLATFORM;
58672 const char *k_base_platform = ELF_BASE_PLATFORM;
58673- unsigned char k_rand_bytes[16];
58674+ u32 k_rand_bytes[4];
58675 int items;
58676 elf_addr_t *elf_info;
58677 int ei_index = 0;
58678 const struct cred *cred = current_cred();
58679 struct vm_area_struct *vma;
58680+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58681
58682 /*
58683 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58684@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58685 * Generate 16 random bytes for userspace PRNG seeding.
58686 */
58687 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58688- u_rand_bytes = (elf_addr_t __user *)
58689- STACK_ALLOC(p, sizeof(k_rand_bytes));
58690+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58691+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58692+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58693+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58694+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58695+ u_rand_bytes = (elf_addr_t __user *) p;
58696 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58697 return -EFAULT;
58698
58699@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58700 return -EFAULT;
58701 current->mm->env_end = p;
58702
58703+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58704+
58705 /* Put the elf_info on the stack in the right place. */
58706 sp = (elf_addr_t __user *)envp + 1;
58707- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58708+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58709 return -EFAULT;
58710 return 0;
58711 }
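The AT_RANDOM hunk above changes two things: the 16 bytes are drawn as four u32 words, each immediately mixed back into the prandom pool (XORed with fresh prandom output), presumably both to credit the strong entropy to prandom and to decouple the exported bytes from prandom's later output; and the user pointer is derived from a STACK_ROUND()ed p rather than STACK_ALLOC(). The mixing step, condensed:

u32 k_rand_bytes[4];

get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
/* feed each strong word into prandom's state before exposing it */
prandom_seed(k_rand_bytes[0] ^ prandom_u32());
prandom_seed(k_rand_bytes[1] ^ prandom_u32());
prandom_seed(k_rand_bytes[2] ^ prandom_u32());
prandom_seed(k_rand_bytes[3] ^ prandom_u32());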
58712@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58713 an ELF header */
58714
58715 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58716- struct file *interpreter, unsigned long *interp_map_addr,
58717+ struct file *interpreter,
58718 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58719 {
58720 struct elf_phdr *eppnt;
58721- unsigned long load_addr = 0;
58722+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58723 int load_addr_set = 0;
58724 unsigned long last_bss = 0, elf_bss = 0;
58725- unsigned long error = ~0UL;
58726+ unsigned long error = -EINVAL;
58727 unsigned long total_size;
58728 int i;
58729
58730@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58731 goto out;
58732 }
58733
58734+#ifdef CONFIG_PAX_SEGMEXEC
58735+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58736+ pax_task_size = SEGMEXEC_TASK_SIZE;
58737+#endif
58738+
58739 eppnt = interp_elf_phdata;
58740 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58741 if (eppnt->p_type == PT_LOAD) {
58742@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58743 map_addr = elf_map(interpreter, load_addr + vaddr,
58744 eppnt, elf_prot, elf_type, total_size);
58745 total_size = 0;
58746- if (!*interp_map_addr)
58747- *interp_map_addr = map_addr;
58748 error = map_addr;
58749 if (BAD_ADDR(map_addr))
58750 goto out;
58751@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58752 k = load_addr + eppnt->p_vaddr;
58753 if (BAD_ADDR(k) ||
58754 eppnt->p_filesz > eppnt->p_memsz ||
58755- eppnt->p_memsz > TASK_SIZE ||
58756- TASK_SIZE - eppnt->p_memsz < k) {
58757+ eppnt->p_memsz > pax_task_size ||
58758+ pax_task_size - eppnt->p_memsz < k) {
58759 error = -ENOMEM;
58760 goto out;
58761 }
58762@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58763 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58764
58765 /* Map the last of the bss segment */
58766- error = vm_brk(elf_bss, last_bss - elf_bss);
58767- if (BAD_ADDR(error))
58768- goto out;
58769+ if (last_bss > elf_bss) {
58770+ error = vm_brk(elf_bss, last_bss - elf_bss);
58771+ if (BAD_ADDR(error))
58772+ goto out;
58773+ }
58774 }
58775
58776 error = load_addr;
58777@@ -634,6 +666,336 @@ out:
58778 return error;
58779 }
58780
58781+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58782+#ifdef CONFIG_PAX_SOFTMODE
58783+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58784+{
58785+ unsigned long pax_flags = 0UL;
58786+
58787+#ifdef CONFIG_PAX_PAGEEXEC
58788+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58789+ pax_flags |= MF_PAX_PAGEEXEC;
58790+#endif
58791+
58792+#ifdef CONFIG_PAX_SEGMEXEC
58793+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58794+ pax_flags |= MF_PAX_SEGMEXEC;
58795+#endif
58796+
58797+#ifdef CONFIG_PAX_EMUTRAMP
58798+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58799+ pax_flags |= MF_PAX_EMUTRAMP;
58800+#endif
58801+
58802+#ifdef CONFIG_PAX_MPROTECT
58803+ if (elf_phdata->p_flags & PF_MPROTECT)
58804+ pax_flags |= MF_PAX_MPROTECT;
58805+#endif
58806+
58807+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58808+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58809+ pax_flags |= MF_PAX_RANDMMAP;
58810+#endif
58811+
58812+ return pax_flags;
58813+}
58814+#endif
58815+
58816+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58817+{
58818+ unsigned long pax_flags = 0UL;
58819+
58820+#ifdef CONFIG_PAX_PAGEEXEC
58821+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58822+ pax_flags |= MF_PAX_PAGEEXEC;
58823+#endif
58824+
58825+#ifdef CONFIG_PAX_SEGMEXEC
58826+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58827+ pax_flags |= MF_PAX_SEGMEXEC;
58828+#endif
58829+
58830+#ifdef CONFIG_PAX_EMUTRAMP
58831+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58832+ pax_flags |= MF_PAX_EMUTRAMP;
58833+#endif
58834+
58835+#ifdef CONFIG_PAX_MPROTECT
58836+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58837+ pax_flags |= MF_PAX_MPROTECT;
58838+#endif
58839+
58840+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58841+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58842+ pax_flags |= MF_PAX_RANDMMAP;
58843+#endif
58844+
58845+ return pax_flags;
58846+}
58847+#endif
58848+
58849+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58850+#ifdef CONFIG_PAX_SOFTMODE
58851+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58852+{
58853+ unsigned long pax_flags = 0UL;
58854+
58855+#ifdef CONFIG_PAX_PAGEEXEC
58856+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58857+ pax_flags |= MF_PAX_PAGEEXEC;
58858+#endif
58859+
58860+#ifdef CONFIG_PAX_SEGMEXEC
58861+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58862+ pax_flags |= MF_PAX_SEGMEXEC;
58863+#endif
58864+
58865+#ifdef CONFIG_PAX_EMUTRAMP
58866+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58867+ pax_flags |= MF_PAX_EMUTRAMP;
58868+#endif
58869+
58870+#ifdef CONFIG_PAX_MPROTECT
58871+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58872+ pax_flags |= MF_PAX_MPROTECT;
58873+#endif
58874+
58875+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58876+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58877+ pax_flags |= MF_PAX_RANDMMAP;
58878+#endif
58879+
58880+ return pax_flags;
58881+}
58882+#endif
58883+
58884+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58885+{
58886+ unsigned long pax_flags = 0UL;
58887+
58888+#ifdef CONFIG_PAX_PAGEEXEC
58889+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58890+ pax_flags |= MF_PAX_PAGEEXEC;
58891+#endif
58892+
58893+#ifdef CONFIG_PAX_SEGMEXEC
58894+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58895+ pax_flags |= MF_PAX_SEGMEXEC;
58896+#endif
58897+
58898+#ifdef CONFIG_PAX_EMUTRAMP
58899+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58900+ pax_flags |= MF_PAX_EMUTRAMP;
58901+#endif
58902+
58903+#ifdef CONFIG_PAX_MPROTECT
58904+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58905+ pax_flags |= MF_PAX_MPROTECT;
58906+#endif
58907+
58908+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58909+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58910+ pax_flags |= MF_PAX_RANDMMAP;
58911+#endif
58912+
58913+ return pax_flags;
58914+}
58915+#endif
58916+
58917+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58918+static unsigned long pax_parse_defaults(void)
58919+{
58920+ unsigned long pax_flags = 0UL;
58921+
58922+#ifdef CONFIG_PAX_SOFTMODE
58923+ if (pax_softmode)
58924+ return pax_flags;
58925+#endif
58926+
58927+#ifdef CONFIG_PAX_PAGEEXEC
58928+ pax_flags |= MF_PAX_PAGEEXEC;
58929+#endif
58930+
58931+#ifdef CONFIG_PAX_SEGMEXEC
58932+ pax_flags |= MF_PAX_SEGMEXEC;
58933+#endif
58934+
58935+#ifdef CONFIG_PAX_MPROTECT
58936+ pax_flags |= MF_PAX_MPROTECT;
58937+#endif
58938+
58939+#ifdef CONFIG_PAX_RANDMMAP
58940+ if (randomize_va_space)
58941+ pax_flags |= MF_PAX_RANDMMAP;
58942+#endif
58943+
58944+ return pax_flags;
58945+}
58946+
58947+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58948+{
58949+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58950+
58951+#ifdef CONFIG_PAX_EI_PAX
58952+
58953+#ifdef CONFIG_PAX_SOFTMODE
58954+ if (pax_softmode)
58955+ return pax_flags;
58956+#endif
58957+
58958+ pax_flags = 0UL;
58959+
58960+#ifdef CONFIG_PAX_PAGEEXEC
58961+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58962+ pax_flags |= MF_PAX_PAGEEXEC;
58963+#endif
58964+
58965+#ifdef CONFIG_PAX_SEGMEXEC
58966+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58967+ pax_flags |= MF_PAX_SEGMEXEC;
58968+#endif
58969+
58970+#ifdef CONFIG_PAX_EMUTRAMP
58971+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58972+ pax_flags |= MF_PAX_EMUTRAMP;
58973+#endif
58974+
58975+#ifdef CONFIG_PAX_MPROTECT
58976+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58977+ pax_flags |= MF_PAX_MPROTECT;
58978+#endif
58979+
58980+#ifdef CONFIG_PAX_ASLR
58981+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58982+ pax_flags |= MF_PAX_RANDMMAP;
58983+#endif
58984+
58985+#endif
58986+
58987+ return pax_flags;
58988+
58989+}
58990+
58991+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58992+{
58993+
58994+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58995+ unsigned long i;
58996+
58997+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58998+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58999+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59000+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59001+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59002+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59003+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59004+ return PAX_PARSE_FLAGS_FALLBACK;
59005+
59006+#ifdef CONFIG_PAX_SOFTMODE
59007+ if (pax_softmode)
59008+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59009+ else
59010+#endif
59011+
59012+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59013+ break;
59014+ }
59015+#endif
59016+
59017+ return PAX_PARSE_FLAGS_FALLBACK;
59018+}
59019+
59020+static unsigned long pax_parse_xattr_pax(struct file * const file)
59021+{
59022+
59023+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59024+ ssize_t xattr_size, i;
59025+ unsigned char xattr_value[sizeof("pemrs") - 1];
59026+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59027+
59028+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59029+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59030+ return PAX_PARSE_FLAGS_FALLBACK;
59031+
59032+ for (i = 0; i < xattr_size; i++)
59033+ switch (xattr_value[i]) {
59034+ default:
59035+ return PAX_PARSE_FLAGS_FALLBACK;
59036+
59037+#define parse_flag(option1, option2, flag) \
59038+ case option1: \
59039+ if (pax_flags_hardmode & MF_PAX_##flag) \
59040+ return PAX_PARSE_FLAGS_FALLBACK;\
59041+ pax_flags_hardmode |= MF_PAX_##flag; \
59042+ break; \
59043+ case option2: \
59044+ if (pax_flags_softmode & MF_PAX_##flag) \
59045+ return PAX_PARSE_FLAGS_FALLBACK;\
59046+ pax_flags_softmode |= MF_PAX_##flag; \
59047+ break;
59048+
59049+ parse_flag('p', 'P', PAGEEXEC);
59050+ parse_flag('e', 'E', EMUTRAMP);
59051+ parse_flag('m', 'M', MPROTECT);
59052+ parse_flag('r', 'R', RANDMMAP);
59053+ parse_flag('s', 'S', SEGMEXEC);
59054+
59055+#undef parse_flag
59056+ }
59057+
59058+ if (pax_flags_hardmode & pax_flags_softmode)
59059+ return PAX_PARSE_FLAGS_FALLBACK;
59060+
59061+#ifdef CONFIG_PAX_SOFTMODE
59062+ if (pax_softmode)
59063+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59064+ else
59065+#endif
59066+
59067+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59068+#else
59069+ return PAX_PARSE_FLAGS_FALLBACK;
59070+#endif
59071+
59072+}
59073+
59074+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59075+{
59076+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59077+
59078+ pax_flags = pax_parse_defaults();
59079+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59080+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59081+ xattr_pax_flags = pax_parse_xattr_pax(file);
59082+
59083+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59084+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59085+ pt_pax_flags != xattr_pax_flags)
59086+ return -EINVAL;
59087+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59088+ pax_flags = xattr_pax_flags;
59089+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59090+ pax_flags = pt_pax_flags;
59091+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59092+ pax_flags = ei_pax_flags;
59093+
59094+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59095+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59096+ if ((__supported_pte_mask & _PAGE_NX))
59097+ pax_flags &= ~MF_PAX_SEGMEXEC;
59098+ else
59099+ pax_flags &= ~MF_PAX_PAGEEXEC;
59100+ }
59101+#endif
59102+
59103+ if (0 > pax_check_flags(&pax_flags))
59104+ return -EINVAL;
59105+
59106+ current->mm->pax_flags = pax_flags;
59107+ return 0;
59108+}
59109+#endif
59110+
59111 /*
59112 * These are the functions used to load ELF style executables and shared
59113 * libraries. There is no binary dependent code anywhere else.
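pax_parse_xattr_pax() above accepts a flag string of at most five letters drawn from "pemrs"/"PEMRS": roughly, a lowercase letter lands in the hard-mode set (that protection is switched off for the binary), an uppercase letter in the soft-mode set (switched on), with the active interpretation chosen by the pax_softmode setting and any letter appearing in both cases yielding PAX_PARSE_FLAGS_FALLBACK. Assuming the conventional "user.pax.flags" attribute name used by PaX userland tools, a binary is marked with something like setfattr -n user.pax.flags -v m ./prog (disable MPROTECT). The letter mapping implemented by the parse_flag() macro, restated as a table:

/* Lowercase = hard-mode (protection off), uppercase = soft-mode (on). */
static const struct { char hard, soft; unsigned long flag; } pax_letters[] = {
	{ 'p', 'P', MF_PAX_PAGEEXEC },
	{ 'e', 'E', MF_PAX_EMUTRAMP },
	{ 'm', 'M', MF_PAX_MPROTECT },
	{ 'r', 'R', MF_PAX_RANDMMAP },
	{ 's', 'S', MF_PAX_SEGMEXEC },
};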
59114@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
59115 {
59116 unsigned long random_variable = 0;
59117
59118+#ifdef CONFIG_PAX_RANDUSTACK
59119+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
59120+ return stack_top - current->mm->delta_stack;
59121+#endif
59122+
59123 if ((current->flags & PF_RANDOMIZE) &&
59124 !(current->personality & ADDR_NO_RANDOMIZE)) {
59125 random_variable = (unsigned long) get_random_int();
59126@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59127 unsigned long load_addr = 0, load_bias = 0;
59128 int load_addr_set = 0;
59129 char * elf_interpreter = NULL;
59130- unsigned long error;
59131+ unsigned long error = 0;
59132 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
59133 unsigned long elf_bss, elf_brk;
59134 int retval, i;
59135@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59136 struct elfhdr interp_elf_ex;
59137 } *loc;
59138 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
59139+ unsigned long pax_task_size;
59140
59141 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
59142 if (!loc) {
59143@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
59144 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
59145 may depend on the personality. */
59146 SET_PERSONALITY2(loc->elf_ex, &arch_state);
59147+
59148+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59149+ current->mm->pax_flags = 0UL;
59150+#endif
59151+
59152+#ifdef CONFIG_PAX_DLRESOLVE
59153+ current->mm->call_dl_resolve = 0UL;
59154+#endif
59155+
59156+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59157+ current->mm->call_syscall = 0UL;
59158+#endif
59159+
59160+#ifdef CONFIG_PAX_ASLR
59161+ current->mm->delta_mmap = 0UL;
59162+ current->mm->delta_stack = 0UL;
59163+#endif
59164+
59165+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59166+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
59167+ send_sig(SIGKILL, current, 0);
59168+ goto out_free_dentry;
59169+ }
59170+#endif
59171+
59172+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59173+ pax_set_initial_flags(bprm);
59174+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59175+ if (pax_set_initial_flags_func)
59176+ (pax_set_initial_flags_func)(bprm);
59177+#endif
59178+
59179+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59180+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
59181+ current->mm->context.user_cs_limit = PAGE_SIZE;
59182+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
59183+ }
59184+#endif
59185+
59186+#ifdef CONFIG_PAX_SEGMEXEC
59187+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
59188+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
59189+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
59190+ pax_task_size = SEGMEXEC_TASK_SIZE;
59191+ current->mm->def_flags |= VM_NOHUGEPAGE;
59192+ } else
59193+#endif
59194+
59195+ pax_task_size = TASK_SIZE;
59196+
59197+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
59198+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59199+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
59200+ put_cpu();
59201+ }
59202+#endif
59203+
59204+#ifdef CONFIG_PAX_ASLR
59205+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59206+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
59207+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
59208+ }
59209+#endif
59210+
59211+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59212+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59213+ executable_stack = EXSTACK_DISABLE_X;
59214+ current->personality &= ~READ_IMPLIES_EXEC;
59215+ } else
59216+#endif
59217+
59218 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
59219 current->personality |= READ_IMPLIES_EXEC;
59220
59221@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
59222 #else
59223 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
59224 #endif
59225+
59226+#ifdef CONFIG_PAX_RANDMMAP
59227+ /* PaX: randomize base address at the default exe base if requested */
59228+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
59229+#ifdef CONFIG_SPARC64
59230+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
59231+#else
59232+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
59233+#endif
59234+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
59235+ elf_flags |= MAP_FIXED;
59236+ }
59237+#endif
59238+
59239 }
59240
59241 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
59242@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
59243 * allowed task size. Note that p_filesz must always be
59244 * <= p_memsz so it is only necessary to check p_memsz.
59245 */
59246- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59247- elf_ppnt->p_memsz > TASK_SIZE ||
59248- TASK_SIZE - elf_ppnt->p_memsz < k) {
59249+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59250+ elf_ppnt->p_memsz > pax_task_size ||
59251+ pax_task_size - elf_ppnt->p_memsz < k) {
59252 /* set_brk can never work. Avoid overflows. */
59253 retval = -EINVAL;
59254 goto out_free_dentry;
59255@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
59256 if (retval)
59257 goto out_free_dentry;
59258 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59259- retval = -EFAULT; /* Nobody gets to see this, but.. */
59260- goto out_free_dentry;
59261+ /*
59262+ * This bss-zeroing can fail if the ELF
59263+ * file specifies odd protections. So
59264+ * we don't check the return value
59265+ */
59266 }
59267
59268+#ifdef CONFIG_PAX_RANDMMAP
59269+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59270+ unsigned long start, size, flags;
59271+ vm_flags_t vm_flags;
59272+
59273+ start = ELF_PAGEALIGN(elf_brk);
59274+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
59275+ flags = MAP_FIXED | MAP_PRIVATE;
59276+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
59277+
59278+ down_write(&current->mm->mmap_sem);
59279+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
59280+ retval = -ENOMEM;
59281+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
59282+// if (current->personality & ADDR_NO_RANDOMIZE)
59283+// vm_flags |= VM_READ | VM_MAYREAD;
59284+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
59285+ retval = IS_ERR_VALUE(start) ? start : 0;
59286+ }
59287+ up_write(&current->mm->mmap_sem);
59288+ if (retval == 0)
59289+ retval = set_brk(start + size, start + size + PAGE_SIZE);
59290+ if (retval < 0)
59291+ goto out_free_dentry;
59292+ }
59293+#endif
59294+
59295 if (elf_interpreter) {
59296- unsigned long interp_map_addr = 0;
59297-
59298 elf_entry = load_elf_interp(&loc->interp_elf_ex,
59299 interpreter,
59300- &interp_map_addr,
59301 load_bias, interp_elf_phdata);
59302 if (!IS_ERR((void *)elf_entry)) {
59303 /*
59304@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
59305 * Decide what to dump of a segment, part, all or none.
59306 */
59307 static unsigned long vma_dump_size(struct vm_area_struct *vma,
59308- unsigned long mm_flags)
59309+ unsigned long mm_flags, long signr)
59310 {
59311 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
59312
59313@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
59314 if (vma->vm_file == NULL)
59315 return 0;
59316
59317- if (FILTER(MAPPED_PRIVATE))
59318+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
59319 goto whole;
59320
59321 /*
59322@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
59323 {
59324 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
59325 int i = 0;
59326- do
59327+ do {
59328 i += 2;
59329- while (auxv[i - 2] != AT_NULL);
59330+ } while (auxv[i - 2] != AT_NULL);
59331 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
59332 }
59333
59334@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59335 {
59336 mm_segment_t old_fs = get_fs();
59337 set_fs(KERNEL_DS);
59338- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59339+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59340 set_fs(old_fs);
59341 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59342 }
59343@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59344 vma = next_vma(vma, gate_vma)) {
59345 unsigned long dump_size;
59346
59347- dump_size = vma_dump_size(vma, cprm->mm_flags);
59348+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59349 vma_filesz[i++] = dump_size;
59350 vma_data_size += dump_size;
59351 }
59352@@ -2314,6 +2794,167 @@ out:
59353
59354 #endif /* CONFIG_ELF_CORE */
59355
59356+#ifdef CONFIG_PAX_MPROTECT
59357+/* PaX: non-PIC ELF libraries need relocations on their executable segments
59358+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59359+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59360+ *
59361+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59362+ * basis because we want to allow the common case and not the special ones.
59363+ */
59364+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59365+{
59366+ struct elfhdr elf_h;
59367+ struct elf_phdr elf_p;
59368+ unsigned long i;
59369+ unsigned long oldflags;
59370+ bool is_textrel_rw, is_textrel_rx, is_relro;
59371+
59372+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59373+ return;
59374+
59375+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59376+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59377+
59378+#ifdef CONFIG_PAX_ELFRELOCS
59379+ /* possible TEXTREL */
59380+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59381+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59382+#else
59383+ is_textrel_rw = false;
59384+ is_textrel_rx = false;
59385+#endif
59386+
59387+ /* possible RELRO */
59388+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59389+
59390+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59391+ return;
59392+
59393+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59394+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59395+
59396+#ifdef CONFIG_PAX_ETEXECRELOCS
59397+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59398+#else
59399+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59400+#endif
59401+
59402+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59403+ !elf_check_arch(&elf_h) ||
59404+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59405+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59406+ return;
59407+
59408+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59409+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59410+ return;
59411+ switch (elf_p.p_type) {
59412+ case PT_DYNAMIC:
59413+ if (!is_textrel_rw && !is_textrel_rx)
59414+ continue;
59415+ i = 0UL;
59416+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59417+ elf_dyn dyn;
59418+
59419+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59420+ break;
59421+ if (dyn.d_tag == DT_NULL)
59422+ break;
59423+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59424+ gr_log_textrel(vma);
59425+ if (is_textrel_rw)
59426+ vma->vm_flags |= VM_MAYWRITE;
59427+ else
59428+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
59429+ vma->vm_flags &= ~VM_MAYWRITE;
59430+ break;
59431+ }
59432+ i++;
59433+ }
59434+ is_textrel_rw = false;
59435+ is_textrel_rx = false;
59436+ continue;
59437+
59438+ case PT_GNU_RELRO:
59439+ if (!is_relro)
59440+ continue;
59441+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59442+ vma->vm_flags &= ~VM_MAYWRITE;
59443+ is_relro = false;
59444+ continue;
59445+
59446+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59447+ case PT_PAX_FLAGS: {
59448+ const char *msg_mprotect = "", *msg_emutramp = "";
59449+ char *buffer_lib, *buffer_exe;
59450+
59451+ if (elf_p.p_flags & PF_NOMPROTECT)
59452+ msg_mprotect = "MPROTECT disabled";
59453+
59454+#ifdef CONFIG_PAX_EMUTRAMP
59455+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59456+ msg_emutramp = "EMUTRAMP enabled";
59457+#endif
59458+
59459+ if (!msg_mprotect[0] && !msg_emutramp[0])
59460+ continue;
59461+
59462+ if (!printk_ratelimit())
59463+ continue;
59464+
59465+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59466+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59467+ if (buffer_lib && buffer_exe) {
59468+ char *path_lib, *path_exe;
59469+
59470+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59471+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59472+
59473+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59474+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59475+
59476+ }
59477+ free_page((unsigned long)buffer_exe);
59478+ free_page((unsigned long)buffer_lib);
59479+ continue;
59480+ }
59481+#endif
59482+
59483+ }
59484+ }
59485+}
59486+#endif
59487+
59488+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59489+
59490+extern int grsec_enable_log_rwxmaps;
59491+
59492+static void elf_handle_mmap(struct file *file)
59493+{
59494+ struct elfhdr elf_h;
59495+ struct elf_phdr elf_p;
59496+ unsigned long i;
59497+
59498+ if (!grsec_enable_log_rwxmaps)
59499+ return;
59500+
59501+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59502+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59503+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59504+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59505+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59506+ return;
59507+
59508+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59509+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59510+ return;
59511+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59512+ gr_log_ptgnustack(file);
59513+ }
59514+}
59515+#endif
59516+
59517 static int __init init_elf_binfmt(void)
59518 {
59519 register_binfmt(&elf_format);
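elf_handle_mprotect() above re-reads the ELF headers at mprotect() time to recognize three legitimate transitions — a text segment with relocations (DT_TEXTREL / DF_TEXTREL) going writable, that segment returning to r-x, and a PT_GNU_RELRO region being sealed — and toggles VM_MAYWRITE accordingly: granted once for relocations, removed for good on RELRO. Whether a DSO even needs the TEXTREL exception can be checked offline (readelf -d lib.so | grep TEXTREL does the same); a small standalone checker for 64-bit ELF files, error handling kept minimal:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;
	Elf64_Ehdr *eh = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (eh == MAP_FAILED || memcmp(eh->e_ident, ELFMAG, SELFMAG))
		return 1;
	Elf64_Phdr *ph = (Elf64_Phdr *)((char *)eh + eh->e_phoff);
	for (int i = 0; i < eh->e_phnum; i++) {
		if (ph[i].p_type != PT_DYNAMIC)
			continue;
		for (Elf64_Dyn *d = (Elf64_Dyn *)((char *)eh + ph[i].p_offset);
		     d->d_tag != DT_NULL; d++)
			if (d->d_tag == DT_TEXTREL ||
			    (d->d_tag == DT_FLAGS && (d->d_un.d_val & DF_TEXTREL))) {
				puts("TEXTREL: yes");
				return 0;
			}
	}
	puts("TEXTREL: no");
	return 0;
}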
59520diff --git a/fs/block_dev.c b/fs/block_dev.c
59521index 975266b..c3d1856 100644
59522--- a/fs/block_dev.c
59523+++ b/fs/block_dev.c
59524@@ -734,7 +734,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59525 else if (bdev->bd_contains == bdev)
59526 return true; /* is a whole device which isn't held */
59527
59528- else if (whole->bd_holder == bd_may_claim)
59529+ else if (whole->bd_holder == (void *)bd_may_claim)
59530 return true; /* is a partition of a device that is being partitioned */
59531 else if (whole->bd_holder != NULL)
59532 return false; /* is a partition of a held device */
59533diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59534index 6d67f32..8f33187 100644
59535--- a/fs/btrfs/ctree.c
59536+++ b/fs/btrfs/ctree.c
59537@@ -1181,9 +1181,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59538 free_extent_buffer(buf);
59539 add_root_to_dirty_list(root);
59540 } else {
59541- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59542- parent_start = parent->start;
59543- else
59544+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59545+ if (parent)
59546+ parent_start = parent->start;
59547+ else
59548+ parent_start = 0;
59549+ } else
59550 parent_start = 0;
59551
59552 WARN_ON(trans->transid != btrfs_header_generation(parent));
59553diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59554index 82f0c7c..dff78a8 100644
59555--- a/fs/btrfs/delayed-inode.c
59556+++ b/fs/btrfs/delayed-inode.c
59557@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59558
59559 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59560 {
59561- int seq = atomic_inc_return(&delayed_root->items_seq);
59562+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59563 if ((atomic_dec_return(&delayed_root->items) <
59564 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59565 waitqueue_active(&delayed_root->wait))
59566@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59567
59568 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59569 {
59570- int val = atomic_read(&delayed_root->items_seq);
59571+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59572
59573 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59574 return 1;
59575@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59576 int seq;
59577 int ret;
59578
59579- seq = atomic_read(&delayed_root->items_seq);
59580+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59581
59582 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59583 if (ret)
59584diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59585index f70119f..ab5894d 100644
59586--- a/fs/btrfs/delayed-inode.h
59587+++ b/fs/btrfs/delayed-inode.h
59588@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59589 */
59590 struct list_head prepare_list;
59591 atomic_t items; /* for delayed items */
59592- atomic_t items_seq; /* for delayed items */
59593+ atomic_unchecked_t items_seq; /* for delayed items */
59594 int nodes; /* for delayed nodes */
59595 wait_queue_head_t wait;
59596 };
59597@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59598 struct btrfs_delayed_root *delayed_root)
59599 {
59600 atomic_set(&delayed_root->items, 0);
59601- atomic_set(&delayed_root->items_seq, 0);
59602+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59603 delayed_root->nodes = 0;
59604 spin_lock_init(&delayed_root->lock);
59605 init_waitqueue_head(&delayed_root->wait);
59606diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59607index 05fef19..f3774b8 100644
59608--- a/fs/btrfs/super.c
59609+++ b/fs/btrfs/super.c
59610@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59611 function, line, errstr);
59612 return;
59613 }
59614- ACCESS_ONCE(trans->transaction->aborted) = errno;
59615+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59616 /* Wake up anybody who may be waiting on this transaction */
59617 wake_up(&root->fs_info->transaction_wait);
59618 wake_up(&root->fs_info->transaction_blocked_wait);
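ACCESS_ONCE_RW() above exists because this patch redefines plain ACCESS_ONCE() with a const-qualified cast, turning any accidental write through it into a compile error; the few sites that genuinely store, like the transaction-abort flag here, are converted to the _RW variant. The two macro shapes, as the patch defines them elsewhere (reproduced from memory, so verify against include/linux/compiler.h in the full diff):

/* read-only: writes through this lvalue fail to compile */
#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
/* read-write: for the handful of sites that legitimately store */
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))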
59619diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59620index 94edb0a..e94dc93 100644
59621--- a/fs/btrfs/sysfs.c
59622+++ b/fs/btrfs/sysfs.c
59623@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59624 for (set = 0; set < FEAT_MAX; set++) {
59625 int i;
59626 struct attribute *attrs[2];
59627- struct attribute_group agroup = {
59628+ attribute_group_no_const agroup = {
59629 .name = "features",
59630 .attrs = attrs,
59631 };
59632diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59633index 2299bfd..4098e72 100644
59634--- a/fs/btrfs/tests/free-space-tests.c
59635+++ b/fs/btrfs/tests/free-space-tests.c
59636@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59637 * extent entry.
59638 */
59639 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59640- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59641+ pax_open_kernel();
59642+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59643+ pax_close_kernel();
59644
59645 /*
59646 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59647@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59648 if (ret)
59649 return ret;
59650
59651- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59652+ pax_open_kernel();
59653+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59654+ pax_close_kernel();
59655 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59656
59657 return 0;
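
The pax_open_kernel()/pax_close_kernel() bracket is required because ops tables such as free_space_ctl->op are read-only under KERNEXEC/constify; the self-test pokes a function pointer into one, so kernel writes are enabled for just those statements (the *(void **)& cast launders the const away). A simplified x86 sketch of the mechanism, assuming the CR0.WP approach -- the real helpers also handle preemption and ordering:

    static inline unsigned long pax_open_kernel(void)
    {
            unsigned long cr0 = read_cr0();

            write_cr0(cr0 & ~X86_CR0_WP);  /* allow kernel writes to read-only pages */
            barrier();
            return cr0;
    }

    static inline unsigned long pax_close_kernel(void)
    {
            unsigned long cr0 = read_cr0();

            write_cr0(cr0 | X86_CR0_WP);   /* re-arm write protection */
            barrier();
            return cr0;
    }
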
59658diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59659index 154990c..d0cf699 100644
59660--- a/fs/btrfs/tree-log.h
59661+++ b/fs/btrfs/tree-log.h
59662@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59663 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59664 struct btrfs_trans_handle *trans)
59665 {
59666- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59667+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59668 }
59669
59670 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59671diff --git a/fs/buffer.c b/fs/buffer.c
59672index 20805db..2e8fc69 100644
59673--- a/fs/buffer.c
59674+++ b/fs/buffer.c
59675@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59676 bh_cachep = kmem_cache_create("buffer_head",
59677 sizeof(struct buffer_head), 0,
59678 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59679- SLAB_MEM_SPREAD),
59680+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59681 NULL);
59682
59683 /*
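
SLAB_NO_SANITIZE opts the buffer_head cache out of PAX_MEMORY_SANITIZE, which otherwise wipes slab objects as they are freed; buffer heads churn constantly and carry no secrets, so the wipe would be pure overhead here. Roughly, the free path added elsewhere in this patch behaves like the sketch below (the helper name and sanitize value are placeholders, not the patch's exact identifiers):

    static inline void pax_sanitize_object(struct kmem_cache *s, void *obj)
    {
            /* caches flagged SLAB_NO_SANITIZE, such as buffer_head above, skip the wipe */
            if (!(s->flags & SLAB_NO_SANITIZE))
                    memset(obj, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
    }
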
59684diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59685index fbb08e9..0fda764 100644
59686--- a/fs/cachefiles/bind.c
59687+++ b/fs/cachefiles/bind.c
59688@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59689 args);
59690
59691 /* start by checking things over */
59692- ASSERT(cache->fstop_percent >= 0 &&
59693- cache->fstop_percent < cache->fcull_percent &&
59694+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59695 cache->fcull_percent < cache->frun_percent &&
59696 cache->frun_percent < 100);
59697
59698- ASSERT(cache->bstop_percent >= 0 &&
59699- cache->bstop_percent < cache->bcull_percent &&
59700+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59701 cache->bcull_percent < cache->brun_percent &&
59702 cache->brun_percent < 100);
59703
59704diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59705index f601def..b2cf704 100644
59706--- a/fs/cachefiles/daemon.c
59707+++ b/fs/cachefiles/daemon.c
59708@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59709 if (n > buflen)
59710 return -EMSGSIZE;
59711
59712- if (copy_to_user(_buffer, buffer, n) != 0)
59713+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59714 return -EFAULT;
59715
59716 return n;
59717@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59718 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59719 return -EIO;
59720
59721- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59722+ if (datalen > PAGE_SIZE - 1)
59723 return -EOPNOTSUPP;
59724
59725 /* drag the command string into the kernel so we can parse it */
59726@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59727 if (args[0] != '%' || args[1] != '\0')
59728 return -EINVAL;
59729
59730- if (fstop < 0 || fstop >= cache->fcull_percent)
59731+ if (fstop >= cache->fcull_percent)
59732 return cachefiles_daemon_range_error(cache, args);
59733
59734 cache->fstop_percent = fstop;
59735@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59736 if (args[0] != '%' || args[1] != '\0')
59737 return -EINVAL;
59738
59739- if (bstop < 0 || bstop >= cache->bcull_percent)
59740+ if (bstop >= cache->bcull_percent)
59741 return cachefiles_daemon_range_error(cache, args);
59742
59743 cache->bstop_percent = bstop;
59744diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59745index 8c52472..c4e3a69 100644
59746--- a/fs/cachefiles/internal.h
59747+++ b/fs/cachefiles/internal.h
59748@@ -66,7 +66,7 @@ struct cachefiles_cache {
59749 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59750 struct rb_root active_nodes; /* active nodes (can't be culled) */
59751 rwlock_t active_lock; /* lock for active_nodes */
59752- atomic_t gravecounter; /* graveyard uniquifier */
59753+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59754 unsigned frun_percent; /* when to stop culling (% files) */
59755 unsigned fcull_percent; /* when to start culling (% files) */
59756 unsigned fstop_percent; /* when to stop allocating (% files) */
59757@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59758 * proc.c
59759 */
59760 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59761-extern atomic_t cachefiles_lookup_histogram[HZ];
59762-extern atomic_t cachefiles_mkdir_histogram[HZ];
59763-extern atomic_t cachefiles_create_histogram[HZ];
59764+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59765+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59766+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59767
59768 extern int __init cachefiles_proc_init(void);
59769 extern void cachefiles_proc_cleanup(void);
59770 static inline
59771-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59772+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59773 {
59774 unsigned long jif = jiffies - start_jif;
59775 if (jif >= HZ)
59776 jif = HZ - 1;
59777- atomic_inc(&histogram[jif]);
59778+ atomic_inc_unchecked(&histogram[jif]);
59779 }
59780
59781 #else
59782diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59783index 1e51714..411eded 100644
59784--- a/fs/cachefiles/namei.c
59785+++ b/fs/cachefiles/namei.c
59786@@ -309,7 +309,7 @@ try_again:
59787 /* first step is to make up a grave dentry in the graveyard */
59788 sprintf(nbuffer, "%08x%08x",
59789 (uint32_t) get_seconds(),
59790- (uint32_t) atomic_inc_return(&cache->gravecounter));
59791+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59792
59793 /* do the multiway lock magic */
59794 trap = lock_rename(cache->graveyard, dir);
59795diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59796index eccd339..4c1d995 100644
59797--- a/fs/cachefiles/proc.c
59798+++ b/fs/cachefiles/proc.c
59799@@ -14,9 +14,9 @@
59800 #include <linux/seq_file.h>
59801 #include "internal.h"
59802
59803-atomic_t cachefiles_lookup_histogram[HZ];
59804-atomic_t cachefiles_mkdir_histogram[HZ];
59805-atomic_t cachefiles_create_histogram[HZ];
59806+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59807+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59808+atomic_unchecked_t cachefiles_create_histogram[HZ];
59809
59810 /*
59811 * display the latency histogram
59812@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59813 return 0;
59814 default:
59815 index = (unsigned long) v - 3;
59816- x = atomic_read(&cachefiles_lookup_histogram[index]);
59817- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59818- z = atomic_read(&cachefiles_create_histogram[index]);
59819+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59820+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59821+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59822 if (x == 0 && y == 0 && z == 0)
59823 return 0;
59824
59825diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59826index 83e9976..bfd1eee 100644
59827--- a/fs/ceph/dir.c
59828+++ b/fs/ceph/dir.c
59829@@ -127,6 +127,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59830 struct dentry *dentry, *last;
59831 struct ceph_dentry_info *di;
59832 int err = 0;
59833+ char d_name[DNAME_INLINE_LEN];
59834+ const unsigned char *name;
59835
59836 /* claim ref on last dentry we returned */
59837 last = fi->dentry;
59838@@ -190,7 +192,12 @@ more:
59839
59840 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59841 dentry, dentry, dentry->d_inode);
59842- if (!dir_emit(ctx, dentry->d_name.name,
59843+ name = dentry->d_name.name;
59844+ if (name == dentry->d_iname) {
59845+ memcpy(d_name, name, dentry->d_name.len);
59846+ name = d_name;
59847+ }
59848+ if (!dir_emit(ctx, name,
59849 dentry->d_name.len,
59850 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59851 dentry->d_inode->i_mode >> 12)) {
59852@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59853 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59854 struct ceph_mds_client *mdsc = fsc->mdsc;
59855 unsigned frag = fpos_frag(ctx->pos);
59856- int off = fpos_off(ctx->pos);
59857+ unsigned int off = fpos_off(ctx->pos);
59858 int err;
59859 u32 ftype;
59860 struct ceph_mds_reply_info_parsed *rinfo;
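
The __dcache_readdir() change above defends against a concurrent rename: names short enough to live inline in the dentry (d_iname) can be rewritten while dir_emit() is copying to userspace, so the name is snapshotted onto the stack first. The pattern in isolation:

    const unsigned char *name = dentry->d_name.name;
    char stack_name[DNAME_INLINE_LEN];

    if (name == dentry->d_iname) {          /* name stored inline in the dentry? */
            memcpy(stack_name, name, dentry->d_name.len);
            name = stack_name;              /* stable copy a rename cannot mutate */
    }

The same trick reappears in the configfs_readdir() hunk further down.
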
59861diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59862index a63997b..ddc0577 100644
59863--- a/fs/ceph/super.c
59864+++ b/fs/ceph/super.c
59865@@ -889,7 +889,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59866 /*
59867 * construct our own bdi so we can control readahead, etc.
59868 */
59869-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59870+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59871
59872 static int ceph_register_bdi(struct super_block *sb,
59873 struct ceph_fs_client *fsc)
59874@@ -906,7 +906,7 @@ static int ceph_register_bdi(struct super_block *sb,
59875 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
59876
59877 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59878- atomic_long_inc_return(&bdi_seq));
59879+ atomic_long_inc_return_unchecked(&bdi_seq));
59880 if (!err)
59881 sb->s_bdi = &fsc->backing_dev_info;
59882 return err;
59883diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59884index 7febcf2..62a5721 100644
59885--- a/fs/cifs/cifs_debug.c
59886+++ b/fs/cifs/cifs_debug.c
59887@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59888
59889 if (strtobool(&c, &bv) == 0) {
59890 #ifdef CONFIG_CIFS_STATS2
59891- atomic_set(&totBufAllocCount, 0);
59892- atomic_set(&totSmBufAllocCount, 0);
59893+ atomic_set_unchecked(&totBufAllocCount, 0);
59894+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59895 #endif /* CONFIG_CIFS_STATS2 */
59896 spin_lock(&cifs_tcp_ses_lock);
59897 list_for_each(tmp1, &cifs_tcp_ses_list) {
59898@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59899 tcon = list_entry(tmp3,
59900 struct cifs_tcon,
59901 tcon_list);
59902- atomic_set(&tcon->num_smbs_sent, 0);
59903+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59904 if (server->ops->clear_stats)
59905 server->ops->clear_stats(tcon);
59906 }
59907@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59908 smBufAllocCount.counter, cifs_min_small);
59909 #ifdef CONFIG_CIFS_STATS2
59910 seq_printf(m, "Total Large %d Small %d Allocations\n",
59911- atomic_read(&totBufAllocCount),
59912- atomic_read(&totSmBufAllocCount));
59913+ atomic_read_unchecked(&totBufAllocCount),
59914+ atomic_read_unchecked(&totSmBufAllocCount));
59915 #endif /* CONFIG_CIFS_STATS2 */
59916
59917 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59918@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59919 if (tcon->need_reconnect)
59920 seq_puts(m, "\tDISCONNECTED ");
59921 seq_printf(m, "\nSMBs: %d",
59922- atomic_read(&tcon->num_smbs_sent));
59923+ atomic_read_unchecked(&tcon->num_smbs_sent));
59924 if (server->ops->print_stats)
59925 server->ops->print_stats(m, tcon);
59926 }
59927diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59928index d72fe37..ded5511 100644
59929--- a/fs/cifs/cifsfs.c
59930+++ b/fs/cifs/cifsfs.c
59931@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59932 */
59933 cifs_req_cachep = kmem_cache_create("cifs_request",
59934 CIFSMaxBufSize + max_hdr_size, 0,
59935- SLAB_HWCACHE_ALIGN, NULL);
59936+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59937 if (cifs_req_cachep == NULL)
59938 return -ENOMEM;
59939
59940@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59941 efficient to alloc 1 per page off the slab compared to 17K (5page)
59942 alloc of large cifs buffers even when page debugging is on */
59943 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59944- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59945+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59946 NULL);
59947 if (cifs_sm_req_cachep == NULL) {
59948 mempool_destroy(cifs_req_poolp);
59949@@ -1204,8 +1204,8 @@ init_cifs(void)
59950 atomic_set(&bufAllocCount, 0);
59951 atomic_set(&smBufAllocCount, 0);
59952 #ifdef CONFIG_CIFS_STATS2
59953- atomic_set(&totBufAllocCount, 0);
59954- atomic_set(&totSmBufAllocCount, 0);
59955+ atomic_set_unchecked(&totBufAllocCount, 0);
59956+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59957 #endif /* CONFIG_CIFS_STATS2 */
59958
59959 atomic_set(&midCount, 0);
59960diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59961index 22b289a..bbbba08 100644
59962--- a/fs/cifs/cifsglob.h
59963+++ b/fs/cifs/cifsglob.h
59964@@ -823,35 +823,35 @@ struct cifs_tcon {
59965 __u16 Flags; /* optional support bits */
59966 enum statusEnum tidStatus;
59967 #ifdef CONFIG_CIFS_STATS
59968- atomic_t num_smbs_sent;
59969+ atomic_unchecked_t num_smbs_sent;
59970 union {
59971 struct {
59972- atomic_t num_writes;
59973- atomic_t num_reads;
59974- atomic_t num_flushes;
59975- atomic_t num_oplock_brks;
59976- atomic_t num_opens;
59977- atomic_t num_closes;
59978- atomic_t num_deletes;
59979- atomic_t num_mkdirs;
59980- atomic_t num_posixopens;
59981- atomic_t num_posixmkdirs;
59982- atomic_t num_rmdirs;
59983- atomic_t num_renames;
59984- atomic_t num_t2renames;
59985- atomic_t num_ffirst;
59986- atomic_t num_fnext;
59987- atomic_t num_fclose;
59988- atomic_t num_hardlinks;
59989- atomic_t num_symlinks;
59990- atomic_t num_locks;
59991- atomic_t num_acl_get;
59992- atomic_t num_acl_set;
59993+ atomic_unchecked_t num_writes;
59994+ atomic_unchecked_t num_reads;
59995+ atomic_unchecked_t num_flushes;
59996+ atomic_unchecked_t num_oplock_brks;
59997+ atomic_unchecked_t num_opens;
59998+ atomic_unchecked_t num_closes;
59999+ atomic_unchecked_t num_deletes;
60000+ atomic_unchecked_t num_mkdirs;
60001+ atomic_unchecked_t num_posixopens;
60002+ atomic_unchecked_t num_posixmkdirs;
60003+ atomic_unchecked_t num_rmdirs;
60004+ atomic_unchecked_t num_renames;
60005+ atomic_unchecked_t num_t2renames;
60006+ atomic_unchecked_t num_ffirst;
60007+ atomic_unchecked_t num_fnext;
60008+ atomic_unchecked_t num_fclose;
60009+ atomic_unchecked_t num_hardlinks;
60010+ atomic_unchecked_t num_symlinks;
60011+ atomic_unchecked_t num_locks;
60012+ atomic_unchecked_t num_acl_get;
60013+ atomic_unchecked_t num_acl_set;
60014 } cifs_stats;
60015 #ifdef CONFIG_CIFS_SMB2
60016 struct {
60017- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60018- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60019+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60020+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60021 } smb2_stats;
60022 #endif /* CONFIG_CIFS_SMB2 */
60023 } stats;
60024@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
60025 }
60026
60027 #ifdef CONFIG_CIFS_STATS
60028-#define cifs_stats_inc atomic_inc
60029+#define cifs_stats_inc atomic_inc_unchecked
60030
60031 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60032 unsigned int bytes)
60033@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60034 /* Various Debug counters */
60035 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60036 #ifdef CONFIG_CIFS_STATS2
60037-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60038-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60039+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60040+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60041 #endif
60042 GLOBAL_EXTERN atomic_t smBufAllocCount;
60043 GLOBAL_EXTERN atomic_t midCount;
60044diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60045index ca30c39..570fb94 100644
60046--- a/fs/cifs/file.c
60047+++ b/fs/cifs/file.c
60048@@ -2055,10 +2055,14 @@ static int cifs_writepages(struct address_space *mapping,
60049 index = mapping->writeback_index; /* Start from prev offset */
60050 end = -1;
60051 } else {
60052- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60053- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60054- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60055+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60056 range_whole = true;
60057+ index = 0;
60058+ end = ULONG_MAX;
60059+ } else {
60060+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60061+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60062+ }
60063 scanned = true;
60064 }
60065 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
60066diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60067index 3379463..3af418a 100644
60068--- a/fs/cifs/misc.c
60069+++ b/fs/cifs/misc.c
60070@@ -170,7 +170,7 @@ cifs_buf_get(void)
60071 memset(ret_buf, 0, buf_size + 3);
60072 atomic_inc(&bufAllocCount);
60073 #ifdef CONFIG_CIFS_STATS2
60074- atomic_inc(&totBufAllocCount);
60075+ atomic_inc_unchecked(&totBufAllocCount);
60076 #endif /* CONFIG_CIFS_STATS2 */
60077 }
60078
60079@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60080 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60081 atomic_inc(&smBufAllocCount);
60082 #ifdef CONFIG_CIFS_STATS2
60083- atomic_inc(&totSmBufAllocCount);
60084+ atomic_inc_unchecked(&totSmBufAllocCount);
60085 #endif /* CONFIG_CIFS_STATS2 */
60086
60087 }
60088diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60089index d297903..1cb7516 100644
60090--- a/fs/cifs/smb1ops.c
60091+++ b/fs/cifs/smb1ops.c
60092@@ -622,27 +622,27 @@ static void
60093 cifs_clear_stats(struct cifs_tcon *tcon)
60094 {
60095 #ifdef CONFIG_CIFS_STATS
60096- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60097- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60098- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
60099- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60100- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
60101- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
60102- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60103- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
60104- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
60105- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
60106- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
60107- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
60108- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
60109- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
60110- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
60111- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
60112- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
60113- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
60114- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
60115- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
60116- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
60117+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
60118+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
60119+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
60120+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60121+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
60122+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
60123+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60124+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
60125+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
60126+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
60127+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
60128+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
60129+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
60130+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
60131+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
60132+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
60133+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
60134+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
60135+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
60136+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
60137+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
60138 #endif
60139 }
60140
60141@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60142 {
60143 #ifdef CONFIG_CIFS_STATS
60144 seq_printf(m, " Oplocks breaks: %d",
60145- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
60146+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
60147 seq_printf(m, "\nReads: %d Bytes: %llu",
60148- atomic_read(&tcon->stats.cifs_stats.num_reads),
60149+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
60150 (long long)(tcon->bytes_read));
60151 seq_printf(m, "\nWrites: %d Bytes: %llu",
60152- atomic_read(&tcon->stats.cifs_stats.num_writes),
60153+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
60154 (long long)(tcon->bytes_written));
60155 seq_printf(m, "\nFlushes: %d",
60156- atomic_read(&tcon->stats.cifs_stats.num_flushes));
60157+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
60158 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
60159- atomic_read(&tcon->stats.cifs_stats.num_locks),
60160- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
60161- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
60162+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
60163+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
60164+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
60165 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
60166- atomic_read(&tcon->stats.cifs_stats.num_opens),
60167- atomic_read(&tcon->stats.cifs_stats.num_closes),
60168- atomic_read(&tcon->stats.cifs_stats.num_deletes));
60169+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
60170+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
60171+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
60172 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
60173- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
60174- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
60175+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
60176+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
60177 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
60178- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
60179- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
60180+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
60181+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
60182 seq_printf(m, "\nRenames: %d T2 Renames %d",
60183- atomic_read(&tcon->stats.cifs_stats.num_renames),
60184- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
60185+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
60186+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
60187 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
60188- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
60189- atomic_read(&tcon->stats.cifs_stats.num_fnext),
60190- atomic_read(&tcon->stats.cifs_stats.num_fclose));
60191+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
60192+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
60193+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
60194 #endif
60195 }
60196
60197diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
60198index eab05e1..ffe5ea4 100644
60199--- a/fs/cifs/smb2ops.c
60200+++ b/fs/cifs/smb2ops.c
60201@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
60202 #ifdef CONFIG_CIFS_STATS
60203 int i;
60204 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
60205- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60206- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60207+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60208+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60209 }
60210 #endif
60211 }
60212@@ -459,65 +459,65 @@ static void
60213 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60214 {
60215 #ifdef CONFIG_CIFS_STATS
60216- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60217- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60218+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60219+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60220 seq_printf(m, "\nNegotiates: %d sent %d failed",
60221- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
60222- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
60223+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
60224+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
60225 seq_printf(m, "\nSessionSetups: %d sent %d failed",
60226- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
60227- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
60228+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
60229+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
60230 seq_printf(m, "\nLogoffs: %d sent %d failed",
60231- atomic_read(&sent[SMB2_LOGOFF_HE]),
60232- atomic_read(&failed[SMB2_LOGOFF_HE]));
60233+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
60234+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
60235 seq_printf(m, "\nTreeConnects: %d sent %d failed",
60236- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
60237- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
60238+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
60239+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
60240 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
60241- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
60242- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
60243+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
60244+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
60245 seq_printf(m, "\nCreates: %d sent %d failed",
60246- atomic_read(&sent[SMB2_CREATE_HE]),
60247- atomic_read(&failed[SMB2_CREATE_HE]));
60248+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
60249+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
60250 seq_printf(m, "\nCloses: %d sent %d failed",
60251- atomic_read(&sent[SMB2_CLOSE_HE]),
60252- atomic_read(&failed[SMB2_CLOSE_HE]));
60253+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
60254+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
60255 seq_printf(m, "\nFlushes: %d sent %d failed",
60256- atomic_read(&sent[SMB2_FLUSH_HE]),
60257- atomic_read(&failed[SMB2_FLUSH_HE]));
60258+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
60259+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
60260 seq_printf(m, "\nReads: %d sent %d failed",
60261- atomic_read(&sent[SMB2_READ_HE]),
60262- atomic_read(&failed[SMB2_READ_HE]));
60263+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
60264+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
60265 seq_printf(m, "\nWrites: %d sent %d failed",
60266- atomic_read(&sent[SMB2_WRITE_HE]),
60267- atomic_read(&failed[SMB2_WRITE_HE]));
60268+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
60269+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
60270 seq_printf(m, "\nLocks: %d sent %d failed",
60271- atomic_read(&sent[SMB2_LOCK_HE]),
60272- atomic_read(&failed[SMB2_LOCK_HE]));
60273+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
60274+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
60275 seq_printf(m, "\nIOCTLs: %d sent %d failed",
60276- atomic_read(&sent[SMB2_IOCTL_HE]),
60277- atomic_read(&failed[SMB2_IOCTL_HE]));
60278+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
60279+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
60280 seq_printf(m, "\nCancels: %d sent %d failed",
60281- atomic_read(&sent[SMB2_CANCEL_HE]),
60282- atomic_read(&failed[SMB2_CANCEL_HE]));
60283+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
60284+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
60285 seq_printf(m, "\nEchos: %d sent %d failed",
60286- atomic_read(&sent[SMB2_ECHO_HE]),
60287- atomic_read(&failed[SMB2_ECHO_HE]));
60288+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
60289+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
60290 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
60291- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
60292- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
60293+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
60294+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
60295 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
60296- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
60297- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
60298+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
60299+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
60300 seq_printf(m, "\nQueryInfos: %d sent %d failed",
60301- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
60302- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
60303+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
60304+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
60305 seq_printf(m, "\nSetInfos: %d sent %d failed",
60306- atomic_read(&sent[SMB2_SET_INFO_HE]),
60307- atomic_read(&failed[SMB2_SET_INFO_HE]));
60308+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
60309+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
60310 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
60311- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
60312- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
60313+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
60314+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
60315 #endif
60316 }
60317
60318diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
60319index 65cd7a8..3518676 100644
60320--- a/fs/cifs/smb2pdu.c
60321+++ b/fs/cifs/smb2pdu.c
60322@@ -2147,8 +2147,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60323 default:
60324 cifs_dbg(VFS, "info level %u isn't supported\n",
60325 srch_inf->info_level);
60326- rc = -EINVAL;
60327- goto qdir_exit;
60328+ return -EINVAL;
60329 }
60330
60331 req->FileIndex = cpu_to_le32(index);
60332diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60333index 46ee6f2..89a9e7f 100644
60334--- a/fs/coda/cache.c
60335+++ b/fs/coda/cache.c
60336@@ -24,7 +24,7 @@
60337 #include "coda_linux.h"
60338 #include "coda_cache.h"
60339
60340-static atomic_t permission_epoch = ATOMIC_INIT(0);
60341+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60342
60343 /* replace or extend an acl cache hit */
60344 void coda_cache_enter(struct inode *inode, int mask)
60345@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60346 struct coda_inode_info *cii = ITOC(inode);
60347
60348 spin_lock(&cii->c_lock);
60349- cii->c_cached_epoch = atomic_read(&permission_epoch);
60350+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60351 if (!uid_eq(cii->c_uid, current_fsuid())) {
60352 cii->c_uid = current_fsuid();
60353 cii->c_cached_perm = mask;
60354@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60355 {
60356 struct coda_inode_info *cii = ITOC(inode);
60357 spin_lock(&cii->c_lock);
60358- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60359+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60360 spin_unlock(&cii->c_lock);
60361 }
60362
60363 /* remove all acl caches */
60364 void coda_cache_clear_all(struct super_block *sb)
60365 {
60366- atomic_inc(&permission_epoch);
60367+ atomic_inc_unchecked(&permission_epoch);
60368 }
60369
60370
60371@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60372 spin_lock(&cii->c_lock);
60373 hit = (mask & cii->c_cached_perm) == mask &&
60374 uid_eq(cii->c_uid, current_fsuid()) &&
60375- cii->c_cached_epoch == atomic_read(&permission_epoch);
60376+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60377 spin_unlock(&cii->c_lock);
60378
60379 return hit;
60380diff --git a/fs/compat.c b/fs/compat.c
60381index 6fd272d..dd34ba2 100644
60382--- a/fs/compat.c
60383+++ b/fs/compat.c
60384@@ -54,7 +54,7 @@
60385 #include <asm/ioctls.h>
60386 #include "internal.h"
60387
60388-int compat_log = 1;
60389+int compat_log = 0;
60390
60391 int compat_printk(const char *fmt, ...)
60392 {
60393@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60394
60395 set_fs(KERNEL_DS);
60396 /* The __user pointer cast is valid because of the set_fs() */
60397- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60398+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60399 set_fs(oldfs);
60400 /* truncating is ok because it's a user address */
60401 if (!ret)
60402@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60403 goto out;
60404
60405 ret = -EINVAL;
60406- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60407+ if (nr_segs > UIO_MAXIOV)
60408 goto out;
60409 if (nr_segs > fast_segs) {
60410 ret = -ENOMEM;
60411@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60412 struct compat_readdir_callback {
60413 struct dir_context ctx;
60414 struct compat_old_linux_dirent __user *dirent;
60415+ struct file * file;
60416 int result;
60417 };
60418
60419@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60420 buf->result = -EOVERFLOW;
60421 return -EOVERFLOW;
60422 }
60423+
60424+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60425+ return 0;
60426+
60427 buf->result++;
60428 dirent = buf->dirent;
60429 if (!access_ok(VERIFY_WRITE, dirent,
60430@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60431 if (!f.file)
60432 return -EBADF;
60433
60434+ buf.file = f.file;
60435 error = iterate_dir(f.file, &buf.ctx);
60436 if (buf.result)
60437 error = buf.result;
60438@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60439 struct dir_context ctx;
60440 struct compat_linux_dirent __user *current_dir;
60441 struct compat_linux_dirent __user *previous;
60442+ struct file * file;
60443 int count;
60444 int error;
60445 };
60446@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60447 buf->error = -EOVERFLOW;
60448 return -EOVERFLOW;
60449 }
60450+
60451+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60452+ return 0;
60453+
60454 dirent = buf->previous;
60455 if (dirent) {
60456 if (__put_user(offset, &dirent->d_off))
60457@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60458 if (!f.file)
60459 return -EBADF;
60460
60461+ buf.file = f.file;
60462 error = iterate_dir(f.file, &buf.ctx);
60463 if (error >= 0)
60464 error = buf.error;
60465@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60466 struct dir_context ctx;
60467 struct linux_dirent64 __user *current_dir;
60468 struct linux_dirent64 __user *previous;
60469+ struct file * file;
60470 int count;
60471 int error;
60472 };
60473@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60474 buf->error = -EINVAL; /* only used if we fail.. */
60475 if (reclen > buf->count)
60476 return -EINVAL;
60477+
60478+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60479+ return 0;
60480+
60481 dirent = buf->previous;
60482
60483 if (dirent) {
60484@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60485 if (!f.file)
60486 return -EBADF;
60487
60488+ buf.file = f.file;
60489 error = iterate_dir(f.file, &buf.ctx);
60490 if (error >= 0)
60491 error = buf.error;
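
All three compat getdents paths gain the same RBAC filter: the struct file is stashed in the callback so gr_acl_handle_filldir() can be consulted per entry and names the caller is not permitted to see are silently skipped. Its prototype is not part of this excerpt; reconstructed from the call sites, it is presumably along these lines:

    /* assumed prototype, inferred from the call sites above */
    __u32 gr_acl_handle_filldir(const struct file *file, const char *name,
                                unsigned int namlen, u64 ino);
    /* returns 0 to hide the directory entry, nonzero to emit it */
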
60492diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60493index 4d24d17..4f8c09e 100644
60494--- a/fs/compat_binfmt_elf.c
60495+++ b/fs/compat_binfmt_elf.c
60496@@ -30,11 +30,13 @@
60497 #undef elf_phdr
60498 #undef elf_shdr
60499 #undef elf_note
60500+#undef elf_dyn
60501 #undef elf_addr_t
60502 #define elfhdr elf32_hdr
60503 #define elf_phdr elf32_phdr
60504 #define elf_shdr elf32_shdr
60505 #define elf_note elf32_note
60506+#define elf_dyn Elf32_Dyn
60507 #define elf_addr_t Elf32_Addr
60508
60509 /*
60510diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60511index afec645..9c65620 100644
60512--- a/fs/compat_ioctl.c
60513+++ b/fs/compat_ioctl.c
60514@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60515 return -EFAULT;
60516 if (__get_user(udata, &ss32->iomem_base))
60517 return -EFAULT;
60518- ss.iomem_base = compat_ptr(udata);
60519+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60520 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60521 __get_user(ss.port_high, &ss32->port_high))
60522 return -EFAULT;
60523@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60524 for (i = 0; i < nmsgs; i++) {
60525 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60526 return -EFAULT;
60527- if (get_user(datap, &umsgs[i].buf) ||
60528- put_user(compat_ptr(datap), &tmsgs[i].buf))
60529+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60530+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60531 return -EFAULT;
60532 }
60533 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60534@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60535 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60536 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60537 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60538- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60539+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60540 return -EFAULT;
60541
60542 return ioctl_preallocate(file, p);
60543@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60544 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60545 {
60546 unsigned int a, b;
60547- a = *(unsigned int *)p;
60548- b = *(unsigned int *)q;
60549+ a = *(const unsigned int *)p;
60550+ b = *(const unsigned int *)q;
60551 if (a > b)
60552 return 1;
60553 if (a < b)
60554diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60555index cf0db00..c7f70e8 100644
60556--- a/fs/configfs/dir.c
60557+++ b/fs/configfs/dir.c
60558@@ -1540,7 +1540,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60559 }
60560 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60561 struct configfs_dirent *next;
60562- const char *name;
60563+ const unsigned char * name;
60564+ char d_name[sizeof(next->s_dentry->d_iname)];
60565 int len;
60566 struct inode *inode = NULL;
60567
60568@@ -1549,7 +1550,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60569 continue;
60570
60571 name = configfs_get_name(next);
60572- len = strlen(name);
60573+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60574+ len = next->s_dentry->d_name.len;
60575+ memcpy(d_name, name, len);
60576+ name = d_name;
60577+ } else
60578+ len = strlen(name);
60579
60580 /*
60581 * We'll have a dentry and an inode for
60582diff --git a/fs/coredump.c b/fs/coredump.c
60583index f319926..55f4ec2 100644
60584--- a/fs/coredump.c
60585+++ b/fs/coredump.c
60586@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60587 struct pipe_inode_info *pipe = file->private_data;
60588
60589 pipe_lock(pipe);
60590- pipe->readers++;
60591- pipe->writers--;
60592+ atomic_inc(&pipe->readers);
60593+ atomic_dec(&pipe->writers);
60594 wake_up_interruptible_sync(&pipe->wait);
60595 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60596 pipe_unlock(pipe);
60597@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60598 * We actually want wait_event_freezable() but then we need
60599 * to clear TIF_SIGPENDING and improve dump_interrupted().
60600 */
60601- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60602+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60603
60604 pipe_lock(pipe);
60605- pipe->readers--;
60606- pipe->writers++;
60607+ atomic_dec(&pipe->readers);
60608+ atomic_inc(&pipe->writers);
60609 pipe_unlock(pipe);
60610 }
60611
60612@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60613 struct files_struct *displaced;
60614 bool need_nonrelative = false;
60615 bool core_dumped = false;
60616- static atomic_t core_dump_count = ATOMIC_INIT(0);
60617+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60618+ long signr = siginfo->si_signo;
60619+ int dumpable;
60620 struct coredump_params cprm = {
60621 .siginfo = siginfo,
60622 .regs = signal_pt_regs(),
60623@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60624 .mm_flags = mm->flags,
60625 };
60626
60627- audit_core_dumps(siginfo->si_signo);
60628+ audit_core_dumps(signr);
60629+
60630+ dumpable = __get_dumpable(cprm.mm_flags);
60631+
60632+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60633+ gr_handle_brute_attach(dumpable);
60634
60635 binfmt = mm->binfmt;
60636 if (!binfmt || !binfmt->core_dump)
60637 goto fail;
60638- if (!__get_dumpable(cprm.mm_flags))
60639+ if (!dumpable)
60640 goto fail;
60641
60642 cred = prepare_creds();
60643@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60644 need_nonrelative = true;
60645 }
60646
60647- retval = coredump_wait(siginfo->si_signo, &core_state);
60648+ retval = coredump_wait(signr, &core_state);
60649 if (retval < 0)
60650 goto fail_creds;
60651
60652@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60653 }
60654 cprm.limit = RLIM_INFINITY;
60655
60656- dump_count = atomic_inc_return(&core_dump_count);
60657+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60658 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60659 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60660 task_tgid_vnr(current), current->comm);
60661@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60662 } else {
60663 struct inode *inode;
60664
60665+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60666+
60667 if (cprm.limit < binfmt->min_coredump)
60668 goto fail_unlock;
60669
60670@@ -681,7 +690,7 @@ close_fail:
60671 filp_close(cprm.file, NULL);
60672 fail_dropcount:
60673 if (ispipe)
60674- atomic_dec(&core_dump_count);
60675+ atomic_dec_unchecked(&core_dump_count);
60676 fail_unlock:
60677 kfree(cn.corename);
60678 coredump_finish(mm, core_dumped);
60679@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60680 struct file *file = cprm->file;
60681 loff_t pos = file->f_pos;
60682 ssize_t n;
60683+
60684+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60685 if (cprm->written + nr > cprm->limit)
60686 return 0;
60687 while (nr) {
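
The wait_for_dump_helpers() hunk only compiles because this patch also converts the reader/writer counts in struct pipe_inode_info to atomic_t (they are manipulated without the pipe mutex in other hardened paths). The corresponding include/linux/pipe_fs_i.h change is not shown in this excerpt; it is presumably of the form:

    -	unsigned int readers;
    -	unsigned int writers;
    +	atomic_t readers;
    +	atomic_t writers;
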
60688diff --git a/fs/dcache.c b/fs/dcache.c
60689index c71e373..5c1f656 100644
60690--- a/fs/dcache.c
60691+++ b/fs/dcache.c
60692@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
60693 * dentry_iput drops the locks, at which point nobody (except
60694 * transient RCU lookups) can reach this dentry.
60695 */
60696- BUG_ON(dentry->d_lockref.count > 0);
60697+ BUG_ON(__lockref_read(&dentry->d_lockref) > 0);
60698 this_cpu_dec(nr_dentry);
60699 if (dentry->d_op && dentry->d_op->d_release)
60700 dentry->d_op->d_release(dentry);
60701@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60702 struct dentry *parent = dentry->d_parent;
60703 if (IS_ROOT(dentry))
60704 return NULL;
60705- if (unlikely(dentry->d_lockref.count < 0))
60706+ if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
60707 return NULL;
60708 if (likely(spin_trylock(&parent->d_lock)))
60709 return parent;
60710@@ -626,8 +626,8 @@ static inline bool fast_dput(struct dentry *dentry)
60711 */
60712 if (unlikely(ret < 0)) {
60713 spin_lock(&dentry->d_lock);
60714- if (dentry->d_lockref.count > 1) {
60715- dentry->d_lockref.count--;
60716+ if (__lockref_read(&dentry->d_lockref) > 1) {
60717+ __lockref_dec(&dentry->d_lockref);
60718 spin_unlock(&dentry->d_lock);
60719 return 1;
60720 }
60721@@ -682,7 +682,7 @@ static inline bool fast_dput(struct dentry *dentry)
60722 * else could have killed it and marked it dead. Either way, we
60723 * don't need to do anything else.
60724 */
60725- if (dentry->d_lockref.count) {
60726+ if (__lockref_read(&dentry->d_lockref)) {
60727 spin_unlock(&dentry->d_lock);
60728 return 1;
60729 }
60730@@ -692,7 +692,7 @@ static inline bool fast_dput(struct dentry *dentry)
60731 * lock, and we just tested that it was zero, so we can just
60732 * set it to 1.
60733 */
60734- dentry->d_lockref.count = 1;
60735+ __lockref_set(&dentry->d_lockref, 1);
60736 return 0;
60737 }
60738
60739@@ -751,7 +751,7 @@ repeat:
60740 dentry->d_flags |= DCACHE_REFERENCED;
60741 dentry_lru_add(dentry);
60742
60743- dentry->d_lockref.count--;
60744+ __lockref_dec(&dentry->d_lockref);
60745 spin_unlock(&dentry->d_lock);
60746 return;
60747
60748@@ -766,7 +766,7 @@ EXPORT_SYMBOL(dput);
60749 /* This must be called with d_lock held */
60750 static inline void __dget_dlock(struct dentry *dentry)
60751 {
60752- dentry->d_lockref.count++;
60753+ __lockref_inc(&dentry->d_lockref);
60754 }
60755
60756 static inline void __dget(struct dentry *dentry)
60757@@ -807,8 +807,8 @@ repeat:
60758 goto repeat;
60759 }
60760 rcu_read_unlock();
60761- BUG_ON(!ret->d_lockref.count);
60762- ret->d_lockref.count++;
60763+ BUG_ON(!__lockref_read(&ret->d_lockref));
60764+ __lockref_inc(&ret->d_lockref);
60765 spin_unlock(&ret->d_lock);
60766 return ret;
60767 }
60768@@ -886,9 +886,9 @@ restart:
60769 spin_lock(&inode->i_lock);
60770 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60771 spin_lock(&dentry->d_lock);
60772- if (!dentry->d_lockref.count) {
60773+ if (!__lockref_read(&dentry->d_lockref)) {
60774 struct dentry *parent = lock_parent(dentry);
60775- if (likely(!dentry->d_lockref.count)) {
60776+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60777 __dentry_kill(dentry);
60778 dput(parent);
60779 goto restart;
60780@@ -923,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
60781 * We found an inuse dentry which was not removed from
60782 * the LRU because of laziness during lookup. Do not free it.
60783 */
60784- if (dentry->d_lockref.count > 0) {
60785+ if (__lockref_read(&dentry->d_lockref) > 0) {
60786 spin_unlock(&dentry->d_lock);
60787 if (parent)
60788 spin_unlock(&parent->d_lock);
60789@@ -961,8 +961,8 @@ static void shrink_dentry_list(struct list_head *list)
60790 dentry = parent;
60791 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60792 parent = lock_parent(dentry);
60793- if (dentry->d_lockref.count != 1) {
60794- dentry->d_lockref.count--;
60795+ if (__lockref_read(&dentry->d_lockref) != 1) {
60796+ __lockref_dec(&dentry->d_lockref);
60797 spin_unlock(&dentry->d_lock);
60798 if (parent)
60799 spin_unlock(&parent->d_lock);
60800@@ -1002,7 +1002,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
60801 * counts, just remove them from the LRU. Otherwise give them
60802 * another pass through the LRU.
60803 */
60804- if (dentry->d_lockref.count) {
60805+ if (__lockref_read(&dentry->d_lockref)) {
60806 d_lru_isolate(lru, dentry);
60807 spin_unlock(&dentry->d_lock);
60808 return LRU_REMOVED;
60809@@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60810 } else {
60811 if (dentry->d_flags & DCACHE_LRU_LIST)
60812 d_lru_del(dentry);
60813- if (!dentry->d_lockref.count) {
60814+ if (!__lockref_read(&dentry->d_lockref)) {
60815 d_shrink_add(dentry, &data->dispose);
60816 data->found++;
60817 }
60818@@ -1384,7 +1384,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60819 return D_WALK_CONTINUE;
60820
60821 /* root with refcount 1 is fine */
60822- if (dentry == _data && dentry->d_lockref.count == 1)
60823+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60824 return D_WALK_CONTINUE;
60825
60826 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60827@@ -1393,7 +1393,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60828 dentry->d_inode ?
60829 dentry->d_inode->i_ino : 0UL,
60830 dentry,
60831- dentry->d_lockref.count,
60832+ __lockref_read(&dentry->d_lockref),
60833 dentry->d_sb->s_type->name,
60834 dentry->d_sb->s_id);
60835 WARN_ON(1);
60836@@ -1534,7 +1534,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60837 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60838 if (name->len > DNAME_INLINE_LEN-1) {
60839 size_t size = offsetof(struct external_name, name[1]);
60840- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60841+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60842 if (!p) {
60843 kmem_cache_free(dentry_cache, dentry);
60844 return NULL;
60845@@ -1557,7 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60846 smp_wmb();
60847 dentry->d_name.name = dname;
60848
60849- dentry->d_lockref.count = 1;
60850+ __lockref_set(&dentry->d_lockref, 1);
60851 dentry->d_flags = 0;
60852 spin_lock_init(&dentry->d_lock);
60853 seqcount_init(&dentry->d_seq);
60854@@ -1566,6 +1566,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60855 dentry->d_sb = sb;
60856 dentry->d_op = NULL;
60857 dentry->d_fsdata = NULL;
60858+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60859+ atomic_set(&dentry->chroot_refcnt, 0);
60860+#endif
60861 INIT_HLIST_BL_NODE(&dentry->d_hash);
60862 INIT_LIST_HEAD(&dentry->d_lru);
60863 INIT_LIST_HEAD(&dentry->d_subdirs);
60864@@ -2290,7 +2293,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60865 goto next;
60866 }
60867
60868- dentry->d_lockref.count++;
60869+ __lockref_inc(&dentry->d_lockref);
60870 found = dentry;
60871 spin_unlock(&dentry->d_lock);
60872 break;
60873@@ -2358,7 +2361,7 @@ again:
60874 spin_lock(&dentry->d_lock);
60875 inode = dentry->d_inode;
60876 isdir = S_ISDIR(inode->i_mode);
60877- if (dentry->d_lockref.count == 1) {
60878+ if (__lockref_read(&dentry->d_lockref) == 1) {
60879 if (!spin_trylock(&inode->i_lock)) {
60880 spin_unlock(&dentry->d_lock);
60881 cpu_relax();
60882@@ -3311,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60883
60884 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60885 dentry->d_flags |= DCACHE_GENOCIDE;
60886- dentry->d_lockref.count--;
60887+ __lockref_dec(&dentry->d_lockref);
60888 }
60889 }
60890 return D_WALK_CONTINUE;
60891@@ -3427,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
60892 mempages -= reserve;
60893
60894 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60895- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60896+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60897+ SLAB_NO_SANITIZE, NULL);
60898
60899 dcache_init();
60900 inode_init();
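
Every d_lockref.count access in dcache.c is funnelled through __lockref_*() accessors because the patch retypes the lockref count as atomic_t, giving the dentry refcount REFCOUNT overflow checking while leaving the cmpxchg fast path intact. Assuming that retyping, the accessors reduce to thin wrappers such as:

    /* sketch, assuming struct lockref's count field becomes atomic_t */
    static inline int __lockref_read(const struct lockref *l)
    {
            return atomic_read(&l->count);
    }

    static inline void __lockref_set(struct lockref *l, int count)
    {
            atomic_set(&l->count, count);
    }

    static inline void __lockref_inc(struct lockref *l)
    {
            atomic_inc(&l->count);
    }

    static inline void __lockref_dec(struct lockref *l)
    {
            atomic_dec(&l->count);
    }
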
60901diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60902index 96400ab..906103d 100644
60903--- a/fs/debugfs/inode.c
60904+++ b/fs/debugfs/inode.c
60905@@ -386,6 +386,10 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
60906 }
60907 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
60908
60909+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60910+extern int grsec_enable_sysfs_restrict;
60911+#endif
60912+
60913 /**
60914 * debugfs_create_dir - create a directory in the debugfs filesystem
60915 * @name: a pointer to a string containing the name of the directory to
60916@@ -404,6 +408,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
60917 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
60918 * returned.
60919 */
60920+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60921+extern int grsec_enable_sysfs_restrict;
60922+#endif
60923+
60924 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60925 {
60926 struct dentry *dentry = start_creating(name, parent);
60927@@ -416,7 +424,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60928 if (unlikely(!inode))
60929 return failed_creating(dentry);
60930
60931- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60932+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60933+ if (grsec_enable_sysfs_restrict)
60934+ inode->i_mode = S_IFDIR | S_IRWXU;
60935+ else
60936+#endif
60937+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60938 inode->i_op = &simple_dir_inode_operations;
60939 inode->i_fop = &simple_dir_operations;
60940
60941diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60942index b08b518..d6acffa 100644
60943--- a/fs/ecryptfs/inode.c
60944+++ b/fs/ecryptfs/inode.c
60945@@ -663,7 +663,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60946 old_fs = get_fs();
60947 set_fs(get_ds());
60948 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60949- (char __user *)lower_buf,
60950+ (char __force_user *)lower_buf,
60951 PATH_MAX);
60952 set_fs(old_fs);
60953 if (rc < 0)
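
The __force_user cast above is the pattern used throughout this patch wherever set_fs(get_ds()) deliberately hands a kernel buffer to a __user-typed API: it silences the address-space checker at that one call site instead of dropping the annotations altogether. The macros are defined in the compiler.h portion of the patch; their assumed form:

    /* assumed definitions (compiler.h part of this patch, not shown here) */
    #ifdef __CHECKER__
    # define __force_user   __force __user
    # define __force_kernel __force __kernel
    #else
    # define __force_user
    # define __force_kernel
    #endif
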
60954diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60955index e4141f2..d8263e8 100644
60956--- a/fs/ecryptfs/miscdev.c
60957+++ b/fs/ecryptfs/miscdev.c
60958@@ -304,7 +304,7 @@ check_list:
60959 goto out_unlock_msg_ctx;
60960 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60961 if (msg_ctx->msg) {
60962- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60963+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60964 goto out_unlock_msg_ctx;
60965 i += packet_length_size;
60966 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
60967diff --git a/fs/exec.c b/fs/exec.c
60968index 00400cf..b9dca28 100644
60969--- a/fs/exec.c
60970+++ b/fs/exec.c
60971@@ -56,8 +56,20 @@
60972 #include <linux/pipe_fs_i.h>
60973 #include <linux/oom.h>
60974 #include <linux/compat.h>
60975+#include <linux/random.h>
60976+#include <linux/seq_file.h>
60977+#include <linux/coredump.h>
60978+#include <linux/mman.h>
60979+
60980+#ifdef CONFIG_PAX_REFCOUNT
60981+#include <linux/kallsyms.h>
60982+#include <linux/kdebug.h>
60983+#endif
60984+
60985+#include <trace/events/fs.h>
60986
60987 #include <asm/uaccess.h>
60988+#include <asm/sections.h>
60989 #include <asm/mmu_context.h>
60990 #include <asm/tlb.h>
60991
60992@@ -66,19 +78,34 @@
60993
60994 #include <trace/events/sched.h>
60995
60996+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60997+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60998+{
60999+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61000+}
61001+#endif
61002+
61003+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61004+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61005+EXPORT_SYMBOL(pax_set_initial_flags_func);
61006+#endif
61007+
61008 int suid_dumpable = 0;
61009
61010 static LIST_HEAD(formats);
61011 static DEFINE_RWLOCK(binfmt_lock);
61012
61013+extern int gr_process_kernel_exec_ban(void);
61014+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61015+
61016 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61017 {
61018 BUG_ON(!fmt);
61019 if (WARN_ON(!fmt->load_binary))
61020 return;
61021 write_lock(&binfmt_lock);
61022- insert ? list_add(&fmt->lh, &formats) :
61023- list_add_tail(&fmt->lh, &formats);
61024+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61025+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61026 write_unlock(&binfmt_lock);
61027 }
61028
61029@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61030 void unregister_binfmt(struct linux_binfmt * fmt)
61031 {
61032 write_lock(&binfmt_lock);
61033- list_del(&fmt->lh);
61034+ pax_list_del((struct list_head *)&fmt->lh);
61035 write_unlock(&binfmt_lock);
61036 }
61037
61038@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61039 int write)
61040 {
61041 struct page *page;
61042- int ret;
61043
61044-#ifdef CONFIG_STACK_GROWSUP
61045- if (write) {
61046- ret = expand_downwards(bprm->vma, pos);
61047- if (ret < 0)
61048- return NULL;
61049- }
61050-#endif
61051- ret = get_user_pages(current, bprm->mm, pos,
61052- 1, write, 1, &page, NULL);
61053- if (ret <= 0)
61054+ if (0 > expand_downwards(bprm->vma, pos))
61055+ return NULL;
61056+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61057 return NULL;
61058
61059 if (write) {
61060@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61061 if (size <= ARG_MAX)
61062 return page;
61063
61064+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61065+ // only allow 512KB for argv+env on suid/sgid binaries
61066+ // to prevent easy ASLR exhaustion
61067+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61068+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61069+ (size > (512 * 1024))) {
61070+ put_page(page);
61071+ return NULL;
61072+ }
61073+#endif
61074+
61075 /*
61076 * Limit to 1/4-th the stack size for the argv+env strings.
61077 * This ensures that:
61078@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61079 vma->vm_end = STACK_TOP_MAX;
61080 vma->vm_start = vma->vm_end - PAGE_SIZE;
61081 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61082+
61083+#ifdef CONFIG_PAX_SEGMEXEC
61084+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61085+#endif
61086+
61087 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61088 INIT_LIST_HEAD(&vma->anon_vma_chain);
61089
61090@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61091 arch_bprm_mm_init(mm, vma);
61092 up_write(&mm->mmap_sem);
61093 bprm->p = vma->vm_end - sizeof(void *);
61094+
61095+#ifdef CONFIG_PAX_RANDUSTACK
61096+ if (randomize_va_space)
61097+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61098+#endif
61099+
61100 return 0;
61101 err:
61102 up_write(&mm->mmap_sem);
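
The PAX_RANDUSTACK hunk above perturbs the initial argument-stack pointer: after __bprm_mm_init() places bprm->p one pointer below the top of the stack vma, its sub-page bits are XORed with random bits, so argv/env land at an unpredictable offset within the top page. A minimal userspace sketch of the effect (PAGE_SIZE and the stdlib RNG stand in for the kernel's constants and prandom_u32(); demo address only):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    /* bprm->p = vma->vm_end - sizeof(void *), as in __bprm_mm_init() */
    uintptr_t p = 0x7ffffffff000UL - sizeof(void *);

    srand(1);
    p ^= (uintptr_t)rand() & ~PAGE_MASK;  /* perturb only the low bits */
    printf("randomized bprm->p = %#lx (offset %lu within its page)\n",
           (unsigned long)p, (unsigned long)(p & ~PAGE_MASK));
    return 0;
}
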
61103@@ -396,7 +437,7 @@ struct user_arg_ptr {
61104 } ptr;
61105 };
61106
61107-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61108+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61109 {
61110 const char __user *native;
61111
61112@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61113 compat_uptr_t compat;
61114
61115 if (get_user(compat, argv.ptr.compat + nr))
61116- return ERR_PTR(-EFAULT);
61117+ return (const char __force_user *)ERR_PTR(-EFAULT);
61118
61119 return compat_ptr(compat);
61120 }
61121 #endif
61122
61123 if (get_user(native, argv.ptr.native + nr))
61124- return ERR_PTR(-EFAULT);
61125+ return (const char __force_user *)ERR_PTR(-EFAULT);
61126
61127 return native;
61128 }
61129@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
61130 if (!p)
61131 break;
61132
61133- if (IS_ERR(p))
61134+ if (IS_ERR((const char __force_kernel *)p))
61135 return -EFAULT;
61136
61137 if (i >= max)
61138@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
61139
61140 ret = -EFAULT;
61141 str = get_user_arg_ptr(argv, argc);
61142- if (IS_ERR(str))
61143+ if (IS_ERR((const char __force_kernel *)str))
61144 goto out;
61145
61146 len = strnlen_user(str, MAX_ARG_STRLEN);
61147@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
61148 int r;
61149 mm_segment_t oldfs = get_fs();
61150 struct user_arg_ptr argv = {
61151- .ptr.native = (const char __user *const __user *)__argv,
61152+ .ptr.native = (const char __user * const __force_user *)__argv,
61153 };
61154
61155 set_fs(KERNEL_DS);
61156@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61157 unsigned long new_end = old_end - shift;
61158 struct mmu_gather tlb;
61159
61160- BUG_ON(new_start > new_end);
61161+ if (new_start >= new_end || new_start < mmap_min_addr)
61162+ return -ENOMEM;
61163
61164 /*
61165 * ensure there are no vmas between where we want to go
61166@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61167 if (vma != find_vma(mm, new_start))
61168 return -EFAULT;
61169
61170+#ifdef CONFIG_PAX_SEGMEXEC
61171+ BUG_ON(pax_find_mirror_vma(vma));
61172+#endif
61173+
61174 /*
61175 * cover the whole range: [new_start, old_end)
61176 */
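
The shift_arg_pages() hunks above replace a BUG_ON() with a graceful failure: a shift that would leave the stack empty or push its start below mmap_min_addr now fails the exec with -ENOMEM instead of oopsing. A small sketch of the hardened check (addresses are demo values):

#include <stdio.h>
#include <errno.h>

static unsigned long mmap_min_addr = 0x10000;

static int check_shift(unsigned long old_start, unsigned long old_end,
                       unsigned long shift)
{
    unsigned long new_start = old_start - shift;
    unsigned long new_end = old_end - shift;

    if (new_start >= new_end || new_start < mmap_min_addr)
        return -ENOMEM;          /* was: BUG_ON(new_start > new_end) */
    return 0;
}

int main(void)
{
    printf("%d\n", check_shift(0x20000, 0x30000, 0x8000));  /* 0 */
    printf("%d\n", check_shift(0x20000, 0x30000, 0x18000)); /* -ENOMEM */
    return 0;
}
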
61177@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61178 stack_top = arch_align_stack(stack_top);
61179 stack_top = PAGE_ALIGN(stack_top);
61180
61181- if (unlikely(stack_top < mmap_min_addr) ||
61182- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
61183- return -ENOMEM;
61184-
61185 stack_shift = vma->vm_end - stack_top;
61186
61187 bprm->p -= stack_shift;
61188@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
61189 bprm->exec -= stack_shift;
61190
61191 down_write(&mm->mmap_sem);
61192+
61193+ /* Move stack pages down in memory. */
61194+ if (stack_shift) {
61195+ ret = shift_arg_pages(vma, stack_shift);
61196+ if (ret)
61197+ goto out_unlock;
61198+ }
61199+
61200 vm_flags = VM_STACK_FLAGS;
61201
61202+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61203+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
61204+ vm_flags &= ~VM_EXEC;
61205+
61206+#ifdef CONFIG_PAX_MPROTECT
61207+ if (mm->pax_flags & MF_PAX_MPROTECT)
61208+ vm_flags &= ~VM_MAYEXEC;
61209+#endif
61210+
61211+ }
61212+#endif
61213+
61214 /*
61215 * Adjust stack execute permissions; explicitly enable for
61216 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
61217@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61218 goto out_unlock;
61219 BUG_ON(prev != vma);
61220
61221- /* Move stack pages down in memory. */
61222- if (stack_shift) {
61223- ret = shift_arg_pages(vma, stack_shift);
61224- if (ret)
61225- goto out_unlock;
61226- }
61227-
61228 /* mprotect_fixup is overkill to remove the temporary stack flags */
61229 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
61230
61231@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
61232 #endif
61233 current->mm->start_stack = bprm->p;
61234 ret = expand_stack(vma, stack_base);
61235+
61236+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
61237+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
61238+ unsigned long size;
61239+ vm_flags_t vm_flags;
61240+
61241+ size = STACK_TOP - vma->vm_end;
61242+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
61243+
61244+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
61245+
61246+#ifdef CONFIG_X86
61247+ if (!ret) {
61248+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
61249+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
61250+ }
61251+#endif
61252+
61253+ }
61254+#endif
61255+
61256 if (ret)
61257 ret = -EFAULT;
61258
61259@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
61260 if (err)
61261 goto exit;
61262
61263- if (name->name[0] != '\0')
61264+ if (name->name[0] != '\0') {
61265 fsnotify_open(file);
61266+ trace_open_exec(name->name);
61267+ }
61268
61269 out:
61270 return file;
61271@@ -815,7 +893,7 @@ int kernel_read(struct file *file, loff_t offset,
61272 old_fs = get_fs();
61273 set_fs(get_ds());
61274 /* The cast to a user pointer is valid due to the set_fs() */
61275- result = vfs_read(file, (void __user *)addr, count, &pos);
61276+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
61277 set_fs(old_fs);
61278 return result;
61279 }
61280@@ -860,6 +938,7 @@ static int exec_mmap(struct mm_struct *mm)
61281 tsk->mm = mm;
61282 tsk->active_mm = mm;
61283 activate_mm(active_mm, mm);
61284+ populate_stack();
61285 tsk->mm->vmacache_seqnum = 0;
61286 vmacache_flush(tsk);
61287 task_unlock(tsk);
61288@@ -926,10 +1005,14 @@ static int de_thread(struct task_struct *tsk)
61289 if (!thread_group_leader(tsk)) {
61290 struct task_struct *leader = tsk->group_leader;
61291
61292- sig->notify_count = -1; /* for exit_notify() */
61293 for (;;) {
61294 threadgroup_change_begin(tsk);
61295 write_lock_irq(&tasklist_lock);
61296+ /*
61297+ * Do this under tasklist_lock to ensure that
61298+ * exit_notify() can't miss ->group_exit_task
61299+ */
61300+ sig->notify_count = -1;
61301 if (likely(leader->exit_state))
61302 break;
61303 __set_current_state(TASK_KILLABLE);
61304@@ -1258,7 +1341,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
61305 }
61306 rcu_read_unlock();
61307
61308- if (p->fs->users > n_fs)
61309+ if (atomic_read(&p->fs->users) > n_fs)
61310 bprm->unsafe |= LSM_UNSAFE_SHARE;
61311 else
61312 p->fs->in_exec = 1;
61313@@ -1459,6 +1542,31 @@ static int exec_binprm(struct linux_binprm *bprm)
61314 return ret;
61315 }
61316
61317+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61318+static DEFINE_PER_CPU(u64, exec_counter);
61319+static int __init init_exec_counters(void)
61320+{
61321+ unsigned int cpu;
61322+
61323+ for_each_possible_cpu(cpu) {
61324+ per_cpu(exec_counter, cpu) = (u64)cpu;
61325+ }
61326+
61327+ return 0;
61328+}
61329+early_initcall(init_exec_counters);
61330+static inline void increment_exec_counter(void)
61331+{
61332+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
61333+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
61334+}
61335+#else
61336+static inline void increment_exec_counter(void) {}
61337+#endif
61338+
61339+extern void gr_handle_exec_args(struct linux_binprm *bprm,
61340+ struct user_arg_ptr argv);
61341+
61342 /*
61343 * sys_execve() executes a new program.
61344 */
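
The GRKERNSEC_PROC_MEMMAP hunk above gives every task a unique exec_id without a shared counter: each CPU's 64-bit counter is seeded with its CPU number, and every exec bumps it by 1 << 16, so the low 16 bits always identify the CPU (hence the BUILD_BUG_ON(NR_CPUS > (1 << 16))) while the high bits count execs on that CPU. A sketch of the encoding, with the per-CPU machinery reduced to a plain array:

#include <stdio.h>
#include <stdint.h>

static uint64_t exec_counter[4];      /* stand-in for DEFINE_PER_CPU */

static uint64_t next_exec_id(unsigned int cpu)
{
    if (exec_counter[cpu] == 0)
        exec_counter[cpu] = cpu;      /* init_exec_counters() */
    exec_counter[cpu] += 1ULL << 16;  /* increment_exec_counter() */
    return exec_counter[cpu];
}

int main(void)
{
    uint64_t id = next_exec_id(3);

    printf("exec_id=%#llx cpu=%llu execs=%llu\n",
           (unsigned long long)id,
           (unsigned long long)(id & 0xffff),
           (unsigned long long)(id >> 16));
    return 0;
}
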
61345@@ -1467,6 +1575,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61346 struct user_arg_ptr envp,
61347 int flags)
61348 {
61349+#ifdef CONFIG_GRKERNSEC
61350+ struct file *old_exec_file;
61351+ struct acl_subject_label *old_acl;
61352+ struct rlimit old_rlim[RLIM_NLIMITS];
61353+#endif
61354 char *pathbuf = NULL;
61355 struct linux_binprm *bprm;
61356 struct file *file;
61357@@ -1476,6 +1589,8 @@ static int do_execveat_common(int fd, struct filename *filename,
61358 if (IS_ERR(filename))
61359 return PTR_ERR(filename);
61360
61361+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
61362+
61363 /*
61364 * We move the actual failure in case of RLIMIT_NPROC excess from
61365 * set*uid() to execve() because too many poorly written programs
61366@@ -1513,6 +1628,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61367 if (IS_ERR(file))
61368 goto out_unmark;
61369
61370+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
61371+ retval = -EPERM;
61372+ goto out_unmark;
61373+ }
61374+
61375 sched_exec();
61376
61377 bprm->file = file;
61378@@ -1539,6 +1659,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61379 }
61380 bprm->interp = bprm->filename;
61381
61382+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61383+ retval = -EACCES;
61384+ goto out_unmark;
61385+ }
61386+
61387 retval = bprm_mm_init(bprm);
61388 if (retval)
61389 goto out_unmark;
61390@@ -1555,24 +1680,70 @@ static int do_execveat_common(int fd, struct filename *filename,
61391 if (retval < 0)
61392 goto out;
61393
61394+#ifdef CONFIG_GRKERNSEC
61395+ old_acl = current->acl;
61396+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61397+ old_exec_file = current->exec_file;
61398+ get_file(file);
61399+ current->exec_file = file;
61400+#endif
61401+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61402+ /* limit suid stack to 8MB
61403+ * we saved the old limits above and will restore them if this exec fails
61404+ */
61405+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61406+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61407+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61408+#endif
61409+
61410+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61411+ retval = -EPERM;
61412+ goto out_fail;
61413+ }
61414+
61415+ if (!gr_tpe_allow(file)) {
61416+ retval = -EACCES;
61417+ goto out_fail;
61418+ }
61419+
61420+ if (gr_check_crash_exec(file)) {
61421+ retval = -EACCES;
61422+ goto out_fail;
61423+ }
61424+
61425+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61426+ bprm->unsafe);
61427+ if (retval < 0)
61428+ goto out_fail;
61429+
61430 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61431 if (retval < 0)
61432- goto out;
61433+ goto out_fail;
61434
61435 bprm->exec = bprm->p;
61436 retval = copy_strings(bprm->envc, envp, bprm);
61437 if (retval < 0)
61438- goto out;
61439+ goto out_fail;
61440
61441 retval = copy_strings(bprm->argc, argv, bprm);
61442 if (retval < 0)
61443- goto out;
61444+ goto out_fail;
61445+
61446+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61447+
61448+ gr_handle_exec_args(bprm, argv);
61449
61450 retval = exec_binprm(bprm);
61451 if (retval < 0)
61452- goto out;
61453+ goto out_fail;
61454+#ifdef CONFIG_GRKERNSEC
61455+ if (old_exec_file)
61456+ fput(old_exec_file);
61457+#endif
61458
61459 /* execve succeeded */
61460+
61461+ increment_exec_counter();
61462 current->fs->in_exec = 0;
61463 current->in_execve = 0;
61464 acct_update_integrals(current);
61465@@ -1584,6 +1755,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61466 put_files_struct(displaced);
61467 return retval;
61468
61469+out_fail:
61470+#ifdef CONFIG_GRKERNSEC
61471+ current->acl = old_acl;
61472+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61473+ fput(current->exec_file);
61474+ current->exec_file = old_exec_file;
61475+#endif
61476+
61477 out:
61478 if (bprm->mm) {
61479 acct_arg_size(bprm, 0);
61480@@ -1730,3 +1909,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61481 argv, envp, flags);
61482 }
61483 #endif
61484+
61485+int pax_check_flags(unsigned long *flags)
61486+{
61487+ int retval = 0;
61488+
61489+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61490+ if (*flags & MF_PAX_SEGMEXEC)
61491+ {
61492+ *flags &= ~MF_PAX_SEGMEXEC;
61493+ retval = -EINVAL;
61494+ }
61495+#endif
61496+
61497+ if ((*flags & MF_PAX_PAGEEXEC)
61498+
61499+#ifdef CONFIG_PAX_PAGEEXEC
61500+ && (*flags & MF_PAX_SEGMEXEC)
61501+#endif
61502+
61503+ )
61504+ {
61505+ *flags &= ~MF_PAX_PAGEEXEC;
61506+ retval = -EINVAL;
61507+ }
61508+
61509+ if ((*flags & MF_PAX_MPROTECT)
61510+
61511+#ifdef CONFIG_PAX_MPROTECT
61512+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61513+#endif
61514+
61515+ )
61516+ {
61517+ *flags &= ~MF_PAX_MPROTECT;
61518+ retval = -EINVAL;
61519+ }
61520+
61521+ if ((*flags & MF_PAX_EMUTRAMP)
61522+
61523+#ifdef CONFIG_PAX_EMUTRAMP
61524+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61525+#endif
61526+
61527+ )
61528+ {
61529+ *flags &= ~MF_PAX_EMUTRAMP;
61530+ retval = -EINVAL;
61531+ }
61532+
61533+ return retval;
61534+}
61535+
61536+EXPORT_SYMBOL(pax_check_flags);
61537+
61538+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61539+char *pax_get_path(const struct path *path, char *buf, int buflen)
61540+{
61541+ char *pathname = d_path(path, buf, buflen);
61542+
61543+ if (IS_ERR(pathname))
61544+ goto toolong;
61545+
61546+ pathname = mangle_path(buf, pathname, "\t\n\\");
61547+ if (!pathname)
61548+ goto toolong;
61549+
61550+ *pathname = 0;
61551+ return buf;
61552+
61553+toolong:
61554+ return "<path too long>";
61555+}
61556+EXPORT_SYMBOL(pax_get_path);
61557+
61558+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61559+{
61560+ struct task_struct *tsk = current;
61561+ struct mm_struct *mm = current->mm;
61562+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61563+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61564+ char *path_exec = NULL;
61565+ char *path_fault = NULL;
61566+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61567+ siginfo_t info = { };
61568+
61569+ if (buffer_exec && buffer_fault) {
61570+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61571+
61572+ down_read(&mm->mmap_sem);
61573+ vma = mm->mmap;
61574+ while (vma && (!vma_exec || !vma_fault)) {
61575+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61576+ vma_exec = vma;
61577+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61578+ vma_fault = vma;
61579+ vma = vma->vm_next;
61580+ }
61581+ if (vma_exec)
61582+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61583+ if (vma_fault) {
61584+ start = vma_fault->vm_start;
61585+ end = vma_fault->vm_end;
61586+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61587+ if (vma_fault->vm_file)
61588+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61589+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61590+ path_fault = "<heap>";
61591+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61592+ path_fault = "<stack>";
61593+ else
61594+ path_fault = "<anonymous mapping>";
61595+ }
61596+ up_read(&mm->mmap_sem);
61597+ }
61598+ if (tsk->signal->curr_ip)
61599+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61600+ else
61601+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61602+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61603+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61604+ free_page((unsigned long)buffer_exec);
61605+ free_page((unsigned long)buffer_fault);
61606+ pax_report_insns(regs, pc, sp);
61607+ info.si_signo = SIGKILL;
61608+ info.si_errno = 0;
61609+ info.si_code = SI_KERNEL;
61610+ info.si_pid = 0;
61611+ info.si_uid = 0;
61612+ do_coredump(&info);
61613+}
61614+#endif
61615+
61616+#ifdef CONFIG_PAX_REFCOUNT
61617+void pax_report_refcount_overflow(struct pt_regs *regs)
61618+{
61619+ if (current->signal->curr_ip)
61620+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61621+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61622+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61623+ else
61624+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61625+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61626+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61627+ preempt_disable();
61628+ show_regs(regs);
61629+ preempt_enable();
61630+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61631+}
61632+#endif
61633+
61634+#ifdef CONFIG_PAX_USERCOPY
61635+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61636+static noinline int check_stack_object(const void *obj, unsigned long len)
61637+{
61638+ const void * const stack = task_stack_page(current);
61639+ const void * const stackend = stack + THREAD_SIZE;
61640+
61641+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61642+ const void *frame = NULL;
61643+ const void *oldframe;
61644+#endif
61645+
61646+ if (obj + len < obj)
61647+ return -1;
61648+
61649+ if (obj + len <= stack || stackend <= obj)
61650+ return 0;
61651+
61652+ if (obj < stack || stackend < obj + len)
61653+ return -1;
61654+
61655+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61656+ oldframe = __builtin_frame_address(1);
61657+ if (oldframe)
61658+ frame = __builtin_frame_address(2);
61659+ /*
61660+ low ----------------------------------------------> high
61661+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61662+ ^----------------^
61663+ allow copies only within here
61664+ */
61665+ while (stack <= frame && frame < stackend) {
61666+ /* if obj + len extends past the last frame, this
61667+ check won't pass and the next frame will be 0,
61668+ causing us to bail out and correctly report
61669+ the copy as invalid
61670+ */
61671+ if (obj + len <= frame)
61672+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61673+ oldframe = frame;
61674+ frame = *(const void * const *)frame;
61675+ }
61676+ return -1;
61677+#else
61678+ return 1;
61679+#endif
61680+}
61681+
61682+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61683+{
61684+ if (current->signal->curr_ip)
61685+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61686+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61687+ else
61688+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61689+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61690+ dump_stack();
61691+ gr_handle_kernel_exploit();
61692+ do_group_exit(SIGKILL);
61693+}
61694+#endif
61695+
61696+#ifdef CONFIG_PAX_USERCOPY
61697+
61698+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61699+{
61700+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61701+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61702+#ifdef CONFIG_MODULES
61703+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61704+#else
61705+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61706+#endif
61707+
61708+#else
61709+ unsigned long textlow = (unsigned long)_stext;
61710+ unsigned long texthigh = (unsigned long)_etext;
61711+
61712+#ifdef CONFIG_X86_64
61713+ /* check against linear mapping as well */
61714+ if (high > (unsigned long)__va(__pa(textlow)) &&
61715+ low < (unsigned long)__va(__pa(texthigh)))
61716+ return true;
61717+#endif
61718+
61719+#endif
61720+
61721+ if (high <= textlow || low >= texthigh)
61722+ return false;
61723+ else
61724+ return true;
61725+}
61726+#endif
61727+
61728+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61729+{
61730+#ifdef CONFIG_PAX_USERCOPY
61731+ const char *type;
61732+#endif
61733+
61734+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61735+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61736+ unsigned long currentsp = (unsigned long)&stackstart;
61737+ if (unlikely((currentsp < stackstart + 512 ||
61738+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61739+ BUG();
61740+#endif
61741+
61742+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61743+ if (const_size)
61744+ return;
61745+#endif
61746+
61747+#ifdef CONFIG_PAX_USERCOPY
61748+ if (!n)
61749+ return;
61750+
61751+ type = check_heap_object(ptr, n);
61752+ if (!type) {
61753+ int ret = check_stack_object(ptr, n);
61754+ if (ret == 1 || ret == 2)
61755+ return;
61756+ if (ret == 0) {
61757+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61758+ type = "<kernel text>";
61759+ else
61760+ return;
61761+ } else
61762+ type = "<process stack>";
61763+ }
61764+
61765+ pax_report_usercopy(ptr, n, to_user, type);
61766+#endif
61767+
61768+}
61769+EXPORT_SYMBOL(__check_object_size);
61770+
61771+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61772+void pax_track_stack(void)
61773+{
61774+ unsigned long sp = (unsigned long)&sp;
61775+ if (sp < current_thread_info()->lowest_stack &&
61776+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61777+ current_thread_info()->lowest_stack = sp;
61778+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61779+ BUG();
61780+}
61781+EXPORT_SYMBOL(pax_track_stack);
61782+#endif
61783+
61784+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61785+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61786+{
61787+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61788+ dump_stack();
61789+ do_group_exit(SIGKILL);
61790+}
61791+EXPORT_SYMBOL(report_size_overflow);
61792+#endif
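
Most of the PAX_USERCOPY machinery added to fs/exec.c above reduces to one classification problem: does the candidate copy [obj, obj+len) lie entirely off the thread stack, entirely on it, or straddle an edge? A minimal sketch of that check, using integer addresses to mirror check_stack_object() (the x86 frame-walking refinement is omitted):

#include <stdio.h>
#include <stdint.h>

/* 0: not on the stack, 1: fully on it, -1: partial overlap (an error) */
static int classify(uintptr_t obj, unsigned long len,
                    uintptr_t stack, uintptr_t stackend)
{
    if (obj + len < obj)                      /* length wraps the address */
        return -1;
    if (obj + len <= stack || stackend <= obj)
        return 0;                             /* entirely outside */
    if (obj < stack || stackend < obj + len)
        return -1;                            /* overflows an edge */
    return 1;
}

int main(void)
{
    uintptr_t stack = 0x100000, stackend = stack + 8192;

    printf("%d\n", classify(stack + 100, 64, stack, stackend));  /* 1 */
    printf("%d\n", classify(stack - 32, 64, stack, stackend));   /* -1 */
    printf("%d\n", classify(stackend + 16, 4, stack, stackend)); /* 0 */
    return 0;
}
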
61793diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61794index 9f9992b..8b59411 100644
61795--- a/fs/ext2/balloc.c
61796+++ b/fs/ext2/balloc.c
61797@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61798
61799 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61800 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61801- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61802+ if (free_blocks < root_blocks + 1 &&
61803 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61804 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61805- !in_group_p (sbi->s_resgid))) {
61806+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61807 return 0;
61808 }
61809 return 1;
61810diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61811index d0e746e..82e06f0 100644
61812--- a/fs/ext2/super.c
61813+++ b/fs/ext2/super.c
61814@@ -267,10 +267,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61815 #ifdef CONFIG_EXT2_FS_XATTR
61816 if (test_opt(sb, XATTR_USER))
61817 seq_puts(seq, ",user_xattr");
61818- if (!test_opt(sb, XATTR_USER) &&
61819- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61820+ if (!test_opt(sb, XATTR_USER))
61821 seq_puts(seq, ",nouser_xattr");
61822- }
61823 #endif
61824
61825 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61826@@ -856,8 +854,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61827 if (def_mount_opts & EXT2_DEFM_UID16)
61828 set_opt(sbi->s_mount_opt, NO_UID32);
61829 #ifdef CONFIG_EXT2_FS_XATTR
61830- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61831- set_opt(sbi->s_mount_opt, XATTR_USER);
61832+ /* always enable user xattrs */
61833+ set_opt(sbi->s_mount_opt, XATTR_USER);
61834 #endif
61835 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61836 if (def_mount_opts & EXT2_DEFM_ACL)
61837diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61838index 9142614..97484fa 100644
61839--- a/fs/ext2/xattr.c
61840+++ b/fs/ext2/xattr.c
61841@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61842 struct buffer_head *bh = NULL;
61843 struct ext2_xattr_entry *entry;
61844 char *end;
61845- size_t rest = buffer_size;
61846+ size_t rest = buffer_size, total_size = 0;
61847 int error;
61848
61849 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61850@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61851 buffer += size;
61852 }
61853 rest -= size;
61854+ total_size += size;
61855 }
61856 }
61857- error = buffer_size - rest; /* total size */
61858+ error = total_size;
61859
61860 cleanup:
61861 brelse(bh);
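
The ext2_xattr_list() change above (repeated for ext3 and ext4 below) makes the size-probe mode explicit: when the caller passes buffer == NULL to ask how much space it needs, the old code recovered the total from buffer_size - rest, which only works because size_t subtraction wraps; the fix accumulates total_size directly. A sketch of the contract (entry names invented for the demo):

#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <errno.h>

static ptrdiff_t list_xattrs(const char *const *names, int n,
                             char *buffer, size_t buffer_size)
{
    size_t rest = buffer_size, total_size = 0;

    for (int i = 0; i < n; i++) {
        size_t size = strlen(names[i]) + 1;

        if (buffer) {
            if (size > rest)
                return -ERANGE;
            memcpy(buffer, names[i], size);
            buffer += size;
            rest -= size;
        }
        total_size += size;        /* correct in probe mode too */
    }
    return (ptrdiff_t)total_size;
}

int main(void)
{
    const char *names[] = { "user.foo", "security.selinux" };
    char buf[64];

    printf("need=%td\n", list_xattrs(names, 2, NULL, 0));        /* probe */
    printf("got=%td\n",  list_xattrs(names, 2, buf, sizeof(buf)));
    return 0;
}
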
61862diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61863index 158b5d4..2432610 100644
61864--- a/fs/ext3/balloc.c
61865+++ b/fs/ext3/balloc.c
61866@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61867
61868 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61869 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61870- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61871+ if (free_blocks < root_blocks + 1 &&
61872 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61873 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61874- !in_group_p (sbi->s_resgid))) {
61875+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61876 return 0;
61877 }
61878 return 1;
61879diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61880index d4dbf3c..906a6fb 100644
61881--- a/fs/ext3/super.c
61882+++ b/fs/ext3/super.c
61883@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61884 #ifdef CONFIG_EXT3_FS_XATTR
61885 if (test_opt(sb, XATTR_USER))
61886 seq_puts(seq, ",user_xattr");
61887- if (!test_opt(sb, XATTR_USER) &&
61888- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61889+ if (!test_opt(sb, XATTR_USER))
61890 seq_puts(seq, ",nouser_xattr");
61891- }
61892 #endif
61893 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61894 if (test_opt(sb, POSIX_ACL))
61895@@ -1760,8 +1758,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61896 if (def_mount_opts & EXT3_DEFM_UID16)
61897 set_opt(sbi->s_mount_opt, NO_UID32);
61898 #ifdef CONFIG_EXT3_FS_XATTR
61899- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61900- set_opt(sbi->s_mount_opt, XATTR_USER);
61901+ /* always enable user xattrs */
61902+ set_opt(sbi->s_mount_opt, XATTR_USER);
61903 #endif
61904 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61905 if (def_mount_opts & EXT3_DEFM_ACL)
61906diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61907index c6874be..f8a6ae8 100644
61908--- a/fs/ext3/xattr.c
61909+++ b/fs/ext3/xattr.c
61910@@ -330,7 +330,7 @@ static int
61911 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61912 char *buffer, size_t buffer_size)
61913 {
61914- size_t rest = buffer_size;
61915+ size_t rest = buffer_size, total_size = 0;
61916
61917 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61918 const struct xattr_handler *handler =
61919@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61920 buffer += size;
61921 }
61922 rest -= size;
61923+ total_size += size;
61924 }
61925 }
61926- return buffer_size - rest;
61927+ return total_size;
61928 }
61929
61930 static int
61931diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61932index 83a6f49..d4e4d03 100644
61933--- a/fs/ext4/balloc.c
61934+++ b/fs/ext4/balloc.c
61935@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61936 /* Hm, nope. Are (enough) root reserved clusters available? */
61937 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61938 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61939- capable(CAP_SYS_RESOURCE) ||
61940- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61941+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61942+ capable_nolog(CAP_SYS_RESOURCE)) {
61943
61944 if (free_clusters >= (nclusters + dirty_clusters +
61945 resv_clusters))
61946diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61947index f63c3d5..3c1a033 100644
61948--- a/fs/ext4/ext4.h
61949+++ b/fs/ext4/ext4.h
61950@@ -1287,19 +1287,19 @@ struct ext4_sb_info {
61951 unsigned long s_mb_last_start;
61952
61953 /* stats for buddy allocator */
61954- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61955- atomic_t s_bal_success; /* we found long enough chunks */
61956- atomic_t s_bal_allocated; /* in blocks */
61957- atomic_t s_bal_ex_scanned; /* total extents scanned */
61958- atomic_t s_bal_goals; /* goal hits */
61959- atomic_t s_bal_breaks; /* too long searches */
61960- atomic_t s_bal_2orders; /* 2^order hits */
61961+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61962+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61963+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61964+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61965+ atomic_unchecked_t s_bal_goals; /* goal hits */
61966+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61967+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61968 spinlock_t s_bal_lock;
61969 unsigned long s_mb_buddies_generated;
61970 unsigned long long s_mb_generation_time;
61971- atomic_t s_mb_lost_chunks;
61972- atomic_t s_mb_preallocated;
61973- atomic_t s_mb_discarded;
61974+ atomic_unchecked_t s_mb_lost_chunks;
61975+ atomic_unchecked_t s_mb_preallocated;
61976+ atomic_unchecked_t s_mb_discarded;
61977 atomic_t s_lock_busy;
61978
61979 /* locality groups */
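
The atomic_t to atomic_unchecked_t conversions above (and throughout the mballoc.c and fscache hunks that follow) reflect the PAX_REFCOUNT split: checked atomics trap on overflow so a refcount bug cannot wrap into a use-after-free, while pure statistics counters are allowed to wrap and use the unchecked variant. A rough userspace model of the distinction (the checked increment is simplified and not actually atomic here; only the overflow trap matters):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)          /* checked variant */
{
    int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED), sum;

    if (__builtin_add_overflow(old, 1, &sum))
        __builtin_trap();                    /* PAX_REFCOUNT reports and kills */
    __atomic_store_n(&v->counter, sum, __ATOMIC_RELAXED);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* free to wrap */
}

int main(void)
{
    atomic_t ref = { 41 };
    atomic_unchecked_t stat = { 0x7fffffff };

    atomic_inc(&ref);
    atomic_inc_unchecked(&stat);             /* wraps, and that is fine */
    printf("ref=%d stat=%d\n", ref.counter, stat.counter);
    return 0;
}
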
61980diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61981index 8d1e602..abf497b 100644
61982--- a/fs/ext4/mballoc.c
61983+++ b/fs/ext4/mballoc.c
61984@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61985 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61986
61987 if (EXT4_SB(sb)->s_mb_stats)
61988- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61989+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61990
61991 break;
61992 }
61993@@ -2211,7 +2211,7 @@ repeat:
61994 ac->ac_status = AC_STATUS_CONTINUE;
61995 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61996 cr = 3;
61997- atomic_inc(&sbi->s_mb_lost_chunks);
61998+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61999 goto repeat;
62000 }
62001 }
62002@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
62003 if (sbi->s_mb_stats) {
62004 ext4_msg(sb, KERN_INFO,
62005 "mballoc: %u blocks %u reqs (%u success)",
62006- atomic_read(&sbi->s_bal_allocated),
62007- atomic_read(&sbi->s_bal_reqs),
62008- atomic_read(&sbi->s_bal_success));
62009+ atomic_read_unchecked(&sbi->s_bal_allocated),
62010+ atomic_read_unchecked(&sbi->s_bal_reqs),
62011+ atomic_read_unchecked(&sbi->s_bal_success));
62012 ext4_msg(sb, KERN_INFO,
62013 "mballoc: %u extents scanned, %u goal hits, "
62014 "%u 2^N hits, %u breaks, %u lost",
62015- atomic_read(&sbi->s_bal_ex_scanned),
62016- atomic_read(&sbi->s_bal_goals),
62017- atomic_read(&sbi->s_bal_2orders),
62018- atomic_read(&sbi->s_bal_breaks),
62019- atomic_read(&sbi->s_mb_lost_chunks));
62020+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
62021+ atomic_read_unchecked(&sbi->s_bal_goals),
62022+ atomic_read_unchecked(&sbi->s_bal_2orders),
62023+ atomic_read_unchecked(&sbi->s_bal_breaks),
62024+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
62025 ext4_msg(sb, KERN_INFO,
62026 "mballoc: %lu generated and it took %Lu",
62027 sbi->s_mb_buddies_generated,
62028 sbi->s_mb_generation_time);
62029 ext4_msg(sb, KERN_INFO,
62030 "mballoc: %u preallocated, %u discarded",
62031- atomic_read(&sbi->s_mb_preallocated),
62032- atomic_read(&sbi->s_mb_discarded));
62033+ atomic_read_unchecked(&sbi->s_mb_preallocated),
62034+ atomic_read_unchecked(&sbi->s_mb_discarded));
62035 }
62036
62037 free_percpu(sbi->s_locality_groups);
62038@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
62039 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
62040
62041 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
62042- atomic_inc(&sbi->s_bal_reqs);
62043- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62044+ atomic_inc_unchecked(&sbi->s_bal_reqs);
62045+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62046 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
62047- atomic_inc(&sbi->s_bal_success);
62048- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
62049+ atomic_inc_unchecked(&sbi->s_bal_success);
62050+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
62051 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
62052 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
62053- atomic_inc(&sbi->s_bal_goals);
62054+ atomic_inc_unchecked(&sbi->s_bal_goals);
62055 if (ac->ac_found > sbi->s_mb_max_to_scan)
62056- atomic_inc(&sbi->s_bal_breaks);
62057+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62058 }
62059
62060 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62061@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62062 trace_ext4_mb_new_inode_pa(ac, pa);
62063
62064 ext4_mb_use_inode_pa(ac, pa);
62065- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62066+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62067
62068 ei = EXT4_I(ac->ac_inode);
62069 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62070@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62071 trace_ext4_mb_new_group_pa(ac, pa);
62072
62073 ext4_mb_use_group_pa(ac, pa);
62074- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62075+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62076
62077 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62078 lg = ac->ac_lg;
62079@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62080 * from the bitmap and continue.
62081 */
62082 }
62083- atomic_add(free, &sbi->s_mb_discarded);
62084+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62085
62086 return err;
62087 }
62088@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62089 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62090 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62091 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62092- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62093+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62094 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62095
62096 return 0;
62097diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62098index 8313ca3..8a37d08 100644
62099--- a/fs/ext4/mmp.c
62100+++ b/fs/ext4/mmp.c
62101@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62102 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
62103 const char *function, unsigned int line, const char *msg)
62104 {
62105- __ext4_warning(sb, function, line, msg);
62106+ __ext4_warning(sb, function, line, "%s", msg);
62107 __ext4_warning(sb, function, line,
62108 "MMP failure info: last update time: %llu, last update "
62109 "node: %s, last update device: %s\n",
62110diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
62111index 8a8ec62..1b02de5 100644
62112--- a/fs/ext4/resize.c
62113+++ b/fs/ext4/resize.c
62114@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
62115
62116 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
62117 for (count2 = count; count > 0; count -= count2, block += count2) {
62118- ext4_fsblk_t start;
62119+ ext4_fsblk_t start, diff;
62120 struct buffer_head *bh;
62121 ext4_group_t group;
62122 int err;
62123@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
62124 start = ext4_group_first_block_no(sb, group);
62125 group -= flex_gd->groups[0].group;
62126
62127- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
62128- if (count2 > count)
62129- count2 = count;
62130-
62131 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
62132 BUG_ON(flex_gd->count > 1);
62133 continue;
62134@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
62135 err = ext4_journal_get_write_access(handle, bh);
62136 if (err)
62137 return err;
62138+
62139+ diff = block - start;
62140+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
62141+ if (count2 > count)
62142+ count2 = count;
62143+
62144 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
62145- block - start, count2);
62146- ext4_set_bits(bh->b_data, block - start, count2);
62147+ diff, count2);
62148+ ext4_set_bits(bh->b_data, diff, count2);
62149
62150 err = ext4_handle_dirty_metadata(handle, NULL, bh);
62151 if (unlikely(err))
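
The set_flexbg_block_bitmap() fix above defers the chunk-size computation until after the journal write access and keeps block - start in diff, so the bitmap offset, the clamp, and the debug message all agree. The loop arithmetic, sketched with demo values:

#include <stdio.h>

#define BLOCKS_PER_GROUP 32768ULL

int main(void)
{
    unsigned long long block = 98000, count = 40000, count2;

    for (; count > 0; count -= count2, block += count2) {
        unsigned long long group = block / BLOCKS_PER_GROUP;
        unsigned long long start = group * BLOCKS_PER_GROUP;
        unsigned long long diff  = block - start;

        count2 = BLOCKS_PER_GROUP - diff;     /* room left in this group */
        if (count2 > count)
            count2 = count;
        printf("group %llu: set bits [%llu, %llu)\n",
               group, diff, diff + count2);
    }
    return 0;
}
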
62152diff --git a/fs/ext4/super.c b/fs/ext4/super.c
62153index e061e66..87bc092 100644
62154--- a/fs/ext4/super.c
62155+++ b/fs/ext4/super.c
62156@@ -1243,7 +1243,7 @@ static ext4_fsblk_t get_sb_block(void **data)
62157 }
62158
62159 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
62160-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62161+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62162 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
62163
62164 #ifdef CONFIG_QUOTA
62165@@ -2443,7 +2443,7 @@ struct ext4_attr {
62166 int offset;
62167 int deprecated_val;
62168 } u;
62169-};
62170+} __do_const;
62171
62172 static int parse_strtoull(const char *buf,
62173 unsigned long long max, unsigned long long *value)
62174diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
62175index 1e09fc7..0400dd4 100644
62176--- a/fs/ext4/xattr.c
62177+++ b/fs/ext4/xattr.c
62178@@ -399,7 +399,7 @@ static int
62179 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62180 char *buffer, size_t buffer_size)
62181 {
62182- size_t rest = buffer_size;
62183+ size_t rest = buffer_size, total_size = 0;
62184
62185 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62186 const struct xattr_handler *handler =
62187@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62188 buffer += size;
62189 }
62190 rest -= size;
62191+ total_size += size;
62192 }
62193 }
62194- return buffer_size - rest;
62195+ return total_size;
62196 }
62197
62198 static int
62199diff --git a/fs/fcntl.c b/fs/fcntl.c
62200index ee85cd4..9dd0d20 100644
62201--- a/fs/fcntl.c
62202+++ b/fs/fcntl.c
62203@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62204 int force)
62205 {
62206 security_file_set_fowner(filp);
62207+ if (gr_handle_chroot_fowner(pid, type))
62208+ return;
62209+ if (gr_check_protected_task_fowner(pid, type))
62210+ return;
62211 f_modown(filp, pid, type, force);
62212 }
62213 EXPORT_SYMBOL(__f_setown);
62214diff --git a/fs/fhandle.c b/fs/fhandle.c
62215index 999ff5c..2281df9 100644
62216--- a/fs/fhandle.c
62217+++ b/fs/fhandle.c
62218@@ -8,6 +8,7 @@
62219 #include <linux/fs_struct.h>
62220 #include <linux/fsnotify.h>
62221 #include <linux/personality.h>
62222+#include <linux/grsecurity.h>
62223 #include <asm/uaccess.h>
62224 #include "internal.h"
62225 #include "mount.h"
62226@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62227 } else
62228 retval = 0;
62229 /* copy the mount id */
62230- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62231- sizeof(*mnt_id)) ||
62232+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62233 copy_to_user(ufh, handle,
62234 sizeof(struct file_handle) + handle_bytes))
62235 retval = -EFAULT;
62236@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62237 * the directory. Ideally we would like CAP_DAC_SEARCH.
62238 * But we don't have that
62239 */
62240- if (!capable(CAP_DAC_READ_SEARCH)) {
62241+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62242 retval = -EPERM;
62243 goto out_err;
62244 }
62245@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62246 goto out_err;
62247 }
62248 /* copy the full handle */
62249- if (copy_from_user(handle, ufh,
62250- sizeof(struct file_handle) +
62251+ *handle = f_handle;
62252+ if (copy_from_user(&handle->f_handle,
62253+ &ufh->f_handle,
62254 f_handle.handle_bytes)) {
62255 retval = -EFAULT;
62256 goto out_handle;
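
The handle_to_path() change above closes a classic double fetch: the old code read handle_bytes once (into f_handle) to size the allocation, then copy_from_user()'d the whole struct file_handle again, re-reading handle_bytes from memory a racing thread could have grown in the meantime. The fix reuses the first snapshot and copies only the payload. A userspace model of the safe pattern (user memory simulated with a heap buffer):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct file_handle {
    unsigned int handle_bytes;
    int handle_type;
    unsigned char f_handle[];
};

static struct file_handle *fetch_handle(const struct file_handle *user)
{
    struct file_handle snap = *user;   /* the one and only header read */
    struct file_handle *h = malloc(sizeof(*h) + snap.handle_bytes);

    if (!h)
        return NULL;
    *h = snap;                         /* trust the snapshot... */
    /* ...and copy only the payload, sized by the snapshot */
    memcpy(h->f_handle, user->f_handle, snap.handle_bytes);
    return h;
}

int main(void)
{
    struct file_handle *u = malloc(sizeof(*u) + 8);
    struct file_handle *k;

    u->handle_bytes = 8;
    u->handle_type = 1;
    memset(u->f_handle, 0xab, 8);
    k = fetch_handle(u);
    printf("copied %u payload bytes\n", k ? k->handle_bytes : 0);
    free(k);
    free(u);
    return 0;
}
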
62257diff --git a/fs/file.c b/fs/file.c
62258index ee738ea..f6c15629 100644
62259--- a/fs/file.c
62260+++ b/fs/file.c
62261@@ -16,6 +16,7 @@
62262 #include <linux/slab.h>
62263 #include <linux/vmalloc.h>
62264 #include <linux/file.h>
62265+#include <linux/security.h>
62266 #include <linux/fdtable.h>
62267 #include <linux/bitops.h>
62268 #include <linux/interrupt.h>
62269@@ -139,7 +140,7 @@ out:
62270 * Return <0 error code on error; 1 on successful completion.
62271 * The files->file_lock should be held on entry, and will be held on exit.
62272 */
62273-static int expand_fdtable(struct files_struct *files, int nr)
62274+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62275 __releases(files->file_lock)
62276 __acquires(files->file_lock)
62277 {
62278@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62279 * expanded and execution may have blocked.
62280 * The files->file_lock should be held on entry, and will be held on exit.
62281 */
62282-static int expand_files(struct files_struct *files, int nr)
62283+static int expand_files(struct files_struct *files, unsigned int nr)
62284 {
62285 struct fdtable *fdt;
62286
62287@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62288 if (!file)
62289 return __close_fd(files, fd);
62290
62291+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62292 if (fd >= rlimit(RLIMIT_NOFILE))
62293 return -EBADF;
62294
62295@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62296 if (unlikely(oldfd == newfd))
62297 return -EINVAL;
62298
62299+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62300 if (newfd >= rlimit(RLIMIT_NOFILE))
62301 return -EBADF;
62302
62303@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62304 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62305 {
62306 int err;
62307+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62308 if (from >= rlimit(RLIMIT_NOFILE))
62309 return -EINVAL;
62310 err = alloc_fd(from, flags);
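
Besides the gr_learn_resource() hooks, the fs/file.c hunks above flip expand_fdtable()/expand_files() to take an unsigned nr. The point of such signedness changes: a descriptor number that goes negative as a signed int can slip past a "< limit" check, whereas the same bit pattern as unsigned is enormous and gets rejected. Demo values:

#include <stdio.h>

int main(void)
{
    int nr_signed = -1;
    unsigned int nr_unsigned = (unsigned int)nr_signed;
    unsigned int limit = 1024;

    printf("signed:   nr < limit -> %d (passes!)\n", nr_signed < (int)limit);
    printf("unsigned: nr < limit -> %d (rejected)\n", nr_unsigned < limit);
    return 0;
}
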
62311diff --git a/fs/filesystems.c b/fs/filesystems.c
62312index 5797d45..7d7d79a 100644
62313--- a/fs/filesystems.c
62314+++ b/fs/filesystems.c
62315@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62316 int len = dot ? dot - name : strlen(name);
62317
62318 fs = __get_fs_type(name, len);
62319+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62320+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62321+#else
62322 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62323+#endif
62324 fs = __get_fs_type(name, len);
62325
62326 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62327diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62328index 7dca743..1ff87ae 100644
62329--- a/fs/fs_struct.c
62330+++ b/fs/fs_struct.c
62331@@ -4,6 +4,7 @@
62332 #include <linux/path.h>
62333 #include <linux/slab.h>
62334 #include <linux/fs_struct.h>
62335+#include <linux/grsecurity.h>
62336 #include "internal.h"
62337
62338 /*
62339@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62340 struct path old_root;
62341
62342 path_get(path);
62343+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
62344 spin_lock(&fs->lock);
62345 write_seqcount_begin(&fs->seq);
62346 old_root = fs->root;
62347 fs->root = *path;
62348+ gr_set_chroot_entries(current, path);
62349 write_seqcount_end(&fs->seq);
62350 spin_unlock(&fs->lock);
62351- if (old_root.dentry)
62352+ if (old_root.dentry) {
62353+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
62354 path_put(&old_root);
62355+ }
62356 }
62357
62358 /*
62359@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62360 int hits = 0;
62361 spin_lock(&fs->lock);
62362 write_seqcount_begin(&fs->seq);
62363+ /* this root replacement is only done by pivot_root,
62364+ leave grsec's chroot tagging alone for this task
62365+ so that a pivoted root isn't treated as a chroot
62366+ */
62367 hits += replace_path(&fs->root, old_root, new_root);
62368 hits += replace_path(&fs->pwd, old_root, new_root);
62369 write_seqcount_end(&fs->seq);
62370@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62371
62372 void free_fs_struct(struct fs_struct *fs)
62373 {
62374+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
62375 path_put(&fs->root);
62376 path_put(&fs->pwd);
62377 kmem_cache_free(fs_cachep, fs);
62378@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
62379 task_lock(tsk);
62380 spin_lock(&fs->lock);
62381 tsk->fs = NULL;
62382- kill = !--fs->users;
62383+ gr_clear_chroot_entries(tsk);
62384+ kill = !atomic_dec_return(&fs->users);
62385 spin_unlock(&fs->lock);
62386 task_unlock(tsk);
62387 if (kill)
62388@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62389 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62390 /* We don't need to lock fs - think why ;-) */
62391 if (fs) {
62392- fs->users = 1;
62393+ atomic_set(&fs->users, 1);
62394 fs->in_exec = 0;
62395 spin_lock_init(&fs->lock);
62396 seqcount_init(&fs->seq);
62397@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62398 spin_lock(&old->lock);
62399 fs->root = old->root;
62400 path_get(&fs->root);
62401+ /* instead of calling gr_set_chroot_entries here,
62402+ we call it from every caller of this function
62403+ */
62404 fs->pwd = old->pwd;
62405 path_get(&fs->pwd);
62406 spin_unlock(&old->lock);
62407+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
62408 }
62409 return fs;
62410 }
62411@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
62412
62413 task_lock(current);
62414 spin_lock(&fs->lock);
62415- kill = !--fs->users;
62416+ kill = !atomic_dec_return(&fs->users);
62417 current->fs = new_fs;
62418+ gr_set_chroot_entries(current, &new_fs->root);
62419 spin_unlock(&fs->lock);
62420 task_unlock(current);
62421
62422@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62423
62424 int current_umask(void)
62425 {
62426- return current->fs->umask;
62427+ return current->fs->umask | gr_acl_umask();
62428 }
62429 EXPORT_SYMBOL(current_umask);
62430
62431 /* to be mentioned only in INIT_TASK */
62432 struct fs_struct init_fs = {
62433- .users = 1,
62434+ .users = ATOMIC_INIT(1),
62435 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62436 .seq = SEQCNT_ZERO(init_fs.seq),
62437 .umask = 0022,
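
The fs_struct hunks above convert fs->users from a plain int to an atomic_t because grsecurity now reads it without taking fs->lock (see the atomic_read() in the check_unsafe_exec() hunk earlier in fs/exec.c); once any reader is lockless, the "kill = !--fs->users" drops must become atomic too. A sketch with C11 atomics standing in for the kernel's:

#include <stdio.h>
#include <stdatomic.h>

struct fs_struct { atomic_int users; };

static int put_fs(struct fs_struct *fs)
{
    /* kill = !atomic_dec_return(&fs->users); */
    return atomic_fetch_sub(&fs->users, 1) - 1 == 0;
}

int main(void)
{
    struct fs_struct fs = { .users = 2 };
    int kill = put_fs(&fs);

    printf("kill=%d users=%d\n", kill, atomic_load(&fs.users));
    printf("kill=%d\n", put_fs(&fs));   /* last reference: kill=1 */
    return 0;
}
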
62438diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62439index 89acec7..a575262 100644
62440--- a/fs/fscache/cookie.c
62441+++ b/fs/fscache/cookie.c
62442@@ -19,7 +19,7 @@
62443
62444 struct kmem_cache *fscache_cookie_jar;
62445
62446-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62447+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62448
62449 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62450 static int fscache_alloc_object(struct fscache_cache *cache,
62451@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62452 parent ? (char *) parent->def->name : "<no-parent>",
62453 def->name, netfs_data, enable);
62454
62455- fscache_stat(&fscache_n_acquires);
62456+ fscache_stat_unchecked(&fscache_n_acquires);
62457
62458 /* if there's no parent cookie, then we don't create one here either */
62459 if (!parent) {
62460- fscache_stat(&fscache_n_acquires_null);
62461+ fscache_stat_unchecked(&fscache_n_acquires_null);
62462 _leave(" [no parent]");
62463 return NULL;
62464 }
62465@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62466 /* allocate and initialise a cookie */
62467 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62468 if (!cookie) {
62469- fscache_stat(&fscache_n_acquires_oom);
62470+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62471 _leave(" [ENOMEM]");
62472 return NULL;
62473 }
62474@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62475
62476 switch (cookie->def->type) {
62477 case FSCACHE_COOKIE_TYPE_INDEX:
62478- fscache_stat(&fscache_n_cookie_index);
62479+ fscache_stat_unchecked(&fscache_n_cookie_index);
62480 break;
62481 case FSCACHE_COOKIE_TYPE_DATAFILE:
62482- fscache_stat(&fscache_n_cookie_data);
62483+ fscache_stat_unchecked(&fscache_n_cookie_data);
62484 break;
62485 default:
62486- fscache_stat(&fscache_n_cookie_special);
62487+ fscache_stat_unchecked(&fscache_n_cookie_special);
62488 break;
62489 }
62490
62491@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62492 } else {
62493 atomic_dec(&parent->n_children);
62494 __fscache_cookie_put(cookie);
62495- fscache_stat(&fscache_n_acquires_nobufs);
62496+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62497 _leave(" = NULL");
62498 return NULL;
62499 }
62500@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62501 }
62502 }
62503
62504- fscache_stat(&fscache_n_acquires_ok);
62505+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62506 _leave(" = %p", cookie);
62507 return cookie;
62508 }
62509@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62510 cache = fscache_select_cache_for_object(cookie->parent);
62511 if (!cache) {
62512 up_read(&fscache_addremove_sem);
62513- fscache_stat(&fscache_n_acquires_no_cache);
62514+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62515 _leave(" = -ENOMEDIUM [no cache]");
62516 return -ENOMEDIUM;
62517 }
62518@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62519 object = cache->ops->alloc_object(cache, cookie);
62520 fscache_stat_d(&fscache_n_cop_alloc_object);
62521 if (IS_ERR(object)) {
62522- fscache_stat(&fscache_n_object_no_alloc);
62523+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62524 ret = PTR_ERR(object);
62525 goto error;
62526 }
62527
62528- fscache_stat(&fscache_n_object_alloc);
62529+ fscache_stat_unchecked(&fscache_n_object_alloc);
62530
62531- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62532+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62533
62534 _debug("ALLOC OBJ%x: %s {%lx}",
62535 object->debug_id, cookie->def->name, object->events);
62536@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62537
62538 _enter("{%s}", cookie->def->name);
62539
62540- fscache_stat(&fscache_n_invalidates);
62541+ fscache_stat_unchecked(&fscache_n_invalidates);
62542
62543 /* Only permit invalidation of data files. Invalidating an index will
62544 * require the caller to release all its attachments to the tree rooted
62545@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62546 {
62547 struct fscache_object *object;
62548
62549- fscache_stat(&fscache_n_updates);
62550+ fscache_stat_unchecked(&fscache_n_updates);
62551
62552 if (!cookie) {
62553- fscache_stat(&fscache_n_updates_null);
62554+ fscache_stat_unchecked(&fscache_n_updates_null);
62555 _leave(" [no cookie]");
62556 return;
62557 }
62558@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62559 */
62560 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62561 {
62562- fscache_stat(&fscache_n_relinquishes);
62563+ fscache_stat_unchecked(&fscache_n_relinquishes);
62564 if (retire)
62565- fscache_stat(&fscache_n_relinquishes_retire);
62566+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62567
62568 if (!cookie) {
62569- fscache_stat(&fscache_n_relinquishes_null);
62570+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62571 _leave(" [no cookie]");
62572 return;
62573 }
62574@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62575 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62576 goto inconsistent;
62577
62578- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62579+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62580
62581 __fscache_use_cookie(cookie);
62582 if (fscache_submit_op(object, op) < 0)
62583diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62584index 7872a62..d91b19f 100644
62585--- a/fs/fscache/internal.h
62586+++ b/fs/fscache/internal.h
62587@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62588 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62589 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62590 struct fscache_operation *,
62591- atomic_t *,
62592- atomic_t *,
62593+ atomic_unchecked_t *,
62594+ atomic_unchecked_t *,
62595 void (*)(struct fscache_operation *));
62596 extern void fscache_invalidate_writes(struct fscache_cookie *);
62597
62598@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62599 * stats.c
62600 */
62601 #ifdef CONFIG_FSCACHE_STATS
62602-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62603-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62604+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62605+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62606
62607-extern atomic_t fscache_n_op_pend;
62608-extern atomic_t fscache_n_op_run;
62609-extern atomic_t fscache_n_op_enqueue;
62610-extern atomic_t fscache_n_op_deferred_release;
62611-extern atomic_t fscache_n_op_release;
62612-extern atomic_t fscache_n_op_gc;
62613-extern atomic_t fscache_n_op_cancelled;
62614-extern atomic_t fscache_n_op_rejected;
62615+extern atomic_unchecked_t fscache_n_op_pend;
62616+extern atomic_unchecked_t fscache_n_op_run;
62617+extern atomic_unchecked_t fscache_n_op_enqueue;
62618+extern atomic_unchecked_t fscache_n_op_deferred_release;
62619+extern atomic_unchecked_t fscache_n_op_release;
62620+extern atomic_unchecked_t fscache_n_op_gc;
62621+extern atomic_unchecked_t fscache_n_op_cancelled;
62622+extern atomic_unchecked_t fscache_n_op_rejected;
62623
62624-extern atomic_t fscache_n_attr_changed;
62625-extern atomic_t fscache_n_attr_changed_ok;
62626-extern atomic_t fscache_n_attr_changed_nobufs;
62627-extern atomic_t fscache_n_attr_changed_nomem;
62628-extern atomic_t fscache_n_attr_changed_calls;
62629+extern atomic_unchecked_t fscache_n_attr_changed;
62630+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62631+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62632+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62633+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62634
62635-extern atomic_t fscache_n_allocs;
62636-extern atomic_t fscache_n_allocs_ok;
62637-extern atomic_t fscache_n_allocs_wait;
62638-extern atomic_t fscache_n_allocs_nobufs;
62639-extern atomic_t fscache_n_allocs_intr;
62640-extern atomic_t fscache_n_allocs_object_dead;
62641-extern atomic_t fscache_n_alloc_ops;
62642-extern atomic_t fscache_n_alloc_op_waits;
62643+extern atomic_unchecked_t fscache_n_allocs;
62644+extern atomic_unchecked_t fscache_n_allocs_ok;
62645+extern atomic_unchecked_t fscache_n_allocs_wait;
62646+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62647+extern atomic_unchecked_t fscache_n_allocs_intr;
62648+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62649+extern atomic_unchecked_t fscache_n_alloc_ops;
62650+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62651
62652-extern atomic_t fscache_n_retrievals;
62653-extern atomic_t fscache_n_retrievals_ok;
62654-extern atomic_t fscache_n_retrievals_wait;
62655-extern atomic_t fscache_n_retrievals_nodata;
62656-extern atomic_t fscache_n_retrievals_nobufs;
62657-extern atomic_t fscache_n_retrievals_intr;
62658-extern atomic_t fscache_n_retrievals_nomem;
62659-extern atomic_t fscache_n_retrievals_object_dead;
62660-extern atomic_t fscache_n_retrieval_ops;
62661-extern atomic_t fscache_n_retrieval_op_waits;
62662+extern atomic_unchecked_t fscache_n_retrievals;
62663+extern atomic_unchecked_t fscache_n_retrievals_ok;
62664+extern atomic_unchecked_t fscache_n_retrievals_wait;
62665+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62666+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62667+extern atomic_unchecked_t fscache_n_retrievals_intr;
62668+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62669+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62670+extern atomic_unchecked_t fscache_n_retrieval_ops;
62671+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62672
62673-extern atomic_t fscache_n_stores;
62674-extern atomic_t fscache_n_stores_ok;
62675-extern atomic_t fscache_n_stores_again;
62676-extern atomic_t fscache_n_stores_nobufs;
62677-extern atomic_t fscache_n_stores_oom;
62678-extern atomic_t fscache_n_store_ops;
62679-extern atomic_t fscache_n_store_calls;
62680-extern atomic_t fscache_n_store_pages;
62681-extern atomic_t fscache_n_store_radix_deletes;
62682-extern atomic_t fscache_n_store_pages_over_limit;
62683+extern atomic_unchecked_t fscache_n_stores;
62684+extern atomic_unchecked_t fscache_n_stores_ok;
62685+extern atomic_unchecked_t fscache_n_stores_again;
62686+extern atomic_unchecked_t fscache_n_stores_nobufs;
62687+extern atomic_unchecked_t fscache_n_stores_oom;
62688+extern atomic_unchecked_t fscache_n_store_ops;
62689+extern atomic_unchecked_t fscache_n_store_calls;
62690+extern atomic_unchecked_t fscache_n_store_pages;
62691+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62692+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62693
62694-extern atomic_t fscache_n_store_vmscan_not_storing;
62695-extern atomic_t fscache_n_store_vmscan_gone;
62696-extern atomic_t fscache_n_store_vmscan_busy;
62697-extern atomic_t fscache_n_store_vmscan_cancelled;
62698-extern atomic_t fscache_n_store_vmscan_wait;
62699+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62700+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62701+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62702+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62703+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62704
62705-extern atomic_t fscache_n_marks;
62706-extern atomic_t fscache_n_uncaches;
62707+extern atomic_unchecked_t fscache_n_marks;
62708+extern atomic_unchecked_t fscache_n_uncaches;
62709
62710-extern atomic_t fscache_n_acquires;
62711-extern atomic_t fscache_n_acquires_null;
62712-extern atomic_t fscache_n_acquires_no_cache;
62713-extern atomic_t fscache_n_acquires_ok;
62714-extern atomic_t fscache_n_acquires_nobufs;
62715-extern atomic_t fscache_n_acquires_oom;
62716+extern atomic_unchecked_t fscache_n_acquires;
62717+extern atomic_unchecked_t fscache_n_acquires_null;
62718+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62719+extern atomic_unchecked_t fscache_n_acquires_ok;
62720+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62721+extern atomic_unchecked_t fscache_n_acquires_oom;
62722
62723-extern atomic_t fscache_n_invalidates;
62724-extern atomic_t fscache_n_invalidates_run;
62725+extern atomic_unchecked_t fscache_n_invalidates;
62726+extern atomic_unchecked_t fscache_n_invalidates_run;
62727
62728-extern atomic_t fscache_n_updates;
62729-extern atomic_t fscache_n_updates_null;
62730-extern atomic_t fscache_n_updates_run;
62731+extern atomic_unchecked_t fscache_n_updates;
62732+extern atomic_unchecked_t fscache_n_updates_null;
62733+extern atomic_unchecked_t fscache_n_updates_run;
62734
62735-extern atomic_t fscache_n_relinquishes;
62736-extern atomic_t fscache_n_relinquishes_null;
62737-extern atomic_t fscache_n_relinquishes_waitcrt;
62738-extern atomic_t fscache_n_relinquishes_retire;
62739+extern atomic_unchecked_t fscache_n_relinquishes;
62740+extern atomic_unchecked_t fscache_n_relinquishes_null;
62741+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62742+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62743
62744-extern atomic_t fscache_n_cookie_index;
62745-extern atomic_t fscache_n_cookie_data;
62746-extern atomic_t fscache_n_cookie_special;
62747+extern atomic_unchecked_t fscache_n_cookie_index;
62748+extern atomic_unchecked_t fscache_n_cookie_data;
62749+extern atomic_unchecked_t fscache_n_cookie_special;
62750
62751-extern atomic_t fscache_n_object_alloc;
62752-extern atomic_t fscache_n_object_no_alloc;
62753-extern atomic_t fscache_n_object_lookups;
62754-extern atomic_t fscache_n_object_lookups_negative;
62755-extern atomic_t fscache_n_object_lookups_positive;
62756-extern atomic_t fscache_n_object_lookups_timed_out;
62757-extern atomic_t fscache_n_object_created;
62758-extern atomic_t fscache_n_object_avail;
62759-extern atomic_t fscache_n_object_dead;
62760+extern atomic_unchecked_t fscache_n_object_alloc;
62761+extern atomic_unchecked_t fscache_n_object_no_alloc;
62762+extern atomic_unchecked_t fscache_n_object_lookups;
62763+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62764+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62765+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62766+extern atomic_unchecked_t fscache_n_object_created;
62767+extern atomic_unchecked_t fscache_n_object_avail;
62768+extern atomic_unchecked_t fscache_n_object_dead;
62769
62770-extern atomic_t fscache_n_checkaux_none;
62771-extern atomic_t fscache_n_checkaux_okay;
62772-extern atomic_t fscache_n_checkaux_update;
62773-extern atomic_t fscache_n_checkaux_obsolete;
62774+extern atomic_unchecked_t fscache_n_checkaux_none;
62775+extern atomic_unchecked_t fscache_n_checkaux_okay;
62776+extern atomic_unchecked_t fscache_n_checkaux_update;
62777+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62778
62779 extern atomic_t fscache_n_cop_alloc_object;
62780 extern atomic_t fscache_n_cop_lookup_object;
62781@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62782 atomic_inc(stat);
62783 }
62784
62785+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62786+{
62787+ atomic_inc_unchecked(stat);
62788+}
62789+
62790 static inline void fscache_stat_d(atomic_t *stat)
62791 {
62792 atomic_dec(stat);
62793@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62794
62795 #define __fscache_stat(stat) (NULL)
62796 #define fscache_stat(stat) do {} while (0)
62797+#define fscache_stat_unchecked(stat) do {} while (0)
62798 #define fscache_stat_d(stat) do {} while (0)
62799 #endif
62800
62801diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62802index da032da..0076ce7 100644
62803--- a/fs/fscache/object.c
62804+++ b/fs/fscache/object.c
62805@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62806 _debug("LOOKUP \"%s\" in \"%s\"",
62807 cookie->def->name, object->cache->tag->name);
62808
62809- fscache_stat(&fscache_n_object_lookups);
62810+ fscache_stat_unchecked(&fscache_n_object_lookups);
62811 fscache_stat(&fscache_n_cop_lookup_object);
62812 ret = object->cache->ops->lookup_object(object);
62813 fscache_stat_d(&fscache_n_cop_lookup_object);
62814@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62815 if (ret == -ETIMEDOUT) {
62816 /* probably stuck behind another object, so move this one to
62817 * the back of the queue */
62818- fscache_stat(&fscache_n_object_lookups_timed_out);
62819+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62820 _leave(" [timeout]");
62821 return NO_TRANSIT;
62822 }
62823@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62824 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62825
62826 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62827- fscache_stat(&fscache_n_object_lookups_negative);
62828+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62829
62830 /* Allow write requests to begin stacking up and read requests to begin
62831 * returning ENODATA.
62832@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62833 /* if we were still looking up, then we must have a positive lookup
62834 * result, in which case there may be data available */
62835 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62836- fscache_stat(&fscache_n_object_lookups_positive);
62837+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62838
62839 /* We do (presumably) have data */
62840 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62841@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62842 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62843 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62844 } else {
62845- fscache_stat(&fscache_n_object_created);
62846+ fscache_stat_unchecked(&fscache_n_object_created);
62847 }
62848
62849 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62850@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62851 fscache_stat_d(&fscache_n_cop_lookup_complete);
62852
62853 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62854- fscache_stat(&fscache_n_object_avail);
62855+ fscache_stat_unchecked(&fscache_n_object_avail);
62856
62857 _leave("");
62858 return transit_to(JUMPSTART_DEPS);
62859@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62860
62861 /* this just shifts the object release to the work processor */
62862 fscache_put_object(object);
62863- fscache_stat(&fscache_n_object_dead);
62864+ fscache_stat_unchecked(&fscache_n_object_dead);
62865
62866 _leave("");
62867 return transit_to(OBJECT_DEAD);
62868@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62869 enum fscache_checkaux result;
62870
62871 if (!object->cookie->def->check_aux) {
62872- fscache_stat(&fscache_n_checkaux_none);
62873+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62874 return FSCACHE_CHECKAUX_OKAY;
62875 }
62876
62877@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62878 switch (result) {
62879 /* entry okay as is */
62880 case FSCACHE_CHECKAUX_OKAY:
62881- fscache_stat(&fscache_n_checkaux_okay);
62882+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62883 break;
62884
62885 /* entry requires update */
62886 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62887- fscache_stat(&fscache_n_checkaux_update);
62888+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62889 break;
62890
62891 /* entry requires deletion */
62892 case FSCACHE_CHECKAUX_OBSOLETE:
62893- fscache_stat(&fscache_n_checkaux_obsolete);
62894+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62895 break;
62896
62897 default:
62898@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62899 {
62900 const struct fscache_state *s;
62901
62902- fscache_stat(&fscache_n_invalidates_run);
62903+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62904 fscache_stat(&fscache_n_cop_invalidate_object);
62905 s = _fscache_invalidate_object(object, event);
62906 fscache_stat_d(&fscache_n_cop_invalidate_object);
62907@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62908 {
62909 _enter("{OBJ%x},%d", object->debug_id, event);
62910
62911- fscache_stat(&fscache_n_updates_run);
62912+ fscache_stat_unchecked(&fscache_n_updates_run);
62913 fscache_stat(&fscache_n_cop_update_object);
62914 object->cache->ops->update_object(object);
62915 fscache_stat_d(&fscache_n_cop_update_object);
62916diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62917index e7b87a0..a85d47a 100644
62918--- a/fs/fscache/operation.c
62919+++ b/fs/fscache/operation.c
62920@@ -17,7 +17,7 @@
62921 #include <linux/slab.h>
62922 #include "internal.h"
62923
62924-atomic_t fscache_op_debug_id;
62925+atomic_unchecked_t fscache_op_debug_id;
62926 EXPORT_SYMBOL(fscache_op_debug_id);
62927
62928 /**
62929@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62930 ASSERTCMP(atomic_read(&op->usage), >, 0);
62931 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62932
62933- fscache_stat(&fscache_n_op_enqueue);
62934+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62935 switch (op->flags & FSCACHE_OP_TYPE) {
62936 case FSCACHE_OP_ASYNC:
62937 _debug("queue async");
62938@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62939 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62940 if (op->processor)
62941 fscache_enqueue_operation(op);
62942- fscache_stat(&fscache_n_op_run);
62943+ fscache_stat_unchecked(&fscache_n_op_run);
62944 }
62945
62946 /*
62947@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62948 if (object->n_in_progress > 0) {
62949 atomic_inc(&op->usage);
62950 list_add_tail(&op->pend_link, &object->pending_ops);
62951- fscache_stat(&fscache_n_op_pend);
62952+ fscache_stat_unchecked(&fscache_n_op_pend);
62953 } else if (!list_empty(&object->pending_ops)) {
62954 atomic_inc(&op->usage);
62955 list_add_tail(&op->pend_link, &object->pending_ops);
62956- fscache_stat(&fscache_n_op_pend);
62957+ fscache_stat_unchecked(&fscache_n_op_pend);
62958 fscache_start_operations(object);
62959 } else {
62960 ASSERTCMP(object->n_in_progress, ==, 0);
62961@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62962 object->n_exclusive++; /* reads and writes must wait */
62963 atomic_inc(&op->usage);
62964 list_add_tail(&op->pend_link, &object->pending_ops);
62965- fscache_stat(&fscache_n_op_pend);
62966+ fscache_stat_unchecked(&fscache_n_op_pend);
62967 ret = 0;
62968 } else {
62969 /* If we're in any other state, there must have been an I/O
62970@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62971 if (object->n_exclusive > 0) {
62972 atomic_inc(&op->usage);
62973 list_add_tail(&op->pend_link, &object->pending_ops);
62974- fscache_stat(&fscache_n_op_pend);
62975+ fscache_stat_unchecked(&fscache_n_op_pend);
62976 } else if (!list_empty(&object->pending_ops)) {
62977 atomic_inc(&op->usage);
62978 list_add_tail(&op->pend_link, &object->pending_ops);
62979- fscache_stat(&fscache_n_op_pend);
62980+ fscache_stat_unchecked(&fscache_n_op_pend);
62981 fscache_start_operations(object);
62982 } else {
62983 ASSERTCMP(object->n_exclusive, ==, 0);
62984@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62985 object->n_ops++;
62986 atomic_inc(&op->usage);
62987 list_add_tail(&op->pend_link, &object->pending_ops);
62988- fscache_stat(&fscache_n_op_pend);
62989+ fscache_stat_unchecked(&fscache_n_op_pend);
62990 ret = 0;
62991 } else if (fscache_object_is_dying(object)) {
62992- fscache_stat(&fscache_n_op_rejected);
62993+ fscache_stat_unchecked(&fscache_n_op_rejected);
62994 op->state = FSCACHE_OP_ST_CANCELLED;
62995 ret = -ENOBUFS;
62996 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62997@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62998 ret = -EBUSY;
62999 if (op->state == FSCACHE_OP_ST_PENDING) {
63000 ASSERT(!list_empty(&op->pend_link));
63001- fscache_stat(&fscache_n_op_cancelled);
63002+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63003 list_del_init(&op->pend_link);
63004 if (do_cancel)
63005 do_cancel(op);
63006@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
63007 while (!list_empty(&object->pending_ops)) {
63008 op = list_entry(object->pending_ops.next,
63009 struct fscache_operation, pend_link);
63010- fscache_stat(&fscache_n_op_cancelled);
63011+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63012 list_del_init(&op->pend_link);
63013
63014 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
63015@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
63016 op->state, ==, FSCACHE_OP_ST_CANCELLED);
63017 op->state = FSCACHE_OP_ST_DEAD;
63018
63019- fscache_stat(&fscache_n_op_release);
63020+ fscache_stat_unchecked(&fscache_n_op_release);
63021
63022 if (op->release) {
63023 op->release(op);
63024@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
63025 * lock, and defer it otherwise */
63026 if (!spin_trylock(&object->lock)) {
63027 _debug("defer put");
63028- fscache_stat(&fscache_n_op_deferred_release);
63029+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
63030
63031 cache = object->cache;
63032 spin_lock(&cache->op_gc_list_lock);
63033@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
63034
63035 _debug("GC DEFERRED REL OBJ%x OP%x",
63036 object->debug_id, op->debug_id);
63037- fscache_stat(&fscache_n_op_gc);
63038+ fscache_stat_unchecked(&fscache_n_op_gc);
63039
63040 ASSERTCMP(atomic_read(&op->usage), ==, 0);
63041 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
63042diff --git a/fs/fscache/page.c b/fs/fscache/page.c
63043index de33b3f..8be4d29 100644
63044--- a/fs/fscache/page.c
63045+++ b/fs/fscache/page.c
63046@@ -74,7 +74,7 @@ try_again:
63047 val = radix_tree_lookup(&cookie->stores, page->index);
63048 if (!val) {
63049 rcu_read_unlock();
63050- fscache_stat(&fscache_n_store_vmscan_not_storing);
63051+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
63052 __fscache_uncache_page(cookie, page);
63053 return true;
63054 }
63055@@ -104,11 +104,11 @@ try_again:
63056 spin_unlock(&cookie->stores_lock);
63057
63058 if (xpage) {
63059- fscache_stat(&fscache_n_store_vmscan_cancelled);
63060- fscache_stat(&fscache_n_store_radix_deletes);
63061+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
63062+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63063 ASSERTCMP(xpage, ==, page);
63064 } else {
63065- fscache_stat(&fscache_n_store_vmscan_gone);
63066+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63067 }
63068
63069 wake_up_bit(&cookie->flags, 0);
63070@@ -123,11 +123,11 @@ page_busy:
63071 * sleeping on memory allocation, so we may need to impose a timeout
63072 * too. */
63073 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63074- fscache_stat(&fscache_n_store_vmscan_busy);
63075+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63076 return false;
63077 }
63078
63079- fscache_stat(&fscache_n_store_vmscan_wait);
63080+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63081 if (!release_page_wait_timeout(cookie, page))
63082 _debug("fscache writeout timeout page: %p{%lx}",
63083 page, page->index);
63084@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63085 FSCACHE_COOKIE_STORING_TAG);
63086 if (!radix_tree_tag_get(&cookie->stores, page->index,
63087 FSCACHE_COOKIE_PENDING_TAG)) {
63088- fscache_stat(&fscache_n_store_radix_deletes);
63089+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63090 xpage = radix_tree_delete(&cookie->stores, page->index);
63091 }
63092 spin_unlock(&cookie->stores_lock);
63093@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63094
63095 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63096
63097- fscache_stat(&fscache_n_attr_changed_calls);
63098+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63099
63100 if (fscache_object_is_active(object)) {
63101 fscache_stat(&fscache_n_cop_attr_changed);
63102@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63103
63104 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63105
63106- fscache_stat(&fscache_n_attr_changed);
63107+ fscache_stat_unchecked(&fscache_n_attr_changed);
63108
63109 op = kzalloc(sizeof(*op), GFP_KERNEL);
63110 if (!op) {
63111- fscache_stat(&fscache_n_attr_changed_nomem);
63112+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
63113 _leave(" = -ENOMEM");
63114 return -ENOMEM;
63115 }
63116@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63117 if (fscache_submit_exclusive_op(object, op) < 0)
63118 goto nobufs_dec;
63119 spin_unlock(&cookie->lock);
63120- fscache_stat(&fscache_n_attr_changed_ok);
63121+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
63122 fscache_put_operation(op);
63123 _leave(" = 0");
63124 return 0;
63125@@ -242,7 +242,7 @@ nobufs:
63126 kfree(op);
63127 if (wake_cookie)
63128 __fscache_wake_unused_cookie(cookie);
63129- fscache_stat(&fscache_n_attr_changed_nobufs);
63130+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
63131 _leave(" = %d", -ENOBUFS);
63132 return -ENOBUFS;
63133 }
63134@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
63135 /* allocate a retrieval operation and attempt to submit it */
63136 op = kzalloc(sizeof(*op), GFP_NOIO);
63137 if (!op) {
63138- fscache_stat(&fscache_n_retrievals_nomem);
63139+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63140 return NULL;
63141 }
63142
63143@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
63144 return 0;
63145 }
63146
63147- fscache_stat(&fscache_n_retrievals_wait);
63148+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
63149
63150 jif = jiffies;
63151 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
63152 TASK_INTERRUPTIBLE) != 0) {
63153- fscache_stat(&fscache_n_retrievals_intr);
63154+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63155 _leave(" = -ERESTARTSYS");
63156 return -ERESTARTSYS;
63157 }
63158@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
63159 */
63160 int fscache_wait_for_operation_activation(struct fscache_object *object,
63161 struct fscache_operation *op,
63162- atomic_t *stat_op_waits,
63163- atomic_t *stat_object_dead,
63164+ atomic_unchecked_t *stat_op_waits,
63165+ atomic_unchecked_t *stat_object_dead,
63166 void (*do_cancel)(struct fscache_operation *))
63167 {
63168 int ret;
63169@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63170
63171 _debug(">>> WT");
63172 if (stat_op_waits)
63173- fscache_stat(stat_op_waits);
63174+ fscache_stat_unchecked(stat_op_waits);
63175 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
63176 TASK_INTERRUPTIBLE) != 0) {
63177 ret = fscache_cancel_op(op, do_cancel);
63178@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63179 check_if_dead:
63180 if (op->state == FSCACHE_OP_ST_CANCELLED) {
63181 if (stat_object_dead)
63182- fscache_stat(stat_object_dead);
63183+ fscache_stat_unchecked(stat_object_dead);
63184 _leave(" = -ENOBUFS [cancelled]");
63185 return -ENOBUFS;
63186 }
63187@@ -381,7 +381,7 @@ check_if_dead:
63188 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63189 fscache_cancel_op(op, do_cancel);
63190 if (stat_object_dead)
63191- fscache_stat(stat_object_dead);
63192+ fscache_stat_unchecked(stat_object_dead);
63193 return -ENOBUFS;
63194 }
63195 return 0;
63196@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63197
63198 _enter("%p,%p,,,", cookie, page);
63199
63200- fscache_stat(&fscache_n_retrievals);
63201+ fscache_stat_unchecked(&fscache_n_retrievals);
63202
63203 if (hlist_empty(&cookie->backing_objects))
63204 goto nobufs;
63205@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63206 goto nobufs_unlock_dec;
63207 spin_unlock(&cookie->lock);
63208
63209- fscache_stat(&fscache_n_retrieval_ops);
63210+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63211
63212 /* pin the netfs read context in case we need to do the actual netfs
63213 * read because we've encountered a cache read failure */
63214@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63215
63216 error:
63217 if (ret == -ENOMEM)
63218- fscache_stat(&fscache_n_retrievals_nomem);
63219+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63220 else if (ret == -ERESTARTSYS)
63221- fscache_stat(&fscache_n_retrievals_intr);
63222+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63223 else if (ret == -ENODATA)
63224- fscache_stat(&fscache_n_retrievals_nodata);
63225+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63226 else if (ret < 0)
63227- fscache_stat(&fscache_n_retrievals_nobufs);
63228+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63229 else
63230- fscache_stat(&fscache_n_retrievals_ok);
63231+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63232
63233 fscache_put_retrieval(op);
63234 _leave(" = %d", ret);
63235@@ -505,7 +505,7 @@ nobufs_unlock:
63236 __fscache_wake_unused_cookie(cookie);
63237 kfree(op);
63238 nobufs:
63239- fscache_stat(&fscache_n_retrievals_nobufs);
63240+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63241 _leave(" = -ENOBUFS");
63242 return -ENOBUFS;
63243 }
63244@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63245
63246 _enter("%p,,%d,,,", cookie, *nr_pages);
63247
63248- fscache_stat(&fscache_n_retrievals);
63249+ fscache_stat_unchecked(&fscache_n_retrievals);
63250
63251 if (hlist_empty(&cookie->backing_objects))
63252 goto nobufs;
63253@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63254 goto nobufs_unlock_dec;
63255 spin_unlock(&cookie->lock);
63256
63257- fscache_stat(&fscache_n_retrieval_ops);
63258+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63259
63260 /* pin the netfs read context in case we need to do the actual netfs
63261 * read because we've encountered a cache read failure */
63262@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63263
63264 error:
63265 if (ret == -ENOMEM)
63266- fscache_stat(&fscache_n_retrievals_nomem);
63267+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63268 else if (ret == -ERESTARTSYS)
63269- fscache_stat(&fscache_n_retrievals_intr);
63270+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63271 else if (ret == -ENODATA)
63272- fscache_stat(&fscache_n_retrievals_nodata);
63273+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63274 else if (ret < 0)
63275- fscache_stat(&fscache_n_retrievals_nobufs);
63276+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63277 else
63278- fscache_stat(&fscache_n_retrievals_ok);
63279+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63280
63281 fscache_put_retrieval(op);
63282 _leave(" = %d", ret);
63283@@ -636,7 +636,7 @@ nobufs_unlock:
63284 if (wake_cookie)
63285 __fscache_wake_unused_cookie(cookie);
63286 nobufs:
63287- fscache_stat(&fscache_n_retrievals_nobufs);
63288+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63289 _leave(" = -ENOBUFS");
63290 return -ENOBUFS;
63291 }
63292@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63293
63294 _enter("%p,%p,,,", cookie, page);
63295
63296- fscache_stat(&fscache_n_allocs);
63297+ fscache_stat_unchecked(&fscache_n_allocs);
63298
63299 if (hlist_empty(&cookie->backing_objects))
63300 goto nobufs;
63301@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63302 goto nobufs_unlock_dec;
63303 spin_unlock(&cookie->lock);
63304
63305- fscache_stat(&fscache_n_alloc_ops);
63306+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63307
63308 ret = fscache_wait_for_operation_activation(
63309 object, &op->op,
63310@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63311
63312 error:
63313 if (ret == -ERESTARTSYS)
63314- fscache_stat(&fscache_n_allocs_intr);
63315+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63316 else if (ret < 0)
63317- fscache_stat(&fscache_n_allocs_nobufs);
63318+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63319 else
63320- fscache_stat(&fscache_n_allocs_ok);
63321+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63322
63323 fscache_put_retrieval(op);
63324 _leave(" = %d", ret);
63325@@ -730,7 +730,7 @@ nobufs_unlock:
63326 if (wake_cookie)
63327 __fscache_wake_unused_cookie(cookie);
63328 nobufs:
63329- fscache_stat(&fscache_n_allocs_nobufs);
63330+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63331 _leave(" = -ENOBUFS");
63332 return -ENOBUFS;
63333 }
63334@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63335
63336 spin_lock(&cookie->stores_lock);
63337
63338- fscache_stat(&fscache_n_store_calls);
63339+ fscache_stat_unchecked(&fscache_n_store_calls);
63340
63341 /* find a page to store */
63342 page = NULL;
63343@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63344 page = results[0];
63345 _debug("gang %d [%lx]", n, page->index);
63346 if (page->index > op->store_limit) {
63347- fscache_stat(&fscache_n_store_pages_over_limit);
63348+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63349 goto superseded;
63350 }
63351
63352@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63353 spin_unlock(&cookie->stores_lock);
63354 spin_unlock(&object->lock);
63355
63356- fscache_stat(&fscache_n_store_pages);
63357+ fscache_stat_unchecked(&fscache_n_store_pages);
63358 fscache_stat(&fscache_n_cop_write_page);
63359 ret = object->cache->ops->write_page(op, page);
63360 fscache_stat_d(&fscache_n_cop_write_page);
63361@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63362 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63363 ASSERT(PageFsCache(page));
63364
63365- fscache_stat(&fscache_n_stores);
63366+ fscache_stat_unchecked(&fscache_n_stores);
63367
63368 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63369 _leave(" = -ENOBUFS [invalidating]");
63370@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63371 spin_unlock(&cookie->stores_lock);
63372 spin_unlock(&object->lock);
63373
63374- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63375+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63376 op->store_limit = object->store_limit;
63377
63378 __fscache_use_cookie(cookie);
63379@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63380
63381 spin_unlock(&cookie->lock);
63382 radix_tree_preload_end();
63383- fscache_stat(&fscache_n_store_ops);
63384- fscache_stat(&fscache_n_stores_ok);
63385+ fscache_stat_unchecked(&fscache_n_store_ops);
63386+ fscache_stat_unchecked(&fscache_n_stores_ok);
63387
63388 /* the work queue now carries its own ref on the object */
63389 fscache_put_operation(&op->op);
63390@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63391 return 0;
63392
63393 already_queued:
63394- fscache_stat(&fscache_n_stores_again);
63395+ fscache_stat_unchecked(&fscache_n_stores_again);
63396 already_pending:
63397 spin_unlock(&cookie->stores_lock);
63398 spin_unlock(&object->lock);
63399 spin_unlock(&cookie->lock);
63400 radix_tree_preload_end();
63401 kfree(op);
63402- fscache_stat(&fscache_n_stores_ok);
63403+ fscache_stat_unchecked(&fscache_n_stores_ok);
63404 _leave(" = 0");
63405 return 0;
63406
63407@@ -1039,14 +1039,14 @@ nobufs:
63408 kfree(op);
63409 if (wake_cookie)
63410 __fscache_wake_unused_cookie(cookie);
63411- fscache_stat(&fscache_n_stores_nobufs);
63412+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63413 _leave(" = -ENOBUFS");
63414 return -ENOBUFS;
63415
63416 nomem_free:
63417 kfree(op);
63418 nomem:
63419- fscache_stat(&fscache_n_stores_oom);
63420+ fscache_stat_unchecked(&fscache_n_stores_oom);
63421 _leave(" = -ENOMEM");
63422 return -ENOMEM;
63423 }
63424@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63425 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63426 ASSERTCMP(page, !=, NULL);
63427
63428- fscache_stat(&fscache_n_uncaches);
63429+ fscache_stat_unchecked(&fscache_n_uncaches);
63430
63431 /* cache withdrawal may beat us to it */
63432 if (!PageFsCache(page))
63433@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63434 struct fscache_cookie *cookie = op->op.object->cookie;
63435
63436 #ifdef CONFIG_FSCACHE_STATS
63437- atomic_inc(&fscache_n_marks);
63438+ atomic_inc_unchecked(&fscache_n_marks);
63439 #endif
63440
63441 _debug("- mark %p{%lx}", page, page->index);
63442diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63443index 40d13c7..ddf52b9 100644
63444--- a/fs/fscache/stats.c
63445+++ b/fs/fscache/stats.c
63446@@ -18,99 +18,99 @@
63447 /*
63448 * operation counters
63449 */
63450-atomic_t fscache_n_op_pend;
63451-atomic_t fscache_n_op_run;
63452-atomic_t fscache_n_op_enqueue;
63453-atomic_t fscache_n_op_requeue;
63454-atomic_t fscache_n_op_deferred_release;
63455-atomic_t fscache_n_op_release;
63456-atomic_t fscache_n_op_gc;
63457-atomic_t fscache_n_op_cancelled;
63458-atomic_t fscache_n_op_rejected;
63459+atomic_unchecked_t fscache_n_op_pend;
63460+atomic_unchecked_t fscache_n_op_run;
63461+atomic_unchecked_t fscache_n_op_enqueue;
63462+atomic_unchecked_t fscache_n_op_requeue;
63463+atomic_unchecked_t fscache_n_op_deferred_release;
63464+atomic_unchecked_t fscache_n_op_release;
63465+atomic_unchecked_t fscache_n_op_gc;
63466+atomic_unchecked_t fscache_n_op_cancelled;
63467+atomic_unchecked_t fscache_n_op_rejected;
63468
63469-atomic_t fscache_n_attr_changed;
63470-atomic_t fscache_n_attr_changed_ok;
63471-atomic_t fscache_n_attr_changed_nobufs;
63472-atomic_t fscache_n_attr_changed_nomem;
63473-atomic_t fscache_n_attr_changed_calls;
63474+atomic_unchecked_t fscache_n_attr_changed;
63475+atomic_unchecked_t fscache_n_attr_changed_ok;
63476+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63477+atomic_unchecked_t fscache_n_attr_changed_nomem;
63478+atomic_unchecked_t fscache_n_attr_changed_calls;
63479
63480-atomic_t fscache_n_allocs;
63481-atomic_t fscache_n_allocs_ok;
63482-atomic_t fscache_n_allocs_wait;
63483-atomic_t fscache_n_allocs_nobufs;
63484-atomic_t fscache_n_allocs_intr;
63485-atomic_t fscache_n_allocs_object_dead;
63486-atomic_t fscache_n_alloc_ops;
63487-atomic_t fscache_n_alloc_op_waits;
63488+atomic_unchecked_t fscache_n_allocs;
63489+atomic_unchecked_t fscache_n_allocs_ok;
63490+atomic_unchecked_t fscache_n_allocs_wait;
63491+atomic_unchecked_t fscache_n_allocs_nobufs;
63492+atomic_unchecked_t fscache_n_allocs_intr;
63493+atomic_unchecked_t fscache_n_allocs_object_dead;
63494+atomic_unchecked_t fscache_n_alloc_ops;
63495+atomic_unchecked_t fscache_n_alloc_op_waits;
63496
63497-atomic_t fscache_n_retrievals;
63498-atomic_t fscache_n_retrievals_ok;
63499-atomic_t fscache_n_retrievals_wait;
63500-atomic_t fscache_n_retrievals_nodata;
63501-atomic_t fscache_n_retrievals_nobufs;
63502-atomic_t fscache_n_retrievals_intr;
63503-atomic_t fscache_n_retrievals_nomem;
63504-atomic_t fscache_n_retrievals_object_dead;
63505-atomic_t fscache_n_retrieval_ops;
63506-atomic_t fscache_n_retrieval_op_waits;
63507+atomic_unchecked_t fscache_n_retrievals;
63508+atomic_unchecked_t fscache_n_retrievals_ok;
63509+atomic_unchecked_t fscache_n_retrievals_wait;
63510+atomic_unchecked_t fscache_n_retrievals_nodata;
63511+atomic_unchecked_t fscache_n_retrievals_nobufs;
63512+atomic_unchecked_t fscache_n_retrievals_intr;
63513+atomic_unchecked_t fscache_n_retrievals_nomem;
63514+atomic_unchecked_t fscache_n_retrievals_object_dead;
63515+atomic_unchecked_t fscache_n_retrieval_ops;
63516+atomic_unchecked_t fscache_n_retrieval_op_waits;
63517
63518-atomic_t fscache_n_stores;
63519-atomic_t fscache_n_stores_ok;
63520-atomic_t fscache_n_stores_again;
63521-atomic_t fscache_n_stores_nobufs;
63522-atomic_t fscache_n_stores_oom;
63523-atomic_t fscache_n_store_ops;
63524-atomic_t fscache_n_store_calls;
63525-atomic_t fscache_n_store_pages;
63526-atomic_t fscache_n_store_radix_deletes;
63527-atomic_t fscache_n_store_pages_over_limit;
63528+atomic_unchecked_t fscache_n_stores;
63529+atomic_unchecked_t fscache_n_stores_ok;
63530+atomic_unchecked_t fscache_n_stores_again;
63531+atomic_unchecked_t fscache_n_stores_nobufs;
63532+atomic_unchecked_t fscache_n_stores_oom;
63533+atomic_unchecked_t fscache_n_store_ops;
63534+atomic_unchecked_t fscache_n_store_calls;
63535+atomic_unchecked_t fscache_n_store_pages;
63536+atomic_unchecked_t fscache_n_store_radix_deletes;
63537+atomic_unchecked_t fscache_n_store_pages_over_limit;
63538
63539-atomic_t fscache_n_store_vmscan_not_storing;
63540-atomic_t fscache_n_store_vmscan_gone;
63541-atomic_t fscache_n_store_vmscan_busy;
63542-atomic_t fscache_n_store_vmscan_cancelled;
63543-atomic_t fscache_n_store_vmscan_wait;
63544+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63545+atomic_unchecked_t fscache_n_store_vmscan_gone;
63546+atomic_unchecked_t fscache_n_store_vmscan_busy;
63547+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63548+atomic_unchecked_t fscache_n_store_vmscan_wait;
63549
63550-atomic_t fscache_n_marks;
63551-atomic_t fscache_n_uncaches;
63552+atomic_unchecked_t fscache_n_marks;
63553+atomic_unchecked_t fscache_n_uncaches;
63554
63555-atomic_t fscache_n_acquires;
63556-atomic_t fscache_n_acquires_null;
63557-atomic_t fscache_n_acquires_no_cache;
63558-atomic_t fscache_n_acquires_ok;
63559-atomic_t fscache_n_acquires_nobufs;
63560-atomic_t fscache_n_acquires_oom;
63561+atomic_unchecked_t fscache_n_acquires;
63562+atomic_unchecked_t fscache_n_acquires_null;
63563+atomic_unchecked_t fscache_n_acquires_no_cache;
63564+atomic_unchecked_t fscache_n_acquires_ok;
63565+atomic_unchecked_t fscache_n_acquires_nobufs;
63566+atomic_unchecked_t fscache_n_acquires_oom;
63567
63568-atomic_t fscache_n_invalidates;
63569-atomic_t fscache_n_invalidates_run;
63570+atomic_unchecked_t fscache_n_invalidates;
63571+atomic_unchecked_t fscache_n_invalidates_run;
63572
63573-atomic_t fscache_n_updates;
63574-atomic_t fscache_n_updates_null;
63575-atomic_t fscache_n_updates_run;
63576+atomic_unchecked_t fscache_n_updates;
63577+atomic_unchecked_t fscache_n_updates_null;
63578+atomic_unchecked_t fscache_n_updates_run;
63579
63580-atomic_t fscache_n_relinquishes;
63581-atomic_t fscache_n_relinquishes_null;
63582-atomic_t fscache_n_relinquishes_waitcrt;
63583-atomic_t fscache_n_relinquishes_retire;
63584+atomic_unchecked_t fscache_n_relinquishes;
63585+atomic_unchecked_t fscache_n_relinquishes_null;
63586+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63587+atomic_unchecked_t fscache_n_relinquishes_retire;
63588
63589-atomic_t fscache_n_cookie_index;
63590-atomic_t fscache_n_cookie_data;
63591-atomic_t fscache_n_cookie_special;
63592+atomic_unchecked_t fscache_n_cookie_index;
63593+atomic_unchecked_t fscache_n_cookie_data;
63594+atomic_unchecked_t fscache_n_cookie_special;
63595
63596-atomic_t fscache_n_object_alloc;
63597-atomic_t fscache_n_object_no_alloc;
63598-atomic_t fscache_n_object_lookups;
63599-atomic_t fscache_n_object_lookups_negative;
63600-atomic_t fscache_n_object_lookups_positive;
63601-atomic_t fscache_n_object_lookups_timed_out;
63602-atomic_t fscache_n_object_created;
63603-atomic_t fscache_n_object_avail;
63604-atomic_t fscache_n_object_dead;
63605+atomic_unchecked_t fscache_n_object_alloc;
63606+atomic_unchecked_t fscache_n_object_no_alloc;
63607+atomic_unchecked_t fscache_n_object_lookups;
63608+atomic_unchecked_t fscache_n_object_lookups_negative;
63609+atomic_unchecked_t fscache_n_object_lookups_positive;
63610+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63611+atomic_unchecked_t fscache_n_object_created;
63612+atomic_unchecked_t fscache_n_object_avail;
63613+atomic_unchecked_t fscache_n_object_dead;
63614
63615-atomic_t fscache_n_checkaux_none;
63616-atomic_t fscache_n_checkaux_okay;
63617-atomic_t fscache_n_checkaux_update;
63618-atomic_t fscache_n_checkaux_obsolete;
63619+atomic_unchecked_t fscache_n_checkaux_none;
63620+atomic_unchecked_t fscache_n_checkaux_okay;
63621+atomic_unchecked_t fscache_n_checkaux_update;
63622+atomic_unchecked_t fscache_n_checkaux_obsolete;
63623
63624 atomic_t fscache_n_cop_alloc_object;
63625 atomic_t fscache_n_cop_lookup_object;
63626@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63627 seq_puts(m, "FS-Cache statistics\n");
63628
63629 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63630- atomic_read(&fscache_n_cookie_index),
63631- atomic_read(&fscache_n_cookie_data),
63632- atomic_read(&fscache_n_cookie_special));
63633+ atomic_read_unchecked(&fscache_n_cookie_index),
63634+ atomic_read_unchecked(&fscache_n_cookie_data),
63635+ atomic_read_unchecked(&fscache_n_cookie_special));
63636
63637 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63638- atomic_read(&fscache_n_object_alloc),
63639- atomic_read(&fscache_n_object_no_alloc),
63640- atomic_read(&fscache_n_object_avail),
63641- atomic_read(&fscache_n_object_dead));
63642+ atomic_read_unchecked(&fscache_n_object_alloc),
63643+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63644+ atomic_read_unchecked(&fscache_n_object_avail),
63645+ atomic_read_unchecked(&fscache_n_object_dead));
63646 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63647- atomic_read(&fscache_n_checkaux_none),
63648- atomic_read(&fscache_n_checkaux_okay),
63649- atomic_read(&fscache_n_checkaux_update),
63650- atomic_read(&fscache_n_checkaux_obsolete));
63651+ atomic_read_unchecked(&fscache_n_checkaux_none),
63652+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63653+ atomic_read_unchecked(&fscache_n_checkaux_update),
63654+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63655
63656 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63657- atomic_read(&fscache_n_marks),
63658- atomic_read(&fscache_n_uncaches));
63659+ atomic_read_unchecked(&fscache_n_marks),
63660+ atomic_read_unchecked(&fscache_n_uncaches));
63661
63662 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63663 " oom=%u\n",
63664- atomic_read(&fscache_n_acquires),
63665- atomic_read(&fscache_n_acquires_null),
63666- atomic_read(&fscache_n_acquires_no_cache),
63667- atomic_read(&fscache_n_acquires_ok),
63668- atomic_read(&fscache_n_acquires_nobufs),
63669- atomic_read(&fscache_n_acquires_oom));
63670+ atomic_read_unchecked(&fscache_n_acquires),
63671+ atomic_read_unchecked(&fscache_n_acquires_null),
63672+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63673+ atomic_read_unchecked(&fscache_n_acquires_ok),
63674+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63675+ atomic_read_unchecked(&fscache_n_acquires_oom));
63676
63677 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63678- atomic_read(&fscache_n_object_lookups),
63679- atomic_read(&fscache_n_object_lookups_negative),
63680- atomic_read(&fscache_n_object_lookups_positive),
63681- atomic_read(&fscache_n_object_created),
63682- atomic_read(&fscache_n_object_lookups_timed_out));
63683+ atomic_read_unchecked(&fscache_n_object_lookups),
63684+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63685+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63686+ atomic_read_unchecked(&fscache_n_object_created),
63687+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63688
63689 seq_printf(m, "Invals : n=%u run=%u\n",
63690- atomic_read(&fscache_n_invalidates),
63691- atomic_read(&fscache_n_invalidates_run));
63692+ atomic_read_unchecked(&fscache_n_invalidates),
63693+ atomic_read_unchecked(&fscache_n_invalidates_run));
63694
63695 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63696- atomic_read(&fscache_n_updates),
63697- atomic_read(&fscache_n_updates_null),
63698- atomic_read(&fscache_n_updates_run));
63699+ atomic_read_unchecked(&fscache_n_updates),
63700+ atomic_read_unchecked(&fscache_n_updates_null),
63701+ atomic_read_unchecked(&fscache_n_updates_run));
63702
63703 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63704- atomic_read(&fscache_n_relinquishes),
63705- atomic_read(&fscache_n_relinquishes_null),
63706- atomic_read(&fscache_n_relinquishes_waitcrt),
63707- atomic_read(&fscache_n_relinquishes_retire));
63708+ atomic_read_unchecked(&fscache_n_relinquishes),
63709+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63710+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63711+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63712
63713 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63714- atomic_read(&fscache_n_attr_changed),
63715- atomic_read(&fscache_n_attr_changed_ok),
63716- atomic_read(&fscache_n_attr_changed_nobufs),
63717- atomic_read(&fscache_n_attr_changed_nomem),
63718- atomic_read(&fscache_n_attr_changed_calls));
63719+ atomic_read_unchecked(&fscache_n_attr_changed),
63720+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63721+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63722+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63723+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63724
63725 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63726- atomic_read(&fscache_n_allocs),
63727- atomic_read(&fscache_n_allocs_ok),
63728- atomic_read(&fscache_n_allocs_wait),
63729- atomic_read(&fscache_n_allocs_nobufs),
63730- atomic_read(&fscache_n_allocs_intr));
63731+ atomic_read_unchecked(&fscache_n_allocs),
63732+ atomic_read_unchecked(&fscache_n_allocs_ok),
63733+ atomic_read_unchecked(&fscache_n_allocs_wait),
63734+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63735+ atomic_read_unchecked(&fscache_n_allocs_intr));
63736 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63737- atomic_read(&fscache_n_alloc_ops),
63738- atomic_read(&fscache_n_alloc_op_waits),
63739- atomic_read(&fscache_n_allocs_object_dead));
63740+ atomic_read_unchecked(&fscache_n_alloc_ops),
63741+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63742+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63743
63744 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63745 " int=%u oom=%u\n",
63746- atomic_read(&fscache_n_retrievals),
63747- atomic_read(&fscache_n_retrievals_ok),
63748- atomic_read(&fscache_n_retrievals_wait),
63749- atomic_read(&fscache_n_retrievals_nodata),
63750- atomic_read(&fscache_n_retrievals_nobufs),
63751- atomic_read(&fscache_n_retrievals_intr),
63752- atomic_read(&fscache_n_retrievals_nomem));
63753+ atomic_read_unchecked(&fscache_n_retrievals),
63754+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63755+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63756+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63757+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63758+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63759+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63760 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63761- atomic_read(&fscache_n_retrieval_ops),
63762- atomic_read(&fscache_n_retrieval_op_waits),
63763- atomic_read(&fscache_n_retrievals_object_dead));
63764+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63765+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63766+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63767
63768 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63769- atomic_read(&fscache_n_stores),
63770- atomic_read(&fscache_n_stores_ok),
63771- atomic_read(&fscache_n_stores_again),
63772- atomic_read(&fscache_n_stores_nobufs),
63773- atomic_read(&fscache_n_stores_oom));
63774+ atomic_read_unchecked(&fscache_n_stores),
63775+ atomic_read_unchecked(&fscache_n_stores_ok),
63776+ atomic_read_unchecked(&fscache_n_stores_again),
63777+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63778+ atomic_read_unchecked(&fscache_n_stores_oom));
63779 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63780- atomic_read(&fscache_n_store_ops),
63781- atomic_read(&fscache_n_store_calls),
63782- atomic_read(&fscache_n_store_pages),
63783- atomic_read(&fscache_n_store_radix_deletes),
63784- atomic_read(&fscache_n_store_pages_over_limit));
63785+ atomic_read_unchecked(&fscache_n_store_ops),
63786+ atomic_read_unchecked(&fscache_n_store_calls),
63787+ atomic_read_unchecked(&fscache_n_store_pages),
63788+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63789+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63790
63791 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63792- atomic_read(&fscache_n_store_vmscan_not_storing),
63793- atomic_read(&fscache_n_store_vmscan_gone),
63794- atomic_read(&fscache_n_store_vmscan_busy),
63795- atomic_read(&fscache_n_store_vmscan_cancelled),
63796- atomic_read(&fscache_n_store_vmscan_wait));
63797+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63798+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63799+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63800+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63801+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63802
63803 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63804- atomic_read(&fscache_n_op_pend),
63805- atomic_read(&fscache_n_op_run),
63806- atomic_read(&fscache_n_op_enqueue),
63807- atomic_read(&fscache_n_op_cancelled),
63808- atomic_read(&fscache_n_op_rejected));
63809+ atomic_read_unchecked(&fscache_n_op_pend),
63810+ atomic_read_unchecked(&fscache_n_op_run),
63811+ atomic_read_unchecked(&fscache_n_op_enqueue),
63812+ atomic_read_unchecked(&fscache_n_op_cancelled),
63813+ atomic_read_unchecked(&fscache_n_op_rejected));
63814 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63815- atomic_read(&fscache_n_op_deferred_release),
63816- atomic_read(&fscache_n_op_release),
63817- atomic_read(&fscache_n_op_gc));
63818+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63819+ atomic_read_unchecked(&fscache_n_op_release),
63820+ atomic_read_unchecked(&fscache_n_op_gc));
63821
63822 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63823 atomic_read(&fscache_n_cop_alloc_object),
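
Note the asymmetry the stats.c hunk preserves: the fscache_n_cop_* counters stay as checked atomic_t, since they are paired inc/dec counts of in-flight cache operations where an overflow would indicate a real bug, while the pure event counters become unchecked and are read with atomic_read_unchecked(). A sketch of the read side, assuming the helper this patch adds alongside the unchecked increments:

/* plain volatile read; reads cannot overflow, so nothing to instrument */
static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return ACCESS_ONCE(v->counter);
}
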
63824diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63825index 28d0c7a..04816b7 100644
63826--- a/fs/fuse/cuse.c
63827+++ b/fs/fuse/cuse.c
63828@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63829 INIT_LIST_HEAD(&cuse_conntbl[i]);
63830
63831 /* inherit and extend fuse_dev_operations */
63832- cuse_channel_fops = fuse_dev_operations;
63833- cuse_channel_fops.owner = THIS_MODULE;
63834- cuse_channel_fops.open = cuse_channel_open;
63835- cuse_channel_fops.release = cuse_channel_release;
63836+ pax_open_kernel();
63837+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63838+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63839+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63840+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63841+ pax_close_kernel();
63842
63843 cuse_class = class_create(THIS_MODULE, "cuse");
63844 if (IS_ERR(cuse_class))
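
The cuse_init() change above is the standard pattern this patch applies wherever code writes to a structure that grsecurity constifies: cuse_channel_fops becomes read-only at runtime, so the copy and the per-field assignments are bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection. A simplified sketch of the x86 mechanism -- the real helpers also disable preemption and insert barriers:

static inline unsigned long pax_open_kernel(void)
{
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & ~X86_CR0_WP);   /* allow writes to read-only pages */
        return cr0;
}

static inline unsigned long pax_close_kernel(void)
{
        unsigned long cr0 = read_cr0() | X86_CR0_WP;

        write_cr0(cr0);                 /* re-arm write protection */
        return cr0;
}
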
63845diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63846index 39706c5..a803c71 100644
63847--- a/fs/fuse/dev.c
63848+++ b/fs/fuse/dev.c
63849@@ -1405,7 +1405,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63850 ret = 0;
63851 pipe_lock(pipe);
63852
63853- if (!pipe->readers) {
63854+ if (!atomic_read(&pipe->readers)) {
63855 send_sig(SIGPIPE, current, 0);
63856 if (!ret)
63857 ret = -EPIPE;
63858@@ -1434,7 +1434,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63859 page_nr++;
63860 ret += buf->len;
63861
63862- if (pipe->files)
63863+ if (atomic_read(&pipe->files))
63864 do_wakeup = 1;
63865 }
63866
63867diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63868index 1545b71..7fabe47 100644
63869--- a/fs/fuse/dir.c
63870+++ b/fs/fuse/dir.c
63871@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63872 return link;
63873 }
63874
63875-static void free_link(char *link)
63876+static void free_link(const char *link)
63877 {
63878 if (!IS_ERR(link))
63879 free_page((unsigned long) link);
63880diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
63881index f42dffb..4a4c435 100644
63882--- a/fs/gfs2/glock.c
63883+++ b/fs/gfs2/glock.c
63884@@ -385,9 +385,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
63885 if (held1 != held2) {
63886 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
63887 if (held2)
63888- gl->gl_lockref.count++;
63889+ __lockref_inc(&gl->gl_lockref);
63890 else
63891- gl->gl_lockref.count--;
63892+ __lockref_dec(&gl->gl_lockref);
63893 }
63894 if (held1 && held2 && list_empty(&gl->gl_holders))
63895 clear_bit(GLF_QUEUED, &gl->gl_flags);
63896@@ -614,9 +614,9 @@ out:
63897 out_sched:
63898 clear_bit(GLF_LOCK, &gl->gl_flags);
63899 smp_mb__after_atomic();
63900- gl->gl_lockref.count++;
63901+ __lockref_inc(&gl->gl_lockref);
63902 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
63903- gl->gl_lockref.count--;
63904+ __lockref_dec(&gl->gl_lockref);
63905 return;
63906
63907 out_unlock:
63908@@ -742,7 +742,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
63909 gl->gl_sbd = sdp;
63910 gl->gl_flags = 0;
63911 gl->gl_name = name;
63912- gl->gl_lockref.count = 1;
63913+ __lockref_set(&gl->gl_lockref, 1);
63914 gl->gl_state = LM_ST_UNLOCKED;
63915 gl->gl_target = LM_ST_UNLOCKED;
63916 gl->gl_demote_state = LM_ST_EXCLUSIVE;
63917@@ -1020,9 +1020,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
63918 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
63919 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
63920 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
63921- gl->gl_lockref.count++;
63922+ __lockref_inc(&gl->gl_lockref);
63923 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
63924- gl->gl_lockref.count--;
63925+ __lockref_dec(&gl->gl_lockref);
63926 }
63927 run_queue(gl, 1);
63928 spin_unlock(&gl->gl_spin);
63929@@ -1325,7 +1325,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
63930 }
63931 }
63932
63933- gl->gl_lockref.count++;
63934+ __lockref_inc(&gl->gl_lockref);
63935 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
63936 spin_unlock(&gl->gl_spin);
63937
63938@@ -1384,12 +1384,12 @@ add_back_to_lru:
63939 goto add_back_to_lru;
63940 }
63941 clear_bit(GLF_LRU, &gl->gl_flags);
63942- gl->gl_lockref.count++;
63943+ __lockref_inc(&gl->gl_lockref);
63944 if (demote_ok(gl))
63945 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
63946 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
63947 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
63948- gl->gl_lockref.count--;
63949+ __lockref_dec(&gl->gl_lockref);
63950 spin_unlock(&gl->gl_spin);
63951 cond_resched_lock(&lru_lock);
63952 }
63953@@ -1719,7 +1719,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
63954 state2str(gl->gl_demote_state), dtime,
63955 atomic_read(&gl->gl_ail_count),
63956 atomic_read(&gl->gl_revokes),
63957- (int)gl->gl_lockref.count, gl->gl_hold_time);
63958+ __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
63959
63960 list_for_each_entry(gh, &gl->gl_holders, gh_list)
63961 dump_holder(seq, gh);
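
The glock hunks replace direct manipulation of gl_lockref.count with __lockref_inc()/__lockref_dec()/__lockref_read()/__lockref_set(). Under this patch, the count field of struct lockref is converted to atomic_t so the overflow defense covers it, and these helpers are how code already holding the lockref spinlock updates it. A sketch of the helpers, assuming the include/linux/lockref.h hunk elsewhere in the patch:

static inline void __lockref_inc(struct lockref *lockref)
{
        atomic_inc(&lockref->count);    /* overflow-checked under PAX_REFCOUNT */
}

static inline void __lockref_dec(struct lockref *lockref)
{
        atomic_dec(&lockref->count);
}

static inline int __lockref_read(const struct lockref *lockref)
{
        return atomic_read(&lockref->count);
}
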
63962diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
63963index fe91951..ce38a6e 100644
63964--- a/fs/gfs2/glops.c
63965+++ b/fs/gfs2/glops.c
63966@@ -544,9 +544,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
63967
63968 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
63969 gl->gl_state == LM_ST_SHARED && ip) {
63970- gl->gl_lockref.count++;
63971+ __lockref_inc(&gl->gl_lockref);
63972 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
63973- gl->gl_lockref.count--;
63974+ __lockref_dec(&gl->gl_lockref);
63975 }
63976 }
63977
63978diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
63979index 3aa17d4..b338075 100644
63980--- a/fs/gfs2/quota.c
63981+++ b/fs/gfs2/quota.c
63982@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
63983 if (!spin_trylock(&qd->qd_lockref.lock))
63984 return LRU_SKIP;
63985
63986- if (qd->qd_lockref.count == 0) {
63987+ if (__lockref_read(&qd->qd_lockref) == 0) {
63988 lockref_mark_dead(&qd->qd_lockref);
63989 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
63990 }
63991@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
63992 return NULL;
63993
63994 qd->qd_sbd = sdp;
63995- qd->qd_lockref.count = 1;
63996+ __lockref_set(&qd->qd_lockref, 1);
63997 spin_lock_init(&qd->qd_lockref.lock);
63998 qd->qd_id = qid;
63999 qd->qd_slot = -1;
64000@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
64001 if (lockref_put_or_lock(&qd->qd_lockref))
64002 return;
64003
64004- qd->qd_lockref.count = 0;
64005+ __lockref_set(&qd->qd_lockref, 0);
64006 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
64007 spin_unlock(&qd->qd_lockref.lock);
64008
64009diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64010index fd62cae..3494dfa 100644
64011--- a/fs/hostfs/hostfs_kern.c
64012+++ b/fs/hostfs/hostfs_kern.c
64013@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64014
64015 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64016 {
64017- char *s = nd_get_link(nd);
64018+ const char *s = nd_get_link(nd);
64019 if (!IS_ERR(s))
64020 __putname(s);
64021 }
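
This one-line constification is a consumer of the nd_set_link()/nd_get_link() signature change made in fs/namei.c further down: once the saved link name is handed out as const char *, every put_link implementation that merely releases the buffer should accept it as such. A tiny sketch of the const-correct getter/setter pair, with a hypothetical struct standing in for the nameidata fields:

        #include <stdio.h>

        struct nd_sketch {
                const char *saved_name;
        };

        static void nd_set_link(struct nd_sketch *nd, const char *path)
        {
                nd->saved_name = path;
        }

        static const char *nd_get_link(const struct nd_sketch *nd)
        {
                return nd->saved_name;
        }

        int main(void)
        {
                struct nd_sketch nd;

                nd_set_link(&nd, "/tmp/target");
                printf("%s\n", nd_get_link(&nd));   /* read-only use compiles */
                return 0;
        }

Callers that release the buffer still can: in the kernel, kfree() already takes const void *, and the remaining sites cast through an integer type anyway.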
64022diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64023index c274aca..772fa5e 100644
64024--- a/fs/hugetlbfs/inode.c
64025+++ b/fs/hugetlbfs/inode.c
64026@@ -148,6 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64027 struct mm_struct *mm = current->mm;
64028 struct vm_area_struct *vma;
64029 struct hstate *h = hstate_file(file);
64030+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64031 struct vm_unmapped_area_info info;
64032
64033 if (len & ~huge_page_mask(h))
64034@@ -161,17 +162,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64035 return addr;
64036 }
64037
64038+#ifdef CONFIG_PAX_RANDMMAP
64039+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64040+#endif
64041+
64042 if (addr) {
64043 addr = ALIGN(addr, huge_page_size(h));
64044 vma = find_vma(mm, addr);
64045- if (TASK_SIZE - len >= addr &&
64046- (!vma || addr + len <= vma->vm_start))
64047+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64048 return addr;
64049 }
64050
64051 info.flags = 0;
64052 info.length = len;
64053 info.low_limit = TASK_UNMAPPED_BASE;
64054+
64055+#ifdef CONFIG_PAX_RANDMMAP
64056+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64057+ info.low_limit += mm->delta_mmap;
64058+#endif
64059+
64060 info.high_limit = TASK_SIZE;
64061 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64062 info.align_offset = 0;
64063@@ -912,7 +922,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64064 };
64065 MODULE_ALIAS_FS("hugetlbfs");
64066
64067-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64068+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64069
64070 static int can_do_hugetlb_shm(void)
64071 {
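
The hugetlbfs hunk follows the standard PAX_RANDMMAP shape: ignore the caller's address hint when randomization is active, route the collision test through check_heap_stack_gap() (which also accounts for the randomized thread-stack offset from gr_rand_threadstack_offset()), and shift the bottom-up search floor by the per-mm delta_mmap chosen at exec() time. A sketch of the floor arithmetic; the 28-bit entropy width here is an assumption for illustration, not taken from the patch:

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SHIFT          12
        #define TASK_UNMAPPED_BASE  0x40000000UL

        /* Hypothetical: derive a page-aligned delta from exec-time randomness. */
        static unsigned long delta_mmap_sketch(uint64_t rnd)
        {
                return (rnd & ((1UL << 28) - 1)) << PAGE_SHIFT;
        }

        int main(void)
        {
                unsigned long low_limit = TASK_UNMAPPED_BASE;

                low_limit += delta_mmap_sketch(0x123456789abcdefULL);
                printf("randomized search floor: %#lx\n", low_limit);
                return 0;
        }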
64072diff --git a/fs/inode.c b/fs/inode.c
64073index f00b16f..b653fea 100644
64074--- a/fs/inode.c
64075+++ b/fs/inode.c
64076@@ -830,16 +830,20 @@ unsigned int get_next_ino(void)
64077 unsigned int *p = &get_cpu_var(last_ino);
64078 unsigned int res = *p;
64079
64080+start:
64081+
64082 #ifdef CONFIG_SMP
64083 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64084- static atomic_t shared_last_ino;
64085- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64086+ static atomic_unchecked_t shared_last_ino;
64087+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
64088
64089 res = next - LAST_INO_BATCH;
64090 }
64091 #endif
64092
64093- *p = ++res;
64094+ if (unlikely(!++res))
64095+ goto start; /* never zero */
64096+ *p = res;
64097 put_cpu_var(last_ino);
64098 return res;
64099 }
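
get_next_ino() hands each CPU a private batch of LAST_INO_BATCH numbers carved from one shared counter; the hardened version switches that counter to the unchecked atomic flavour (wraparound is expected and harmless here) and adds a retry so the returned i_ino can never be zero. A runnable userspace sketch of the same allocation scheme, with a thread-local variable standing in for the per-CPU one:

        #include <stdatomic.h>
        #include <stdio.h>

        #define LAST_INO_BATCH 1024

        static atomic_uint shared_last_ino;
        static _Thread_local unsigned int last_ino;  /* per-CPU stand-in */

        static unsigned int get_next_ino_sketch(void)
        {
                unsigned int res = last_ino;
        start:
                if ((res & (LAST_INO_BATCH - 1)) == 0) {
                        /* claim a fresh batch from the shared counter */
                        unsigned int next = atomic_fetch_add(&shared_last_ino,
                                                LAST_INO_BATCH) + LAST_INO_BATCH;
                        res = next - LAST_INO_BATCH;
                }
                if (!++res)
                        goto start;     /* never hand out zero */
                last_ino = res;
                return res;
        }

        int main(void)
        {
                for (int i = 0; i < 3; i++)
                        printf("%u\n", get_next_ino_sketch());
                return 0;
        }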
64100diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
64101index 4a6cf28..d3a29d3 100644
64102--- a/fs/jffs2/erase.c
64103+++ b/fs/jffs2/erase.c
64104@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
64105 struct jffs2_unknown_node marker = {
64106 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
64107 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64108- .totlen = cpu_to_je32(c->cleanmarker_size)
64109+ .totlen = cpu_to_je32(c->cleanmarker_size),
64110+ .hdr_crc = cpu_to_je32(0)
64111 };
64112
64113 jffs2_prealloc_raw_node_refs(c, jeb, 1);
64114diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
64115index 09ed551..45684f8 100644
64116--- a/fs/jffs2/wbuf.c
64117+++ b/fs/jffs2/wbuf.c
64118@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
64119 {
64120 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
64121 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64122- .totlen = constant_cpu_to_je32(8)
64123+ .totlen = constant_cpu_to_je32(8),
64124+ .hdr_crc = constant_cpu_to_je32(0)
64125 };
64126
64127 /*
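
Both jffs2 hunks spell out .hdr_crc = 0 in the cleanmarker initializers. In C this is a no-op: members omitted from a designated initializer are zero-initialized anyway, so the change documents the "CRC deliberately zero" convention (and keeps missing-field-initializer tooling quiet) without altering the object. A two-assert proof:

        #include <assert.h>

        struct node_sketch { unsigned short magic, nodetype; unsigned totlen, hdr_crc; };

        int main(void)
        {
                struct node_sketch a = { .magic = 0x1985, .totlen = 8 };
                struct node_sketch b = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };

                assert(a.hdr_crc == 0 && a.hdr_crc == b.hdr_crc);
                return 0;
        }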
64128diff --git a/fs/jfs/super.c b/fs/jfs/super.c
64129index 5d30c56..8c45372 100644
64130--- a/fs/jfs/super.c
64131+++ b/fs/jfs/super.c
64132@@ -901,7 +901,7 @@ static int __init init_jfs_fs(void)
64133
64134 jfs_inode_cachep =
64135 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
64136- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
64137+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
64138 init_once);
64139 if (jfs_inode_cachep == NULL)
64140 return -ENOMEM;
64141diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
64142index 6acc964..eca491f 100644
64143--- a/fs/kernfs/dir.c
64144+++ b/fs/kernfs/dir.c
64145@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
64146 *
64147 * Returns 31 bit hash of ns + name (so it fits in an off_t )
64148 */
64149-static unsigned int kernfs_name_hash(const char *name, const void *ns)
64150+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
64151 {
64152 unsigned long hash = init_name_hash();
64153 unsigned int len = strlen(name);
64154@@ -831,6 +831,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
64155 ret = scops->mkdir(parent, dentry->d_name.name, mode);
64156
64157 kernfs_put_active(parent);
64158+
64159+ if (!ret) {
64160+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
64161+ ret = PTR_ERR_OR_ZERO(dentry_ret);
64162+ }
64163+
64164 return ret;
64165 }
64166
64167diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
64168index 2bacb99..f745182 100644
64169--- a/fs/kernfs/file.c
64170+++ b/fs/kernfs/file.c
64171@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
64172
64173 struct kernfs_open_node {
64174 atomic_t refcnt;
64175- atomic_t event;
64176+ atomic_unchecked_t event;
64177 wait_queue_head_t poll;
64178 struct list_head files; /* goes through kernfs_open_file.list */
64179 };
64180@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
64181 {
64182 struct kernfs_open_file *of = sf->private;
64183
64184- of->event = atomic_read(&of->kn->attr.open->event);
64185+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
64186
64187 return of->kn->attr.ops->seq_show(sf, v);
64188 }
64189@@ -207,7 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
64190 goto out_free;
64191 }
64192
64193- of->event = atomic_read(&of->kn->attr.open->event);
64194+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
64195 ops = kernfs_ops(of->kn);
64196 if (ops->read)
64197 len = ops->read(of, buf, len, *ppos);
64198@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
64199 {
64200 struct kernfs_open_file *of = kernfs_of(file);
64201 const struct kernfs_ops *ops;
64202- size_t len;
64203+ ssize_t len;
64204 char *buf;
64205
64206 if (of->atomic_write_len) {
64207@@ -385,12 +385,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
64208 return ret;
64209 }
64210
64211-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64212- void *buf, int len, int write)
64213+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64214+ void *buf, size_t len, int write)
64215 {
64216 struct file *file = vma->vm_file;
64217 struct kernfs_open_file *of = kernfs_of(file);
64218- int ret;
64219+ ssize_t ret;
64220
64221 if (!of->vm_ops)
64222 return -EINVAL;
64223@@ -569,7 +569,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
64224 return -ENOMEM;
64225
64226 atomic_set(&new_on->refcnt, 0);
64227- atomic_set(&new_on->event, 1);
64228+ atomic_set_unchecked(&new_on->event, 1);
64229 init_waitqueue_head(&new_on->poll);
64230 INIT_LIST_HEAD(&new_on->files);
64231 goto retry;
64232@@ -793,7 +793,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
64233
64234 kernfs_put_active(kn);
64235
64236- if (of->event != atomic_read(&on->event))
64237+ if (of->event != atomic_read_unchecked(&on->event))
64238 goto trigger;
64239
64240 return DEFAULT_POLLMASK;
64241@@ -824,7 +824,7 @@ repeat:
64242
64243 on = kn->attr.open;
64244 if (on) {
64245- atomic_inc(&on->event);
64246+ atomic_inc_unchecked(&on->event);
64247 wake_up_interruptible(&on->poll);
64248 }
64249
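
The kernfs event counter only ever feeds an equality test ("has anything fired since my snapshot?"), so wraparound is harmless; under PaX's REFCOUNT hardening an ordinary atomic_t that overflows would be treated as a refcount bug, hence the switch to the _unchecked variants to opt this counter out. The same pattern recurs below for nlm_cookie, fsnotify_sync_cookie, mnt_ns_seq, and the ocfs2 allocation statistics. A sketch of the snapshot-and-compare idiom, with a plain C11 atomic standing in for atomic_unchecked_t:

        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_uint event;               /* wrap-tolerant event counter */

        struct open_file_sketch { unsigned int snapshot; };

        static void notify(void)
        {
                atomic_fetch_add(&event, 1);    /* kernfs_notify analogue */
        }

        static void observe(struct open_file_sketch *of)
        {
                of->snapshot = atomic_load(&event);
        }

        static bool changed(const struct open_file_sketch *of)
        {
                /* equality is wrap-safe: only "same or different" matters */
                return of->snapshot != atomic_load(&event);
        }

        int main(void)
        {
                struct open_file_sketch of;

                observe(&of);
                notify();
                return changed(&of) ? 0 : 1;
        }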
64250diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
64251index 8a19889..4c3069a 100644
64252--- a/fs/kernfs/symlink.c
64253+++ b/fs/kernfs/symlink.c
64254@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
64255 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
64256 void *cookie)
64257 {
64258- char *page = nd_get_link(nd);
64259+ const char *page = nd_get_link(nd);
64260 if (!IS_ERR(page))
64261 free_page((unsigned long)page);
64262 }
64263diff --git a/fs/libfs.c b/fs/libfs.c
64264index 0ab6512..cd9982d 100644
64265--- a/fs/libfs.c
64266+++ b/fs/libfs.c
64267@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64268
64269 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
64270 struct dentry *next = list_entry(p, struct dentry, d_child);
64271+ char d_name[sizeof(next->d_iname)];
64272+ const unsigned char *name;
64273+
64274 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
64275 if (!simple_positive(next)) {
64276 spin_unlock(&next->d_lock);
64277@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64278
64279 spin_unlock(&next->d_lock);
64280 spin_unlock(&dentry->d_lock);
64281- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
64282+ name = next->d_name.name;
64283+ if (name == next->d_iname) {
64284+ memcpy(d_name, name, next->d_name.len);
64285+ name = d_name;
64286+ }
64287+ if (!dir_emit(ctx, name, next->d_name.len,
64288 next->d_inode->i_ino, dt_type(next->d_inode)))
64289 return 0;
64290 spin_lock(&dentry->d_lock);
64291@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
64292 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
64293 void *cookie)
64294 {
64295- char *s = nd_get_link(nd);
64296+ const char *s = nd_get_link(nd);
64297 if (!IS_ERR(s))
64298 kfree(s);
64299 }
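
The dcache_readdir() change guards against names stored inline in the dentry: once d_lock is dropped, a concurrent rename may rewrite d_iname while dir_emit() is still copying it out (dir_emit can fault and sleep). Snapshotting short names into a stack buffer first makes the emitted bytes stable. The pattern in isolation, with simplified stand-in types:

        #include <stdio.h>
        #include <string.h>

        #define DNAME_INLINE_LEN 32

        struct dentry_sketch {
                const char *name;               /* may point at iname below */
                unsigned int len;
                char iname[DNAME_INLINE_LEN];   /* inline storage, mutable under rename */
        };

        static void emit_stable(const struct dentry_sketch *d,
                                void (*emit)(const char *, unsigned int))
        {
                char buf[DNAME_INLINE_LEN];
                const char *name = d->name;

                if (name == d->iname) {         /* inline name: take a private copy */
                        memcpy(buf, name, d->len);
                        name = buf;
                }
                emit(name, d->len);
        }

        static void print_name(const char *s, unsigned int n)
        {
                printf("%.*s\n", (int)n, s);
        }

        int main(void)
        {
                struct dentry_sketch d = { .len = 5 };

                memcpy(d.iname, "hello", 5);
                d.name = d.iname;
                emit_stable(&d, print_name);
                return 0;
        }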
64300diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
64301index acd3947..1f896e2 100644
64302--- a/fs/lockd/clntproc.c
64303+++ b/fs/lockd/clntproc.c
64304@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
64305 /*
64306 * Cookie counter for NLM requests
64307 */
64308-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
64309+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
64310
64311 void nlmclnt_next_cookie(struct nlm_cookie *c)
64312 {
64313- u32 cookie = atomic_inc_return(&nlm_cookie);
64314+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
64315
64316 memcpy(c->data, &cookie, 4);
64317 c->len=4;
64318diff --git a/fs/mount.h b/fs/mount.h
64319index 6a61c2b..bd79179 100644
64320--- a/fs/mount.h
64321+++ b/fs/mount.h
64322@@ -13,7 +13,7 @@ struct mnt_namespace {
64323 u64 seq; /* Sequence number to prevent loops */
64324 wait_queue_head_t poll;
64325 u64 event;
64326-};
64327+} __randomize_layout;
64328
64329 struct mnt_pcp {
64330 int mnt_count;
64331@@ -65,7 +65,7 @@ struct mount {
64332 struct hlist_head mnt_pins;
64333 struct fs_pin mnt_umount;
64334 struct dentry *mnt_ex_mountpoint;
64335-};
64336+} __randomize_layout;
64337
64338 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64339
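
__randomize_layout tags mnt_namespace and mount for the grsecurity randstruct GCC plugin, which permutes member order at build time so field offsets differ from kernel to kernel and from the upstream layout an exploit might hardcode. When the plugin is not in play the macro must expand to nothing; a sketch of that arrangement (the attribute spelling follows the plugin convention, the guard macro name is assumed for illustration):

        /* With the randstruct plugin active the attribute is honoured;
         * otherwise the macro disappears and the layout is unchanged. */
        #ifdef RANDSTRUCT_PLUGIN
        #define __randomize_layout __attribute__((randomize_layout))
        #else
        #define __randomize_layout
        #endif

        struct mnt_namespace_sketch {
                int count;
                unsigned long seq;
                void *root;
        } __randomize_layout;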
64340diff --git a/fs/namei.c b/fs/namei.c
64341index c83145a..a78aa13 100644
64342--- a/fs/namei.c
64343+++ b/fs/namei.c
64344@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
64345 if (ret != -EACCES)
64346 return ret;
64347
64348+#ifdef CONFIG_GRKERNSEC
64349+ /* we'll block if we have to log due to a denied capability use */
64350+ if (mask & MAY_NOT_BLOCK)
64351+ return -ECHILD;
64352+#endif
64353+
64354 if (S_ISDIR(inode->i_mode)) {
64355 /* DACs are overridable for directories */
64356- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64357- return 0;
64358 if (!(mask & MAY_WRITE))
64359- if (capable_wrt_inode_uidgid(inode,
64360- CAP_DAC_READ_SEARCH))
64361+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64362+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64363 return 0;
64364+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64365+ return 0;
64366 return -EACCES;
64367 }
64368 /*
64369+ * Searching includes executable on directories, else just read.
64370+ */
64371+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64372+ if (mask == MAY_READ)
64373+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64374+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64375+ return 0;
64376+
64377+ /*
64378 * Read/write DACs are always overridable.
64379 * Executable DACs are overridable when there is
64380 * at least one exec bit set.
64381@@ -356,14 +371,6 @@ int generic_permission(struct inode *inode, int mask)
64382 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64383 return 0;
64384
64385- /*
64386- * Searching includes executable on directories, else just read.
64387- */
64388- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64389- if (mask == MAY_READ)
64390- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64391- return 0;
64392-
64393 return -EACCES;
64394 }
64395 EXPORT_SYMBOL(generic_permission);
64396@@ -503,7 +510,7 @@ struct nameidata {
64397 int last_type;
64398 unsigned depth;
64399 struct file *base;
64400- char *saved_names[MAX_NESTED_LINKS + 1];
64401+ const char *saved_names[MAX_NESTED_LINKS + 1];
64402 };
64403
64404 /*
64405@@ -714,13 +721,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
64406 nd->flags |= LOOKUP_JUMPED;
64407 }
64408
64409-void nd_set_link(struct nameidata *nd, char *path)
64410+void nd_set_link(struct nameidata *nd, const char *path)
64411 {
64412 nd->saved_names[nd->depth] = path;
64413 }
64414 EXPORT_SYMBOL(nd_set_link);
64415
64416-char *nd_get_link(struct nameidata *nd)
64417+const char *nd_get_link(const struct nameidata *nd)
64418 {
64419 return nd->saved_names[nd->depth];
64420 }
64421@@ -855,7 +862,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64422 {
64423 struct dentry *dentry = link->dentry;
64424 int error;
64425- char *s;
64426+ const char *s;
64427
64428 BUG_ON(nd->flags & LOOKUP_RCU);
64429
64430@@ -876,6 +883,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64431 if (error)
64432 goto out_put_nd_path;
64433
64434+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64435+ dentry->d_inode, dentry, nd->path.mnt)) {
64436+ error = -EACCES;
64437+ goto out_put_nd_path;
64438+ }
64439+
64440 nd->last_type = LAST_BIND;
64441 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64442 error = PTR_ERR(*p);
64443@@ -1639,6 +1652,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64444 if (res)
64445 break;
64446 res = walk_component(nd, path, LOOKUP_FOLLOW);
64447+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64448+ res = -EACCES;
64449 put_link(nd, &link, cookie);
64450 } while (res > 0);
64451
64452@@ -1711,7 +1726,7 @@ EXPORT_SYMBOL(full_name_hash);
64453 static inline u64 hash_name(const char *name)
64454 {
64455 unsigned long a, b, adata, bdata, mask, hash, len;
64456- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64457+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64458
64459 hash = a = 0;
64460 len = -sizeof(unsigned long);
64461@@ -2006,6 +2021,8 @@ static int path_lookupat(int dfd, const char *name,
64462 if (err)
64463 break;
64464 err = lookup_last(nd, &path);
64465+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64466+ err = -EACCES;
64467 put_link(nd, &link, cookie);
64468 }
64469 }
64470@@ -2013,6 +2030,13 @@ static int path_lookupat(int dfd, const char *name,
64471 if (!err)
64472 err = complete_walk(nd);
64473
64474+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64475+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64476+ path_put(&nd->path);
64477+ err = -ENOENT;
64478+ }
64479+ }
64480+
64481 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64482 if (!d_can_lookup(nd->path.dentry)) {
64483 path_put(&nd->path);
64484@@ -2034,8 +2058,15 @@ static int filename_lookup(int dfd, struct filename *name,
64485 retval = path_lookupat(dfd, name->name,
64486 flags | LOOKUP_REVAL, nd);
64487
64488- if (likely(!retval))
64489+ if (likely(!retval)) {
64490 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64491+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64492+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64493+ path_put(&nd->path);
64494+ return -ENOENT;
64495+ }
64496+ }
64497+ }
64498 return retval;
64499 }
64500
64501@@ -2614,6 +2645,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64502 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64503 return -EPERM;
64504
64505+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64506+ return -EPERM;
64507+ if (gr_handle_rawio(inode))
64508+ return -EPERM;
64509+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64510+ return -EACCES;
64511+
64512 return 0;
64513 }
64514
64515@@ -2845,7 +2883,7 @@ looked_up:
64516 * cleared otherwise prior to returning.
64517 */
64518 static int lookup_open(struct nameidata *nd, struct path *path,
64519- struct file *file,
64520+ struct path *link, struct file *file,
64521 const struct open_flags *op,
64522 bool got_write, int *opened)
64523 {
64524@@ -2880,6 +2918,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64525 /* Negative dentry, just create the file */
64526 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64527 umode_t mode = op->mode;
64528+
64529+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64530+ error = -EACCES;
64531+ goto out_dput;
64532+ }
64533+
64534+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64535+ error = -EACCES;
64536+ goto out_dput;
64537+ }
64538+
64539 if (!IS_POSIXACL(dir->d_inode))
64540 mode &= ~current_umask();
64541 /*
64542@@ -2901,6 +2950,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64543 nd->flags & LOOKUP_EXCL);
64544 if (error)
64545 goto out_dput;
64546+ else
64547+ gr_handle_create(dentry, nd->path.mnt);
64548 }
64549 out_no_open:
64550 path->dentry = dentry;
64551@@ -2915,7 +2966,7 @@ out_dput:
64552 /*
64553 * Handle the last step of open()
64554 */
64555-static int do_last(struct nameidata *nd, struct path *path,
64556+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64557 struct file *file, const struct open_flags *op,
64558 int *opened, struct filename *name)
64559 {
64560@@ -2965,6 +3016,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64561 if (error)
64562 return error;
64563
64564+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64565+ error = -ENOENT;
64566+ goto out;
64567+ }
64568+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64569+ error = -EACCES;
64570+ goto out;
64571+ }
64572+
64573 audit_inode(name, dir, LOOKUP_PARENT);
64574 error = -EISDIR;
64575 /* trailing slashes? */
64576@@ -2984,7 +3044,7 @@ retry_lookup:
64577 */
64578 }
64579 mutex_lock(&dir->d_inode->i_mutex);
64580- error = lookup_open(nd, path, file, op, got_write, opened);
64581+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64582 mutex_unlock(&dir->d_inode->i_mutex);
64583
64584 if (error <= 0) {
64585@@ -3008,11 +3068,28 @@ retry_lookup:
64586 goto finish_open_created;
64587 }
64588
64589+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64590+ error = -ENOENT;
64591+ goto exit_dput;
64592+ }
64593+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64594+ error = -EACCES;
64595+ goto exit_dput;
64596+ }
64597+
64598 /*
64599 * create/update audit record if it already exists.
64600 */
64601- if (d_is_positive(path->dentry))
64602+ if (d_is_positive(path->dentry)) {
64603+ /* only check if O_CREAT is specified, all other checks need to go
64604+ into may_open */
64605+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64606+ error = -EACCES;
64607+ goto exit_dput;
64608+ }
64609+
64610 audit_inode(name, path->dentry, 0);
64611+ }
64612
64613 /*
64614 * If atomic_open() acquired write access it is dropped now due to
64615@@ -3053,6 +3130,11 @@ finish_lookup:
64616 }
64617 }
64618 BUG_ON(inode != path->dentry->d_inode);
64619+ /* if we're resolving a symlink to another symlink */
64620+ if (link && gr_handle_symlink_owner(link, inode)) {
64621+ error = -EACCES;
64622+ goto out;
64623+ }
64624 return 1;
64625 }
64626
64627@@ -3072,7 +3154,18 @@ finish_open:
64628 path_put(&save_parent);
64629 return error;
64630 }
64631+
64632+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64633+ error = -ENOENT;
64634+ goto out;
64635+ }
64636+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64637+ error = -EACCES;
64638+ goto out;
64639+ }
64640+
64641 audit_inode(name, nd->path.dentry, 0);
64642+
64643 error = -EISDIR;
64644 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64645 goto out;
64646@@ -3233,7 +3326,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64647 if (unlikely(error))
64648 goto out;
64649
64650- error = do_last(nd, &path, file, op, &opened, pathname);
64651+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64652 while (unlikely(error > 0)) { /* trailing symlink */
64653 struct path link = path;
64654 void *cookie;
64655@@ -3251,7 +3344,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64656 error = follow_link(&link, nd, &cookie);
64657 if (unlikely(error))
64658 break;
64659- error = do_last(nd, &path, file, op, &opened, pathname);
64660+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64661 put_link(nd, &link, cookie);
64662 }
64663 out:
64664@@ -3353,9 +3446,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
64665 goto unlock;
64666
64667 error = -EEXIST;
64668- if (d_is_positive(dentry))
64669+ if (d_is_positive(dentry)) {
64670+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64671+ error = -ENOENT;
64672 goto fail;
64673-
64674+ }
64675 /*
64676 * Special case - lookup gave negative, but... we had foo/bar/
64677 * From the vfs_mknod() POV we just have a negative dentry -
64678@@ -3420,6 +3515,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64679 }
64680 EXPORT_SYMBOL(user_path_create);
64681
64682+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64683+{
64684+ struct filename *tmp = getname(pathname);
64685+ struct dentry *res;
64686+ if (IS_ERR(tmp))
64687+ return ERR_CAST(tmp);
64688+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64689+ if (IS_ERR(res))
64690+ putname(tmp);
64691+ else
64692+ *to = tmp;
64693+ return res;
64694+}
64695+
64696 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64697 {
64698 int error = may_create(dir, dentry);
64699@@ -3483,6 +3592,17 @@ retry:
64700
64701 if (!IS_POSIXACL(path.dentry->d_inode))
64702 mode &= ~current_umask();
64703+
64704+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64705+ error = -EPERM;
64706+ goto out;
64707+ }
64708+
64709+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64710+ error = -EACCES;
64711+ goto out;
64712+ }
64713+
64714 error = security_path_mknod(&path, dentry, mode, dev);
64715 if (error)
64716 goto out;
64717@@ -3498,6 +3618,8 @@ retry:
64718 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64719 break;
64720 }
64721+ if (!error)
64722+ gr_handle_create(dentry, path.mnt);
64723 out:
64724 done_path_create(&path, dentry);
64725 if (retry_estale(error, lookup_flags)) {
64726@@ -3552,9 +3674,16 @@ retry:
64727
64728 if (!IS_POSIXACL(path.dentry->d_inode))
64729 mode &= ~current_umask();
64730+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64731+ error = -EACCES;
64732+ goto out;
64733+ }
64734 error = security_path_mkdir(&path, dentry, mode);
64735 if (!error)
64736 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64737+ if (!error)
64738+ gr_handle_create(dentry, path.mnt);
64739+out:
64740 done_path_create(&path, dentry);
64741 if (retry_estale(error, lookup_flags)) {
64742 lookup_flags |= LOOKUP_REVAL;
64743@@ -3587,7 +3716,7 @@ void dentry_unhash(struct dentry *dentry)
64744 {
64745 shrink_dcache_parent(dentry);
64746 spin_lock(&dentry->d_lock);
64747- if (dentry->d_lockref.count == 1)
64748+ if (__lockref_read(&dentry->d_lockref) == 1)
64749 __d_drop(dentry);
64750 spin_unlock(&dentry->d_lock);
64751 }
64752@@ -3638,6 +3767,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64753 struct filename *name;
64754 struct dentry *dentry;
64755 struct nameidata nd;
64756+ u64 saved_ino = 0;
64757+ dev_t saved_dev = 0;
64758 unsigned int lookup_flags = 0;
64759 retry:
64760 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64761@@ -3670,10 +3801,21 @@ retry:
64762 error = -ENOENT;
64763 goto exit3;
64764 }
64765+
64766+ saved_ino = gr_get_ino_from_dentry(dentry);
64767+ saved_dev = gr_get_dev_from_dentry(dentry);
64768+
64769+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64770+ error = -EACCES;
64771+ goto exit3;
64772+ }
64773+
64774 error = security_path_rmdir(&nd.path, dentry);
64775 if (error)
64776 goto exit3;
64777 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64778+ if (!error && (saved_dev || saved_ino))
64779+ gr_handle_delete(saved_ino, saved_dev);
64780 exit3:
64781 dput(dentry);
64782 exit2:
64783@@ -3766,6 +3908,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64784 struct nameidata nd;
64785 struct inode *inode = NULL;
64786 struct inode *delegated_inode = NULL;
64787+ u64 saved_ino = 0;
64788+ dev_t saved_dev = 0;
64789 unsigned int lookup_flags = 0;
64790 retry:
64791 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64792@@ -3792,10 +3936,22 @@ retry_deleg:
64793 if (d_is_negative(dentry))
64794 goto slashes;
64795 ihold(inode);
64796+
64797+ if (inode->i_nlink <= 1) {
64798+ saved_ino = gr_get_ino_from_dentry(dentry);
64799+ saved_dev = gr_get_dev_from_dentry(dentry);
64800+ }
64801+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64802+ error = -EACCES;
64803+ goto exit2;
64804+ }
64805+
64806 error = security_path_unlink(&nd.path, dentry);
64807 if (error)
64808 goto exit2;
64809 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64810+ if (!error && (saved_ino || saved_dev))
64811+ gr_handle_delete(saved_ino, saved_dev);
64812 exit2:
64813 dput(dentry);
64814 }
64815@@ -3884,9 +4040,17 @@ retry:
64816 if (IS_ERR(dentry))
64817 goto out_putname;
64818
64819+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64820+ error = -EACCES;
64821+ goto out;
64822+ }
64823+
64824 error = security_path_symlink(&path, dentry, from->name);
64825 if (!error)
64826 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64827+ if (!error)
64828+ gr_handle_create(dentry, path.mnt);
64829+out:
64830 done_path_create(&path, dentry);
64831 if (retry_estale(error, lookup_flags)) {
64832 lookup_flags |= LOOKUP_REVAL;
64833@@ -3990,6 +4154,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64834 struct dentry *new_dentry;
64835 struct path old_path, new_path;
64836 struct inode *delegated_inode = NULL;
64837+ struct filename *to = NULL;
64838 int how = 0;
64839 int error;
64840
64841@@ -4013,7 +4178,7 @@ retry:
64842 if (error)
64843 return error;
64844
64845- new_dentry = user_path_create(newdfd, newname, &new_path,
64846+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64847 (how & LOOKUP_REVAL));
64848 error = PTR_ERR(new_dentry);
64849 if (IS_ERR(new_dentry))
64850@@ -4025,11 +4190,28 @@ retry:
64851 error = may_linkat(&old_path);
64852 if (unlikely(error))
64853 goto out_dput;
64854+
64855+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64856+ old_path.dentry->d_inode,
64857+ old_path.dentry->d_inode->i_mode, to)) {
64858+ error = -EACCES;
64859+ goto out_dput;
64860+ }
64861+
64862+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64863+ old_path.dentry, old_path.mnt, to)) {
64864+ error = -EACCES;
64865+ goto out_dput;
64866+ }
64867+
64868 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64869 if (error)
64870 goto out_dput;
64871 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64872+ if (!error)
64873+ gr_handle_create(new_dentry, new_path.mnt);
64874 out_dput:
64875+ putname(to);
64876 done_path_create(&new_path, new_dentry);
64877 if (delegated_inode) {
64878 error = break_deleg_wait(&delegated_inode);
64879@@ -4345,6 +4527,20 @@ retry_deleg:
64880 if (new_dentry == trap)
64881 goto exit5;
64882
64883+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64884+ /* use EXDEV error to cause 'mv' to switch to an alternative
64885+ * method for usability
64886+ */
64887+ error = -EXDEV;
64888+ goto exit5;
64889+ }
64890+
64891+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64892+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64893+ to, flags);
64894+ if (error)
64895+ goto exit5;
64896+
64897 error = security_path_rename(&oldnd.path, old_dentry,
64898 &newnd.path, new_dentry, flags);
64899 if (error)
64900@@ -4352,6 +4548,9 @@ retry_deleg:
64901 error = vfs_rename(old_dir->d_inode, old_dentry,
64902 new_dir->d_inode, new_dentry,
64903 &delegated_inode, flags);
64904+ if (!error)
64905+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64906+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64907 exit5:
64908 dput(new_dentry);
64909 exit4:
64910@@ -4408,14 +4607,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64911
64912 int readlink_copy(char __user *buffer, int buflen, const char *link)
64913 {
64914+ char tmpbuf[64];
64915+ const char *newlink;
64916 int len = PTR_ERR(link);
64917+
64918 if (IS_ERR(link))
64919 goto out;
64920
64921 len = strlen(link);
64922 if (len > (unsigned) buflen)
64923 len = buflen;
64924- if (copy_to_user(buffer, link, len))
64925+
64926+ if (len < sizeof(tmpbuf)) {
64927+ memcpy(tmpbuf, link, len);
64928+ newlink = tmpbuf;
64929+ } else
64930+ newlink = link;
64931+
64932+ if (copy_to_user(buffer, newlink, len))
64933 len = -EFAULT;
64934 out:
64935 return len;
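
The readlink_copy() change at the end of the fs/namei.c hunks bounces link bodies shorter than 64 bytes through a stack buffer before copy_to_user(). The likely motive (an inference, not stated in the patch) is hardened usercopy: a fixed-size stack buffer has a size the usercopy checks can verify, whereas copying straight out of an inode-embedded or slab buffer of variable provenance is harder to police. The bounce in isolation, with copy_out() standing in for copy_to_user():

        #include <string.h>

        static int copy_out(char *dst, const char *src, size_t len)
        {
                memcpy(dst, src, len);  /* pretend this crosses to userspace */
                return 0;               /* 0 == success, as copy_to_user */
        }

        static int readlink_copy_sketch(char *buffer, int buflen, const char *link)
        {
                char tmpbuf[64];
                const char *src = link;
                size_t len = strlen(link);

                if (len > (size_t)buflen)
                        len = buflen;
                if (len < sizeof(tmpbuf)) {     /* short link: bounce via the stack */
                        memcpy(tmpbuf, link, len);
                        src = tmpbuf;
                }
                return copy_out(buffer, src, len) ? -1 : (int)len;
        }

        int main(void)
        {
                char out[128];

                return readlink_copy_sketch(out, sizeof(out), "/tmp/target") > 0 ? 0 : 1;
        }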
64936diff --git a/fs/namespace.c b/fs/namespace.c
64937index 82ef140..5335e75 100644
64938--- a/fs/namespace.c
64939+++ b/fs/namespace.c
64940@@ -1438,6 +1438,9 @@ static int do_umount(struct mount *mnt, int flags)
64941 if (!(sb->s_flags & MS_RDONLY))
64942 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64943 up_write(&sb->s_umount);
64944+
64945+ gr_log_remount(mnt->mnt_devname, retval);
64946+
64947 return retval;
64948 }
64949
64950@@ -1460,6 +1463,9 @@ static int do_umount(struct mount *mnt, int flags)
64951 }
64952 unlock_mount_hash();
64953 namespace_unlock();
64954+
64955+ gr_log_unmount(mnt->mnt_devname, retval);
64956+
64957 return retval;
64958 }
64959
64960@@ -1510,7 +1516,7 @@ static inline bool may_mount(void)
64961 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64962 */
64963
64964-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64965+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64966 {
64967 struct path path;
64968 struct mount *mnt;
64969@@ -1555,7 +1561,7 @@ out:
64970 /*
64971 * The 2.0 compatible umount. No flags.
64972 */
64973-SYSCALL_DEFINE1(oldumount, char __user *, name)
64974+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64975 {
64976 return sys_umount(name, 0);
64977 }
64978@@ -2621,6 +2627,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64979 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64980 MS_STRICTATIME);
64981
64982+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64983+ retval = -EPERM;
64984+ goto dput_out;
64985+ }
64986+
64987+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64988+ retval = -EPERM;
64989+ goto dput_out;
64990+ }
64991+
64992 if (flags & MS_REMOUNT)
64993 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64994 data_page);
64995@@ -2634,7 +2650,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64996 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64997 dev_name, data_page);
64998 dput_out:
64999+ gr_log_mount(dev_name, &path, retval);
65000+
65001 path_put(&path);
65002+
65003 return retval;
65004 }
65005
65006@@ -2652,7 +2671,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65007 * number incrementing at 10Ghz will take 12,427 years to wrap which
65008 * is effectively never, so we can ignore the possibility.
65009 */
65010-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65011+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65012
65013 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65014 {
65015@@ -2668,7 +2687,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65016 return ERR_PTR(ret);
65017 }
65018 new_ns->ns.ops = &mntns_operations;
65019- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65020+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
65021 atomic_set(&new_ns->count, 1);
65022 new_ns->root = NULL;
65023 INIT_LIST_HEAD(&new_ns->list);
65024@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65025 return new_ns;
65026 }
65027
65028-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65029+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65030 struct user_namespace *user_ns, struct fs_struct *new_fs)
65031 {
65032 struct mnt_namespace *new_ns;
65033@@ -2799,8 +2818,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65034 }
65035 EXPORT_SYMBOL(mount_subtree);
65036
65037-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65038- char __user *, type, unsigned long, flags, void __user *, data)
65039+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65040+ const char __user *, type, unsigned long, flags, void __user *, data)
65041 {
65042 int ret;
65043 char *kernel_type;
65044@@ -2906,6 +2925,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65045 if (error)
65046 goto out2;
65047
65048+ if (gr_handle_chroot_pivot()) {
65049+ error = -EPERM;
65050+ goto out2;
65051+ }
65052+
65053 get_fs_root(current->fs, &root);
65054 old_mp = lock_mount(&old);
65055 error = PTR_ERR(old_mp);
65056@@ -3180,7 +3204,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
65057 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65058 return -EPERM;
65059
65060- if (fs->users != 1)
65061+ if (atomic_read(&fs->users) != 1)
65062 return -EINVAL;
65063
65064 get_mnt_ns(mnt_ns);
65065diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65066index 19ca95c..b28702c 100644
65067--- a/fs/nfs/callback_xdr.c
65068+++ b/fs/nfs/callback_xdr.c
65069@@ -51,7 +51,7 @@ struct callback_op {
65070 callback_decode_arg_t decode_args;
65071 callback_encode_res_t encode_res;
65072 long res_maxsize;
65073-};
65074+} __do_const;
65075
65076 static struct callback_op callback_ops[];
65077
65078diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65079index d42dff6..ecbdf42 100644
65080--- a/fs/nfs/inode.c
65081+++ b/fs/nfs/inode.c
65082@@ -1270,16 +1270,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
65083 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
65084 }
65085
65086-static atomic_long_t nfs_attr_generation_counter;
65087+static atomic_long_unchecked_t nfs_attr_generation_counter;
65088
65089 static unsigned long nfs_read_attr_generation_counter(void)
65090 {
65091- return atomic_long_read(&nfs_attr_generation_counter);
65092+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65093 }
65094
65095 unsigned long nfs_inc_attr_generation_counter(void)
65096 {
65097- return atomic_long_inc_return(&nfs_attr_generation_counter);
65098+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
65099 }
65100 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
65101
65102diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
65103index 92b9d97..045e58c 100644
65104--- a/fs/nfsd/nfs4proc.c
65105+++ b/fs/nfsd/nfs4proc.c
65106@@ -1492,7 +1492,7 @@ struct nfsd4_operation {
65107 nfsd4op_rsize op_rsize_bop;
65108 stateid_getter op_get_currentstateid;
65109 stateid_setter op_set_currentstateid;
65110-};
65111+} __do_const;
65112
65113 static struct nfsd4_operation nfsd4_ops[];
65114
65115diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
65116index 5fb7e78..cc8a22e 100644
65117--- a/fs/nfsd/nfs4xdr.c
65118+++ b/fs/nfsd/nfs4xdr.c
65119@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
65120
65121 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
65122
65123-static nfsd4_dec nfsd4_dec_ops[] = {
65124+static const nfsd4_dec nfsd4_dec_ops[] = {
65125 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
65126 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
65127 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
65128diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
65129index 46ec934..f384e41 100644
65130--- a/fs/nfsd/nfscache.c
65131+++ b/fs/nfsd/nfscache.c
65132@@ -541,7 +541,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65133 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
65134 u32 hash;
65135 struct nfsd_drc_bucket *b;
65136- int len;
65137+ long len;
65138 size_t bufsize = 0;
65139
65140 if (!rp)
65141@@ -550,11 +550,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65142 hash = nfsd_cache_hash(rp->c_xid);
65143 b = &drc_hashtbl[hash];
65144
65145- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
65146- len >>= 2;
65147+ if (statp) {
65148+ len = (char*)statp - (char*)resv->iov_base;
65149+ len = resv->iov_len - len;
65150+ len >>= 2;
65151+ }
65152
65153 /* Don't cache excessive amounts of data and XDR failures */
65154- if (!statp || len > (256 >> 2)) {
65155+ if (!statp || len > (256 >> 2) || len < 0) {
65156 nfsd_reply_cache_free(b, rp);
65157 return;
65158 }
65159@@ -562,7 +565,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65160 switch (cachetype) {
65161 case RC_REPLSTAT:
65162 if (len != 1)
65163- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
65164+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
65165 rp->c_replstat = *statp;
65166 break;
65167 case RC_REPLBUFF:
65168diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
65169index 3685265..e77261e 100644
65170--- a/fs/nfsd/vfs.c
65171+++ b/fs/nfsd/vfs.c
65172@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
65173
65174 oldfs = get_fs();
65175 set_fs(KERNEL_DS);
65176- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
65177+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
65178 set_fs(oldfs);
65179 return nfsd_finish_read(file, count, host_err);
65180 }
65181@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
65182
65183 /* Write the data. */
65184 oldfs = get_fs(); set_fs(KERNEL_DS);
65185- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
65186+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
65187 set_fs(oldfs);
65188 if (host_err < 0)
65189 goto out_nfserr;
65190@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
65191 */
65192
65193 oldfs = get_fs(); set_fs(KERNEL_DS);
65194- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
65195+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
65196 set_fs(oldfs);
65197
65198 if (host_err < 0)
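
The nfsd hunks keep the long-standing set_fs(KERNEL_DS) trick for feeding kernel iovecs to user-pointer APIs, but change the cast from __user to __force_user: PaX's sparse annotation for a deliberate kernel-to-user address-space cast, so that casts which merely silence sparse by accident still stand out. A sketch of the annotation plumbing; the __force_user spelling follows the patch, while the expansion shown is the conventional sparse idiom and is assumed here:

        /* Sparse address-space annotations: invisible to the compiler,
         * enforced only when the "sparse" checker defines __CHECKER__. */
        #ifdef __CHECKER__
        # define __user  __attribute__((noderef, address_space(1)))
        # define __force __attribute__((force))
        #else
        # define __user
        # define __force
        #endif
        #define __force_user __force __user     /* sanctioned kernel->user cast */

        static long fake_readv(const char __user *buf, unsigned long len)
        {
                (void)buf;
                return (long)len;
        }

        static long kernel_side_read(const char *kbuf, unsigned long len)
        {
                /* set_fs(KERNEL_DS) elided; the annotated cast is the point */
                return fake_readv((const char __force_user *)kbuf, len);
        }

        int main(void)
        {
                return kernel_side_read("data", 4) == 4 ? 0 : 1;
        }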
65199diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
65200index 52ccd34..7a6b202 100644
65201--- a/fs/nls/nls_base.c
65202+++ b/fs/nls/nls_base.c
65203@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
65204
65205 int __register_nls(struct nls_table *nls, struct module *owner)
65206 {
65207- struct nls_table ** tmp = &tables;
65208+ struct nls_table *tmp = tables;
65209
65210 if (nls->next)
65211 return -EBUSY;
65212
65213- nls->owner = owner;
65214+ pax_open_kernel();
65215+ *(void **)&nls->owner = owner;
65216+ pax_close_kernel();
65217 spin_lock(&nls_lock);
65218- while (*tmp) {
65219- if (nls == *tmp) {
65220+ while (tmp) {
65221+ if (nls == tmp) {
65222 spin_unlock(&nls_lock);
65223 return -EBUSY;
65224 }
65225- tmp = &(*tmp)->next;
65226+ tmp = tmp->next;
65227 }
65228- nls->next = tables;
65229+ pax_open_kernel();
65230+ *(struct nls_table **)&nls->next = tables;
65231+ pax_close_kernel();
65232 tables = nls;
65233 spin_unlock(&nls_lock);
65234 return 0;
65235@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
65236
65237 int unregister_nls(struct nls_table * nls)
65238 {
65239- struct nls_table ** tmp = &tables;
65240+ struct nls_table * const * tmp = &tables;
65241
65242 spin_lock(&nls_lock);
65243 while (*tmp) {
65244 if (nls == *tmp) {
65245- *tmp = nls->next;
65246+ pax_open_kernel();
65247+ *(struct nls_table **)tmp = nls->next;
65248+ pax_close_kernel();
65249 spin_unlock(&nls_lock);
65250 return 0;
65251 }
65252@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
65253 return -EINVAL;
65254 }
65255
65256-static struct nls_table *find_nls(char *charset)
65257+static struct nls_table *find_nls(const char *charset)
65258 {
65259 struct nls_table *nls;
65260 spin_lock(&nls_lock);
65261@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
65262 return nls;
65263 }
65264
65265-struct nls_table *load_nls(char *charset)
65266+struct nls_table *load_nls(const char *charset)
65267 {
65268 return try_then_request_module(find_nls(charset), "nls_%s", charset);
65269 }
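
Once the nls_table ownership and next pointers are made effectively read-only, the registration paths still need to write them exactly once; pax_open_kernel()/pax_close_kernel() bracket those writes, briefly lifting write protection (on x86, typically by toggling CR0.WP) around a single sanctioned store. A userspace analogue of the open/write/close discipline using mprotect(); the function names mirror the patch's helpers, the mechanics are illustrative:

        #include <sys/mman.h>
        #include <unistd.h>

        static void open_kernel(void *page, size_t len)
        {
                mprotect(page, len, PROT_READ | PROT_WRITE);
        }

        static void close_kernel(void *page, size_t len)
        {
                mprotect(page, len, PROT_READ);
        }

        static void patch_word(void *page, size_t len, void **slot, void *val)
        {
                open_kernel(page, len);
                *slot = val;            /* the one sanctioned write */
                close_kernel(page, len);
        }

        int main(void)
        {
                size_t ps = (size_t)sysconf(_SC_PAGESIZE);
                void *page = mmap(NULL, ps, PROT_READ,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                void *val = 0;

                patch_word(page, ps, (void **)page, &val);
                return munmap(page, ps);
        }

Keeping the window to one store is the point of the pairing: the data stays read-only for every other instruction in the kernel.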
65270diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
65271index 162b3f1..6076a7c 100644
65272--- a/fs/nls/nls_euc-jp.c
65273+++ b/fs/nls/nls_euc-jp.c
65274@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
65275 p_nls = load_nls("cp932");
65276
65277 if (p_nls) {
65278- table.charset2upper = p_nls->charset2upper;
65279- table.charset2lower = p_nls->charset2lower;
65280+ pax_open_kernel();
65281+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65282+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65283+ pax_close_kernel();
65284 return register_nls(&table);
65285 }
65286
65287diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
65288index a80a741..7b96e1b 100644
65289--- a/fs/nls/nls_koi8-ru.c
65290+++ b/fs/nls/nls_koi8-ru.c
65291@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
65292 p_nls = load_nls("koi8-u");
65293
65294 if (p_nls) {
65295- table.charset2upper = p_nls->charset2upper;
65296- table.charset2lower = p_nls->charset2lower;
65297+ pax_open_kernel();
65298+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65299+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65300+ pax_close_kernel();
65301 return register_nls(&table);
65302 }
65303
65304diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
65305index cf27550..6c70f29d 100644
65306--- a/fs/notify/fanotify/fanotify_user.c
65307+++ b/fs/notify/fanotify/fanotify_user.c
65308@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65309
65310 fd = fanotify_event_metadata.fd;
65311 ret = -EFAULT;
65312- if (copy_to_user(buf, &fanotify_event_metadata,
65313- fanotify_event_metadata.event_len))
65314+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65315+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65316 goto out_close_fd;
65317
65318 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
65319diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65320index a95d8e0..a91a5fd 100644
65321--- a/fs/notify/notification.c
65322+++ b/fs/notify/notification.c
65323@@ -48,7 +48,7 @@
65324 #include <linux/fsnotify_backend.h>
65325 #include "fsnotify.h"
65326
65327-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65328+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65329
65330 /**
65331 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65332@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65333 */
65334 u32 fsnotify_get_cookie(void)
65335 {
65336- return atomic_inc_return(&fsnotify_sync_cookie);
65337+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65338 }
65339 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65340
65341diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65342index 9e38daf..5727cae 100644
65343--- a/fs/ntfs/dir.c
65344+++ b/fs/ntfs/dir.c
65345@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65346 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65347 ~(s64)(ndir->itype.index.block_size - 1)));
65348 /* Bounds checks. */
65349- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65350+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65351 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65352 "inode 0x%lx or driver bug.", vdir->i_ino);
65353 goto err_out;
65354diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65355index 1da9b2d..9cca092a 100644
65356--- a/fs/ntfs/file.c
65357+++ b/fs/ntfs/file.c
65358@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65359 char *addr;
65360 size_t total = 0;
65361 unsigned len;
65362- int left;
65363+ unsigned left;
65364
65365 do {
65366 len = PAGE_CACHE_SIZE - ofs;
65367diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65368index 9e1e112..241a52a 100644
65369--- a/fs/ntfs/super.c
65370+++ b/fs/ntfs/super.c
65371@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65372 if (!silent)
65373 ntfs_error(sb, "Primary boot sector is invalid.");
65374 } else if (!silent)
65375- ntfs_error(sb, read_err_str, "primary");
65376+ ntfs_error(sb, read_err_str, "%s", "primary");
65377 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65378 if (bh_primary)
65379 brelse(bh_primary);
65380@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65381 goto hotfix_primary_boot_sector;
65382 brelse(bh_backup);
65383 } else if (!silent)
65384- ntfs_error(sb, read_err_str, "backup");
65385+ ntfs_error(sb, read_err_str, "%s", "backup");
65386 /* Try to read NT3.51- backup boot sector. */
65387 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
65388 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
65389@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65390 "sector.");
65391 brelse(bh_backup);
65392 } else if (!silent)
65393- ntfs_error(sb, read_err_str, "backup");
65394+ ntfs_error(sb, read_err_str, "%s", "backup");
65395 /* We failed. Cleanup and return. */
65396 if (bh_primary)
65397 brelse(bh_primary);
65398diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
65399index 0440134..d52c93a 100644
65400--- a/fs/ocfs2/localalloc.c
65401+++ b/fs/ocfs2/localalloc.c
65402@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
65403 goto bail;
65404 }
65405
65406- atomic_inc(&osb->alloc_stats.moves);
65407+ atomic_inc_unchecked(&osb->alloc_stats.moves);
65408
65409 bail:
65410 if (handle)
65411diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
65412index 460c6c3..b4ef513 100644
65413--- a/fs/ocfs2/ocfs2.h
65414+++ b/fs/ocfs2/ocfs2.h
65415@@ -247,11 +247,11 @@ enum ocfs2_vol_state
65416
65417 struct ocfs2_alloc_stats
65418 {
65419- atomic_t moves;
65420- atomic_t local_data;
65421- atomic_t bitmap_data;
65422- atomic_t bg_allocs;
65423- atomic_t bg_extends;
65424+ atomic_unchecked_t moves;
65425+ atomic_unchecked_t local_data;
65426+ atomic_unchecked_t bitmap_data;
65427+ atomic_unchecked_t bg_allocs;
65428+ atomic_unchecked_t bg_extends;
65429 };
65430
65431 enum ocfs2_local_alloc_state
65432diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
65433index ee541f9..df3a500 100644
65434--- a/fs/ocfs2/refcounttree.c
65435+++ b/fs/ocfs2/refcounttree.c
65436@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
65437 error = posix_acl_create(dir, &mode, &default_acl, &acl);
65438 if (error) {
65439 mlog_errno(error);
65440- goto out;
65441+ return error;
65442 }
65443
65444 error = ocfs2_create_inode_in_orphan(dir, mode,
65445diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65446index 0cb889a..6a26b24 100644
65447--- a/fs/ocfs2/suballoc.c
65448+++ b/fs/ocfs2/suballoc.c
65449@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65450 mlog_errno(status);
65451 goto bail;
65452 }
65453- atomic_inc(&osb->alloc_stats.bg_extends);
65454+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65455
65456 /* You should never ask for this much metadata */
65457 BUG_ON(bits_wanted >
65458@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65459 mlog_errno(status);
65460 goto bail;
65461 }
65462- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65463+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65464
65465 *suballoc_loc = res.sr_bg_blkno;
65466 *suballoc_bit_start = res.sr_bit_offset;
65467@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65468 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65469 res->sr_bits);
65470
65471- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65472+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65473
65474 BUG_ON(res->sr_bits != 1);
65475
65476@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65477 mlog_errno(status);
65478 goto bail;
65479 }
65480- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65481+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65482
65483 BUG_ON(res.sr_bits != 1);
65484
65485@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65486 cluster_start,
65487 num_clusters);
65488 if (!status)
65489- atomic_inc(&osb->alloc_stats.local_data);
65490+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65491 } else {
65492 if (min_clusters > (osb->bitmap_cpg - 1)) {
65493 /* The only paths asking for contiguousness
65494@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65495 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65496 res.sr_bg_blkno,
65497 res.sr_bit_offset);
65498- atomic_inc(&osb->alloc_stats.bitmap_data);
65499+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65500 *num_clusters = res.sr_bits;
65501 }
65502 }
65503diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65504index 2667518..24bcf79 100644
65505--- a/fs/ocfs2/super.c
65506+++ b/fs/ocfs2/super.c
65507@@ -308,11 +308,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65508 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65509 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65510 "Stats",
65511- atomic_read(&osb->alloc_stats.bitmap_data),
65512- atomic_read(&osb->alloc_stats.local_data),
65513- atomic_read(&osb->alloc_stats.bg_allocs),
65514- atomic_read(&osb->alloc_stats.moves),
65515- atomic_read(&osb->alloc_stats.bg_extends));
65516+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65517+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65518+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65519+ atomic_read_unchecked(&osb->alloc_stats.moves),
65520+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65521
65522 out += snprintf(buf + out, len - out,
65523 "%10s => State: %u Descriptor: %llu Size: %u bits "
65524@@ -2093,11 +2093,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65525
65526 mutex_init(&osb->system_file_mutex);
65527
65528- atomic_set(&osb->alloc_stats.moves, 0);
65529- atomic_set(&osb->alloc_stats.local_data, 0);
65530- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65531- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65532- atomic_set(&osb->alloc_stats.bg_extends, 0);
65533+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65534+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65535+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65536+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65537+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65538
65539 /* Copy the blockcheck stats from the superblock probe */
65540 osb->osb_ecc_stats = *stats;
65541diff --git a/fs/open.c b/fs/open.c
65542index 33f9cbf..8abe053 100644
65543--- a/fs/open.c
65544+++ b/fs/open.c
65545@@ -32,6 +32,8 @@
65546 #include <linux/dnotify.h>
65547 #include <linux/compat.h>
65548
65549+#define CREATE_TRACE_POINTS
65550+#include <trace/events/fs.h>
65551 #include "internal.h"
65552
65553 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65554@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65555 error = locks_verify_truncate(inode, NULL, length);
65556 if (!error)
65557 error = security_path_truncate(path);
65558+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65559+ error = -EACCES;
65560 if (!error)
65561 error = do_truncate(path->dentry, length, 0, NULL);
65562
65563@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65564 error = locks_verify_truncate(inode, f.file, length);
65565 if (!error)
65566 error = security_path_truncate(&f.file->f_path);
65567+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65568+ error = -EACCES;
65569 if (!error)
65570 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65571 sb_end_write(inode->i_sb);
65572@@ -392,6 +398,9 @@ retry:
65573 if (__mnt_is_readonly(path.mnt))
65574 res = -EROFS;
65575
65576+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65577+ res = -EACCES;
65578+
65579 out_path_release:
65580 path_put(&path);
65581 if (retry_estale(res, lookup_flags)) {
65582@@ -423,6 +432,8 @@ retry:
65583 if (error)
65584 goto dput_and_out;
65585
65586+ gr_log_chdir(path.dentry, path.mnt);
65587+
65588 set_fs_pwd(current->fs, &path);
65589
65590 dput_and_out:
65591@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65592 goto out_putf;
65593
65594 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65595+
65596+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65597+ error = -EPERM;
65598+
65599+ if (!error)
65600+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65601+
65602 if (!error)
65603 set_fs_pwd(current->fs, &f.file->f_path);
65604 out_putf:
65605@@ -481,7 +499,13 @@ retry:
65606 if (error)
65607 goto dput_and_out;
65608
65609+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65610+ goto dput_and_out;
65611+
65612 set_fs_root(current->fs, &path);
65613+
65614+ gr_handle_chroot_chdir(&path);
65615+
65616 error = 0;
65617 dput_and_out:
65618 path_put(&path);
65619@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65620 return error;
65621 retry_deleg:
65622 mutex_lock(&inode->i_mutex);
65623+
65624+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65625+ error = -EACCES;
65626+ goto out_unlock;
65627+ }
65628+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65629+ error = -EACCES;
65630+ goto out_unlock;
65631+ }
65632+
65633 error = security_path_chmod(path, mode);
65634 if (error)
65635 goto out_unlock;
65636@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65637 uid = make_kuid(current_user_ns(), user);
65638 gid = make_kgid(current_user_ns(), group);
65639
65640+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65641+ return -EACCES;
65642+
65643 newattrs.ia_valid = ATTR_CTIME;
65644 if (user != (uid_t) -1) {
65645 if (!uid_valid(uid))
65646@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65647 } else {
65648 fsnotify_open(f);
65649 fd_install(fd, f);
65650+ trace_do_sys_open(tmp->name, flags, mode);
65651 }
65652 }
65653 putname(tmp);
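
The fs/open.c hunk wires up a do_sys_open tracepoint; defining CREATE_TRACE_POINTS before including the trace header is what makes this translation unit emit the tracepoint bodies rather than just declarations. The header itself is not part of this diff; the usual shape of such a definition looks roughly like the following (a schematic of the kernel's TRACE_EVENT DSL, not a verbatim copy of trace/events/fs.h):

        TRACE_EVENT(do_sys_open,
                TP_PROTO(const char *filename, int flags, int mode),
                TP_ARGS(filename, flags, mode),
                TP_STRUCT__entry(
                        __string(filename, filename)
                        __field(int, flags)
                        __field(int, mode)
                ),
                TP_fast_assign(
                        __assign_str(filename, filename);
                        __entry->flags = flags;
                        __entry->mode = mode;
                ),
                TP_printk("\"%s\" %x %o",
                          __get_str(filename), __entry->flags, __entry->mode)
        );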
65654diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
65655index 5f0d199..13b74b9 100644
65656--- a/fs/overlayfs/super.c
65657+++ b/fs/overlayfs/super.c
65658@@ -172,7 +172,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
65659 {
65660 struct ovl_entry *oe = dentry->d_fsdata;
65661
65662- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
65663+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
65664 }
65665
65666 int ovl_want_write(struct dentry *dentry)
65667@@ -816,8 +816,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
65668
65669 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
65670 {
65671- struct path upperpath = { NULL, NULL };
65672- struct path workpath = { NULL, NULL };
65673+ struct path upperpath = { .dentry = NULL, .mnt = NULL };
65674+ struct path workpath = { .dentry = NULL, .mnt = NULL };
65675 struct dentry *root_dentry;
65676 struct ovl_entry *oe;
65677 struct ovl_fs *ufs;
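
The overlayfs hunks swap positional struct path initializers for designated ones. That plausibly matters under this patch's structure-layout randomization (see the __randomize_layout annotations in the fs/proc/internal.h hunk below): with designated initializers the assignment stays correct even when member order in the compiled layout differs from the source. A tiny sketch of the difference:

	/* Illustrative sketch only, not part of the patch. */
	#include <stdio.h>

	struct path { void *dentry; void *mnt; };

	int main(void)
	{
		struct path a = { NULL, NULL };                  /* positional: wrong if members reorder */
		struct path b = { .dentry = NULL, .mnt = NULL }; /* designated: order-independent */
		printf("%p %p\n", b.dentry, b.mnt);
		(void)a;
		return 0;
	}
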
65678diff --git a/fs/pipe.c b/fs/pipe.c
65679index 21981e5..3d5f55c 100644
65680--- a/fs/pipe.c
65681+++ b/fs/pipe.c
65682@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65683
65684 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65685 {
65686- if (pipe->files)
65687+ if (atomic_read(&pipe->files))
65688 mutex_lock_nested(&pipe->mutex, subclass);
65689 }
65690
65691@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65692
65693 void pipe_unlock(struct pipe_inode_info *pipe)
65694 {
65695- if (pipe->files)
65696+ if (atomic_read(&pipe->files))
65697 mutex_unlock(&pipe->mutex);
65698 }
65699 EXPORT_SYMBOL(pipe_unlock);
65700@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65701 }
65702 if (bufs) /* More to do? */
65703 continue;
65704- if (!pipe->writers)
65705+ if (!atomic_read(&pipe->writers))
65706 break;
65707- if (!pipe->waiting_writers) {
65708+ if (!atomic_read(&pipe->waiting_writers)) {
65709 /* syscall merging: Usually we must not sleep
65710 * if O_NONBLOCK is set, or if we got some data.
65711 * But if a writer sleeps in kernel space, then
65712@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65713
65714 __pipe_lock(pipe);
65715
65716- if (!pipe->readers) {
65717+ if (!atomic_read(&pipe->readers)) {
65718 send_sig(SIGPIPE, current, 0);
65719 ret = -EPIPE;
65720 goto out;
65721@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65722 for (;;) {
65723 int bufs;
65724
65725- if (!pipe->readers) {
65726+ if (!atomic_read(&pipe->readers)) {
65727 send_sig(SIGPIPE, current, 0);
65728 if (!ret)
65729 ret = -EPIPE;
65730@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65731 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65732 do_wakeup = 0;
65733 }
65734- pipe->waiting_writers++;
65735+ atomic_inc(&pipe->waiting_writers);
65736 pipe_wait(pipe);
65737- pipe->waiting_writers--;
65738+ atomic_dec(&pipe->waiting_writers);
65739 }
65740 out:
65741 __pipe_unlock(pipe);
65742@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65743 mask = 0;
65744 if (filp->f_mode & FMODE_READ) {
65745 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65746- if (!pipe->writers && filp->f_version != pipe->w_counter)
65747+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65748 mask |= POLLHUP;
65749 }
65750
65751@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65752 * Most Unices do not set POLLERR for FIFOs but on Linux they
65753 * behave exactly like pipes for poll().
65754 */
65755- if (!pipe->readers)
65756+ if (!atomic_read(&pipe->readers))
65757 mask |= POLLERR;
65758 }
65759
65760@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65761 int kill = 0;
65762
65763 spin_lock(&inode->i_lock);
65764- if (!--pipe->files) {
65765+ if (atomic_dec_and_test(&pipe->files)) {
65766 inode->i_pipe = NULL;
65767 kill = 1;
65768 }
65769@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65770
65771 __pipe_lock(pipe);
65772 if (file->f_mode & FMODE_READ)
65773- pipe->readers--;
65774+ atomic_dec(&pipe->readers);
65775 if (file->f_mode & FMODE_WRITE)
65776- pipe->writers--;
65777+ atomic_dec(&pipe->writers);
65778
65779- if (pipe->readers || pipe->writers) {
65780+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65781 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65782 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65783 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65784@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65785 kfree(pipe);
65786 }
65787
65788-static struct vfsmount *pipe_mnt __read_mostly;
65789+struct vfsmount *pipe_mnt __read_mostly;
65790
65791 /*
65792 * pipefs_dname() is called from d_path().
65793@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65794 goto fail_iput;
65795
65796 inode->i_pipe = pipe;
65797- pipe->files = 2;
65798- pipe->readers = pipe->writers = 1;
65799+ atomic_set(&pipe->files, 2);
65800+ atomic_set(&pipe->readers, 1);
65801+ atomic_set(&pipe->writers, 1);
65802 inode->i_fop = &pipefifo_fops;
65803
65804 /*
65805@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65806 spin_lock(&inode->i_lock);
65807 if (inode->i_pipe) {
65808 pipe = inode->i_pipe;
65809- pipe->files++;
65810+ atomic_inc(&pipe->files);
65811 spin_unlock(&inode->i_lock);
65812 } else {
65813 spin_unlock(&inode->i_lock);
65814 pipe = alloc_pipe_info();
65815 if (!pipe)
65816 return -ENOMEM;
65817- pipe->files = 1;
65818+ atomic_set(&pipe->files, 1);
65819 spin_lock(&inode->i_lock);
65820 if (unlikely(inode->i_pipe)) {
65821- inode->i_pipe->files++;
65822+ atomic_inc(&inode->i_pipe->files);
65823 spin_unlock(&inode->i_lock);
65824 free_pipe_info(pipe);
65825 pipe = inode->i_pipe;
65826@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65827 * opened, even when there is no process writing the FIFO.
65828 */
65829 pipe->r_counter++;
65830- if (pipe->readers++ == 0)
65831+ if (atomic_inc_return(&pipe->readers) == 1)
65832 wake_up_partner(pipe);
65833
65834- if (!is_pipe && !pipe->writers) {
65835+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65836 if ((filp->f_flags & O_NONBLOCK)) {
65837 /* suppress POLLHUP until we have
65838 * seen a writer */
65839@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65840 * errno=ENXIO when there is no process reading the FIFO.
65841 */
65842 ret = -ENXIO;
65843- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65844+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65845 goto err;
65846
65847 pipe->w_counter++;
65848- if (!pipe->writers++)
65849+ if (atomic_inc_return(&pipe->writers) == 1)
65850 wake_up_partner(pipe);
65851
65852- if (!is_pipe && !pipe->readers) {
65853+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65854 if (wait_for_partner(pipe, &pipe->r_counter))
65855 goto err_wr;
65856 }
65857@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65858 * the process can at least talk to itself.
65859 */
65860
65861- pipe->readers++;
65862- pipe->writers++;
65863+ atomic_inc(&pipe->readers);
65864+ atomic_inc(&pipe->writers);
65865 pipe->r_counter++;
65866 pipe->w_counter++;
65867- if (pipe->readers == 1 || pipe->writers == 1)
65868+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65869 wake_up_partner(pipe);
65870 break;
65871
65872@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65873 return 0;
65874
65875 err_rd:
65876- if (!--pipe->readers)
65877+ if (atomic_dec_and_test(&pipe->readers))
65878 wake_up_interruptible(&pipe->wait);
65879 ret = -ERESTARTSYS;
65880 goto err;
65881
65882 err_wr:
65883- if (!--pipe->writers)
65884+ if (atomic_dec_and_test(&pipe->writers))
65885 wake_up_interruptible(&pipe->wait);
65886 ret = -ERESTARTSYS;
65887 goto err;
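
The fs/pipe.c hunks convert the plain int files/readers/writers/waiting_writers counters to atomic_t, so every access goes through atomic_read()/atomic_set()/atomic_inc()/atomic_dec() and the inc-and-test idioms. The kernel primitives map onto C11 atomics roughly as in this sketch:

	/* Illustrative sketch only, not part of the patch: C11 analogues of
	 * the atomic_t idioms used above. */
	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int readers;

		atomic_init(&readers, 0);                   /* atomic_set(&x, 0)        */
		if (atomic_fetch_add(&readers, 1) + 1 == 1) /* atomic_inc_return() == 1 */
			puts("first reader: wake_up_partner()");
		if (atomic_fetch_sub(&readers, 1) - 1 == 0) /* atomic_dec_and_test()    */
			puts("last reader: wake_up_interruptible()");
		return 0;
	}
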
65888diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65889index 3a48bb7..403067b 100644
65890--- a/fs/posix_acl.c
65891+++ b/fs/posix_acl.c
65892@@ -20,6 +20,7 @@
65893 #include <linux/xattr.h>
65894 #include <linux/export.h>
65895 #include <linux/user_namespace.h>
65896+#include <linux/grsecurity.h>
65897
65898 struct posix_acl **acl_by_type(struct inode *inode, int type)
65899 {
65900@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65901 }
65902 }
65903 if (mode_p)
65904- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65905+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65906 return not_equiv;
65907 }
65908 EXPORT_SYMBOL(posix_acl_equiv_mode);
65909@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65910 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65911 }
65912
65913- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65914+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65915 return not_equiv;
65916 }
65917
65918@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65919 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65920 int err = -ENOMEM;
65921 if (clone) {
65922+ *mode_p &= ~gr_acl_umask();
65923+
65924 err = posix_acl_create_masq(clone, mode_p);
65925 if (err < 0) {
65926 posix_acl_release(clone);
65927@@ -663,11 +666,12 @@ struct posix_acl *
65928 posix_acl_from_xattr(struct user_namespace *user_ns,
65929 const void *value, size_t size)
65930 {
65931- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65932- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65933+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65934+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65935 int count;
65936 struct posix_acl *acl;
65937 struct posix_acl_entry *acl_e;
65938+ umode_t umask = gr_acl_umask();
65939
65940 if (!value)
65941 return NULL;
65942@@ -693,12 +697,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65943
65944 switch(acl_e->e_tag) {
65945 case ACL_USER_OBJ:
65946+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65947+ break;
65948 case ACL_GROUP_OBJ:
65949 case ACL_MASK:
65950+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65951+ break;
65952 case ACL_OTHER:
65953+ acl_e->e_perm &= ~(umask & S_IRWXO);
65954 break;
65955
65956 case ACL_USER:
65957+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65958 acl_e->e_uid =
65959 make_kuid(user_ns,
65960 le32_to_cpu(entry->e_id));
65961@@ -706,6 +716,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65962 goto fail;
65963 break;
65964 case ACL_GROUP:
65965+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65966 acl_e->e_gid =
65967 make_kgid(user_ns,
65968 le32_to_cpu(entry->e_id));
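
The fs/posix_acl.c hunks fold a grsecurity-supplied umask into each ACL entry's permission bits; the shifts line the owner mask (S_IRWXU, bits 6..8) and group mask (S_IRWXG, bits 3..5) up with the 3-bit e_perm field. A worked sketch, with a fixed value standing in for gr_acl_umask():

	/* Illustrative sketch only, not part of the patch; acl_umask stands
	 * in for gr_acl_umask(). */
	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		unsigned short acl_umask = 027;              /* mask off g:w, o:rwx */
		unsigned short user_perm = 07, other_perm = 07;

		user_perm  &= ~((acl_umask & S_IRWXU) >> 6); /* 0700 bits -> 0..7   */
		other_perm &= ~(acl_umask & S_IRWXO);        /* 0007 already 0..7   */
		printf("user %o other %o\n", user_perm, other_perm); /* user 7 other 0 */
		return 0;
	}
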
65969diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65970index 2183fcf..3c32a98 100644
65971--- a/fs/proc/Kconfig
65972+++ b/fs/proc/Kconfig
65973@@ -30,7 +30,7 @@ config PROC_FS
65974
65975 config PROC_KCORE
65976 bool "/proc/kcore support" if !ARM
65977- depends on PROC_FS && MMU
65978+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65979 help
65980 Provides a virtual ELF core file of the live kernel. This can
65981 be read with gdb and other ELF tools. No modifications can be
65982@@ -38,8 +38,8 @@ config PROC_KCORE
65983
65984 config PROC_VMCORE
65985 bool "/proc/vmcore support"
65986- depends on PROC_FS && CRASH_DUMP
65987- default y
65988+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65989+ default n
65990 help
65991 Exports the dump image of crashed kernel in ELF format.
65992
65993@@ -63,8 +63,8 @@ config PROC_SYSCTL
65994 limited in memory.
65995
65996 config PROC_PAGE_MONITOR
65997- default y
65998- depends on PROC_FS && MMU
65999+ default n
66000+ depends on PROC_FS && MMU && !GRKERNSEC
66001 bool "Enable /proc page monitoring" if EXPERT
66002 help
66003 Various /proc files exist to monitor process memory utilization:
66004diff --git a/fs/proc/array.c b/fs/proc/array.c
66005index 1295a00..4c91a6b 100644
66006--- a/fs/proc/array.c
66007+++ b/fs/proc/array.c
66008@@ -60,6 +60,7 @@
66009 #include <linux/tty.h>
66010 #include <linux/string.h>
66011 #include <linux/mman.h>
66012+#include <linux/grsecurity.h>
66013 #include <linux/proc_fs.h>
66014 #include <linux/ioport.h>
66015 #include <linux/uaccess.h>
66016@@ -322,6 +323,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66017 cpumask_pr_args(&task->cpus_allowed));
66018 }
66019
66020+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66021+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66022+{
66023+ if (p->mm)
66024+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66025+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66026+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66027+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66028+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66029+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66030+ else
66031+ seq_printf(m, "PaX:\t-----\n");
66032+}
66033+#endif
66034+
66035 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66036 struct pid *pid, struct task_struct *task)
66037 {
66038@@ -340,9 +356,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66039 task_cpus_allowed(m, task);
66040 cpuset_task_status_allowed(m, task);
66041 task_context_switch_counts(m, task);
66042+
66043+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66044+ task_pax(m, task);
66045+#endif
66046+
66047+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66048+ task_grsec_rbac(m, task);
66049+#endif
66050+
66051 return 0;
66052 }
66053
66054+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66055+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66056+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66057+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66058+#endif
66059+
66060 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66061 struct pid *pid, struct task_struct *task, int whole)
66062 {
66063@@ -364,6 +395,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66064 char tcomm[sizeof(task->comm)];
66065 unsigned long flags;
66066
66067+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66068+ if (current->exec_id != m->exec_id) {
66069+ gr_log_badprocpid("stat");
66070+ return 0;
66071+ }
66072+#endif
66073+
66074 state = *get_task_state(task);
66075 vsize = eip = esp = 0;
66076 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66077@@ -434,6 +472,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66078 gtime = task_gtime(task);
66079 }
66080
66081+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66082+ if (PAX_RAND_FLAGS(mm)) {
66083+ eip = 0;
66084+ esp = 0;
66085+ wchan = 0;
66086+ }
66087+#endif
66088+#ifdef CONFIG_GRKERNSEC_HIDESYM
66089+ wchan = 0;
66090+ eip =0;
66091+ esp =0;
66092+#endif
66093+
66094 /* scale priority and nice values from timeslices to -20..20 */
66095 /* to make it look like a "normal" Unix priority/nice value */
66096 priority = task_prio(task);
66097@@ -465,9 +516,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66098 seq_put_decimal_ull(m, ' ', vsize);
66099 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
66100 seq_put_decimal_ull(m, ' ', rsslim);
66101+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66102+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
66103+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
66104+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
66105+#else
66106 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
66107 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
66108 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
66109+#endif
66110 seq_put_decimal_ull(m, ' ', esp);
66111 seq_put_decimal_ull(m, ' ', eip);
66112 /* The signal information here is obsolete.
66113@@ -489,7 +546,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66114 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
66115 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
66116
66117- if (mm && permitted) {
66118+ if (mm && permitted
66119+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66120+ && !PAX_RAND_FLAGS(mm)
66121+#endif
66122+ ) {
66123 seq_put_decimal_ull(m, ' ', mm->start_data);
66124 seq_put_decimal_ull(m, ' ', mm->end_data);
66125 seq_put_decimal_ull(m, ' ', mm->start_brk);
66126@@ -527,8 +588,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66127 struct pid *pid, struct task_struct *task)
66128 {
66129 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
66130- struct mm_struct *mm = get_task_mm(task);
66131+ struct mm_struct *mm;
66132
66133+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66134+ if (current->exec_id != m->exec_id) {
66135+ gr_log_badprocpid("statm");
66136+ return 0;
66137+ }
66138+#endif
66139+ mm = get_task_mm(task);
66140 if (mm) {
66141 size = task_statm(mm, &shared, &text, &data, &resident);
66142 mmput(mm);
66143@@ -551,6 +619,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66144 return 0;
66145 }
66146
66147+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66148+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
66149+{
66150+ unsigned long flags;
66151+ u32 curr_ip = 0;
66152+
66153+ if (lock_task_sighand(task, &flags)) {
66154+ curr_ip = task->signal->curr_ip;
66155+ unlock_task_sighand(task, &flags);
66156+ }
66157+ return seq_printf(m, "%pI4\n", &curr_ip);
66158+}
66159+#endif
66160+
66161 #ifdef CONFIG_CHECKPOINT_RESTORE
66162 static struct pid *
66163 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
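
task_pax() above renders the per-mm pax_flags as a five-letter line in /proc/<pid>/status, uppercase when a feature is enabled and lowercase when it is off (e.g. PAGEEXEC on shows as 'P'). A reduced sketch of that encoding; the flag values here are hypothetical stand-ins for the MF_PAX_* constants:

	/* Illustrative sketch only, not part of the patch; flag values are
	 * hypothetical stand-ins for the MF_PAX_* constants. */
	#include <stdio.h>

	#define MF_PAX_PAGEEXEC 0x01UL
	#define MF_PAX_EMUTRAMP 0x02UL

	int main(void)
	{
		unsigned long pax_flags = MF_PAX_PAGEEXEC; /* PAGEEXEC on, EMUTRAMP off */

		printf("PaX:\t%c%c\n",
		       pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
		       pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e'); /* prints "PaX:\tPe" */
		return 0;
	}
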
66164diff --git a/fs/proc/base.c b/fs/proc/base.c
66165index 3f3d7ae..68de109 100644
66166--- a/fs/proc/base.c
66167+++ b/fs/proc/base.c
66168@@ -113,6 +113,14 @@ struct pid_entry {
66169 union proc_op op;
66170 };
66171
66172+struct getdents_callback {
66173+ struct linux_dirent __user * current_dir;
66174+ struct linux_dirent __user * previous;
66175+ struct file * file;
66176+ int count;
66177+ int error;
66178+};
66179+
66180 #define NOD(NAME, MODE, IOP, FOP, OP) { \
66181 .name = (NAME), \
66182 .len = sizeof(NAME) - 1, \
66183@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
66184 return 0;
66185 }
66186
66187+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66188+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66189+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66190+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66191+#endif
66192+
66193 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66194 struct pid *pid, struct task_struct *task)
66195 {
66196 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
66197 if (mm && !IS_ERR(mm)) {
66198 unsigned int nwords = 0;
66199+
66200+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66201+ /* allow if we're currently ptracing this task */
66202+ if (PAX_RAND_FLAGS(mm) &&
66203+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
66204+ mmput(mm);
66205+ return 0;
66206+ }
66207+#endif
66208+
66209 do {
66210 nwords += 2;
66211 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
66212@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66213 }
66214
66215
66216-#ifdef CONFIG_KALLSYMS
66217+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66218 /*
66219 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
66220 * Returns the resolved symbol. If that fails, simply return the address.
66221@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
66222 mutex_unlock(&task->signal->cred_guard_mutex);
66223 }
66224
66225-#ifdef CONFIG_STACKTRACE
66226+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66227
66228 #define MAX_STACK_TRACE_DEPTH 64
66229
66230@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
66231 return 0;
66232 }
66233
66234-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66235+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66236 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66237 struct pid *pid, struct task_struct *task)
66238 {
66239@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66240 /************************************************************************/
66241
66242 /* permission checks */
66243-static int proc_fd_access_allowed(struct inode *inode)
66244+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
66245 {
66246 struct task_struct *task;
66247 int allowed = 0;
66248@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
66249 */
66250 task = get_proc_task(inode);
66251 if (task) {
66252- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66253+ if (log)
66254+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66255+ else
66256+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66257 put_task_struct(task);
66258 }
66259 return allowed;
66260@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
66261 struct task_struct *task,
66262 int hide_pid_min)
66263 {
66264+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66265+ return false;
66266+
66267+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66268+ rcu_read_lock();
66269+ {
66270+ const struct cred *tmpcred = current_cred();
66271+ const struct cred *cred = __task_cred(task);
66272+
66273+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
66274+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66275+ || in_group_p(grsec_proc_gid)
66276+#endif
66277+ ) {
66278+ rcu_read_unlock();
66279+ return true;
66280+ }
66281+ }
66282+ rcu_read_unlock();
66283+
66284+ if (!pid->hide_pid)
66285+ return false;
66286+#endif
66287+
66288 if (pid->hide_pid < hide_pid_min)
66289 return true;
66290 if (in_group_p(pid->pid_gid))
66291 return true;
66292+
66293 return ptrace_may_access(task, PTRACE_MODE_READ);
66294 }
66295
66296@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
66297 put_task_struct(task);
66298
66299 if (!has_perms) {
66300+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66301+ {
66302+#else
66303 if (pid->hide_pid == 2) {
66304+#endif
66305 /*
66306 * Let's make getdents(), stat(), and open()
66307 * consistent with each other. If a process
66308@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
66309
66310 if (task) {
66311 mm = mm_access(task, mode);
66312+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
66313+ mmput(mm);
66314+ mm = ERR_PTR(-EPERM);
66315+ }
66316 put_task_struct(task);
66317
66318 if (!IS_ERR_OR_NULL(mm)) {
66319@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66320 return PTR_ERR(mm);
66321
66322 file->private_data = mm;
66323+
66324+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66325+ file->f_version = current->exec_id;
66326+#endif
66327+
66328 return 0;
66329 }
66330
66331@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66332 ssize_t copied;
66333 char *page;
66334
66335+#ifdef CONFIG_GRKERNSEC
66336+ if (write)
66337+ return -EPERM;
66338+#endif
66339+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66340+ if (file->f_version != current->exec_id) {
66341+ gr_log_badprocpid("mem");
66342+ return 0;
66343+ }
66344+#endif
66345+
66346 if (!mm)
66347 return 0;
66348
66349@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66350 goto free;
66351
66352 while (count > 0) {
66353- int this_len = min_t(int, count, PAGE_SIZE);
66354+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66355
66356 if (write && copy_from_user(page, buf, this_len)) {
66357 copied = -EFAULT;
66358@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66359 if (!mm)
66360 return 0;
66361
66362+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66363+ if (file->f_version != current->exec_id) {
66364+ gr_log_badprocpid("environ");
66365+ return 0;
66366+ }
66367+#endif
66368+
66369 page = (char *)__get_free_page(GFP_TEMPORARY);
66370 if (!page)
66371 return -ENOMEM;
66372@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66373 goto free;
66374 while (count > 0) {
66375 size_t this_len, max_len;
66376- int retval;
66377+ ssize_t retval;
66378
66379 if (src >= (mm->env_end - mm->env_start))
66380 break;
66381@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66382 int error = -EACCES;
66383
66384 /* Are we allowed to snoop on the tasks file descriptors? */
66385- if (!proc_fd_access_allowed(inode))
66386+ if (!proc_fd_access_allowed(inode, 0))
66387 goto out;
66388
66389 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66390@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66391 struct path path;
66392
66393 /* Are we allowed to snoop on the tasks file descriptors? */
66394- if (!proc_fd_access_allowed(inode))
66395- goto out;
66396+ /* logging this access is needed for learning mode on chromium to work
66397+ properly, but we don't want to flood the logs from 'ps', which does a
66398+ readlink on /proc/<pid>/fd/2 of every task in the listing, nor do we want
66399+ 'ps' to learn CAP_SYS_PTRACE, as it's not necessary for its basic
66400+ functionality */
66401+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66402+ if (!proc_fd_access_allowed(inode, 0))
66403+ goto out;
66404+ } else {
66405+ if (!proc_fd_access_allowed(inode, 1))
66406+ goto out;
66407+ }
66408
66409 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66410 if (error)
66411@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66412 rcu_read_lock();
66413 cred = __task_cred(task);
66414 inode->i_uid = cred->euid;
66415+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66416+ inode->i_gid = grsec_proc_gid;
66417+#else
66418 inode->i_gid = cred->egid;
66419+#endif
66420 rcu_read_unlock();
66421 }
66422 security_task_to_inode(task, inode);
66423@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66424 return -ENOENT;
66425 }
66426 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66427+#ifdef CONFIG_GRKERNSEC_PROC_USER
66428+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66429+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66430+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66431+#endif
66432 task_dumpable(task)) {
66433 cred = __task_cred(task);
66434 stat->uid = cred->euid;
66435+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66436+ stat->gid = grsec_proc_gid;
66437+#else
66438 stat->gid = cred->egid;
66439+#endif
66440 }
66441 }
66442 rcu_read_unlock();
66443@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
66444
66445 if (task) {
66446 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66447+#ifdef CONFIG_GRKERNSEC_PROC_USER
66448+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66449+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66450+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66451+#endif
66452 task_dumpable(task)) {
66453 rcu_read_lock();
66454 cred = __task_cred(task);
66455 inode->i_uid = cred->euid;
66456+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66457+ inode->i_gid = grsec_proc_gid;
66458+#else
66459 inode->i_gid = cred->egid;
66460+#endif
66461 rcu_read_unlock();
66462 } else {
66463 inode->i_uid = GLOBAL_ROOT_UID;
66464@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66465 if (!task)
66466 goto out_no_task;
66467
66468+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66469+ goto out;
66470+
66471 /*
66472 * Yes, it does not scale. And it should not. Don't add
66473 * new entries into /proc/<tgid>/ without very good reasons.
66474@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66475 if (!task)
66476 return -ENOENT;
66477
66478+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66479+ goto out;
66480+
66481 if (!dir_emit_dots(file, ctx))
66482 goto out;
66483
66484@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66485 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66486 #endif
66487 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66488-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66489+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66490 ONE("syscall", S_IRUSR, proc_pid_syscall),
66491 #endif
66492 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66493@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66494 #ifdef CONFIG_SECURITY
66495 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66496 #endif
66497-#ifdef CONFIG_KALLSYMS
66498+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66499 ONE("wchan", S_IRUGO, proc_pid_wchan),
66500 #endif
66501-#ifdef CONFIG_STACKTRACE
66502+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66503 ONE("stack", S_IRUSR, proc_pid_stack),
66504 #endif
66505 #ifdef CONFIG_SCHEDSTATS
66506@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66507 #ifdef CONFIG_HARDWALL
66508 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66509 #endif
66510+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66511+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66512+#endif
66513 #ifdef CONFIG_USER_NS
66514 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66515 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66516@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
66517 if (!inode)
66518 goto out;
66519
66520+#ifdef CONFIG_GRKERNSEC_PROC_USER
66521+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66522+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66523+ inode->i_gid = grsec_proc_gid;
66524+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66525+#else
66526 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66527+#endif
66528 inode->i_op = &proc_tgid_base_inode_operations;
66529 inode->i_fop = &proc_tgid_base_operations;
66530 inode->i_flags|=S_IMMUTABLE;
66531@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66532 if (!task)
66533 goto out;
66534
66535+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66536+ goto out_put_task;
66537+
66538 result = proc_pid_instantiate(dir, dentry, task, NULL);
66539+out_put_task:
66540 put_task_struct(task);
66541 out:
66542 return ERR_PTR(result);
66543@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
66544 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66545 #endif
66546 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66547-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66548+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66549 ONE("syscall", S_IRUSR, proc_pid_syscall),
66550 #endif
66551 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66552@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
66553 #ifdef CONFIG_SECURITY
66554 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66555 #endif
66556-#ifdef CONFIG_KALLSYMS
66557+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66558 ONE("wchan", S_IRUGO, proc_pid_wchan),
66559 #endif
66560-#ifdef CONFIG_STACKTRACE
66561+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66562 ONE("stack", S_IRUSR, proc_pid_stack),
66563 #endif
66564 #ifdef CONFIG_SCHEDSTATS
66565diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66566index cbd82df..c0407d2 100644
66567--- a/fs/proc/cmdline.c
66568+++ b/fs/proc/cmdline.c
66569@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66570
66571 static int __init proc_cmdline_init(void)
66572 {
66573+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66574+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66575+#else
66576 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66577+#endif
66578 return 0;
66579 }
66580 fs_initcall(proc_cmdline_init);
66581diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66582index 50493ed..248166b 100644
66583--- a/fs/proc/devices.c
66584+++ b/fs/proc/devices.c
66585@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66586
66587 static int __init proc_devices_init(void)
66588 {
66589+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66590+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66591+#else
66592 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66593+#endif
66594 return 0;
66595 }
66596 fs_initcall(proc_devices_init);
66597diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66598index 8e5ad83..1f07a8c 100644
66599--- a/fs/proc/fd.c
66600+++ b/fs/proc/fd.c
66601@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66602 if (!task)
66603 return -ENOENT;
66604
66605- files = get_files_struct(task);
66606+ if (!gr_acl_handle_procpidmem(task))
66607+ files = get_files_struct(task);
66608 put_task_struct(task);
66609
66610 if (files) {
66611@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66612 */
66613 int proc_fd_permission(struct inode *inode, int mask)
66614 {
66615+ struct task_struct *task;
66616 int rv = generic_permission(inode, mask);
66617- if (rv == 0)
66618- return 0;
66619+
66620 if (task_tgid(current) == proc_pid(inode))
66621 rv = 0;
66622+
66623+ task = get_proc_task(inode);
66624+ if (task == NULL)
66625+ return rv;
66626+
66627+ if (gr_acl_handle_procpidmem(task))
66628+ rv = -EACCES;
66629+
66630+ put_task_struct(task);
66631+
66632 return rv;
66633 }
66634
66635diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66636index be65b20..2998ba8 100644
66637--- a/fs/proc/generic.c
66638+++ b/fs/proc/generic.c
66639@@ -22,6 +22,7 @@
66640 #include <linux/bitops.h>
66641 #include <linux/spinlock.h>
66642 #include <linux/completion.h>
66643+#include <linux/grsecurity.h>
66644 #include <asm/uaccess.h>
66645
66646 #include "internal.h"
66647@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66648 return proc_lookup_de(PDE(dir), dir, dentry);
66649 }
66650
66651+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66652+ unsigned int flags)
66653+{
66654+ if (gr_proc_is_restricted())
66655+ return ERR_PTR(-EACCES);
66656+
66657+ return proc_lookup_de(PDE(dir), dir, dentry);
66658+}
66659+
66660 /*
66661 * This returns non-zero if at EOF, so that the /proc
66662 * root directory can use this and check if it should
66663@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66664 return proc_readdir_de(PDE(inode), file, ctx);
66665 }
66666
66667+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66668+{
66669+ struct inode *inode = file_inode(file);
66670+
66671+ if (gr_proc_is_restricted())
66672+ return -EACCES;
66673+
66674+ return proc_readdir_de(PDE(inode), file, ctx);
66675+}
66676+
66677 /*
66678 * These are the generic /proc directory operations. They
66679 * use the in-memory "struct proc_dir_entry" tree to parse
66680@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66681 .iterate = proc_readdir,
66682 };
66683
66684+static const struct file_operations proc_dir_restricted_operations = {
66685+ .llseek = generic_file_llseek,
66686+ .read = generic_read_dir,
66687+ .iterate = proc_readdir_restrict,
66688+};
66689+
66690 /*
66691 * proc directories can do almost nothing..
66692 */
66693@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66694 .setattr = proc_notify_change,
66695 };
66696
66697+static const struct inode_operations proc_dir_restricted_inode_operations = {
66698+ .lookup = proc_lookup_restrict,
66699+ .getattr = proc_getattr,
66700+ .setattr = proc_notify_change,
66701+};
66702+
66703 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66704 {
66705 int ret;
66706@@ -441,6 +473,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66707 }
66708 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66709
66710+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66711+ struct proc_dir_entry *parent, void *data)
66712+{
66713+ struct proc_dir_entry *ent;
66714+
66715+ if (mode == 0)
66716+ mode = S_IRUGO | S_IXUGO;
66717+
66718+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66719+ if (ent) {
66720+ ent->data = data;
66721+ ent->restricted = 1;
66722+ ent->proc_fops = &proc_dir_restricted_operations;
66723+ ent->proc_iops = &proc_dir_restricted_inode_operations;
66724+ parent->nlink++;
66725+ if (proc_register(parent, ent) < 0) {
66726+ kfree(ent);
66727+ parent->nlink--;
66728+ ent = NULL;
66729+ }
66730+ }
66731+ return ent;
66732+}
66733+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66734+
66735 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66736 struct proc_dir_entry *parent)
66737 {
66738@@ -455,6 +512,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66739 }
66740 EXPORT_SYMBOL(proc_mkdir);
66741
66742+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66743+ struct proc_dir_entry *parent)
66744+{
66745+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66746+}
66747+EXPORT_SYMBOL(proc_mkdir_restrict);
66748+
66749 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66750 struct proc_dir_entry *parent,
66751 const struct file_operations *proc_fops,
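
proc_mkdir_data_restrict() above adds no checks inline; it wires the new directory to alternate operation tables (proc_dir_restricted_operations / proc_dir_restricted_inode_operations) whose lookup and iterate callbacks gate on gr_proc_is_restricted() before delegating to the normal paths. The same ops-table substitution in miniature, with stand-in names:

	/* Illustrative sketch only, not part of the patch; names are stand-ins. */
	#include <errno.h>
	#include <stdio.h>

	struct dir_ops { int (*iterate)(void); };

	static int gr_restricted(void)  { return 1; } /* gr_proc_is_restricted() */
	static int iterate_normal(void) { return 0; }
	static int iterate_restricted(void)
	{
		return gr_restricted() ? -EACCES : iterate_normal();
	}

	static const struct dir_ops normal_ops     = { .iterate = iterate_normal };
	static const struct dir_ops restricted_ops = { .iterate = iterate_restricted };

	int main(void)
	{
		int restricted = 1; /* ent->restricted = 1 */
		const struct dir_ops *ops = restricted ? &restricted_ops : &normal_ops;

		printf("iterate -> %d\n", ops->iterate()); /* -> -13 (-EACCES) */
		return 0;
	}
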
66752diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66753index 7697b66..8d8e541 100644
66754--- a/fs/proc/inode.c
66755+++ b/fs/proc/inode.c
66756@@ -24,11 +24,17 @@
66757 #include <linux/mount.h>
66758 #include <linux/magic.h>
66759 #include <linux/namei.h>
66760+#include <linux/grsecurity.h>
66761
66762 #include <asm/uaccess.h>
66763
66764 #include "internal.h"
66765
66766+#ifdef CONFIG_PROC_SYSCTL
66767+extern const struct inode_operations proc_sys_inode_operations;
66768+extern const struct inode_operations proc_sys_dir_operations;
66769+#endif
66770+
66771 static void proc_evict_inode(struct inode *inode)
66772 {
66773 struct proc_dir_entry *de;
66774@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66775 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66776 sysctl_head_put(head);
66777 }
66778+
66779+#ifdef CONFIG_PROC_SYSCTL
66780+ if (inode->i_op == &proc_sys_inode_operations ||
66781+ inode->i_op == &proc_sys_dir_operations)
66782+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66783+#endif
66784+
66785 }
66786
66787 static struct kmem_cache * proc_inode_cachep;
66788@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66789 if (de->mode) {
66790 inode->i_mode = de->mode;
66791 inode->i_uid = de->uid;
66792+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66793+ inode->i_gid = grsec_proc_gid;
66794+#else
66795 inode->i_gid = de->gid;
66796+#endif
66797 }
66798 if (de->size)
66799 inode->i_size = de->size;
66800diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66801index c835b94..c9e01a3 100644
66802--- a/fs/proc/internal.h
66803+++ b/fs/proc/internal.h
66804@@ -47,9 +47,10 @@ struct proc_dir_entry {
66805 struct completion *pde_unload_completion;
66806 struct list_head pde_openers; /* who did ->open, but not ->release */
66807 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66808+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66809 u8 namelen;
66810 char name[];
66811-};
66812+} __randomize_layout;
66813
66814 union proc_op {
66815 int (*proc_get_link)(struct dentry *, struct path *);
66816@@ -67,7 +68,7 @@ struct proc_inode {
66817 struct ctl_table *sysctl_entry;
66818 const struct proc_ns_operations *ns_ops;
66819 struct inode vfs_inode;
66820-};
66821+} __randomize_layout;
66822
66823 /*
66824 * General functions
66825@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66826 struct pid *, struct task_struct *);
66827 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66828 struct pid *, struct task_struct *);
66829+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66830+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66831+ struct pid *, struct task_struct *);
66832+#endif
66833
66834 /*
66835 * base.c
66836@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66837 * generic.c
66838 */
66839 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66840+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66841 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66842 struct dentry *);
66843 extern int proc_readdir(struct file *, struct dir_context *);
66844+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66845 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66846
66847 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66848diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66849index a352d57..cb94a5c 100644
66850--- a/fs/proc/interrupts.c
66851+++ b/fs/proc/interrupts.c
66852@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66853
66854 static int __init proc_interrupts_init(void)
66855 {
66856+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66857+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66858+#else
66859 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66860+#endif
66861 return 0;
66862 }
66863 fs_initcall(proc_interrupts_init);
66864diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66865index 91a4e64..69f1a3e 100644
66866--- a/fs/proc/kcore.c
66867+++ b/fs/proc/kcore.c
66868@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66869 * the addresses in the elf_phdr on our list.
66870 */
66871 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66872- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66873+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66874+ if (tsz > buflen)
66875 tsz = buflen;
66876-
66877+
66878 while (buflen) {
66879 struct kcore_list *m;
66880
66881@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66882 kfree(elf_buf);
66883 } else {
66884 if (kern_addr_valid(start)) {
66885- unsigned long n;
66886+ char *elf_buf;
66887+ mm_segment_t oldfs;
66888
66889- n = copy_to_user(buffer, (char *)start, tsz);
66890- /*
66891- * We cannot distinguish between fault on source
66892- * and fault on destination. When this happens
66893- * we clear too and hope it will trigger the
66894- * EFAULT again.
66895- */
66896- if (n) {
66897- if (clear_user(buffer + tsz - n,
66898- n))
66899+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66900+ if (!elf_buf)
66901+ return -ENOMEM;
66902+ oldfs = get_fs();
66903+ set_fs(KERNEL_DS);
66904+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66905+ set_fs(oldfs);
66906+ if (copy_to_user(buffer, elf_buf, tsz)) {
66907+ kfree(elf_buf);
66908 return -EFAULT;
66909+ }
66910 }
66911+ set_fs(oldfs);
66912+ kfree(elf_buf);
66913 } else {
66914 if (clear_user(buffer, tsz))
66915 return -EFAULT;
66916@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66917
66918 static int open_kcore(struct inode *inode, struct file *filp)
66919 {
66920+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66921+ return -EPERM;
66922+#endif
66923 if (!capable(CAP_SYS_RAWIO))
66924 return -EPERM;
66925 if (kcore_need_update)
66926@@ -580,7 +587,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
66927 return NOTIFY_OK;
66928 }
66929
66930-static struct notifier_block kcore_callback_nb __meminitdata = {
66931+static struct notifier_block kcore_callback_nb __meminitconst = {
66932 .notifier_call = kcore_callback,
66933 .priority = 0,
66934 };
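
The read_kcore() hunk stops copying kernel memory straight into the user buffer; it stages the data through a kmalloc'd bounce buffer, reading the kernel source under a temporary KERNEL_DS segment and only then doing an ordinary copy_to_user(). Presumably this keeps the hardened usercopy checks applied to both halves of the copy and lets a fault on the kernel source fail cleanly with -EFAULT. The two-stage flow, modeled in userspace with memcpy():

	/* Illustrative sketch only, not part of the patch: the two-stage
	 * bounce copy from read_kcore(), modeled with plain memcpy(). */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		const char kernel_src[] = "kcore bytes";   /* *(char *)start    */
		char user_buf[sizeof(kernel_src)];         /* the __user buffer */
		char *bounce = malloc(sizeof(kernel_src)); /* kmalloc(tsz, ...) */

		if (!bounce)
			return ENOMEM;
		memcpy(bounce, kernel_src, sizeof(kernel_src)); /* __copy_from_user() under KERNEL_DS */
		memcpy(user_buf, bounce, sizeof(kernel_src));   /* copy_to_user()                     */
		free(bounce);
		printf("%s\n", user_buf);
		return 0;
	}
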
66935diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66936index d3ebf2e..6ad42d1 100644
66937--- a/fs/proc/meminfo.c
66938+++ b/fs/proc/meminfo.c
66939@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66940 vmi.used >> 10,
66941 vmi.largest_chunk >> 10
66942 #ifdef CONFIG_MEMORY_FAILURE
66943- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66944+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66945 #endif
66946 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66947 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66948diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66949index d4a3574..b421ce9 100644
66950--- a/fs/proc/nommu.c
66951+++ b/fs/proc/nommu.c
66952@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66953
66954 if (file) {
66955 seq_pad(m, ' ');
66956- seq_path(m, &file->f_path, "");
66957+ seq_path(m, &file->f_path, "\n\\");
66958 }
66959
66960 seq_putc(m, '\n');
66961diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66962index 1bde894..22ac7eb 100644
66963--- a/fs/proc/proc_net.c
66964+++ b/fs/proc/proc_net.c
66965@@ -23,9 +23,27 @@
66966 #include <linux/nsproxy.h>
66967 #include <net/net_namespace.h>
66968 #include <linux/seq_file.h>
66969+#include <linux/grsecurity.h>
66970
66971 #include "internal.h"
66972
66973+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66974+static struct seq_operations *ipv6_seq_ops_addr;
66975+
66976+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66977+{
66978+ ipv6_seq_ops_addr = addr;
66979+}
66980+
66981+void unregister_ipv6_seq_ops_addr(void)
66982+{
66983+ ipv6_seq_ops_addr = NULL;
66984+}
66985+
66986+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66987+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66988+#endif
66989+
66990 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66991 {
66992 return pde->parent->data;
66993@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66994 return maybe_get_net(PDE_NET(PDE(inode)));
66995 }
66996
66997+extern const struct seq_operations dev_seq_ops;
66998+
66999 int seq_open_net(struct inode *ino, struct file *f,
67000 const struct seq_operations *ops, int size)
67001 {
67002@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67003
67004 BUG_ON(size < sizeof(*p));
67005
67006+ /* only permit access to /proc/net/dev */
67007+ if (
67008+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67009+ ops != ipv6_seq_ops_addr &&
67010+#endif
67011+ ops != &dev_seq_ops && gr_proc_is_restricted())
67012+ return -EACCES;
67013+
67014 net = get_proc_net(ino);
67015 if (net == NULL)
67016 return -ENXIO;
67017@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67018 int err;
67019 struct net *net;
67020
67021+ if (gr_proc_is_restricted())
67022+ return -EACCES;
67023+
67024 err = -ENXIO;
67025 net = get_proc_net(inode);
67026 if (net == NULL)
67027diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67028index f92d5dd..26398ac 100644
67029--- a/fs/proc/proc_sysctl.c
67030+++ b/fs/proc/proc_sysctl.c
67031@@ -11,13 +11,21 @@
67032 #include <linux/namei.h>
67033 #include <linux/mm.h>
67034 #include <linux/module.h>
67035+#include <linux/nsproxy.h>
67036+#ifdef CONFIG_GRKERNSEC
67037+#include <net/net_namespace.h>
67038+#endif
67039 #include "internal.h"
67040
67041+extern int gr_handle_chroot_sysctl(const int op);
67042+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67043+ const int op);
67044+
67045 static const struct dentry_operations proc_sys_dentry_operations;
67046 static const struct file_operations proc_sys_file_operations;
67047-static const struct inode_operations proc_sys_inode_operations;
67048+const struct inode_operations proc_sys_inode_operations;
67049 static const struct file_operations proc_sys_dir_file_operations;
67050-static const struct inode_operations proc_sys_dir_operations;
67051+const struct inode_operations proc_sys_dir_operations;
67052
67053 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67054 {
67055@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67056
67057 err = NULL;
67058 d_set_d_op(dentry, &proc_sys_dentry_operations);
67059+
67060+ gr_handle_proc_create(dentry, inode);
67061+
67062 d_add(dentry, inode);
67063
67064 out:
67065@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67066 struct inode *inode = file_inode(filp);
67067 struct ctl_table_header *head = grab_header(inode);
67068 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
67069+ int op = write ? MAY_WRITE : MAY_READ;
67070 ssize_t error;
67071 size_t res;
67072
67073@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67074 * and won't be until we finish.
67075 */
67076 error = -EPERM;
67077- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
67078+ if (sysctl_perm(head, table, op))
67079 goto out;
67080
67081 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
67082@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67083 if (!table->proc_handler)
67084 goto out;
67085
67086+#ifdef CONFIG_GRKERNSEC
67087+ error = -EPERM;
67088+ if (gr_handle_chroot_sysctl(op))
67089+ goto out;
67090+ dget(filp->f_path.dentry);
67091+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
67092+ dput(filp->f_path.dentry);
67093+ goto out;
67094+ }
67095+ dput(filp->f_path.dentry);
67096+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
67097+ goto out;
67098+ if (write) {
67099+ if (current->nsproxy->net_ns != table->extra2) {
67100+ if (!capable(CAP_SYS_ADMIN))
67101+ goto out;
67102+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
67103+ goto out;
67104+ }
67105+#endif
67106+
67107 /* careful: calling conventions are nasty here */
67108 res = count;
67109 error = table->proc_handler(table, write, buf, &res, ppos);
67110@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
67111 return false;
67112 } else {
67113 d_set_d_op(child, &proc_sys_dentry_operations);
67114+
67115+ gr_handle_proc_create(child, inode);
67116+
67117 d_add(child, inode);
67118 }
67119 } else {
67120@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
67121 if ((*pos)++ < ctx->pos)
67122 return true;
67123
67124+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
67125+ return 0;
67126+
67127 if (unlikely(S_ISLNK(table->mode)))
67128 res = proc_sys_link_fill_cache(file, ctx, head, table);
67129 else
67130@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
67131 if (IS_ERR(head))
67132 return PTR_ERR(head);
67133
67134+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
67135+ return -ENOENT;
67136+
67137 generic_fillattr(inode, stat);
67138 if (table)
67139 stat->mode = (stat->mode & S_IFMT) | table->mode;
67140@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
67141 .llseek = generic_file_llseek,
67142 };
67143
67144-static const struct inode_operations proc_sys_inode_operations = {
67145+const struct inode_operations proc_sys_inode_operations = {
67146 .permission = proc_sys_permission,
67147 .setattr = proc_sys_setattr,
67148 .getattr = proc_sys_getattr,
67149 };
67150
67151-static const struct inode_operations proc_sys_dir_operations = {
67152+const struct inode_operations proc_sys_dir_operations = {
67153 .lookup = proc_sys_lookup,
67154 .permission = proc_sys_permission,
67155 .setattr = proc_sys_setattr,
67156@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
67157 static struct ctl_dir *new_dir(struct ctl_table_set *set,
67158 const char *name, int namelen)
67159 {
67160- struct ctl_table *table;
67161+ ctl_table_no_const *table;
67162 struct ctl_dir *new;
67163 struct ctl_node *node;
67164 char *new_name;
67165@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
67166 return NULL;
67167
67168 node = (struct ctl_node *)(new + 1);
67169- table = (struct ctl_table *)(node + 1);
67170+ table = (ctl_table_no_const *)(node + 1);
67171 new_name = (char *)(table + 2);
67172 memcpy(new_name, name, namelen);
67173 new_name[namelen] = '\0';
67174@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
67175 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
67176 struct ctl_table_root *link_root)
67177 {
67178- struct ctl_table *link_table, *entry, *link;
67179+ ctl_table_no_const *link_table, *link;
67180+ struct ctl_table *entry;
67181 struct ctl_table_header *links;
67182 struct ctl_node *node;
67183 char *link_name;
67184@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
67185 return NULL;
67186
67187 node = (struct ctl_node *)(links + 1);
67188- link_table = (struct ctl_table *)(node + nr_entries);
67189+ link_table = (ctl_table_no_const *)(node + nr_entries);
67190 link_name = (char *)&link_table[nr_entries + 1];
67191
67192 for (link = link_table, entry = table; entry->procname; link++, entry++) {
67193@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67194 struct ctl_table_header ***subheader, struct ctl_table_set *set,
67195 struct ctl_table *table)
67196 {
67197- struct ctl_table *ctl_table_arg = NULL;
67198- struct ctl_table *entry, *files;
67199+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
67200+ struct ctl_table *entry;
67201 int nr_files = 0;
67202 int nr_dirs = 0;
67203 int err = -ENOMEM;
67204@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67205 nr_files++;
67206 }
67207
67208- files = table;
67209 /* If there are mixed files and directories we need a new table */
67210 if (nr_dirs && nr_files) {
67211- struct ctl_table *new;
67212+ ctl_table_no_const *new;
67213 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
67214 GFP_KERNEL);
67215 if (!files)
67216@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67217 /* Register everything except a directory full of subdirectories */
67218 if (nr_files || !nr_dirs) {
67219 struct ctl_table_header *header;
67220- header = __register_sysctl_table(set, path, files);
67221+ header = __register_sysctl_table(set, path, files ? files : table);
67222 if (!header) {
67223 kfree(ctl_table_arg);
67224 goto out;
67225diff --git a/fs/proc/root.c b/fs/proc/root.c
67226index e74ac9f..35e89f4 100644
67227--- a/fs/proc/root.c
67228+++ b/fs/proc/root.c
67229@@ -188,7 +188,15 @@ void __init proc_root_init(void)
67230 proc_mkdir("openprom", NULL);
67231 #endif
67232 proc_tty_init();
67233+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67234+#ifdef CONFIG_GRKERNSEC_PROC_USER
67235+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
67236+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67237+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
67238+#endif
67239+#else
67240 proc_mkdir("bus", NULL);
67241+#endif
67242 proc_sys_init();
67243 }
67244
67245diff --git a/fs/proc/stat.c b/fs/proc/stat.c
67246index 510413eb..34d9a8c 100644
67247--- a/fs/proc/stat.c
67248+++ b/fs/proc/stat.c
67249@@ -11,6 +11,7 @@
67250 #include <linux/irqnr.h>
67251 #include <linux/cputime.h>
67252 #include <linux/tick.h>
67253+#include <linux/grsecurity.h>
67254
67255 #ifndef arch_irq_stat_cpu
67256 #define arch_irq_stat_cpu(cpu) 0
67257@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
67258 u64 sum_softirq = 0;
67259 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
67260 struct timespec boottime;
67261+ int unrestricted = 1;
67262+
67263+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67264+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67265+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
67266+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67267+ && !in_group_p(grsec_proc_gid)
67268+#endif
67269+ )
67270+ unrestricted = 0;
67271+#endif
67272+#endif
67273
67274 user = nice = system = idle = iowait =
67275 irq = softirq = steal = 0;
67276@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
67277 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67278 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67279 idle += get_idle_time(i);
67280- iowait += get_iowait_time(i);
67281- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67282- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67283- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67284- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67285- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67286- sum += kstat_cpu_irqs_sum(i);
67287- sum += arch_irq_stat_cpu(i);
67288+ if (unrestricted) {
67289+ iowait += get_iowait_time(i);
67290+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67291+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67292+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67293+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67294+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67295+ sum += kstat_cpu_irqs_sum(i);
67296+ sum += arch_irq_stat_cpu(i);
67297+ for (j = 0; j < NR_SOFTIRQS; j++) {
67298+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67299
67300- for (j = 0; j < NR_SOFTIRQS; j++) {
67301- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67302-
67303- per_softirq_sums[j] += softirq_stat;
67304- sum_softirq += softirq_stat;
67305+ per_softirq_sums[j] += softirq_stat;
67306+ sum_softirq += softirq_stat;
67307+ }
67308 }
67309 }
67310- sum += arch_irq_stat();
67311+ if (unrestricted)
67312+ sum += arch_irq_stat();
67313
67314 seq_puts(p, "cpu ");
67315 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67316@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
67317 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67318 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67319 idle = get_idle_time(i);
67320- iowait = get_iowait_time(i);
67321- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67322- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67323- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67324- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67325- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67326+ if (unrestricted) {
67327+ iowait = get_iowait_time(i);
67328+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67329+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67330+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67331+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67332+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67333+ }
67334 seq_printf(p, "cpu%d", i);
67335 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67336 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67337@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67338
67339 /* sum again ? it could be updated? */
67340 for_each_irq_nr(j)
67341- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
67342+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
67343
67344 seq_printf(p,
67345 "\nctxt %llu\n"
67346@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67347 "processes %lu\n"
67348 "procs_running %lu\n"
67349 "procs_blocked %lu\n",
67350- nr_context_switches(),
67351+ unrestricted ? nr_context_switches() : 0ULL,
67352 (unsigned long)jif,
67353- total_forks,
67354- nr_running(),
67355- nr_iowait());
67356+ unrestricted ? total_forks : 0UL,
67357+ unrestricted ? nr_running() : 0UL,
67358+ unrestricted ? nr_iowait() : 0UL);
67359
67360 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67361
67362diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67363index 6dee68d..1b4add0 100644
67364--- a/fs/proc/task_mmu.c
67365+++ b/fs/proc/task_mmu.c
67366@@ -13,12 +13,19 @@
67367 #include <linux/swap.h>
67368 #include <linux/swapops.h>
67369 #include <linux/mmu_notifier.h>
67370+#include <linux/grsecurity.h>
67371
67372 #include <asm/elf.h>
67373 #include <asm/uaccess.h>
67374 #include <asm/tlbflush.h>
67375 #include "internal.h"
67376
67377+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67378+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67379+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67380+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67381+#endif
67382+
67383 void task_mem(struct seq_file *m, struct mm_struct *mm)
67384 {
67385 unsigned long data, text, lib, swap, ptes, pmds;
67386@@ -57,8 +64,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67387 "VmLib:\t%8lu kB\n"
67388 "VmPTE:\t%8lu kB\n"
67389 "VmPMD:\t%8lu kB\n"
67390- "VmSwap:\t%8lu kB\n",
67391- hiwater_vm << (PAGE_SHIFT-10),
67392+ "VmSwap:\t%8lu kB\n"
67393+
67394+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67395+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67396+#endif
67397+
67398+ ,hiwater_vm << (PAGE_SHIFT-10),
67399 total_vm << (PAGE_SHIFT-10),
67400 mm->locked_vm << (PAGE_SHIFT-10),
67401 mm->pinned_vm << (PAGE_SHIFT-10),
67402@@ -68,7 +80,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67403 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67404 ptes >> 10,
67405 pmds >> 10,
67406- swap << (PAGE_SHIFT-10));
67407+ swap << (PAGE_SHIFT-10)
67408+
67409+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67410+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67411+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67412+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67413+#else
67414+ , mm->context.user_cs_base
67415+ , mm->context.user_cs_limit
67416+#endif
67417+#endif
67418+
67419+ );
67420 }
67421
67422 unsigned long task_vsize(struct mm_struct *mm)
67423@@ -285,13 +309,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67424 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67425 }
67426
67427- /* We don't show the stack guard page in /proc/maps */
67428+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67429+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
67430+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
67431+#else
67432 start = vma->vm_start;
67433- if (stack_guard_page_start(vma, start))
67434- start += PAGE_SIZE;
67435 end = vma->vm_end;
67436- if (stack_guard_page_end(vma, end))
67437- end -= PAGE_SIZE;
67438+#endif
67439
67440 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
67441 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
67442@@ -301,7 +325,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67443 flags & VM_WRITE ? 'w' : '-',
67444 flags & VM_EXEC ? 'x' : '-',
67445 flags & VM_MAYSHARE ? 's' : 'p',
67446+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67447+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
67448+#else
67449 pgoff,
67450+#endif
67451 MAJOR(dev), MINOR(dev), ino);
67452
67453 /*
67454@@ -310,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67455 */
67456 if (file) {
67457 seq_pad(m, ' ');
67458- seq_path(m, &file->f_path, "\n");
67459+ seq_path(m, &file->f_path, "\n\\");
67460 goto done;
67461 }
67462
67463@@ -341,8 +369,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67464 * Thread stack in /proc/PID/task/TID/maps or
67465 * the main process stack.
67466 */
67467- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67468- vma->vm_end >= mm->start_stack)) {
67469+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67470+ (vma->vm_start <= mm->start_stack &&
67471+ vma->vm_end >= mm->start_stack)) {
67472 name = "[stack]";
67473 } else {
67474 /* Thread stack in /proc/PID/maps */
67475@@ -362,6 +391,12 @@ done:
67476
67477 static int show_map(struct seq_file *m, void *v, int is_pid)
67478 {
67479+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67480+ if (current->exec_id != m->exec_id) {
67481+ gr_log_badprocpid("maps");
67482+ return 0;
67483+ }
67484+#endif
67485 show_map_vma(m, v, is_pid);
67486 m_cache_vma(m, v);
67487 return 0;
67488@@ -620,9 +655,18 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67489 .private = &mss,
67490 };
67491
67492+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67493+ if (current->exec_id != m->exec_id) {
67494+ gr_log_badprocpid("smaps");
67495+ return 0;
67496+ }
67497+#endif
67498 memset(&mss, 0, sizeof mss);
67499- /* mmap_sem is held in m_start */
67500- walk_page_vma(vma, &smaps_walk);
67501+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67502+ if (!PAX_RAND_FLAGS(vma->vm_mm))
67503+#endif
67504+ /* mmap_sem is held in m_start */
67505+ walk_page_vma(vma, &smaps_walk);
67506
67507 show_map_vma(m, vma, is_pid);
67508
67509@@ -641,7 +685,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67510 "KernelPageSize: %8lu kB\n"
67511 "MMUPageSize: %8lu kB\n"
67512 "Locked: %8lu kB\n",
67513+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67514+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67515+#else
67516 (vma->vm_end - vma->vm_start) >> 10,
67517+#endif
67518 mss.resident >> 10,
67519 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67520 mss.shared_clean >> 10,
67521@@ -1491,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67522 char buffer[64];
67523 int nid;
67524
67525+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67526+ if (current->exec_id != m->exec_id) {
67527+ gr_log_badprocpid("numa_maps");
67528+ return 0;
67529+ }
67530+#endif
67531+
67532 if (!mm)
67533 return 0;
67534
67535@@ -1505,11 +1560,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67536 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
67537 }
67538
67539+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67540+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67541+#else
67542 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67543+#endif
67544
67545 if (file) {
67546 seq_puts(m, " file=");
67547- seq_path(m, &file->f_path, "\n\t= ");
67548+ seq_path(m, &file->f_path, "\n\t\\= ");
67549 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67550 seq_puts(m, " heap");
67551 } else {
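The sanitizing above means a cross-task reader of /proc/<pid>/maps sees 0 for the start, end, and offset of mappings in tasks running with RANDMMAP or SEGMEXEC. A small sketch (illustrative, not part of the patch) that parses exactly those address fields:

/* Sketch: parse start-end address pairs out of /proc/self/maps.
 * With GRKERNSEC_PROC_MEMMAP, a foreign randomized task's maps
 * report these fields as 0. */
#include <stdio.h>

int main(void)
{
	char line[512];
	unsigned long start, end;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%lx-%lx", &start, &end) == 2)
			printf("%lx-%lx (%lu bytes)\n", start, end, end - start);
	fclose(f);
	return 0;
}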
67552diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67553index 599ec2e..f1413ae 100644
67554--- a/fs/proc/task_nommu.c
67555+++ b/fs/proc/task_nommu.c
67556@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67557 else
67558 bytes += kobjsize(mm);
67559
67560- if (current->fs && current->fs->users > 1)
67561+ if (current->fs && atomic_read(&current->fs->users) > 1)
67562 sbytes += kobjsize(current->fs);
67563 else
67564 bytes += kobjsize(current->fs);
67565@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67566
67567 if (file) {
67568 seq_pad(m, ' ');
67569- seq_path(m, &file->f_path, "");
67570+ seq_path(m, &file->f_path, "\n\\");
67571 } else if (mm) {
67572 pid_t tid = pid_of_stack(priv, vma, is_pid);
67573
67574diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67575index 4e61388..1a2523d 100644
67576--- a/fs/proc/vmcore.c
67577+++ b/fs/proc/vmcore.c
67578@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67579 nr_bytes = count;
67580
67581 /* If pfn is not ram, return zeros for sparse dump files */
67582- if (pfn_is_ram(pfn) == 0)
67583- memset(buf, 0, nr_bytes);
67584- else {
67585+ if (pfn_is_ram(pfn) == 0) {
67586+ if (userbuf) {
67587+ if (clear_user((char __force_user *)buf, nr_bytes))
67588+ return -EFAULT;
67589+ } else
67590+ memset(buf, 0, nr_bytes);
67591+ } else {
67592 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67593 offset, userbuf);
67594 if (tmp < 0)
67595@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67596 static int copy_to(void *target, void *src, size_t size, int userbuf)
67597 {
67598 if (userbuf) {
67599- if (copy_to_user((char __user *) target, src, size))
67600+ if (copy_to_user((char __force_user *) target, src, size))
67601 return -EFAULT;
67602 } else {
67603 memcpy(target, src, size);
67604@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67605 if (*fpos < m->offset + m->size) {
67606 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67607 start = m->paddr + *fpos - m->offset;
67608- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67609+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67610 if (tmp < 0)
67611 return tmp;
67612 buflen -= tsz;
67613@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67614 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67615 size_t buflen, loff_t *fpos)
67616 {
67617- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67618+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67619 }
67620
67621 /*
67622diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67623index d3fb2b6..43a8140 100644
67624--- a/fs/qnx6/qnx6.h
67625+++ b/fs/qnx6/qnx6.h
67626@@ -74,7 +74,7 @@ enum {
67627 BYTESEX_BE,
67628 };
67629
67630-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67631+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67632 {
67633 if (sbi->s_bytesex == BYTESEX_LE)
67634 return le64_to_cpu((__force __le64)n);
67635@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67636 return (__force __fs64)cpu_to_be64(n);
67637 }
67638
67639-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67640+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67641 {
67642 if (sbi->s_bytesex == BYTESEX_LE)
67643 return le32_to_cpu((__force __le32)n);
67644diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67645index bb2869f..d34ada8 100644
67646--- a/fs/quota/netlink.c
67647+++ b/fs/quota/netlink.c
67648@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67649 void quota_send_warning(struct kqid qid, dev_t dev,
67650 const char warntype)
67651 {
67652- static atomic_t seq;
67653+ static atomic_unchecked_t seq;
67654 struct sk_buff *skb;
67655 void *msg_head;
67656 int ret;
67657@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67658 "VFS: Not enough memory to send quota warning.\n");
67659 return;
67660 }
67661- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67662+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67663 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67664 if (!msg_head) {
67665 printk(KERN_ERR
67666diff --git a/fs/read_write.c b/fs/read_write.c
67667index 8e1b687..bad2eec 100644
67668--- a/fs/read_write.c
67669+++ b/fs/read_write.c
67670@@ -553,7 +553,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67671
67672 old_fs = get_fs();
67673 set_fs(get_ds());
67674- p = (__force const char __user *)buf;
67675+ p = (const char __force_user *)buf;
67676 if (count > MAX_RW_COUNT)
67677 count = MAX_RW_COUNT;
67678 if (file->f_op->write)
67679diff --git a/fs/readdir.c b/fs/readdir.c
67680index ced6791..936687b 100644
67681--- a/fs/readdir.c
67682+++ b/fs/readdir.c
67683@@ -18,6 +18,7 @@
67684 #include <linux/security.h>
67685 #include <linux/syscalls.h>
67686 #include <linux/unistd.h>
67687+#include <linux/namei.h>
67688
67689 #include <asm/uaccess.h>
67690
67691@@ -71,6 +72,7 @@ struct old_linux_dirent {
67692 struct readdir_callback {
67693 struct dir_context ctx;
67694 struct old_linux_dirent __user * dirent;
67695+ struct file * file;
67696 int result;
67697 };
67698
67699@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67700 buf->result = -EOVERFLOW;
67701 return -EOVERFLOW;
67702 }
67703+
67704+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67705+ return 0;
67706+
67707 buf->result++;
67708 dirent = buf->dirent;
67709 if (!access_ok(VERIFY_WRITE, dirent,
67710@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67711 if (!f.file)
67712 return -EBADF;
67713
67714+ buf.file = f.file;
67715 error = iterate_dir(f.file, &buf.ctx);
67716 if (buf.result)
67717 error = buf.result;
67718@@ -145,6 +152,7 @@ struct getdents_callback {
67719 struct dir_context ctx;
67720 struct linux_dirent __user * current_dir;
67721 struct linux_dirent __user * previous;
67722+ struct file * file;
67723 int count;
67724 int error;
67725 };
67726@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67727 buf->error = -EOVERFLOW;
67728 return -EOVERFLOW;
67729 }
67730+
67731+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67732+ return 0;
67733+
67734 dirent = buf->previous;
67735 if (dirent) {
67736 if (__put_user(offset, &dirent->d_off))
67737@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67738 if (!f.file)
67739 return -EBADF;
67740
67741+ buf.file = f.file;
67742 error = iterate_dir(f.file, &buf.ctx);
67743 if (error >= 0)
67744 error = buf.error;
67745@@ -230,6 +243,7 @@ struct getdents_callback64 {
67746 struct dir_context ctx;
67747 struct linux_dirent64 __user * current_dir;
67748 struct linux_dirent64 __user * previous;
67749+ struct file *file;
67750 int count;
67751 int error;
67752 };
67753@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67754 buf->error = -EINVAL; /* only used if we fail.. */
67755 if (reclen > buf->count)
67756 return -EINVAL;
67757+
67758+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67759+ return 0;
67760+
67761 dirent = buf->previous;
67762 if (dirent) {
67763 if (__put_user(offset, &dirent->d_off))
67764@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67765 if (!f.file)
67766 return -EBADF;
67767
67768+ buf.file = f.file;
67769 error = iterate_dir(f.file, &buf.ctx);
67770 if (error >= 0)
67771 error = buf.error;
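All three hooked fill callbacks above sit beneath the getdents family of syscalls, one per dirent ABI; gr_acl_handle_filldir() simply drops entries the policy hides. A self-contained sketch (illustrative only) of the 64-bit path, with the dirent layout declared by hand:

/* Sketch: walk a directory via the raw getdents64 syscall, the path
 * on which filldir64() above is invoked once per entry. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct linux_dirent64 {
	unsigned long long d_ino;
	long long          d_off;
	unsigned short     d_reclen;
	unsigned char      d_type;
	char               d_name[];
};

int main(void)
{
	char buf[4096];
	long nread, off;
	int fd = open(".", O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return 1;
	while ((nread = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0)
		for (off = 0; off < nread; ) {
			struct linux_dirent64 *d = (void *)(buf + off);
			printf("%s\n", d->d_name);
			off += d->d_reclen;
		}
	close(fd);
	return 0;
}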
67772diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67773index 9c02d96..6562c10 100644
67774--- a/fs/reiserfs/do_balan.c
67775+++ b/fs/reiserfs/do_balan.c
67776@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67777 return;
67778 }
67779
67780- atomic_inc(&fs_generation(tb->tb_sb));
67781+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67782 do_balance_starts(tb);
67783
67784 /*
67785diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67786index aca73dd..e3c558d 100644
67787--- a/fs/reiserfs/item_ops.c
67788+++ b/fs/reiserfs/item_ops.c
67789@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67790 }
67791
67792 static struct item_operations errcatch_ops = {
67793- errcatch_bytes_number,
67794- errcatch_decrement_key,
67795- errcatch_is_left_mergeable,
67796- errcatch_print_item,
67797- errcatch_check_item,
67798+ .bytes_number = errcatch_bytes_number,
67799+ .decrement_key = errcatch_decrement_key,
67800+ .is_left_mergeable = errcatch_is_left_mergeable,
67801+ .print_item = errcatch_print_item,
67802+ .check_item = errcatch_check_item,
67803
67804- errcatch_create_vi,
67805- errcatch_check_left,
67806- errcatch_check_right,
67807- errcatch_part_size,
67808- errcatch_unit_num,
67809- errcatch_print_vi
67810+ .create_vi = errcatch_create_vi,
67811+ .check_left = errcatch_check_left,
67812+ .check_right = errcatch_check_right,
67813+ .part_size = errcatch_part_size,
67814+ .unit_num = errcatch_unit_num,
67815+ .print_vi = errcatch_print_vi
67816 };
67817
67818 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
67819diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67820index 621b9f3..af527fd 100644
67821--- a/fs/reiserfs/procfs.c
67822+++ b/fs/reiserfs/procfs.c
67823@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67824 "SMALL_TAILS " : "NO_TAILS ",
67825 replay_only(sb) ? "REPLAY_ONLY " : "",
67826 convert_reiserfs(sb) ? "CONV " : "",
67827- atomic_read(&r->s_generation_counter),
67828+ atomic_read_unchecked(&r->s_generation_counter),
67829 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67830 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67831 SF(s_good_search_by_key_reada), SF(s_bmaps),
67832diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67833index bb79cdd..fcf49ef 100644
67834--- a/fs/reiserfs/reiserfs.h
67835+++ b/fs/reiserfs/reiserfs.h
67836@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67837 /* Comment? -Hans */
67838 wait_queue_head_t s_wait;
67839 /* increased by one every time the tree gets re-balanced */
67840- atomic_t s_generation_counter;
67841+ atomic_unchecked_t s_generation_counter;
67842
67843 /* File system properties. Currently holds on-disk FS format */
67844 unsigned long s_properties;
67845@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67846 #define REISERFS_USER_MEM 1 /* user memory mode */
67847
67848 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67849-#define get_generation(s) atomic_read (&fs_generation(s))
67850+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67851 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67852 #define __fs_changed(gen,s) (gen != get_generation (s))
67853 #define fs_changed(gen,s) \
67854diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67855index 71fbbe3..eff29ba 100644
67856--- a/fs/reiserfs/super.c
67857+++ b/fs/reiserfs/super.c
67858@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67859 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67860 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67861 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67862+#ifdef CONFIG_REISERFS_FS_XATTR
67863+ /* turn on user xattrs by default */
67864+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67865+#endif
67866 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67867 sbi->s_alloc_options.preallocmin = 0;
67868 /* Preallocate by 16 blocks (17-1) at once */
67869diff --git a/fs/select.c b/fs/select.c
67870index f684c75..4117611 100644
67871--- a/fs/select.c
67872+++ b/fs/select.c
67873@@ -20,6 +20,7 @@
67874 #include <linux/export.h>
67875 #include <linux/slab.h>
67876 #include <linux/poll.h>
67877+#include <linux/security.h>
67878 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67879 #include <linux/file.h>
67880 #include <linux/fdtable.h>
67881@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67882 struct poll_list *walk = head;
67883 unsigned long todo = nfds;
67884
67885+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67886 if (nfds > rlimit(RLIMIT_NOFILE))
67887 return -EINVAL;
67888
67889diff --git a/fs/seq_file.c b/fs/seq_file.c
67890index 555f821..34684d7 100644
67891--- a/fs/seq_file.c
67892+++ b/fs/seq_file.c
67893@@ -12,6 +12,8 @@
67894 #include <linux/slab.h>
67895 #include <linux/cred.h>
67896 #include <linux/mm.h>
67897+#include <linux/sched.h>
67898+#include <linux/grsecurity.h>
67899
67900 #include <asm/uaccess.h>
67901 #include <asm/page.h>
67902@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67903
67904 static void *seq_buf_alloc(unsigned long size)
67905 {
67906- void *buf;
67907-
67908- /*
67909- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67910- * it's better to fall back to vmalloc() than to kill things.
67911- */
67912- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67913- if (!buf && size > PAGE_SIZE)
67914- buf = vmalloc(size);
67915- return buf;
67916+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67917 }
67918
67919 /**
67920@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67921 #ifdef CONFIG_USER_NS
67922 p->user_ns = file->f_cred->user_ns;
67923 #endif
67924+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67925+ p->exec_id = current->exec_id;
67926+#endif
67927
67928 /*
67929 * Wrappers around seq_open(e.g. swaps_open) need to be
67930@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67931 }
67932 EXPORT_SYMBOL(seq_open);
67933
67934+
67935+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67936+{
67937+ if (gr_proc_is_restricted())
67938+ return -EACCES;
67939+
67940+ return seq_open(file, op);
67941+}
67942+EXPORT_SYMBOL(seq_open_restrict);
67943+
67944 static int traverse(struct seq_file *m, loff_t offset)
67945 {
67946 loff_t pos = 0, index;
67947@@ -158,7 +164,7 @@ Eoverflow:
67948 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67949 {
67950 struct seq_file *m = file->private_data;
67951- size_t copied = 0;
67952+ ssize_t copied = 0;
67953 loff_t pos;
67954 size_t n;
67955 void *p;
67956@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
67957 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67958 void *data)
67959 {
67960- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67961+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67962 int res = -ENOMEM;
67963
67964 if (op) {
67965@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67966 }
67967 EXPORT_SYMBOL(single_open_size);
67968
67969+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67970+ void *data)
67971+{
67972+ if (gr_proc_is_restricted())
67973+ return -EACCES;
67974+
67975+ return single_open(file, show, data);
67976+}
67977+EXPORT_SYMBOL(single_open_restrict);
67978+
67979+
67980 int single_release(struct inode *inode, struct file *file)
67981 {
67982 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
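seq_open_restrict() and single_open_restrict() above are drop-in wrappers over the stock seq_file API that refuse restricted processes up front. For context, a minimal sketch of that API as consumed by this kernel generation's file_operations-based procfs; the module and entry names are hypothetical:

/* Sketch: expose a one-shot seq_file via single_open(), the helper
 * that single_open_restrict() above wraps. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from seq_file\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	return proc_create("seqfile_demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seqfile_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");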
67983diff --git a/fs/splice.c b/fs/splice.c
67984index 7968da9..275187d 100644
67985--- a/fs/splice.c
67986+++ b/fs/splice.c
67987@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67988 pipe_lock(pipe);
67989
67990 for (;;) {
67991- if (!pipe->readers) {
67992+ if (!atomic_read(&pipe->readers)) {
67993 send_sig(SIGPIPE, current, 0);
67994 if (!ret)
67995 ret = -EPIPE;
67996@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67997 page_nr++;
67998 ret += buf->len;
67999
68000- if (pipe->files)
68001+ if (atomic_read(&pipe->files))
68002 do_wakeup = 1;
68003
68004 if (!--spd->nr_pages)
68005@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68006 do_wakeup = 0;
68007 }
68008
68009- pipe->waiting_writers++;
68010+ atomic_inc(&pipe->waiting_writers);
68011 pipe_wait(pipe);
68012- pipe->waiting_writers--;
68013+ atomic_dec(&pipe->waiting_writers);
68014 }
68015
68016 pipe_unlock(pipe);
68017@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68018 old_fs = get_fs();
68019 set_fs(get_ds());
68020 /* The cast to a user pointer is valid due to the set_fs() */
68021- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68022+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68023 set_fs(old_fs);
68024
68025 return res;
68026@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68027 old_fs = get_fs();
68028 set_fs(get_ds());
68029 /* The cast to a user pointer is valid due to the set_fs() */
68030- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68031+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68032 set_fs(old_fs);
68033
68034 return res;
68035@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68036 goto err;
68037
68038 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68039- vec[i].iov_base = (void __user *) page_address(page);
68040+ vec[i].iov_base = (void __force_user *) page_address(page);
68041 vec[i].iov_len = this_len;
68042 spd.pages[i] = page;
68043 spd.nr_pages++;
68044@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68045 ops->release(pipe, buf);
68046 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68047 pipe->nrbufs--;
68048- if (pipe->files)
68049+ if (atomic_read(&pipe->files))
68050 sd->need_wakeup = true;
68051 }
68052
68053@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68054 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68055 {
68056 while (!pipe->nrbufs) {
68057- if (!pipe->writers)
68058+ if (!atomic_read(&pipe->writers))
68059 return 0;
68060
68061- if (!pipe->waiting_writers && sd->num_spliced)
68062+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
68063 return 0;
68064
68065 if (sd->flags & SPLICE_F_NONBLOCK)
68066@@ -1025,7 +1025,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
68067 ops->release(pipe, buf);
68068 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68069 pipe->nrbufs--;
68070- if (pipe->files)
68071+ if (atomic_read(&pipe->files))
68072 sd.need_wakeup = true;
68073 } else {
68074 buf->offset += ret;
68075@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
68076 * out of the pipe right after the splice_to_pipe(). So set
68077 * PIPE_READERS appropriately.
68078 */
68079- pipe->readers = 1;
68080+ atomic_set(&pipe->readers, 1);
68081
68082 current->splice_pipe = pipe;
68083 }
68084@@ -1482,6 +1482,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
68085
68086 partial[buffers].offset = off;
68087 partial[buffers].len = plen;
68088+ partial[buffers].private = 0;
68089
68090 off = 0;
68091 len -= plen;
68092@@ -1718,9 +1719,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68093 ret = -ERESTARTSYS;
68094 break;
68095 }
68096- if (!pipe->writers)
68097+ if (!atomic_read(&pipe->writers))
68098 break;
68099- if (!pipe->waiting_writers) {
68100+ if (!atomic_read(&pipe->waiting_writers)) {
68101 if (flags & SPLICE_F_NONBLOCK) {
68102 ret = -EAGAIN;
68103 break;
68104@@ -1752,7 +1753,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68105 pipe_lock(pipe);
68106
68107 while (pipe->nrbufs >= pipe->buffers) {
68108- if (!pipe->readers) {
68109+ if (!atomic_read(&pipe->readers)) {
68110 send_sig(SIGPIPE, current, 0);
68111 ret = -EPIPE;
68112 break;
68113@@ -1765,9 +1766,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68114 ret = -ERESTARTSYS;
68115 break;
68116 }
68117- pipe->waiting_writers++;
68118+ atomic_inc(&pipe->waiting_writers);
68119 pipe_wait(pipe);
68120- pipe->waiting_writers--;
68121+ atomic_dec(&pipe->waiting_writers);
68122 }
68123
68124 pipe_unlock(pipe);
68125@@ -1803,14 +1804,14 @@ retry:
68126 pipe_double_lock(ipipe, opipe);
68127
68128 do {
68129- if (!opipe->readers) {
68130+ if (!atomic_read(&opipe->readers)) {
68131 send_sig(SIGPIPE, current, 0);
68132 if (!ret)
68133 ret = -EPIPE;
68134 break;
68135 }
68136
68137- if (!ipipe->nrbufs && !ipipe->writers)
68138+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
68139 break;
68140
68141 /*
68142@@ -1907,7 +1908,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68143 pipe_double_lock(ipipe, opipe);
68144
68145 do {
68146- if (!opipe->readers) {
68147+ if (!atomic_read(&opipe->readers)) {
68148 send_sig(SIGPIPE, current, 0);
68149 if (!ret)
68150 ret = -EPIPE;
68151@@ -1952,7 +1953,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68152 * return EAGAIN if we have the potential of some data in the
68153 * future, otherwise just return 0
68154 */
68155- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
68156+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
68157 ret = -EAGAIN;
68158
68159 pipe_unlock(ipipe);
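The conversions above replace the pipe's plain integer reader/writer counters with atomic_t accesses (atomic_read/atomic_inc/atomic_dec). The same counter pattern, sketched with C11 atomics in place of the kernel's atomic_t (illustrative only):

/* Sketch (C11, userspace): the counter pattern the hunks above apply
 * to pipe->readers, pipe->writers, and pipe->waiting_writers. */
#include <stdatomic.h>
#include <stdio.h>

struct fake_pipe {
	atomic_int readers;
	atomic_int waiting_writers;
};

static int pipe_has_readers(struct fake_pipe *p)
{
	return atomic_load(&p->readers) != 0;   /* atomic_read() */
}

int main(void)
{
	struct fake_pipe p = { ATOMIC_VAR_INIT(1), ATOMIC_VAR_INIT(0) };

	atomic_fetch_add(&p.waiting_writers, 1);  /* atomic_inc() */
	printf("readers=%d waiting_writers=%d\n",
	       pipe_has_readers(&p), atomic_load(&p.waiting_writers));
	atomic_fetch_sub(&p.waiting_writers, 1);  /* atomic_dec() */
	return 0;
}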
68160diff --git a/fs/stat.c b/fs/stat.c
68161index ae0c3ce..9ee641c 100644
68162--- a/fs/stat.c
68163+++ b/fs/stat.c
68164@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
68165 stat->gid = inode->i_gid;
68166 stat->rdev = inode->i_rdev;
68167 stat->size = i_size_read(inode);
68168- stat->atime = inode->i_atime;
68169- stat->mtime = inode->i_mtime;
68170+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68171+ stat->atime = inode->i_ctime;
68172+ stat->mtime = inode->i_ctime;
68173+ } else {
68174+ stat->atime = inode->i_atime;
68175+ stat->mtime = inode->i_mtime;
68176+ }
68177 stat->ctime = inode->i_ctime;
68178 stat->blksize = (1 << inode->i_blkbits);
68179 stat->blocks = inode->i_blocks;
68180@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
68181 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
68182 {
68183 struct inode *inode = path->dentry->d_inode;
68184+ int retval;
68185
68186- if (inode->i_op->getattr)
68187- return inode->i_op->getattr(path->mnt, path->dentry, stat);
68188+ if (inode->i_op->getattr) {
68189+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
68190+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68191+ stat->atime = stat->ctime;
68192+ stat->mtime = stat->ctime;
68193+ }
68194+ return retval;
68195+ }
68196
68197 generic_fillattr(inode, stat);
68198 return 0;
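With the change above, a stat() issued without CAP_MKNOD against a device flagged by is_sidechannel_device() reports atime and mtime equal to ctime. A small sketch (illustrative) that surfaces the comparison:

/* Sketch: compare a path's atime/mtime against its ctime, making the
 * timestamp normalization applied above visible from userspace. */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st))
		return 1;
	printf("atime=%ld mtime=%ld ctime=%ld%s\n",
	       (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime,
	       (st.st_atime == st.st_ctime && st.st_mtime == st.st_ctime)
	       ? " (normalized)" : "");
	return 0;
}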
68199diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
68200index 0b45ff4..edf9d3a 100644
68201--- a/fs/sysfs/dir.c
68202+++ b/fs/sysfs/dir.c
68203@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
68204 kfree(buf);
68205 }
68206
68207+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68208+extern int grsec_enable_sysfs_restrict;
68209+#endif
68210+
68211 /**
68212 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
68213 * @kobj: object we're creating directory for
68214@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
68215 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68216 {
68217 struct kernfs_node *parent, *kn;
68218+ const char *name;
68219+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
68220+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68221+ const char *parent_name;
68222+#endif
68223
68224 BUG_ON(!kobj);
68225
68226+ name = kobject_name(kobj);
68227+
68228 if (kobj->parent)
68229 parent = kobj->parent->sd;
68230 else
68231@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68232 if (!parent)
68233 return -ENOENT;
68234
68235- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
68236- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
68237+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68238+ parent_name = parent->name;
68239+ mode = S_IRWXU;
68240+
68241+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
68242+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
68243+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
68244+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
68245+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
68246+ if (!grsec_enable_sysfs_restrict)
68247+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
68248+#endif
68249+
68250+ kn = kernfs_create_dir_ns(parent, name,
68251+ mode, kobj, ns);
68252 if (IS_ERR(kn)) {
68253 if (PTR_ERR(kn) == -EEXIST)
68254- sysfs_warn_dup(parent, kobject_name(kobj));
68255+ sysfs_warn_dup(parent, name);
68256 return PTR_ERR(kn);
68257 }
68258
68259diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
68260index 69d4889..a810bd4 100644
68261--- a/fs/sysv/sysv.h
68262+++ b/fs/sysv/sysv.h
68263@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
68264 #endif
68265 }
68266
68267-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68268+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68269 {
68270 if (sbi->s_bytesex == BYTESEX_PDP)
68271 return PDP_swab((__force __u32)n);
68272diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
68273index fb08b0c..65fcc7e 100644
68274--- a/fs/ubifs/io.c
68275+++ b/fs/ubifs/io.c
68276@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
68277 return err;
68278 }
68279
68280-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68281+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68282 {
68283 int err;
68284
68285diff --git a/fs/udf/misc.c b/fs/udf/misc.c
68286index c175b4d..8f36a16 100644
68287--- a/fs/udf/misc.c
68288+++ b/fs/udf/misc.c
68289@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
68290
68291 u8 udf_tag_checksum(const struct tag *t)
68292 {
68293- u8 *data = (u8 *)t;
68294+ const u8 *data = (const u8 *)t;
68295 u8 checksum = 0;
68296 int i;
68297 for (i = 0; i < sizeof(struct tag); ++i)
68298diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
68299index 8d974c4..b82f6ec 100644
68300--- a/fs/ufs/swab.h
68301+++ b/fs/ufs/swab.h
68302@@ -22,7 +22,7 @@ enum {
68303 BYTESEX_BE
68304 };
68305
68306-static inline u64
68307+static inline u64 __intentional_overflow(-1)
68308 fs64_to_cpu(struct super_block *sbp, __fs64 n)
68309 {
68310 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68311@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
68312 return (__force __fs64)cpu_to_be64(n);
68313 }
68314
68315-static inline u32
68316+static inline u32 __intentional_overflow(-1)
68317 fs32_to_cpu(struct super_block *sbp, __fs32 n)
68318 {
68319 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68320diff --git a/fs/utimes.c b/fs/utimes.c
68321index aa138d6..5f3a811 100644
68322--- a/fs/utimes.c
68323+++ b/fs/utimes.c
68324@@ -1,6 +1,7 @@
68325 #include <linux/compiler.h>
68326 #include <linux/file.h>
68327 #include <linux/fs.h>
68328+#include <linux/security.h>
68329 #include <linux/linkage.h>
68330 #include <linux/mount.h>
68331 #include <linux/namei.h>
68332@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68333 }
68334 }
68335 retry_deleg:
68336+
68337+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68338+ error = -EACCES;
68339+ goto mnt_drop_write_and_out;
68340+ }
68341+
68342 mutex_lock(&inode->i_mutex);
68343 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68344 mutex_unlock(&inode->i_mutex);
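gr_acl_handle_utime() above gates utimes_common(), the path shared by the utime(), utimes(), futimesat(), and utimensat() syscalls; a denial surfaces to userspace as EACCES. A quick sketch (illustrative) of the modern entry point:

/* Sketch: set a file's atime and mtime to "now" via utimensat(), one
 * of the callers funneling into the utimes_common() hook above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	if (argc < 2)
		return 1;
	/* NULL times means: set both timestamps to the current time */
	if (utimensat(AT_FDCWD, argv[1], NULL, 0)) {
		perror("utimensat");  /* EACCES if the ACL denies it */
		return 1;
	}
	return 0;
}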
68345diff --git a/fs/xattr.c b/fs/xattr.c
68346index 4ef6985..a6cd6567 100644
68347--- a/fs/xattr.c
68348+++ b/fs/xattr.c
68349@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68350 return rc;
68351 }
68352
68353+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68354+ssize_t
68355+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68356+{
68357+ struct inode *inode = dentry->d_inode;
68358+ ssize_t error;
68359+
68360+ error = inode_permission(inode, MAY_EXEC);
68361+ if (error)
68362+ return error;
68363+
68364+ if (inode->i_op->getxattr)
68365+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68366+ else
68367+ error = -EOPNOTSUPP;
68368+
68369+ return error;
68370+}
68371+EXPORT_SYMBOL(pax_getxattr);
68372+#endif
68373+
68374 ssize_t
68375 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68376 {
68377@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68378 * Extended attribute SET operations
68379 */
68380 static long
68381-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68382+setxattr(struct path *path, const char __user *name, const void __user *value,
68383 size_t size, int flags)
68384 {
68385 int error;
68386@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68387 posix_acl_fix_xattr_from_user(kvalue, size);
68388 }
68389
68390- error = vfs_setxattr(d, kname, kvalue, size, flags);
68391+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68392+ error = -EACCES;
68393+ goto out;
68394+ }
68395+
68396+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68397 out:
68398 if (vvalue)
68399 vfree(vvalue);
68400@@ -376,7 +402,7 @@ retry:
68401 return error;
68402 error = mnt_want_write(path.mnt);
68403 if (!error) {
68404- error = setxattr(path.dentry, name, value, size, flags);
68405+ error = setxattr(&path, name, value, size, flags);
68406 mnt_drop_write(path.mnt);
68407 }
68408 path_put(&path);
68409@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68410 audit_file(f.file);
68411 error = mnt_want_write_file(f.file);
68412 if (!error) {
68413- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
68414+ error = setxattr(&f.file->f_path, name, value, size, flags);
68415 mnt_drop_write_file(f.file);
68416 }
68417 fdput(f);
68418@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68419 * Extended attribute REMOVE operations
68420 */
68421 static long
68422-removexattr(struct dentry *d, const char __user *name)
68423+removexattr(struct path *path, const char __user *name)
68424 {
68425 int error;
68426 char kname[XATTR_NAME_MAX + 1];
68427@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
68428 if (error < 0)
68429 return error;
68430
68431- return vfs_removexattr(d, kname);
68432+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68433+ return -EACCES;
68434+
68435+ return vfs_removexattr(path->dentry, kname);
68436 }
68437
68438 static int path_removexattr(const char __user *pathname,
68439@@ -623,7 +652,7 @@ retry:
68440 return error;
68441 error = mnt_want_write(path.mnt);
68442 if (!error) {
68443- error = removexattr(path.dentry, name);
68444+ error = removexattr(&path, name);
68445 mnt_drop_write(path.mnt);
68446 }
68447 path_put(&path);
68448@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
68449 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
68450 {
68451 struct fd f = fdget(fd);
68452+ struct path *path;
68453 int error = -EBADF;
68454
68455 if (!f.file)
68456 return error;
68457+ path = &f.file->f_path;
68458 audit_file(f.file);
68459 error = mnt_want_write_file(f.file);
68460 if (!error) {
68461- error = removexattr(f.file->f_path.dentry, name);
68462+ error = removexattr(path, name);
68463 mnt_drop_write_file(f.file);
68464 }
68465 fdput(f);
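setxattr() and removexattr() above now take the full struct path so the gr_acl_* hooks can consult the vfsmount as well as the dentry; the userspace-visible syscalls are unchanged. A sketch of the round trip (the attribute name is illustrative):

/* Sketch: set, read back, and remove a user.* extended attribute,
 * exercising the syscall helpers repointed above. */
#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char buf[64];
	ssize_t n;

	if (argc < 2)
		return 1;
	if (setxattr(argv[1], "user.demo", "value", 5, 0))
		return 1;
	n = getxattr(argv[1], "user.demo", buf, sizeof(buf) - 1);
	if (n >= 0) {
		buf[n] = '\0';
		printf("user.demo=%s\n", buf);
	}
	return removexattr(argv[1], "user.demo");
}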
68466diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68467index 61ec015..7c18807 100644
68468--- a/fs/xfs/libxfs/xfs_bmap.c
68469+++ b/fs/xfs/libxfs/xfs_bmap.c
68470@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
68471
68472 #else
68473 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68474-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68475+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68476 #endif /* DEBUG */
68477
68478 /*
68479diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68480index 098cd78..724d3f8 100644
68481--- a/fs/xfs/xfs_dir2_readdir.c
68482+++ b/fs/xfs/xfs_dir2_readdir.c
68483@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
68484 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68485 filetype = dp->d_ops->sf_get_ftype(sfep);
68486 ctx->pos = off & 0x7fffffff;
68487- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68488+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68489+ char name[sfep->namelen];
68490+ memcpy(name, sfep->name, sfep->namelen);
68491+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68492+ return 0;
68493+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68494 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68495 return 0;
68496 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
68497diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68498index ac4feae..386d551 100644
68499--- a/fs/xfs/xfs_ioctl.c
68500+++ b/fs/xfs/xfs_ioctl.c
68501@@ -120,7 +120,7 @@ xfs_find_handle(
68502 }
68503
68504 error = -EFAULT;
68505- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68506+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68507 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68508 goto out_put;
68509
68510diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
68511index c31d2c2..6ec8f62 100644
68512--- a/fs/xfs/xfs_linux.h
68513+++ b/fs/xfs/xfs_linux.h
68514@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
68515 * of the compiler which do not like us using do_div in the middle
68516 * of large functions.
68517 */
68518-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68519+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68520 {
68521 __u32 mod;
68522
68523@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
68524 return 0;
68525 }
68526 #else
68527-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68528+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68529 {
68530 __u32 mod;
68531
68532diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68533new file mode 100644
68534index 0000000..31f8fe4
68535--- /dev/null
68536+++ b/grsecurity/Kconfig
68537@@ -0,0 +1,1182 @@
68538+#
68539+# grsecurity configuration
68540+#
68541+menu "Memory Protections"
68542+depends on GRKERNSEC
68543+
68544+config GRKERNSEC_KMEM
68545+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68546+ default y if GRKERNSEC_CONFIG_AUTO
68547+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68548+ help
68549+	  If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
68550+	  written to or read from, preventing modification or leaking of the
68551+	  contents of the running kernel. /dev/port will also not be allowed to be
68552+	  opened, writing to /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68553+ If you have module support disabled, enabling this will close up several
68554+ ways that are currently used to insert malicious code into the running
68555+ kernel.
68556+
68557+ Even with this feature enabled, we still highly recommend that
68558+ you use the RBAC system, as it is still possible for an attacker to
68559+ modify the running kernel through other more obscure methods.
68560+
68561+ It is highly recommended that you say Y here if you meet all the
68562+ conditions above.
68563+
68564+config GRKERNSEC_VM86
68565+ bool "Restrict VM86 mode"
68566+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68567+ depends on X86_32
68568+
68569+ help
68570+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68571+ make use of a special execution mode on 32bit x86 processors called
68572+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68573+ video cards and will still work with this option enabled. The purpose
68574+ of the option is to prevent exploitation of emulation errors in
68575+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68576+ Nearly all users should be able to enable this option.
68577+
68578+config GRKERNSEC_IO
68579+ bool "Disable privileged I/O"
68580+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68581+ depends on X86
68582+ select RTC_CLASS
68583+ select RTC_INTF_DEV
68584+ select RTC_DRV_CMOS
68585+
68586+ help
68587+ If you say Y here, all ioperm and iopl calls will return an error.
68588+ Ioperm and iopl can be used to modify the running kernel.
68589+ Unfortunately, some programs need this access to operate properly,
68590+	  the most notable of which are XFree86 and hwclock. The hwclock case
68591+	  can be remedied by having RTC support in the kernel, so real-time
68592+	  clock support is selected whenever this option is enabled, ensuring
68593+	  that hwclock operates correctly. If hwclock still does not work,
68594+ either update udev or symlink /dev/rtc to /dev/rtc0.
68595+
68596+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68597+ you may not be able to boot into a graphical environment with this
68598+ option enabled. In this case, you should use the RBAC system instead.
68599+
68600+config GRKERNSEC_BPF_HARDEN
68601+ bool "Harden BPF interpreter"
68602+ default y if GRKERNSEC_CONFIG_AUTO
68603+ help
68604+ Unlike previous versions of grsecurity that hardened both the BPF
68605+	  interpreted code against corruption at rest and the JIT code
68606+ against JIT-spray attacks and attacker-controlled immediate values
68607+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68608+ and will ensure the interpreted code is read-only at rest. This feature
68609+ may be removed at a later time when eBPF stabilizes to entirely revert
68610+ back to the more secure pre-3.16 BPF interpreter/JIT.
68611+
68612+ If you're using KERNEXEC, it's recommended that you enable this option
68613+ to supplement the hardening of the kernel.
68614+
68615+config GRKERNSEC_PERF_HARDEN
68616+ bool "Disable unprivileged PERF_EVENTS usage by default"
68617+ default y if GRKERNSEC_CONFIG_AUTO
68618+ depends on PERF_EVENTS
68619+ help
68620+ If you say Y here, the range of acceptable values for the
68621+	  /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow,
68622+	  and default to, a new value: 3. When the sysctl is set to this value, no
68623+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68624+
68625+ Though PERF_EVENTS can be used legitimately for performance monitoring
68626+ and low-level application profiling, it is forced on regardless of
68627+ configuration, has been at fault for several vulnerabilities, and
68628+ creates new opportunities for side channels and other information leaks.
68629+
68630+ This feature puts PERF_EVENTS into a secure default state and permits
68631+ the administrator to change out of it temporarily if unprivileged
68632+ application profiling is needed.
68633+
68634+config GRKERNSEC_RAND_THREADSTACK
68635+ bool "Insert random gaps between thread stacks"
68636+ default y if GRKERNSEC_CONFIG_AUTO
68637+ depends on PAX_RANDMMAP && !PPC
68638+ help
68639+ If you say Y here, a random-sized gap will be enforced between allocated
68640+ thread stacks. Glibc's NPTL and other threading libraries that
68641+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68642+ The implementation currently provides 8 bits of entropy for the gap.
68643+
68644+ Many distributions do not compile threaded remote services with the
68645+ -fstack-check argument to GCC, causing the variable-sized stack-based
68646+ allocator, alloca(), to not probe the stack on allocation. This
68647+ permits an unbounded alloca() to skip over any guard page and potentially
68648+ modify another thread's stack reliably. An enforced random gap
68649+ reduces the reliability of such an attack and increases the chance
68650+ that such a read/write to another thread's stack instead lands in
68651+ an unmapped area, causing a crash and triggering grsecurity's
68652+ anti-bruteforcing logic.
68653+
68654+config GRKERNSEC_PROC_MEMMAP
68655+ bool "Harden ASLR against information leaks and entropy reduction"
68656+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68657+ depends on PAX_NOEXEC || PAX_ASLR
68658+ help
68659+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68660+	  give no information about the addresses of the task's mappings if
68661+ PaX features that rely on random addresses are enabled on the task.
68662+ In addition to sanitizing this information and disabling other
68663+	  dangerous sources of information, this option causes reads of sensitive
68664+	  /proc/<pid> entries to return no data where the file descriptor was opened
68665+	  in a different task than the one performing the read. Such attempts are logged.
68666+ This option also limits argv/env strings for suid/sgid binaries
68667+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68668+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68669+ binaries to prevent alternative mmap layouts from being abused.
68670+
68671+ If you use PaX it is essential that you say Y here as it closes up
68672+ several holes that make full ASLR useless locally.
68673+
68674+
68675+config GRKERNSEC_KSTACKOVERFLOW
68676+ bool "Prevent kernel stack overflows"
68677+ default y if GRKERNSEC_CONFIG_AUTO
68678+ depends on !IA64 && 64BIT
68679+ help
68680+ If you say Y here, the kernel's process stacks will be allocated
68681+ with vmalloc instead of the kernel's default allocator. This
68682+ introduces guard pages that in combination with the alloca checking
68683+ of the STACKLEAK feature prevents all forms of kernel process stack
68684+ overflow abuse. Note that this is different from kernel stack
68685+ buffer overflows.
68686+
68687+config GRKERNSEC_BRUTE
68688+ bool "Deter exploit bruteforcing"
68689+ default y if GRKERNSEC_CONFIG_AUTO
68690+ help
68691+ If you say Y here, attempts to bruteforce exploits against forking
68692+ daemons such as apache or sshd, as well as against suid/sgid binaries
68693+ will be deterred. When a child of a forking daemon is killed by PaX
68694+ or crashes due to an illegal instruction or other suspicious signal,
68695+ the parent process will be delayed 30 seconds upon every subsequent
68696+ fork until the administrator is able to assess the situation and
68697+ restart the daemon.
68698+ In the suid/sgid case, the attempt is logged, the user has all their
68699+ existing instances of the suid/sgid binary terminated and will
68700+ be unable to execute any suid/sgid binaries for 15 minutes.
68701+
68702+ It is recommended that you also enable signal logging in the auditing
68703+ section so that logs are generated when a process triggers a suspicious
68704+ signal.
68705+ If the sysctl option is enabled, a sysctl option with name
68706+ "deter_bruteforce" is created.
68707+
68708+config GRKERNSEC_MODHARDEN
68709+ bool "Harden module auto-loading"
68710+ default y if GRKERNSEC_CONFIG_AUTO
68711+ depends on MODULES
68712+ help
68713+ If you say Y here, module auto-loading in response to use of some
68714+ feature implemented by an unloaded module will be restricted to
68715+ root users. Enabling this option helps defend against attacks
68716+ by unprivileged users who abuse the auto-loading behavior to
68717+ cause a vulnerable module to load that is then exploited.
68718+
68719+ If this option prevents a legitimate use of auto-loading for a
68720+ non-root user, the administrator can execute modprobe manually
68721+ with the exact name of the module mentioned in the alert log.
68722+ Alternatively, the administrator can add the module to the list
68723+ of modules loaded at boot by modifying init scripts.
68724+
68725+ Modification of init scripts will most likely be needed on
68726+ Ubuntu servers with encrypted home directory support enabled,
68727+ as the first non-root user logging in will cause the ecb(aes),
68728+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68729+
68730+config GRKERNSEC_HIDESYM
68731+ bool "Hide kernel symbols"
68732+ default y if GRKERNSEC_CONFIG_AUTO
68733+ select PAX_USERCOPY_SLABS
68734+ help
68735+	  If you say Y here, getting information on loaded modules and
68736+	  displaying all kernel symbols through a syscall will be restricted
68737+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68738+ /proc/kallsyms will be restricted to the root user. The RBAC
68739+ system can hide that entry even from root.
68740+
68741+ This option also prevents leaking of kernel addresses through
68742+ several /proc entries.
68743+
68744+ Note that this option is only effective provided the following
68745+ conditions are met:
68746+ 1) The kernel using grsecurity is not precompiled by some distribution
68747+ 2) You have also enabled GRKERNSEC_DMESG
68748+ 3) You are using the RBAC system and hiding other files such as your
68749+ kernel image and System.map. Alternatively, enabling this option
68750+ causes the permissions on /boot, /lib/modules, and the kernel
68751+ source directory to change at compile time to prevent
68752+ reading by non-root users.
68753+ If the above conditions are met, this option will aid in providing a
68754+ useful protection against local kernel exploitation of overflows
68755+ and arbitrary read/write vulnerabilities.
68756+
68757+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68758+ in addition to this feature.
68759+
68760+config GRKERNSEC_RANDSTRUCT
68761+ bool "Randomize layout of sensitive kernel structures"
68762+ default y if GRKERNSEC_CONFIG_AUTO
68763+ select GRKERNSEC_HIDESYM
68764+ select MODVERSIONS if MODULES
68765+ help
68766+ If you say Y here, the layouts of a number of sensitive kernel
68767+ structures (task, fs, cred, etc) and all structures composed entirely
68768+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68769+ This can introduce the requirement of an additional infoleak
68770+ vulnerability for exploits targeting these structure types.
68771+
68772+ Enabling this feature will introduce some performance impact, slightly
68773+ increase memory usage, and prevent the use of forensic tools like
68774+	  Volatility against the system (unless the kernel source tree, which
68775+	  contains the seed, is left uncleaned after kernel installation).
68776+
68777+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68778+ It remains after a make clean to allow for external modules to be compiled
68779+ with the existing seed and will be removed by a make mrproper or
68780+ make distclean.
68781+
68782+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
68783+ to install the supporting headers explicitly in addition to the normal
68784+ gcc package.
68785+
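To illustrate what gets randomized, here is a hypothetical "ops" struct of the kind the plugin reorders; the member names are invented for the example:

/* Hypothetical example: because this struct consists entirely of function
 * pointers, the RANDSTRUCT plugin shuffles its member order at compile
 * time. An exploit overwriting, say, ->write can no longer rely on a
 * fixed offset without first leaking the randomized layout. */
struct example_ops {
	int  (*open)(void *priv);
	long (*read)(void *priv, char *buf, unsigned long len);
	long (*write)(void *priv, const char *buf, unsigned long len);
	void (*release)(void *priv);
};
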
68786+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68787+ bool "Use cacheline-aware structure randomization"
68788+ depends on GRKERNSEC_RANDSTRUCT
68789+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68790+ help
68791+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68792+ at restricting randomization to cacheline-sized groups of elements. It
68793+ will further not randomize bitfields in structures. This reduces the
68794+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68795+
68796+config GRKERNSEC_KERN_LOCKOUT
68797+ bool "Active kernel exploit response"
68798+ default y if GRKERNSEC_CONFIG_AUTO
68799+ depends on X86 || ARM || PPC || SPARC
68800+ help
68801+ If you say Y here, when a PaX alert is triggered due to suspicious
68802+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68803+ or an OOPS occurs due to bad memory accesses, instead of just
68804+ terminating the offending process (and potentially allowing
68805+ a subsequent exploit from the same user), we will take one of two
68806+ actions:
68807+	  If the user was root, we will panic the system.
68808+	  If the user was non-root, we will log the attempt, terminate
68809+	  all processes owned by the user, then prevent them from creating
68810+	  any new processes until the system is restarted.
68811+ This deters repeated kernel exploitation/bruteforcing attempts
68812+ and is useful for later forensics.
68813+
68814+config GRKERNSEC_OLD_ARM_USERLAND
68815+ bool "Old ARM userland compatibility"
68816+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68817+ help
68818+ If you say Y here, stubs of executable code to perform such operations
68819+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68820+ table. This is unfortunately needed for old ARM userland meant to run
68821+ across a wide range of processors. Without this option enabled,
68822+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68823+ which is enough for Linaro userlands or other userlands designed for v6
68824+ and newer ARM CPUs. It's recommended that you try without this option enabled
68825+ first, and only enable it if your userland does not boot (it will likely fail
68826+ at init time).
68827+
68828+endmenu
68829+menu "Role Based Access Control Options"
68830+depends on GRKERNSEC
68831+
68832+config GRKERNSEC_RBAC_DEBUG
68833+ bool
68834+
68835+config GRKERNSEC_NO_RBAC
68836+ bool "Disable RBAC system"
68837+ help
68838+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68839+ preventing the RBAC system from being enabled. You should only say Y
68840+ here if you have no intention of using the RBAC system, so as to prevent
68841+ an attacker with root access from misusing the RBAC system to hide files
68842+ and processes when loadable module support and /dev/[k]mem have been
68843+ locked down.
68844+
68845+config GRKERNSEC_ACL_HIDEKERN
68846+ bool "Hide kernel processes"
68847+ help
68848+	  If you say Y here, all kernel threads will be hidden from all
68849+ processes but those whose subject has the "view hidden processes"
68850+ flag.
68851+
68852+config GRKERNSEC_ACL_MAXTRIES
68853+ int "Maximum tries before password lockout"
68854+ default 3
68855+ help
68856+ This option enforces the maximum number of times a user can attempt
68857+ to authorize themselves with the grsecurity RBAC system before being
68858+ denied the ability to attempt authorization again for a specified time.
68859+ The lower the number, the harder it will be to brute-force a password.
68860+
68861+config GRKERNSEC_ACL_TIMEOUT
68862+ int "Time to wait after max password tries, in seconds"
68863+ default 30
68864+ help
68865+	  This option specifies the time a user must wait after reaching the
68866+	  maximum number of invalid passwords before being allowed to attempt
68867+	  authorization to the RBAC system again. The higher the number, the
68868+	  harder it will be to brute-force a password.
68869+
68870+endmenu
68871+menu "Filesystem Protections"
68872+depends on GRKERNSEC
68873+
68874+config GRKERNSEC_PROC
68875+ bool "Proc restrictions"
68876+ default y if GRKERNSEC_CONFIG_AUTO
68877+ help
68878+ If you say Y here, the permissions of the /proc filesystem
68879+ will be altered to enhance system security and privacy. You MUST
68880+ choose either a user only restriction or a user and group restriction.
68881+	  With the user-only restriction, users can see only the processes they
68882+	  themselves run; with the user and group restriction, members of a group
68883+	  you choose can additionally view all processes and files normally
68884+	  restricted to root. NOTE: If you're running identd or
68885+ ntpd as a non-root user, you will have to run it as the group you
68886+ specify here.
68887+
68888+config GRKERNSEC_PROC_USER
68889+ bool "Restrict /proc to user only"
68890+ depends on GRKERNSEC_PROC
68891+ help
68892+	  If you say Y here, non-root users will only be able to view their own
68893+	  processes, and will be restricted from viewing network-related
68894+	  information and kernel symbol and module information.
68895+
68896+config GRKERNSEC_PROC_USERGROUP
68897+ bool "Allow special group"
68898+ default y if GRKERNSEC_CONFIG_AUTO
68899+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68900+ help
68901+ If you say Y here, you will be able to select a group that will be
68902+ able to view all processes and network-related information. If you've
68903+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68904+ remain hidden. This option is useful if you want to run identd as
68905+ a non-root user. The group you select may also be chosen at boot time
68906+ via "grsec_proc_gid=" on the kernel commandline.
68907+
68908+config GRKERNSEC_PROC_GID
68909+ int "GID for special group"
68910+ depends on GRKERNSEC_PROC_USERGROUP
68911+ default 1001
68912+
68913+config GRKERNSEC_PROC_ADD
68914+ bool "Additional restrictions"
68915+ default y if GRKERNSEC_CONFIG_AUTO
68916+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68917+ help
68918+ If you say Y here, additional restrictions will be placed on
68919+ /proc that keep normal users from viewing device information and
68920+ slabinfo information that could be useful for exploits.
68921+
68922+config GRKERNSEC_LINK
68923+ bool "Linking restrictions"
68924+ default y if GRKERNSEC_CONFIG_AUTO
68925+ help
68926+ If you say Y here, /tmp race exploits will be prevented, since users
68927+ will no longer be able to follow symlinks owned by other users in
68928+ world-writable +t directories (e.g. /tmp), unless the owner of the
68929+	  symlink is the owner of the directory. Users will also not be
68930+ able to hardlink to files they do not own. If the sysctl option is
68931+ enabled, a sysctl option with name "linking_restrictions" is created.
68932+
68933+config GRKERNSEC_SYMLINKOWN
68934+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68935+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68936+ help
68937+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68938+ that prevents it from being used as a security feature. As Apache
68939+ verifies the symlink by performing a stat() against the target of
68940+	  the symlink before it is followed, an attacker can set up a symlink
68941+ to point to a same-owned file, then replace the symlink with one
68942+ that targets another user's file just after Apache "validates" the
68943+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68944+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68945+ will be in place for the group you specify. If the sysctl option
68946+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68947+ created.
68948+
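A simplified sketch of the race described above, with a hypothetical serve_file() standing in for Apache's check-then-open sequence:

/* The check (steps 1-2) and the open (step 3) are separate syscalls, so
 * an attacker who repoints the symlink between them wins the race. The
 * kernel-enforced replacement validates ownership at follow time, where
 * no such window exists. */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int serve_file(const char *path)
{
	struct stat lst, tst;

	if (lstat(path, &lst) < 0 || stat(path, &tst) < 0)	/* 1: look up link and target */
		return -1;
	if (lst.st_uid != tst.st_uid)				/* 2: "SymlinksIfOwnerMatch" */
		return -1;
	/* ...race window: the symlink can be replaced right here... */
	return open(path, O_RDONLY);				/* 3: follows the new target */
}
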
68949+config GRKERNSEC_SYMLINKOWN_GID
68950+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68951+ depends on GRKERNSEC_SYMLINKOWN
68952+ default 1006
68953+ help
68954+ Setting this GID determines what group kernel-enforced
68955+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68956+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68957+
68958+config GRKERNSEC_FIFO
68959+ bool "FIFO restrictions"
68960+ default y if GRKERNSEC_CONFIG_AUTO
68961+ help
68962+ If you say Y here, users will not be able to write to FIFOs they don't
68963+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68964+	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
68965+ option is enabled, a sysctl option with name "fifo_restrictions" is
68966+ created.
68967+
68968+config GRKERNSEC_SYSFS_RESTRICT
68969+ bool "Sysfs/debugfs restriction"
68970+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68971+ depends on SYSFS
68972+ help
68973+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68974+ any filesystem normally mounted under it (e.g. debugfs) will be
68975+ mostly accessible only by root. These filesystems generally provide access
68976+ to hardware and debug information that isn't appropriate for unprivileged
68977+ users of the system. Sysfs and debugfs have also become a large source
68978+ of new vulnerabilities, ranging from infoleaks to local compromise.
68979+	  Very little security oversight has gone into adding new
68980+	  exporters of information to these filesystems, so their
68981+	  use is discouraged.
68982+ For reasons of compatibility, a few directories have been whitelisted
68983+ for access by non-root users:
68984+ /sys/fs/selinux
68985+ /sys/fs/fuse
68986+ /sys/devices/system/cpu
68987+
68988+config GRKERNSEC_ROFS
68989+ bool "Runtime read-only mount protection"
68990+ depends on SYSCTL
68991+ help
68992+ If you say Y here, a sysctl option with name "romount_protect" will
68993+ be created. By setting this option to 1 at runtime, filesystems
68994+ will be protected in the following ways:
68995+ * No new writable mounts will be allowed
68996+	  * Existing read-only mounts cannot be remounted read/write
68997+ * Write operations will be denied on all block devices
68998+ This option acts independently of grsec_lock: once it is set to 1,
68999+ it cannot be turned off. Therefore, please be mindful of the resulting
69000+ behavior if this option is enabled in an init script on a read-only
69001+ filesystem.
69002+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
69003+ and GRKERNSEC_IO should be enabled and module loading disabled via
69004+ config or at runtime.
69005+ This feature is mainly intended for secure embedded systems.
69006+
69008+config GRKERNSEC_DEVICE_SIDECHANNEL
69009+ bool "Eliminate stat/notify-based device sidechannels"
69010+ default y if GRKERNSEC_CONFIG_AUTO
69011+ help
69012+ If you say Y here, timing analyses on block or character
69013+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
69014+ will be thwarted for unprivileged users. If a process without
69015+ CAP_MKNOD stats such a device, the last access and last modify times
69016+ will match the device's create time. No access or modify events
69017+ will be triggered through inotify/dnotify/fanotify for such devices.
69018+ This feature will prevent attacks that may at a minimum
69019+ allow an attacker to determine the administrator's password length.
69020+
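A minimal sketch of the kind of probe this option defeats: polling stat() timestamps on /dev/ptmx to observe terminal activity. With the option enabled, an unprivileged caller sees only the device's create time:

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	time_t last = 0;

	for (;;) {
		/* activity on the device updates its timestamps; watching
		 * them leaks typing cadence and, e.g., password length */
		if (stat("/dev/ptmx", &st) == 0 && st.st_atime != last) {
			last = st.st_atime;
			printf("activity at %ld\n", (long)last);
		}
		usleep(10000);
	}
}
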
69021+config GRKERNSEC_CHROOT
69022+ bool "Chroot jail restrictions"
69023+ default y if GRKERNSEC_CONFIG_AUTO
69024+ help
69025+ If you say Y here, you will be able to choose several options that will
69026+ make breaking out of a chrooted jail much more difficult. If you
69027+ encounter no software incompatibilities with the following options, it
69028+ is recommended that you enable each one.
69029+
69030+ Note that the chroot restrictions are not intended to apply to "chroots"
69031+ to directories that are simple bind mounts of the global root filesystem.
69032+ For several other reasons, a user shouldn't expect any significant
69033+	  security from performing such a chroot.
69034+
69035+config GRKERNSEC_CHROOT_MOUNT
69036+ bool "Deny mounts"
69037+ default y if GRKERNSEC_CONFIG_AUTO
69038+ depends on GRKERNSEC_CHROOT
69039+ help
69040+ If you say Y here, processes inside a chroot will not be able to
69041+ mount or remount filesystems. If the sysctl option is enabled, a
69042+ sysctl option with name "chroot_deny_mount" is created.
69043+
69044+config GRKERNSEC_CHROOT_DOUBLE
69045+ bool "Deny double-chroots"
69046+ default y if GRKERNSEC_CONFIG_AUTO
69047+ depends on GRKERNSEC_CHROOT
69048+ help
69049+ If you say Y here, processes inside a chroot will not be able to chroot
69050+ again outside the chroot. This is a widely used method of breaking
69051+ out of a chroot jail and should not be allowed. If the sysctl
69052+ option is enabled, a sysctl option with name
69053+ "chroot_deny_chroot" is created.
69054+
69055+config GRKERNSEC_CHROOT_PIVOT
69056+ bool "Deny pivot_root in chroot"
69057+ default y if GRKERNSEC_CONFIG_AUTO
69058+ depends on GRKERNSEC_CHROOT
69059+ help
69060+ If you say Y here, processes inside a chroot will not be able to use
69061+ a function called pivot_root() that was introduced in Linux 2.3.41. It
69062+	  works similarly to chroot in that it changes the root filesystem. This
69063+ function could be misused in a chrooted process to attempt to break out
69064+ of the chroot, and therefore should not be allowed. If the sysctl
69065+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
69066+ created.
69067+
69068+config GRKERNSEC_CHROOT_CHDIR
69069+ bool "Enforce chdir(\"/\") on all chroots"
69070+ default y if GRKERNSEC_CONFIG_AUTO
69071+ depends on GRKERNSEC_CHROOT
69072+ help
69073+ If you say Y here, the current working directory of all newly-chrooted
69074+	  applications will be set to the root directory of the chroot.
69075+ The man page on chroot(2) states:
69076+ Note that this call does not change the current working
69077+ directory, so that `.' can be outside the tree rooted at
69078+ `/'. In particular, the super-user can escape from a
69079+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
69080+
69081+ It is recommended that you say Y here, since it's not known to break
69082+ any software. If the sysctl option is enabled, a sysctl option with
69083+ name "chroot_enforce_chdir" is created.
69084+
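The escape quoted from chroot(2) above, written out in C (paths hypothetical); forcing the cwd to "/" on chroot removes the starting point this technique relies on:

#include <sys/stat.h>
#include <unistd.h>

int escape(void)
{
	mkdir("foo", 0755);
	chroot("foo");			/* cwd is left outside the new root */
	for (int i = 0; i < 64; i++)
		chdir("..");		/* walk up to the real root */
	chroot(".");			/* re-root at the real / */
	return execl("/bin/sh", "sh", (char *)NULL);
}
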
69085+config GRKERNSEC_CHROOT_CHMOD
69086+ bool "Deny (f)chmod +s"
69087+ default y if GRKERNSEC_CONFIG_AUTO
69088+ depends on GRKERNSEC_CHROOT
69089+ help
69090+ If you say Y here, processes inside a chroot will not be able to chmod
69091+ or fchmod files to make them have suid or sgid bits. This protects
69092+ against another published method of breaking a chroot. If the sysctl
69093+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
69094+ created.
69095+
69096+config GRKERNSEC_CHROOT_FCHDIR
69097+ bool "Deny fchdir and fhandle out of chroot"
69098+ default y if GRKERNSEC_CONFIG_AUTO
69099+ depends on GRKERNSEC_CHROOT
69100+ help
69101+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
69102+ to a file descriptor of the chrooting process that points to a directory
69103+	  outside the chroot will be stopped. Additionally, this option prevents
69104+ use of the recently-created syscall for opening files by a guessable "file
69105+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
69106+ with name "chroot_deny_fchdir" is created.
69107+
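A sketch of the fchdir() break-out this option stops (the jail path is hypothetical): the attacker keeps a directory fd from before the chroot and uses it to step back out.

#include <fcntl.h>
#include <unistd.h>

int escape(void)
{
	int fd = open("/", O_RDONLY | O_DIRECTORY);	/* taken before the chroot */

	chroot("/some/jail");
	fchdir(fd);			/* cwd is now outside the jail again */
	for (int i = 0; i < 64; i++)
		chdir("..");
	return chroot(".");		/* re-root at the real / */
}
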
69108+config GRKERNSEC_CHROOT_MKNOD
69109+ bool "Deny mknod"
69110+ default y if GRKERNSEC_CONFIG_AUTO
69111+ depends on GRKERNSEC_CHROOT
69112+ help
69113+ If you say Y here, processes inside a chroot will not be allowed to
69114+ mknod. The problem with using mknod inside a chroot is that it
69115+ would allow an attacker to create a device entry that is the same
69116+	  as one on the physical root of your system, which could be anything
69117+	  from the console device to a device for your hard drive (which
69118+ they could then use to wipe the drive or steal data). It is recommended
69119+ that you say Y here, unless you run into software incompatibilities.
69120+ If the sysctl option is enabled, a sysctl option with name
69121+ "chroot_deny_mknod" is created.
69122+
69123+config GRKERNSEC_CHROOT_SHMAT
69124+ bool "Deny shmat() out of chroot"
69125+ default y if GRKERNSEC_CONFIG_AUTO
69126+ depends on GRKERNSEC_CHROOT
69127+ help
69128+ If you say Y here, processes inside a chroot will not be able to attach
69129+ to shared memory segments that were created outside of the chroot jail.
69130+ It is recommended that you say Y here. If the sysctl option is enabled,
69131+ a sysctl option with name "chroot_deny_shmat" is created.
69132+
69133+config GRKERNSEC_CHROOT_UNIX
69134+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
69135+ default y if GRKERNSEC_CONFIG_AUTO
69136+ depends on GRKERNSEC_CHROOT
69137+ help
69138+ If you say Y here, processes inside a chroot will not be able to
69139+ connect to abstract (meaning not belonging to a filesystem) Unix
69140+ domain sockets that were bound outside of a chroot. It is recommended
69141+ that you say Y here. If the sysctl option is enabled, a sysctl option
69142+ with name "chroot_deny_unix" is created.
69143+
69144+config GRKERNSEC_CHROOT_FINDTASK
69145+ bool "Protect outside processes"
69146+ default y if GRKERNSEC_CONFIG_AUTO
69147+ depends on GRKERNSEC_CHROOT
69148+ help
69149+ If you say Y here, processes inside a chroot will not be able to
69150+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
69151+ getsid, or view any process outside of the chroot. If the sysctl
69152+ option is enabled, a sysctl option with name "chroot_findtask" is
69153+ created.
69154+
69155+config GRKERNSEC_CHROOT_NICE
69156+ bool "Restrict priority changes"
69157+ default y if GRKERNSEC_CONFIG_AUTO
69158+ depends on GRKERNSEC_CHROOT
69159+ help
69160+ If you say Y here, processes inside a chroot will not be able to raise
69161+ the priority of processes in the chroot, or alter the priority of
69162+ processes outside the chroot. This provides more security than simply
69163+ removing CAP_SYS_NICE from the process' capability set. If the
69164+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
69165+ is created.
69166+
69167+config GRKERNSEC_CHROOT_SYSCTL
69168+ bool "Deny sysctl writes"
69169+ default y if GRKERNSEC_CONFIG_AUTO
69170+ depends on GRKERNSEC_CHROOT
69171+ help
69172+ If you say Y here, an attacker in a chroot will not be able to
69173+ write to sysctl entries, either by sysctl(2) or through a /proc
69174+ interface. It is strongly recommended that you say Y here. If the
69175+ sysctl option is enabled, a sysctl option with name
69176+ "chroot_deny_sysctl" is created.
69177+
69178+config GRKERNSEC_CHROOT_RENAME
69179+ bool "Deny bad renames"
69180+ default y if GRKERNSEC_CONFIG_AUTO
69181+ depends on GRKERNSEC_CHROOT
69182+ help
69183+ If you say Y here, an attacker in a chroot will not be able to
69184+ abuse the ability to create double chroots to break out of the
69185+ chroot by exploiting a race condition between a rename of a directory
69186+	  within a chroot and an open of a symlink with relative path
69187+ components. This feature will likewise prevent an accomplice outside
69188+ a chroot from enabling a user inside the chroot to break out and make
69189+ use of their credentials on the global filesystem. Enabling this
69190+ feature is essential to prevent root users from breaking out of a
69191+ chroot. If the sysctl option is enabled, a sysctl option with name
69192+ "chroot_deny_bad_rename" is created.
69193+
69194+config GRKERNSEC_CHROOT_CAPS
69195+ bool "Capability restrictions"
69196+ default y if GRKERNSEC_CONFIG_AUTO
69197+ depends on GRKERNSEC_CHROOT
69198+ help
69199+ If you say Y here, the capabilities on all processes within a
69200+ chroot jail will be lowered to stop module insertion, raw i/o,
69201+ system and net admin tasks, rebooting the system, modifying immutable
69202+ files, modifying IPC owned by another, and changing the system time.
69203+	  This is left as an option because it can break some apps. Disable this
69204+ if your chrooted apps are having problems performing those kinds of
69205+ tasks. If the sysctl option is enabled, a sysctl option with
69206+ name "chroot_caps" is created.
69207+
69208+config GRKERNSEC_CHROOT_INITRD
69209+ bool "Exempt initrd tasks from restrictions"
69210+ default y if GRKERNSEC_CONFIG_AUTO
69211+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
69212+ help
69213+ If you say Y here, tasks started prior to init will be exempted from
69214+ grsecurity's chroot restrictions. This option is mainly meant to
69215+	  work around Plymouth performing privileged operations unnecessarily
69216+ in a chroot.
69217+
69218+endmenu
69219+menu "Kernel Auditing"
69220+depends on GRKERNSEC
69221+
69222+config GRKERNSEC_AUDIT_GROUP
69223+ bool "Single group for auditing"
69224+ help
69225+ If you say Y here, the exec and chdir logging features will only operate
69226+ on a group you specify. This option is recommended if you only want to
69227+ watch certain users instead of having a large amount of logs from the
69228+ entire system. If the sysctl option is enabled, a sysctl option with
69229+ name "audit_group" is created.
69230+
69231+config GRKERNSEC_AUDIT_GID
69232+ int "GID for auditing"
69233+ depends on GRKERNSEC_AUDIT_GROUP
69234+ default 1007
69235+
69236+config GRKERNSEC_EXECLOG
69237+ bool "Exec logging"
69238+ help
69239+ If you say Y here, all execve() calls will be logged (since the
69240+ other exec*() calls are frontends to execve(), all execution
69241+ will be logged). Useful for shell-servers that like to keep track
69242+ of their users. If the sysctl option is enabled, a sysctl option with
69243+ name "exec_logging" is created.
69244+	  WARNING: When enabled, this option will produce a LOT of logs, especially
69245+ on an active system.
69246+
69247+config GRKERNSEC_RESLOG
69248+ bool "Resource logging"
69249+ default y if GRKERNSEC_CONFIG_AUTO
69250+ help
69251+ If you say Y here, all attempts to overstep resource limits will
69252+ be logged with the resource name, the requested size, and the current
69253+ limit. It is highly recommended that you say Y here. If the sysctl
69254+ option is enabled, a sysctl option with name "resource_logging" is
69255+ created. If the RBAC system is enabled, the sysctl value is ignored.
69256+
69257+config GRKERNSEC_CHROOT_EXECLOG
69258+ bool "Log execs within chroot"
69259+ help
69260+ If you say Y here, all executions inside a chroot jail will be logged
69261+ to syslog. This can cause a large amount of logs if certain
69262+ applications (eg. djb's daemontools) are installed on the system, and
69263+ is therefore left as an option. If the sysctl option is enabled, a
69264+ sysctl option with name "chroot_execlog" is created.
69265+
69266+config GRKERNSEC_AUDIT_PTRACE
69267+ bool "Ptrace logging"
69268+ help
69269+ If you say Y here, all attempts to attach to a process via ptrace
69270+ will be logged. If the sysctl option is enabled, a sysctl option
69271+ with name "audit_ptrace" is created.
69272+
69273+config GRKERNSEC_AUDIT_CHDIR
69274+ bool "Chdir logging"
69275+ help
69276+ If you say Y here, all chdir() calls will be logged. If the sysctl
69277+ option is enabled, a sysctl option with name "audit_chdir" is created.
69278+
69279+config GRKERNSEC_AUDIT_MOUNT
69280+ bool "(Un)Mount logging"
69281+ help
69282+ If you say Y here, all mounts and unmounts will be logged. If the
69283+ sysctl option is enabled, a sysctl option with name "audit_mount" is
69284+ created.
69285+
69286+config GRKERNSEC_SIGNAL
69287+ bool "Signal logging"
69288+ default y if GRKERNSEC_CONFIG_AUTO
69289+ help
69290+ If you say Y here, certain important signals will be logged, such as
69291+	  SIGSEGV, which will as a result inform you when an error in a program
69292+	  occurred, which in some cases could indicate an exploit attempt.
69293+ If the sysctl option is enabled, a sysctl option with name
69294+ "signal_logging" is created.
69295+
69296+config GRKERNSEC_FORKFAIL
69297+ bool "Fork failure logging"
69298+ help
69299+ If you say Y here, all failed fork() attempts will be logged.
69300+ This could suggest a fork bomb, or someone attempting to overstep
69301+ their process limit. If the sysctl option is enabled, a sysctl option
69302+ with name "forkfail_logging" is created.
69303+
69304+config GRKERNSEC_TIME
69305+ bool "Time change logging"
69306+ default y if GRKERNSEC_CONFIG_AUTO
69307+ help
69308+ If you say Y here, any changes of the system clock will be logged.
69309+ If the sysctl option is enabled, a sysctl option with name
69310+ "timechange_logging" is created.
69311+
69312+config GRKERNSEC_PROC_IPADDR
69313+ bool "/proc/<pid>/ipaddr support"
69314+ default y if GRKERNSEC_CONFIG_AUTO
69315+ help
69316+ If you say Y here, a new entry will be added to each /proc/<pid>
69317+ directory that contains the IP address of the person using the task.
69318+ The IP is carried across local TCP and AF_UNIX stream sockets.
69319+ This information can be useful for IDS/IPSes to perform remote response
69320+ to a local attack. The entry is readable by only the owner of the
69321+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69322+ the RBAC system), and thus does not create privacy concerns.
69323+
69324+config GRKERNSEC_RWXMAP_LOG
69325+ bool 'Denied RWX mmap/mprotect logging'
69326+ default y if GRKERNSEC_CONFIG_AUTO
69327+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69328+ help
69329+ If you say Y here, calls to mmap() and mprotect() with explicit
69330+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69331+ denied by the PAX_MPROTECT feature. This feature will also
69332+ log other problematic scenarios that can occur when PAX_MPROTECT
69333+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69334+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69335+ is created.
69336+
69337+endmenu
69338+
69339+menu "Executable Protections"
69340+depends on GRKERNSEC
69341+
69342+config GRKERNSEC_DMESG
69343+ bool "Dmesg(8) restriction"
69344+ default y if GRKERNSEC_CONFIG_AUTO
69345+ help
69346+ If you say Y here, non-root users will not be able to use dmesg(8)
69347+ to view the contents of the kernel's circular log buffer.
69348+ The kernel's log buffer often contains kernel addresses and other
69349+ identifying information useful to an attacker in fingerprinting a
69350+ system for a targeted exploit.
69351+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69352+ created.
69353+
69354+config GRKERNSEC_HARDEN_PTRACE
69355+ bool "Deter ptrace-based process snooping"
69356+ default y if GRKERNSEC_CONFIG_AUTO
69357+ help
69358+ If you say Y here, TTY sniffers and other malicious monitoring
69359+ programs implemented through ptrace will be defeated. If you
69360+ have been using the RBAC system, this option has already been
69361+ enabled for several years for all users, with the ability to make
69362+ fine-grained exceptions.
69363+
69364+ This option only affects the ability of non-root users to ptrace
69365+	  processes that are not a descendant of the ptracing process.
69366+ This means that strace ./binary and gdb ./binary will still work,
69367+ but attaching to arbitrary processes will not. If the sysctl
69368+ option is enabled, a sysctl option with name "harden_ptrace" is
69369+ created.
69370+
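For illustration, the difference is visible from a short attach attempt (victim_pid is hypothetical): with harden_ptrace set, a non-root attach to a non-descendant process fails, while tracing your own children still works.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

int try_attach(pid_t victim_pid)
{
	if (ptrace(PTRACE_ATTACH, victim_pid, NULL, NULL) < 0) {
		perror("PTRACE_ATTACH");	/* expected: EPERM under harden_ptrace */
		return -1;
	}
	return 0;
}
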
69371+config GRKERNSEC_PTRACE_READEXEC
69372+ bool "Require read access to ptrace sensitive binaries"
69373+ default y if GRKERNSEC_CONFIG_AUTO
69374+ help
69375+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69376+ binaries. This option is useful in environments that
69377+ remove the read bits (e.g. file mode 4711) from suid binaries to
69378+ prevent infoleaking of their contents. This option adds
69379+	  consistency to the use of that file mode, as the binary's contents
69380+	  could otherwise be read out of memory by ptracing it when run without privileges.
69381+
69382+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69383+ is created.
69384+
69385+config GRKERNSEC_SETXID
69386+ bool "Enforce consistent multithreaded privileges"
69387+ default y if GRKERNSEC_CONFIG_AUTO
69388+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69389+ help
69390+ If you say Y here, a change from a root uid to a non-root uid
69391+ in a multithreaded application will cause the resulting uids,
69392+ gids, supplementary groups, and capabilities in that thread
69393+ to be propagated to the other threads of the process. In most
69394+ cases this is unnecessary, as glibc will emulate this behavior
69395+ on behalf of the application. Other libcs do not act in the
69396+ same way, allowing the other threads of the process to continue
69397+ running with root privileges. If the sysctl option is enabled,
69398+ a sysctl option with name "consistent_setxid" is created.
69399+
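A sketch of the hazard being addressed, assuming a libc that does not emulate the propagation the way glibc does: without kernel enforcement, the worker thread below could keep uid 0 after the main thread drops privileges (compile with -lpthread).

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
	sleep(2);
	/* without glibc's signal-based emulation or consistent_setxid,
	 * this thread could still report uid 0 here */
	printf("worker uid: %d\n", (int)getuid());
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);
	if (setuid(1000) < 0)		/* drop root in the main thread */
		perror("setuid");
	pthread_join(t, NULL);
	return 0;
}
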
69400+config GRKERNSEC_HARDEN_IPC
69401+ bool "Disallow access to overly-permissive IPC objects"
69402+ default y if GRKERNSEC_CONFIG_AUTO
69403+ depends on SYSVIPC
69404+ help
69405+ If you say Y here, access to overly-permissive IPC objects (shared
69406+ memory, message queues, and semaphores) will be denied for processes
69407+ given the following criteria beyond normal permission checks:
69408+ 1) If the IPC object is world-accessible and the euid doesn't match
69409+ that of the creator or current uid for the IPC object
69410+ 2) If the IPC object is group-accessible and the egid doesn't
69411+ match that of the creator or current gid for the IPC object
69412+ It's a common error to grant too much permission to these objects,
69413+ with impact ranging from denial of service and information leaking to
69414+ privilege escalation. This feature was developed in response to
69415+ research by Tim Brown:
69416+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69417+ who found hundreds of such insecure usages. Processes with
69418+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69419+ If the sysctl option is enabled, a sysctl option with name
69420+ "harden_ipc" is created.
69421+
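A simplified, userland-typed sketch of the two checks listed above (not the kernel's actual implementation; the struct is invented to mirror the relevant ipc_perm fields):

#include <sys/types.h>

struct ipc_perm_view {		/* hypothetical mirror of the relevant fields */
	unsigned short mode;
	uid_t uid, cuid;	/* current owner / creator */
	gid_t gid, cgid;
};

static int harden_ipc_denied(const struct ipc_perm_view *p,
			     uid_t euid, gid_t egid)
{
	if ((p->mode & 0006) && euid != p->cuid && euid != p->uid)
		return 1;	/* 1) world-accessible, euid matches neither */
	if ((p->mode & 0060) && egid != p->cgid && egid != p->gid)
		return 1;	/* 2) group-accessible, egid matches neither */
	return 0;
}
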
69422+config GRKERNSEC_TPE
69423+ bool "Trusted Path Execution (TPE)"
69424+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69425+ help
69426+ If you say Y here, you will be able to choose a gid to add to the
69427+ supplementary groups of users you want to mark as "untrusted."
69428+ These users will not be able to execute any files that are not in
69429+ root-owned directories writable only by root. If the sysctl option
69430+ is enabled, a sysctl option with name "tpe" is created.
69431+
69432+config GRKERNSEC_TPE_ALL
69433+ bool "Partially restrict all non-root users"
69434+ depends on GRKERNSEC_TPE
69435+ help
69436+ If you say Y here, all non-root users will be covered under
69437+ a weaker TPE restriction. This is separate from, and in addition to,
69438+ the main TPE options that you have selected elsewhere. Thus, if a
69439+ "trusted" GID is chosen, this restriction applies to even that GID.
69440+ Under this restriction, all non-root users will only be allowed to
69441+ execute files in directories they own that are not group or
69442+ world-writable, or in directories owned by root and writable only by
69443+ root. If the sysctl option is enabled, a sysctl option with name
69444+ "tpe_restrict_all" is created.
69445+
69446+config GRKERNSEC_TPE_INVERT
69447+ bool "Invert GID option"
69448+ depends on GRKERNSEC_TPE
69449+ help
69450+ If you say Y here, the group you specify in the TPE configuration will
69451+ decide what group TPE restrictions will be *disabled* for. This
69452+ option is useful if you want TPE restrictions to be applied to most
69453+ users on the system. If the sysctl option is enabled, a sysctl option
69454+ with name "tpe_invert" is created. Unlike other sysctl options, this
69455+ entry will default to on for backward-compatibility.
69456+
69457+config GRKERNSEC_TPE_GID
69458+ int
69459+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69460+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69461+
69462+config GRKERNSEC_TPE_UNTRUSTED_GID
69463+ int "GID for TPE-untrusted users"
69464+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69465+ default 1005
69466+ help
69467+ Setting this GID determines what group TPE restrictions will be
69468+ *enabled* for. If the sysctl option is enabled, a sysctl option
69469+ with name "tpe_gid" is created.
69470+
69471+config GRKERNSEC_TPE_TRUSTED_GID
69472+ int "GID for TPE-trusted users"
69473+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69474+ default 1005
69475+ help
69476+ Setting this GID determines what group TPE restrictions will be
69477+ *disabled* for. If the sysctl option is enabled, a sysctl option
69478+ with name "tpe_gid" is created.
69479+
69480+endmenu
69481+menu "Network Protections"
69482+depends on GRKERNSEC
69483+
69484+config GRKERNSEC_BLACKHOLE
69485+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69486+ default y if GRKERNSEC_CONFIG_AUTO
69487+ depends on NET
69488+ help
69489+ If you say Y here, neither TCP resets nor ICMP
69490+ destination-unreachable packets will be sent in response to packets
69491+ sent to ports for which no associated listening process exists.
69492+ It will also prevent the sending of ICMP protocol unreachable packets
69493+ in response to packets with unknown protocols.
69494+	  This feature supports both IPv4 and IPv6 and exempts the
69495+ loopback interface from blackholing. Enabling this feature
69496+ makes a host more resilient to DoS attacks and reduces network
69497+ visibility against scanners.
69498+
69499+ The blackhole feature as-implemented is equivalent to the FreeBSD
69500+ blackhole feature, as it prevents RST responses to all packets, not
69501+ just SYNs. Under most application behavior this causes no
69502+ problems, but applications (like haproxy) may not close certain
69503+ connections in a way that cleanly terminates them on the remote
69504+ end, leaving the remote host in LAST_ACK state. Because of this
69505+ side-effect and to prevent intentional LAST_ACK DoSes, this
69506+ feature also adds automatic mitigation against such attacks.
69507+ The mitigation drastically reduces the amount of time a socket
69508+ can spend in LAST_ACK state. If you're using haproxy and not
69509+ all servers it connects to have this option enabled, consider
69510+ disabling this feature on the haproxy host.
69511+
69512+ If the sysctl option is enabled, two sysctl options with names
69513+ "ip_blackhole" and "lastack_retries" will be created.
69514+ While "ip_blackhole" takes the standard zero/non-zero on/off
69515+ toggle, "lastack_retries" uses the same kinds of values as
69516+ "tcp_retries1" and "tcp_retries2". The default value of 4
69517+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69518+ state.
69519+
69520+config GRKERNSEC_NO_SIMULT_CONNECT
69521+ bool "Disable TCP Simultaneous Connect"
69522+ default y if GRKERNSEC_CONFIG_AUTO
69523+ depends on NET
69524+ help
69525+ If you say Y here, a feature by Willy Tarreau will be enabled that
69526+ removes a weakness in Linux's strict implementation of TCP that
69527+ allows two clients to connect to each other without either entering
69528+ a listening state. The weakness allows an attacker to easily prevent
69529+ a client from connecting to a known server provided the source port
69530+ for the connection is guessed correctly.
69531+
69532+ As the weakness could be used to prevent an antivirus or IPS from
69533+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69534+ it should be eliminated by enabling this option. Though Linux is
69535+ one of few operating systems supporting simultaneous connect, it
69536+ has no legitimate use in practice and is rarely supported by firewalls.
69537+
69538+config GRKERNSEC_SOCKET
69539+ bool "Socket restrictions"
69540+ depends on NET
69541+ help
69542+ If you say Y here, you will be able to choose from several options.
69543+ If you assign a GID on your system and add it to the supplementary
69544+ groups of users you want to restrict socket access to, this patch
69545+ will perform up to three things, based on the option(s) you choose.
69546+
69547+config GRKERNSEC_SOCKET_ALL
69548+ bool "Deny any sockets to group"
69549+ depends on GRKERNSEC_SOCKET
69550+ help
69551+	  If you say Y here, you will be able to choose a GID whose users will
69552+ be unable to connect to other hosts from your machine or run server
69553+ applications from your machine. If the sysctl option is enabled, a
69554+ sysctl option with name "socket_all" is created.
69555+
69556+config GRKERNSEC_SOCKET_ALL_GID
69557+ int "GID to deny all sockets for"
69558+ depends on GRKERNSEC_SOCKET_ALL
69559+ default 1004
69560+ help
69561+ Here you can choose the GID to disable socket access for. Remember to
69562+ add the users you want socket access disabled for to the GID
69563+ specified here. If the sysctl option is enabled, a sysctl option
69564+ with name "socket_all_gid" is created.
69565+
69566+config GRKERNSEC_SOCKET_CLIENT
69567+ bool "Deny client sockets to group"
69568+ depends on GRKERNSEC_SOCKET
69569+ help
69570+	  If you say Y here, you will be able to choose a GID whose users will
69571+ be unable to connect to other hosts from your machine, but will be
69572+ able to run servers. If this option is enabled, all users in the group
69573+ you specify will have to use passive mode when initiating ftp transfers
69574+ from the shell on your machine. If the sysctl option is enabled, a
69575+ sysctl option with name "socket_client" is created.
69576+
69577+config GRKERNSEC_SOCKET_CLIENT_GID
69578+ int "GID to deny client sockets for"
69579+ depends on GRKERNSEC_SOCKET_CLIENT
69580+ default 1003
69581+ help
69582+ Here you can choose the GID to disable client socket access for.
69583+ Remember to add the users you want client socket access disabled for to
69584+ the GID specified here. If the sysctl option is enabled, a sysctl
69585+ option with name "socket_client_gid" is created.
69586+
69587+config GRKERNSEC_SOCKET_SERVER
69588+ bool "Deny server sockets to group"
69589+ depends on GRKERNSEC_SOCKET
69590+ help
69591+	  If you say Y here, you will be able to choose a GID whose users will
69592+ be unable to run server applications from your machine. If the sysctl
69593+ option is enabled, a sysctl option with name "socket_server" is created.
69594+
69595+config GRKERNSEC_SOCKET_SERVER_GID
69596+ int "GID to deny server sockets for"
69597+ depends on GRKERNSEC_SOCKET_SERVER
69598+ default 1002
69599+ help
69600+ Here you can choose the GID to disable server socket access for.
69601+ Remember to add the users you want server socket access disabled for to
69602+ the GID specified here. If the sysctl option is enabled, a sysctl
69603+ option with name "socket_server_gid" is created.
69604+
69605+endmenu
69606+
69607+menu "Physical Protections"
69608+depends on GRKERNSEC
69609+
69610+config GRKERNSEC_DENYUSB
69611+ bool "Deny new USB connections after toggle"
69612+ default y if GRKERNSEC_CONFIG_AUTO
69613+ depends on SYSCTL && USB_SUPPORT
69614+ help
69615+ If you say Y here, a new sysctl option with name "deny_new_usb"
69616+ will be created. Setting its value to 1 will prevent any new
69617+ USB devices from being recognized by the OS. Any attempted USB
69618+ device insertion will be logged. This option is intended to be
69619+ used against custom USB devices designed to exploit vulnerabilities
69620+ in various USB device drivers.
69621+
69622+ For greatest effectiveness, this sysctl should be set after any
69623+ relevant init scripts. This option is safe to enable in distros
69624+ as each user can choose whether or not to toggle the sysctl.
69625+
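A minimal sketch of toggling the sysctl from a late init task, using the "deny_new_usb" name given above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/grsecurity/deny_new_usb", "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");	/* reject all USB devices connected from now on */
	return fclose(f) ? 1 : 0;
}
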
69626+config GRKERNSEC_DENYUSB_FORCE
69627+ bool "Reject all USB devices not connected at boot"
69628+ select USB
69629+ depends on GRKERNSEC_DENYUSB
69630+ help
69631+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69632+ that doesn't involve a sysctl entry. This option should only be
69633+ enabled if you're sure you want to deny all new USB connections
69634+ at runtime and don't want to modify init scripts. This should not
69635+ be enabled by distros. It forces the core USB code to be built
69636+ into the kernel image so that all devices connected at boot time
69637+ can be recognized and new USB device connections can be prevented
69638+ prior to init running.
69639+
69640+endmenu
69641+
69642+menu "Sysctl Support"
69643+depends on GRKERNSEC && SYSCTL
69644+
69645+config GRKERNSEC_SYSCTL
69646+ bool "Sysctl support"
69647+ default y if GRKERNSEC_CONFIG_AUTO
69648+ help
69649+ If you say Y here, you will be able to change the options that
69650+ grsecurity runs with at bootup, without having to recompile your
69651+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69652+ to enable (1) or disable (0) various features. All the sysctl entries
69653+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69654+ All features enabled in the kernel configuration are disabled at boot
69655+ if you do not say Y to the "Turn on features by default" option.
69656+ All options should be set at startup, and the grsec_lock entry should
69657+ be set to a non-zero value after all the options are set.
69658+ *THIS IS EXTREMELY IMPORTANT*
69659+
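A sketch of the boot-time sequence this implies, using the /proc/sys/kernel/grsecurity layout and the grsec_lock entry named above (the chroot_deny_chroot toggle is just an example):

#include <stdio.h>

static int grsec_set(const char *name, int val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	grsec_set("chroot_deny_chroot", 1);	/* example toggle */
	grsec_set("grsec_lock", 1);		/* must come last: freezes all entries */
	return 0;
}
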
69660+config GRKERNSEC_SYSCTL_DISTRO
69661+ bool "Extra sysctl support for distro makers (READ HELP)"
69662+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69663+ help
69664+ If you say Y here, additional sysctl options will be created
69665+ for features that affect processes running as root. Therefore,
69666+ it is critical when using this option that the grsec_lock entry be
69667+	  enabled after boot. Only distros that ship prebuilt kernel packages
69668+	  with this option enabled and that can ensure grsec_lock is set
69669+	  after boot should use this option.
69670+ *Failure to set grsec_lock after boot makes all grsec features
69671+ this option covers useless*
69672+
69673+ Currently this option creates the following sysctl entries:
69674+ "Disable Privileged I/O": "disable_priv_io"
69675+
69676+config GRKERNSEC_SYSCTL_ON
69677+ bool "Turn on features by default"
69678+ default y if GRKERNSEC_CONFIG_AUTO
69679+ depends on GRKERNSEC_SYSCTL
69680+ help
69681+ If you say Y here, instead of having all features enabled in the
69682+ kernel configuration disabled at boot time, the features will be
69683+ enabled at boot time. It is recommended you say Y here unless
69684+ there is some reason you would want all sysctl-tunable features to
69685+ be disabled by default. As mentioned elsewhere, it is important
69686+ to enable the grsec_lock entry once you have finished modifying
69687+ the sysctl entries.
69688+
69689+endmenu
69690+menu "Logging Options"
69691+depends on GRKERNSEC
69692+
69693+config GRKERNSEC_FLOODTIME
69694+ int "Seconds in between log messages (minimum)"
69695+ default 10
69696+ help
69697+	  This option allows you to enforce a minimum number of seconds between
69698+ grsecurity log messages. The default should be suitable for most
69699+ people, however, if you choose to change it, choose a value small enough
69700+ to allow informative logs to be produced, but large enough to
69701+ prevent flooding.
69702+
69703+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69704+ any rate limiting on grsecurity log messages.
69705+
69706+config GRKERNSEC_FLOODBURST
69707+ int "Number of messages in a burst (maximum)"
69708+ default 6
69709+ help
69710+ This option allows you to choose the maximum number of messages allowed
69711+ within the flood time interval you chose in a separate option. The
69712+ default should be suitable for most people, however if you find that
69713+ many of your logs are being interpreted as flooding, you may want to
69714+ raise this value.
69715+
69716+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69717+ any rate limiting on grsecurity log messages.
69718+
69719+endmenu
69720diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69721new file mode 100644
69722index 0000000..30ababb
69723--- /dev/null
69724+++ b/grsecurity/Makefile
69725@@ -0,0 +1,54 @@
69726+# grsecurity - access control and security hardening for Linux
69727+# All code in this directory and various hooks located throughout the Linux kernel are
69728+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69729+# http://www.grsecurity.net spender@grsecurity.net
69730+#
69731+# This program is free software; you can redistribute it and/or
69732+# modify it under the terms of the GNU General Public License version 2
69733+# as published by the Free Software Foundation.
69734+#
69735+# This program is distributed in the hope that it will be useful,
69736+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69737+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69738+# GNU General Public License for more details.
69739+#
69740+# You should have received a copy of the GNU General Public License
69741+# along with this program; if not, write to the Free Software
69742+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69743+
69744+KBUILD_CFLAGS += -Werror
69745+
69746+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69747+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69748+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69749+ grsec_usb.o grsec_ipc.o grsec_proc.o
69750+
69751+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69752+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69753+ gracl_learn.o grsec_log.o gracl_policy.o
69754+ifdef CONFIG_COMPAT
69755+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69756+endif
69757+
69758+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69759+
69760+ifdef CONFIG_NET
69761+obj-y += grsec_sock.o
69762+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69763+endif
69764+
69765+ifndef CONFIG_GRKERNSEC
69766+obj-y += grsec_disabled.o
69767+endif
69768+
69769+ifdef CONFIG_GRKERNSEC_HIDESYM
69770+extra-y := grsec_hidesym.o
69771+$(obj)/grsec_hidesym.o:
69772+ @-chmod -f 500 /boot
69773+ @-chmod -f 500 /lib/modules
69774+ @-chmod -f 500 /lib64/modules
69775+ @-chmod -f 500 /lib32/modules
69776+ @-chmod -f 700 .
69777+ @-chmod -f 700 $(objtree)
69778+ @echo ' grsec: protected kernel image paths'
69779+endif
69780diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69781new file mode 100644
69782index 0000000..6c1e154
69783--- /dev/null
69784+++ b/grsecurity/gracl.c
69785@@ -0,0 +1,2749 @@
69786+#include <linux/kernel.h>
69787+#include <linux/module.h>
69788+#include <linux/sched.h>
69789+#include <linux/mm.h>
69790+#include <linux/file.h>
69791+#include <linux/fs.h>
69792+#include <linux/namei.h>
69793+#include <linux/mount.h>
69794+#include <linux/tty.h>
69795+#include <linux/proc_fs.h>
69796+#include <linux/lglock.h>
69797+#include <linux/slab.h>
69798+#include <linux/vmalloc.h>
69799+#include <linux/types.h>
69800+#include <linux/sysctl.h>
69801+#include <linux/netdevice.h>
69802+#include <linux/ptrace.h>
69803+#include <linux/gracl.h>
69804+#include <linux/gralloc.h>
69805+#include <linux/security.h>
69806+#include <linux/grinternal.h>
69807+#include <linux/pid_namespace.h>
69808+#include <linux/stop_machine.h>
69809+#include <linux/fdtable.h>
69810+#include <linux/percpu.h>
69812+#include <linux/hugetlb.h>
69813+#include <linux/posix-timers.h>
69814+#include <linux/prefetch.h>
69815+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69816+#include <linux/magic.h>
69817+#include <linux/pagemap.h>
69818+#include "../fs/btrfs/async-thread.h"
69819+#include "../fs/btrfs/ctree.h"
69820+#include "../fs/btrfs/btrfs_inode.h"
69821+#endif
69822+#include "../fs/mount.h"
69823+
69824+#include <asm/uaccess.h>
69825+#include <asm/errno.h>
69826+#include <asm/mman.h>
69827+
69828+#define FOR_EACH_ROLE_START(role) \
69829+ role = running_polstate.role_list; \
69830+ while (role) {
69831+
69832+#define FOR_EACH_ROLE_END(role) \
69833+ role = role->prev; \
69834+ }
69835+
69836+extern struct path gr_real_root;
69837+
69838+static struct gr_policy_state running_polstate;
69839+struct gr_policy_state *polstate = &running_polstate;
69840+extern struct gr_alloc_state *current_alloc_state;
69841+
69842+extern char *gr_shared_page[4];
69843+DEFINE_RWLOCK(gr_inode_lock);
69844+
69845+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69846+
69847+#ifdef CONFIG_NET
69848+extern struct vfsmount *sock_mnt;
69849+#endif
69850+
69851+extern struct vfsmount *pipe_mnt;
69852+extern struct vfsmount *shm_mnt;
69853+
69854+#ifdef CONFIG_HUGETLBFS
69855+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69856+#endif
69857+
69858+extern u16 acl_sp_role_value;
69859+extern struct acl_object_label *fakefs_obj_rw;
69860+extern struct acl_object_label *fakefs_obj_rwx;
69861+
69862+int gr_acl_is_enabled(void)
69863+{
69864+ return (gr_status & GR_READY);
69865+}
69866+
69867+void gr_enable_rbac_system(void)
69868+{
69869+ pax_open_kernel();
69870+ gr_status |= GR_READY;
69871+ pax_close_kernel();
69872+}
69873+
69874+int gr_rbac_disable(void *unused)
69875+{
69876+ pax_open_kernel();
69877+ gr_status &= ~GR_READY;
69878+ pax_close_kernel();
69879+
69880+ return 0;
69881+}
69882+
69883+static inline dev_t __get_dev(const struct dentry *dentry)
69884+{
69885+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69886+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69887+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69888+ else
69889+#endif
69890+ return dentry->d_sb->s_dev;
69891+}
69892+
69893+static inline u64 __get_ino(const struct dentry *dentry)
69894+{
69895+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69896+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69897+ return btrfs_ino(dentry->d_inode);
69898+ else
69899+#endif
69900+ return dentry->d_inode->i_ino;
69901+}
69902+
69903+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69904+{
69905+ return __get_dev(dentry);
69906+}
69907+
69908+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69909+{
69910+ return __get_ino(dentry);
69911+}
69912+
69913+static char gr_task_roletype_to_char(struct task_struct *task)
69914+{
69915+ switch (task->role->roletype &
69916+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69917+ GR_ROLE_SPECIAL)) {
69918+ case GR_ROLE_DEFAULT:
69919+ return 'D';
69920+ case GR_ROLE_USER:
69921+ return 'U';
69922+ case GR_ROLE_GROUP:
69923+ return 'G';
69924+ case GR_ROLE_SPECIAL:
69925+ return 'S';
69926+ }
69927+
69928+ return 'X';
69929+}
69930+
69931+char gr_roletype_to_char(void)
69932+{
69933+ return gr_task_roletype_to_char(current);
69934+}
69935+
69936+__inline__ int
69937+gr_acl_tpe_check(void)
69938+{
69939+ if (unlikely(!(gr_status & GR_READY)))
69940+ return 0;
69941+ if (current->role->roletype & GR_ROLE_TPE)
69942+ return 1;
69943+ else
69944+ return 0;
69945+}
69946+
69947+int
69948+gr_handle_rawio(const struct inode *inode)
69949+{
69950+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69951+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69952+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69953+ !capable(CAP_SYS_RAWIO))
69954+ return 1;
69955+#endif
69956+ return 0;
69957+}
69958+
69959+int
69960+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69961+{
69962+ if (likely(lena != lenb))
69963+ return 0;
69964+
69965+ return !memcmp(a, b, lena);
69966+}
69967+
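+/* Added note: the path helpers below build names right to left. The buffer
+ * cursor starts at the end of the buffer and each component is copied in
+ * front of what has been written so far, so *buffer and *buflen shrink
+ * together as prepend() is called repeatedly. */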
69968+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69969+{
69970+ *buflen -= namelen;
69971+ if (*buflen < 0)
69972+ return -ENAMETOOLONG;
69973+ *buffer -= namelen;
69974+ memcpy(*buffer, str, namelen);
69975+ return 0;
69976+}
69977+
69978+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69979+{
69980+ return prepend(buffer, buflen, name->name, name->len);
69981+}
69982+
69983+static int prepend_path(const struct path *path, struct path *root,
69984+ char **buffer, int *buflen)
69985+{
69986+ struct dentry *dentry = path->dentry;
69987+ struct vfsmount *vfsmnt = path->mnt;
69988+ struct mount *mnt = real_mount(vfsmnt);
69989+ bool slash = false;
69990+ int error = 0;
69991+
69992+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69993+ struct dentry * parent;
69994+
69995+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69996+ /* Global root? */
69997+ if (!mnt_has_parent(mnt)) {
69998+ goto out;
69999+ }
70000+ dentry = mnt->mnt_mountpoint;
70001+ mnt = mnt->mnt_parent;
70002+ vfsmnt = &mnt->mnt;
70003+ continue;
70004+ }
70005+ parent = dentry->d_parent;
70006+ prefetch(parent);
70007+ spin_lock(&dentry->d_lock);
70008+ error = prepend_name(buffer, buflen, &dentry->d_name);
70009+ spin_unlock(&dentry->d_lock);
70010+ if (!error)
70011+ error = prepend(buffer, buflen, "/", 1);
70012+ if (error)
70013+ break;
70014+
70015+ slash = true;
70016+ dentry = parent;
70017+ }
70018+
70019+out:
70020+ if (!error && !slash)
70021+ error = prepend(buffer, buflen, "/", 1);
70022+
70023+ return error;
70024+}
70025+
70026+/* this must be called with mount_lock and rename_lock held */
70027+
70028+static char *__our_d_path(const struct path *path, struct path *root,
70029+ char *buf, int buflen)
70030+{
70031+ char *res = buf + buflen;
70032+ int error;
70033+
70034+ prepend(&res, &buflen, "\0", 1);
70035+ error = prepend_path(path, root, &res, &buflen);
70036+ if (error)
70037+ return ERR_PTR(error);
70038+
70039+ return res;
70040+}
70041+
70042+static char *
70043+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
70044+{
70045+ char *retval;
70046+
70047+ retval = __our_d_path(path, root, buf, buflen);
70048+ if (unlikely(IS_ERR(retval)))
70049+ retval = strcpy(buf, "<path too long>");
70050+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
70051+ retval[1] = '\0';
70052+
70053+ return retval;
70054+}
70055+
70056+static char *
70057+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70058+ char *buf, int buflen)
70059+{
70060+ struct path path;
70061+ char *res;
70062+
70063+ path.dentry = (struct dentry *)dentry;
70064+ path.mnt = (struct vfsmount *)vfsmnt;
70065+
70066+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
70067+ by the RBAC system */
70068+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
70069+
70070+ return res;
70071+}
70072+
70073+static char *
70074+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70075+ char *buf, int buflen)
70076+{
70077+ char *res;
70078+ struct path path;
70079+ struct path root;
70080+ struct task_struct *reaper = init_pid_ns.child_reaper;
70081+
70082+ path.dentry = (struct dentry *)dentry;
70083+ path.mnt = (struct vfsmount *)vfsmnt;
70084+
70085+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
70086+ get_fs_root(reaper->fs, &root);
70087+
70088+ read_seqlock_excl(&mount_lock);
70089+ write_seqlock(&rename_lock);
70090+ res = gen_full_path(&path, &root, buf, buflen);
70091+ write_sequnlock(&rename_lock);
70092+ read_sequnlock_excl(&mount_lock);
70093+
70094+ path_put(&root);
70095+ return res;
70096+}
70097+
70098+char *
70099+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70100+{
70101+ char *ret;
70102+ read_seqlock_excl(&mount_lock);
70103+ write_seqlock(&rename_lock);
70104+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70105+ PAGE_SIZE);
70106+ write_sequnlock(&rename_lock);
70107+ read_sequnlock_excl(&mount_lock);
70108+ return ret;
70109+}
70110+
70111+static char *
70112+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70113+{
70114+ char *ret;
70115+ char *buf;
70116+ int buflen;
70117+
70118+ read_seqlock_excl(&mount_lock);
70119+ write_seqlock(&rename_lock);
70120+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
70121+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
70122+ buflen = (int)(ret - buf);
70123+ if (buflen >= 5)
70124+ prepend(&ret, &buflen, "/proc", 5);
70125+ else
70126+ ret = strcpy(buf, "<path too long>");
70127+ write_sequnlock(&rename_lock);
70128+ read_sequnlock_excl(&mount_lock);
70129+ return ret;
70130+}
70131+
70132+char *
70133+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
70134+{
70135+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70136+ PAGE_SIZE);
70137+}
70138+
70139+char *
70140+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
70141+{
70142+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
70143+ PAGE_SIZE);
70144+}
70145+
70146+char *
70147+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
70148+{
70149+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
70150+ PAGE_SIZE);
70151+}
70152+
70153+char *
70154+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
70155+{
70156+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
70157+ PAGE_SIZE);
70158+}
70159+
70160+char *
70161+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
70162+{
70163+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
70164+ PAGE_SIZE);
70165+}
70166+
70167+__inline__ __u32
70168+to_gr_audit(const __u32 reqmode)
70169+{
70170+	/* mask off any audit flags already present, then shift the remaining
70171+	   permission flags up into their audit counterparts, adding the special
70172+	   case of append auditing if we're requesting write */
70173+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
70174+}
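
A worked example of to_gr_audit()'s bit manipulation. The concrete flag values below are assumptions for illustration only (the real GR_* constants are defined elsewhere in this patch); what they model is the layout the "<< 10" implies: permission bits in the low word, audit bits exactly 10 positions higher.

#include <stdio.h>

/* hypothetical flag layout, for illustration only */
#define GR_READ		0x00000001u
#define GR_WRITE	0x00000002u
#define GR_APPEND	0x00000004u
#define GR_AUDIT_READ	(GR_READ << 10)
#define GR_AUDIT_WRITE	(GR_WRITE << 10)
#define GR_AUDIT_APPEND	(GR_APPEND << 10)
#define GR_AUDITS	(GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND)

static unsigned int to_gr_audit(unsigned int reqmode)
{
	return ((reqmode & ~GR_AUDITS) << 10) |
	       ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0);
}

int main(void)
{
	printf("%#x\n", to_gr_audit(GR_READ));            /* 0x400: audit-read only */
	printf("%#x\n", to_gr_audit(GR_READ | GR_WRITE)); /* 0x1c00: read, write and append audits */
	return 0;
}
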
70175+
70176+struct acl_role_label *
70177+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
70178+ const gid_t gid)
70179+{
70180+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
70181+ struct acl_role_label *match;
70182+ struct role_allowed_ip *ipp;
70183+ unsigned int x;
70184+ u32 curr_ip = task->signal->saved_ip;
70185+
70186+ match = state->acl_role_set.r_hash[index];
70187+
70188+ while (match) {
70189+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
70190+ for (x = 0; x < match->domain_child_num; x++) {
70191+ if (match->domain_children[x] == uid)
70192+ goto found;
70193+ }
70194+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
70195+ break;
70196+ match = match->next;
70197+ }
70198+found:
70199+ if (match == NULL) {
70200+ try_group:
70201+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
70202+ match = state->acl_role_set.r_hash[index];
70203+
70204+ while (match) {
70205+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
70206+ for (x = 0; x < match->domain_child_num; x++) {
70207+ if (match->domain_children[x] == gid)
70208+ goto found2;
70209+ }
70210+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
70211+ break;
70212+ match = match->next;
70213+ }
70214+found2:
70215+ if (match == NULL)
70216+ match = state->default_role;
70217+ if (match->allowed_ips == NULL)
70218+ return match;
70219+ else {
70220+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70221+ if (likely
70222+ ((ntohl(curr_ip) & ipp->netmask) ==
70223+ (ntohl(ipp->addr) & ipp->netmask)))
70224+ return match;
70225+ }
70226+ match = state->default_role;
70227+ }
70228+ } else if (match->allowed_ips == NULL) {
70229+ return match;
70230+ } else {
70231+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70232+ if (likely
70233+ ((ntohl(curr_ip) & ipp->netmask) ==
70234+ (ntohl(ipp->addr) & ipp->netmask)))
70235+ return match;
70236+ }
70237+ goto try_group;
70238+ }
70239+
70240+ return match;
70241+}
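
The lookup above resolves a role in the order user (including user domains) -> group (including group domains) -> default, and a role carrying an allowed_ips list only matches when the task's saved IP falls inside one of its networks. A userspace sketch of that netmask test, read off the comparison above (which implies the netmask is kept in host byte order while addresses are in network order; the types here are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <arpa/inet.h>

struct ip_rule { unsigned int addr; unsigned int netmask; };	/* addr net order, mask host order */

static bool ip_allowed(const struct ip_rule *rules, int n, unsigned int curr_ip)
{
	if (n == 0)
		return true;	/* no allowed_ips list means no IP restriction */
	for (int i = 0; i < n; i++)
		if ((ntohl(curr_ip) & rules[i].netmask) ==
		    (ntohl(rules[i].addr) & rules[i].netmask))
			return true;
	return false;
}

int main(void)
{
	struct ip_rule lan = { htonl(0xc0a80100), 0xffffff00 };	/* 192.168.1.0/24 */

	printf("%d\n", ip_allowed(&lan, 1, htonl(0xc0a80117)));	/* 1: 192.168.1.23 is inside */
	printf("%d\n", ip_allowed(&lan, 1, htonl(0x0a000001)));	/* 0: 10.0.0.1 is outside */
	return 0;
}
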
70242+
70243+static struct acl_role_label *
70244+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
70245+ const gid_t gid)
70246+{
70247+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
70248+}
70249+
70250+struct acl_subject_label *
70251+lookup_acl_subj_label(const u64 ino, const dev_t dev,
70252+ const struct acl_role_label *role)
70253+{
70254+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70255+ struct acl_subject_label *match;
70256+
70257+ match = role->subj_hash[index];
70258+
70259+ while (match && (match->inode != ino || match->device != dev ||
70260+ (match->mode & GR_DELETED))) {
70261+ match = match->next;
70262+ }
70263+
70264+ if (match && !(match->mode & GR_DELETED))
70265+ return match;
70266+ else
70267+ return NULL;
70268+}
70269+
70270+struct acl_subject_label *
70271+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
70272+ const struct acl_role_label *role)
70273+{
70274+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70275+ struct acl_subject_label *match;
70276+
70277+ match = role->subj_hash[index];
70278+
70279+ while (match && (match->inode != ino || match->device != dev ||
70280+ !(match->mode & GR_DELETED))) {
70281+ match = match->next;
70282+ }
70283+
70284+ if (match && (match->mode & GR_DELETED))
70285+ return match;
70286+ else
70287+ return NULL;
70288+}
70289+
70290+static struct acl_object_label *
70291+lookup_acl_obj_label(const u64 ino, const dev_t dev,
70292+ const struct acl_subject_label *subj)
70293+{
70294+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70295+ struct acl_object_label *match;
70296+
70297+ match = subj->obj_hash[index];
70298+
70299+ while (match && (match->inode != ino || match->device != dev ||
70300+ (match->mode & GR_DELETED))) {
70301+ match = match->next;
70302+ }
70303+
70304+ if (match && !(match->mode & GR_DELETED))
70305+ return match;
70306+ else
70307+ return NULL;
70308+}
70309+
70310+static struct acl_object_label *
70311+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
70312+ const struct acl_subject_label *subj)
70313+{
70314+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70315+ struct acl_object_label *match;
70316+
70317+ match = subj->obj_hash[index];
70318+
70319+ while (match && (match->inode != ino || match->device != dev ||
70320+ !(match->mode & GR_DELETED))) {
70321+ match = match->next;
70322+ }
70323+
70324+ if (match && (match->mode & GR_DELETED))
70325+ return match;
70326+
70327+ match = subj->obj_hash[index];
70328+
70329+ while (match && (match->inode != ino || match->device != dev ||
70330+ (match->mode & GR_DELETED))) {
70331+ match = match->next;
70332+ }
70333+
70334+ if (match && !(match->mode & GR_DELETED))
70335+ return match;
70336+ else
70337+ return NULL;
70338+}
70339+
70340+struct name_entry *
70341+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70342+{
70343+ unsigned int len = strlen(name);
70344+ unsigned int key = full_name_hash(name, len);
70345+ unsigned int index = key % state->name_set.n_size;
70346+ struct name_entry *match;
70347+
70348+ match = state->name_set.n_hash[index];
70349+
70350+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70351+ match = match->next;
70352+
70353+ return match;
70354+}
70355+
70356+static struct name_entry *
70357+lookup_name_entry(const char *name)
70358+{
70359+ return __lookup_name_entry(&running_polstate, name);
70360+}
70361+
70362+static struct name_entry *
70363+lookup_name_entry_create(const char *name)
70364+{
70365+ unsigned int len = strlen(name);
70366+ unsigned int key = full_name_hash(name, len);
70367+ unsigned int index = key % running_polstate.name_set.n_size;
70368+ struct name_entry *match;
70369+
70370+ match = running_polstate.name_set.n_hash[index];
70371+
70372+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70373+ !match->deleted))
70374+ match = match->next;
70375+
70376+ if (match && match->deleted)
70377+ return match;
70378+
70379+ match = running_polstate.name_set.n_hash[index];
70380+
70381+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70382+ match->deleted))
70383+ match = match->next;
70384+
70385+ if (match && !match->deleted)
70386+ return match;
70387+ else
70388+ return NULL;
70389+}
70390+
70391+static struct inodev_entry *
70392+lookup_inodev_entry(const u64 ino, const dev_t dev)
70393+{
70394+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70395+ struct inodev_entry *match;
70396+
70397+ match = running_polstate.inodev_set.i_hash[index];
70398+
70399+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70400+ match = match->next;
70401+
70402+ return match;
70403+}
70404+
70405+void
70406+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70407+{
70408+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70409+ state->inodev_set.i_size);
70410+ struct inodev_entry **curr;
70411+
70412+ entry->prev = NULL;
70413+
70414+ curr = &state->inodev_set.i_hash[index];
70415+ if (*curr != NULL)
70416+ (*curr)->prev = entry;
70417+
70418+ entry->next = *curr;
70419+ *curr = entry;
70420+
70421+ return;
70422+}
70423+
70424+static void
70425+insert_inodev_entry(struct inodev_entry *entry)
70426+{
70427+ __insert_inodev_entry(&running_polstate, entry);
70428+}
70429+
70430+void
70431+insert_acl_obj_label(struct acl_object_label *obj,
70432+ struct acl_subject_label *subj)
70433+{
70434+ unsigned int index =
70435+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70436+ struct acl_object_label **curr;
70437+
70438+ obj->prev = NULL;
70439+
70440+ curr = &subj->obj_hash[index];
70441+ if (*curr != NULL)
70442+ (*curr)->prev = obj;
70443+
70444+ obj->next = *curr;
70445+ *curr = obj;
70446+
70447+ return;
70448+}
70449+
70450+void
70451+insert_acl_subj_label(struct acl_subject_label *obj,
70452+ struct acl_role_label *role)
70453+{
70454+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70455+ struct acl_subject_label **curr;
70456+
70457+ obj->prev = NULL;
70458+
70459+ curr = &role->subj_hash[index];
70460+ if (*curr != NULL)
70461+ (*curr)->prev = obj;
70462+
70463+ obj->next = *curr;
70464+ *curr = obj;
70465+
70466+ return;
70467+}
70468+
70469+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
70470+
70471+static int
70472+glob_match(const char *p, const char *n)
70473+{
70474+ char c;
70475+
70476+ while ((c = *p++) != '\0') {
70477+ switch (c) {
70478+ case '?':
70479+ if (*n == '\0')
70480+ return 1;
70481+ else if (*n == '/')
70482+ return 1;
70483+ break;
70484+ case '\\':
70485+ if (*n != c)
70486+ return 1;
70487+ break;
70488+ case '*':
70489+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70490+ if (*n == '/')
70491+ return 1;
70492+ else if (c == '?') {
70493+ if (*n == '\0')
70494+ return 1;
70495+ else
70496+ ++n;
70497+ }
70498+ }
70499+ if (c == '\0') {
70500+ return 0;
70501+ } else {
70502+ const char *endp;
70503+
70504+ if ((endp = strchr(n, '/')) == NULL)
70505+ endp = n + strlen(n);
70506+
70507+ if (c == '[') {
70508+ for (--p; n < endp; ++n)
70509+ if (!glob_match(p, n))
70510+ return 0;
70511+ } else if (c == '/') {
70512+ while (*n != '\0' && *n != '/')
70513+ ++n;
70514+ if (*n == '/' && !glob_match(p, n + 1))
70515+ return 0;
70516+ } else {
70517+ for (--p; n < endp; ++n)
70518+ if (*n == c && !glob_match(p, n))
70519+ return 0;
70520+ }
70521+
70522+ return 1;
70523+ }
70524+ case '[':
70525+ {
70526+ int not;
70527+ char cold;
70528+
70529+ if (*n == '\0' || *n == '/')
70530+ return 1;
70531+
70532+ not = (*p == '!' || *p == '^');
70533+ if (not)
70534+ ++p;
70535+
70536+ c = *p++;
70537+ for (;;) {
70538+ unsigned char fn = (unsigned char)*n;
70539+
70540+ if (c == '\0')
70541+ return 1;
70542+ else {
70543+ if (c == fn)
70544+ goto matched;
70545+ cold = c;
70546+ c = *p++;
70547+
70548+ if (c == '-' && *p != ']') {
70549+ unsigned char cend = *p++;
70550+
70551+ if (cend == '\0')
70552+ return 1;
70553+
70554+ if (cold <= fn && fn <= cend)
70555+ goto matched;
70556+
70557+ c = *p++;
70558+ }
70559+ }
70560+
70561+ if (c == ']')
70562+ break;
70563+ }
70564+ if (!not)
70565+ return 1;
70566+ break;
70567+ matched:
70568+ while (c != ']') {
70569+ if (c == '\0')
70570+ return 1;
70571+
70572+ c = *p++;
70573+ }
70574+ if (not)
70575+ return 1;
70576+ }
70577+ break;
70578+ default:
70579+ if (c != *n)
70580+ return 1;
70581+ }
70582+
70583+ ++n;
70584+ }
70585+
70586+ if (*n == '\0')
70587+ return 0;
70588+
70589+ if (*n == '/')
70590+ return 0;
70591+
70592+ return 1;
70593+}
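
A usage sketch for glob_match(), with the expected results read off the code above; it assumes it sits in the same file, below the (static) definition. Two behaviors worth calling out: '?' and an interior '*' never cross a '/', while a trailing '*' consumes the entire remainder of the name.

#include <assert.h>

static void glob_match_examples(void)
{
	assert(glob_match("/home/*", "/home/user") == 0);          /* match */
	assert(glob_match("/home/*", "/home/user/.ssh") == 0);     /* trailing '*' consumes the rest */
	assert(glob_match("/home/*/.ssh", "/home/user/.ssh") == 0);
	assert(glob_match("/home/*.txt", "/home/a/b.txt") == 1);   /* interior '*' stops at '/' */
	assert(glob_match("/tmp/file?", "/tmp/file1") == 0);
	assert(glob_match("/tmp/file?", "/tmp/file/") == 1);       /* '?' never matches '/' */
	assert(glob_match("/tmp/file[0-3]", "/tmp/file2") == 0);
	assert(glob_match("/tmp/file[!0-3]", "/tmp/file2") == 1);
}
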
70594+
70595+static struct acl_object_label *
70596+chk_glob_label(struct acl_object_label *globbed,
70597+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70598+{
70599+ struct acl_object_label *tmp;
70600+
70601+ if (*path == NULL)
70602+ *path = gr_to_filename_nolock(dentry, mnt);
70603+
70604+ tmp = globbed;
70605+
70606+ while (tmp) {
70607+ if (!glob_match(tmp->filename, *path))
70608+ return tmp;
70609+ tmp = tmp->next;
70610+ }
70611+
70612+ return NULL;
70613+}
70614+
70615+static struct acl_object_label *
70616+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70617+ const u64 curr_ino, const dev_t curr_dev,
70618+ const struct acl_subject_label *subj, char **path, const int checkglob)
70619+{
70620+ struct acl_subject_label *tmpsubj;
70621+ struct acl_object_label *retval;
70622+ struct acl_object_label *retval2;
70623+
70624+ tmpsubj = (struct acl_subject_label *) subj;
70625+ read_lock(&gr_inode_lock);
70626+ do {
70627+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70628+ if (retval) {
70629+ if (checkglob && retval->globbed) {
70630+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70631+ if (retval2)
70632+ retval = retval2;
70633+ }
70634+ break;
70635+ }
70636+ } while ((tmpsubj = tmpsubj->parent_subject));
70637+ read_unlock(&gr_inode_lock);
70638+
70639+ return retval;
70640+}
70641+
70642+static __inline__ struct acl_object_label *
70643+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70644+ struct dentry *curr_dentry,
70645+ const struct acl_subject_label *subj, char **path, const int checkglob)
70646+{
70647+ int newglob = checkglob;
70648+ u64 inode;
70649+ dev_t device;
70650+
70651+	/* if we aren't yet checking a subdirectory of the original path, skip glob
70652+	   checking, as we don't want a / * rule to match instead of the / object.
70653+	   Create lookups that call this function are the exception: they look up
70654+	   on the parent and thus need globbing checks on all paths.
70655+	*/
70656+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70657+ newglob = GR_NO_GLOB;
70658+
70659+ spin_lock(&curr_dentry->d_lock);
70660+ inode = __get_ino(curr_dentry);
70661+ device = __get_dev(curr_dentry);
70662+ spin_unlock(&curr_dentry->d_lock);
70663+
70664+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70665+}
70666+
70667+#ifdef CONFIG_HUGETLBFS
70668+static inline bool
70669+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70670+{
70671+ int i;
70672+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70673+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70674+ return true;
70675+ }
70676+
70677+ return false;
70678+}
70679+#endif
70680+
70681+static struct acl_object_label *
70682+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70683+ const struct acl_subject_label *subj, char *path, const int checkglob)
70684+{
70685+ struct dentry *dentry = (struct dentry *) l_dentry;
70686+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70687+ struct mount *real_mnt = real_mount(mnt);
70688+ struct acl_object_label *retval;
70689+ struct dentry *parent;
70690+
70691+ read_seqlock_excl(&mount_lock);
70692+ write_seqlock(&rename_lock);
70693+
70694+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70695+#ifdef CONFIG_NET
70696+ mnt == sock_mnt ||
70697+#endif
70698+#ifdef CONFIG_HUGETLBFS
70699+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70700+#endif
70701+		    /* ignore kernel-private (IS_PRIVATE) inodes */
70702+ IS_PRIVATE(l_dentry->d_inode))) {
70703+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70704+ goto out;
70705+ }
70706+
70707+ for (;;) {
70708+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70709+ break;
70710+
70711+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70712+ if (!mnt_has_parent(real_mnt))
70713+ break;
70714+
70715+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70716+ if (retval != NULL)
70717+ goto out;
70718+
70719+ dentry = real_mnt->mnt_mountpoint;
70720+ real_mnt = real_mnt->mnt_parent;
70721+ mnt = &real_mnt->mnt;
70722+ continue;
70723+ }
70724+
70725+ parent = dentry->d_parent;
70726+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70727+ if (retval != NULL)
70728+ goto out;
70729+
70730+ dentry = parent;
70731+ }
70732+
70733+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70734+
70735+ /* gr_real_root is pinned so we don't have to hold a reference */
70736+ if (retval == NULL)
70737+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70738+out:
70739+ write_sequnlock(&rename_lock);
70740+ read_sequnlock_excl(&mount_lock);
70741+
70742+ BUG_ON(retval == NULL);
70743+
70744+ return retval;
70745+}
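
Conceptually, __chk_obj_label() consults the ACL for the object itself and then for each ancestor toward the root, and the first hit wins: the most specific policy entry on the path takes effect, with "/" as the final fallback. A simplified userspace model of that walk (string paths stand in for the dentry/vfsmount chain; illustrative only):

#include <stdio.h>
#include <string.h>

static const char *policy[] = { "/", "/home", "/home/user" };

static const char *lookup(const char *path)
{
	for (size_t i = 0; i < sizeof(policy) / sizeof(policy[0]); i++)
		if (!strcmp(policy[i], path))
			return policy[i];
	return NULL;
}

static const char *chk_obj(char *path)
{
	for (;;) {
		const char *hit = lookup(path);
		if (hit)
			return hit;
		/* step to the parent, as the dentry walk above does */
		char *slash = strrchr(path, '/');
		if (slash == path)
			return lookup("/");	/* root fallback */
		*slash = '\0';
	}
}

int main(void)
{
	char a[] = "/home/user/.bashrc";
	char b[] = "/etc/passwd";

	printf("%s\n", chk_obj(a));	/* -> /home/user */
	printf("%s\n", chk_obj(b));	/* -> / */
	return 0;
}
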
70746+
70747+static __inline__ struct acl_object_label *
70748+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70749+ const struct acl_subject_label *subj)
70750+{
70751+ char *path = NULL;
70752+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70753+}
70754+
70755+static __inline__ struct acl_object_label *
70756+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70757+ const struct acl_subject_label *subj)
70758+{
70759+ char *path = NULL;
70760+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70761+}
70762+
70763+static __inline__ struct acl_object_label *
70764+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70765+ const struct acl_subject_label *subj, char *path)
70766+{
70767+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70768+}
70769+
70770+struct acl_subject_label *
70771+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70772+ const struct acl_role_label *role)
70773+{
70774+ struct dentry *dentry = (struct dentry *) l_dentry;
70775+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70776+ struct mount *real_mnt = real_mount(mnt);
70777+ struct acl_subject_label *retval;
70778+ struct dentry *parent;
70779+
70780+ read_seqlock_excl(&mount_lock);
70781+ write_seqlock(&rename_lock);
70782+
70783+ for (;;) {
70784+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70785+ break;
70786+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70787+ if (!mnt_has_parent(real_mnt))
70788+ break;
70789+
70790+ spin_lock(&dentry->d_lock);
70791+ read_lock(&gr_inode_lock);
70792+ retval =
70793+ lookup_acl_subj_label(__get_ino(dentry),
70794+ __get_dev(dentry), role);
70795+ read_unlock(&gr_inode_lock);
70796+ spin_unlock(&dentry->d_lock);
70797+ if (retval != NULL)
70798+ goto out;
70799+
70800+ dentry = real_mnt->mnt_mountpoint;
70801+ real_mnt = real_mnt->mnt_parent;
70802+ mnt = &real_mnt->mnt;
70803+ continue;
70804+ }
70805+
70806+ spin_lock(&dentry->d_lock);
70807+ read_lock(&gr_inode_lock);
70808+ retval = lookup_acl_subj_label(__get_ino(dentry),
70809+ __get_dev(dentry), role);
70810+ read_unlock(&gr_inode_lock);
70811+ parent = dentry->d_parent;
70812+ spin_unlock(&dentry->d_lock);
70813+
70814+ if (retval != NULL)
70815+ goto out;
70816+
70817+ dentry = parent;
70818+ }
70819+
70820+ spin_lock(&dentry->d_lock);
70821+ read_lock(&gr_inode_lock);
70822+ retval = lookup_acl_subj_label(__get_ino(dentry),
70823+ __get_dev(dentry), role);
70824+ read_unlock(&gr_inode_lock);
70825+ spin_unlock(&dentry->d_lock);
70826+
70827+ if (unlikely(retval == NULL)) {
70828+ /* gr_real_root is pinned, we don't need to hold a reference */
70829+ read_lock(&gr_inode_lock);
70830+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70831+ __get_dev(gr_real_root.dentry), role);
70832+ read_unlock(&gr_inode_lock);
70833+ }
70834+out:
70835+ write_sequnlock(&rename_lock);
70836+ read_sequnlock_excl(&mount_lock);
70837+
70838+ BUG_ON(retval == NULL);
70839+
70840+ return retval;
70841+}
70842+
70843+void
70844+assign_special_role(const char *rolename)
70845+{
70846+ struct acl_object_label *obj;
70847+ struct acl_role_label *r;
70848+ struct acl_role_label *assigned = NULL;
70849+ struct task_struct *tsk;
70850+ struct file *filp;
70851+
70852+ FOR_EACH_ROLE_START(r)
70853+ if (!strcmp(rolename, r->rolename) &&
70854+ (r->roletype & GR_ROLE_SPECIAL)) {
70855+ assigned = r;
70856+ break;
70857+ }
70858+ FOR_EACH_ROLE_END(r)
70859+
70860+ if (!assigned)
70861+ return;
70862+
70863+ read_lock(&tasklist_lock);
70864+ read_lock(&grsec_exec_file_lock);
70865+
70866+ tsk = current->real_parent;
70867+ if (tsk == NULL)
70868+ goto out_unlock;
70869+
70870+ filp = tsk->exec_file;
70871+ if (filp == NULL)
70872+ goto out_unlock;
70873+
70874+ tsk->is_writable = 0;
70875+ tsk->inherited = 0;
70876+
70877+ tsk->acl_sp_role = 1;
70878+ tsk->acl_role_id = ++acl_sp_role_value;
70879+ tsk->role = assigned;
70880+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70881+
70882+ /* ignore additional mmap checks for processes that are writable
70883+ by the default ACL */
70884+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70885+ if (unlikely(obj->mode & GR_WRITE))
70886+ tsk->is_writable = 1;
70887+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70888+ if (unlikely(obj->mode & GR_WRITE))
70889+ tsk->is_writable = 1;
70890+
70891+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70892+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70893+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70894+#endif
70895+
70896+out_unlock:
70897+ read_unlock(&grsec_exec_file_lock);
70898+ read_unlock(&tasklist_lock);
70899+ return;
70900+}
70901+
70902+
70903+static void
70904+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70905+{
70906+ struct task_struct *task = current;
70907+ const struct cred *cred = current_cred();
70908+
70909+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70910+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70911+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70912+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70913+
70914+ return;
70915+}
70916+
70917+static void
70918+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70919+{
70920+ struct task_struct *task = current;
70921+ const struct cred *cred = current_cred();
70922+
70923+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70924+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70925+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70926+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70927+
70928+ return;
70929+}
70930+
70931+static void
70932+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70933+{
70934+ struct task_struct *task = current;
70935+ const struct cred *cred = current_cred();
70936+
70937+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70938+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70939+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70940+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70941+
70942+ return;
70943+}
70944+
70945+static void
70946+gr_set_proc_res(struct task_struct *task)
70947+{
70948+ struct acl_subject_label *proc;
70949+ unsigned short i;
70950+
70951+ proc = task->acl;
70952+
70953+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70954+ return;
70955+
70956+ for (i = 0; i < RLIM_NLIMITS; i++) {
70957+ unsigned long rlim_cur, rlim_max;
70958+
70959+ if (!(proc->resmask & (1U << i)))
70960+ continue;
70961+
70962+ rlim_cur = proc->res[i].rlim_cur;
70963+ rlim_max = proc->res[i].rlim_max;
70964+
70965+ if (i == RLIMIT_NOFILE) {
70966+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70967+ if (rlim_cur > saved_sysctl_nr_open)
70968+ rlim_cur = saved_sysctl_nr_open;
70969+ if (rlim_max > saved_sysctl_nr_open)
70970+ rlim_max = saved_sysctl_nr_open;
70971+ }
70972+
70973+ task->signal->rlim[i].rlim_cur = rlim_cur;
70974+ task->signal->rlim[i].rlim_max = rlim_max;
70975+
70976+ if (i == RLIMIT_CPU)
70977+ update_rlimit_cpu(task, rlim_cur);
70978+ }
70979+
70980+ return;
70981+}
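
gr_set_proc_res() only overrides the rlimits whose bit is set in the subject's resmask. A small userspace sketch of that convention (the subj_limits type is made up for illustration; the bit numbers are the standard RLIMIT_* indices):

#include <stdio.h>
#include <sys/resource.h>

/* illustrative stand-in for the subject's limit fields */
struct subj_limits {
	unsigned int resmask;		/* bit (1U << RLIMIT_*) set per override */
	struct rlimit res[RLIM_NLIMITS];
};

static void apply_limits(const struct subj_limits *s, struct rlimit *task_rlim)
{
	for (unsigned short i = 0; i < RLIM_NLIMITS; i++) {
		if (!(s->resmask & (1U << i)))
			continue;	/* policy does not override this limit */
		task_rlim[i] = s->res[i];
	}
}

int main(void)
{
	struct subj_limits s = { .resmask = 1U << RLIMIT_NOFILE };
	struct rlimit task_rlim[RLIM_NLIMITS] = { { 0, 0 } };

	s.res[RLIMIT_NOFILE] = (struct rlimit){ .rlim_cur = 1024, .rlim_max = 4096 };
	apply_limits(&s, task_rlim);
	printf("%lu/%lu\n", (unsigned long)task_rlim[RLIMIT_NOFILE].rlim_cur,
	       (unsigned long)task_rlim[RLIMIT_NOFILE].rlim_max);	/* 1024/4096 */
	return 0;
}
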
70982+
70983+/* both of the functions below must be called with
70984+	rcu_read_lock();
70985+	read_lock(&tasklist_lock);
70986+	read_lock(&grsec_exec_file_lock);
70987+   held, except when __gr_get_subject_for_task is reached via gr_set_role_label()
70988+*/
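
A caller-side sketch of that contract, following the stated lock order:

	rcu_read_lock();
	read_lock(&tasklist_lock);
	read_lock(&grsec_exec_file_lock);
	subj = gr_get_subject_for_task(task, NULL, 1);
	read_unlock(&grsec_exec_file_lock);
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
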
70989+
70990+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70991+{
70992+ char *tmpname;
70993+ struct acl_subject_label *tmpsubj;
70994+ struct file *filp;
70995+ struct name_entry *nmatch;
70996+
70997+ filp = task->exec_file;
70998+ if (filp == NULL)
70999+ return NULL;
71000+
71001+	/* the following applies the correct subject
71002+	   to binaries that were already running when
71003+	   the RBAC system was enabled and that have
71004+	   been replaced or deleted since execution
71005+	   -----
71006+	   when the RBAC system starts, the inode/dev
71007+	   taken from exec_file may be one the RBAC
71008+	   system is unaware of: it only knows the
71009+	   inode/dev of the file currently on disk,
71010+	   or the absence of it.
71011+	*/
71012+
71013+ if (filename)
71014+ nmatch = __lookup_name_entry(state, filename);
71015+ else {
71016+ preempt_disable();
71017+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
71018+
71019+ nmatch = __lookup_name_entry(state, tmpname);
71020+ preempt_enable();
71021+ }
71022+ tmpsubj = NULL;
71023+ if (nmatch) {
71024+ if (nmatch->deleted)
71025+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
71026+ else
71027+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
71028+ }
71029+ /* this also works for the reload case -- if we don't match a potentially inherited subject
71030+ then we fall back to a normal lookup based on the binary's ino/dev
71031+ */
71032+ if (tmpsubj == NULL && fallback)
71033+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
71034+
71035+ return tmpsubj;
71036+}
71037+
71038+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
71039+{
71040+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
71041+}
71042+
71043+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
71044+{
71045+ struct acl_object_label *obj;
71046+ struct file *filp;
71047+
71048+ filp = task->exec_file;
71049+
71050+ task->acl = subj;
71051+ task->is_writable = 0;
71052+ /* ignore additional mmap checks for processes that are writable
71053+ by the default ACL */
71054+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
71055+ if (unlikely(obj->mode & GR_WRITE))
71056+ task->is_writable = 1;
71057+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71058+ if (unlikely(obj->mode & GR_WRITE))
71059+ task->is_writable = 1;
71060+
71061+ gr_set_proc_res(task);
71062+
71063+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71064+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71065+#endif
71066+}
71067+
71068+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
71069+{
71070+ __gr_apply_subject_to_task(&running_polstate, task, subj);
71071+}
71072+
71073+__u32
71074+gr_search_file(const struct dentry * dentry, const __u32 mode,
71075+ const struct vfsmount * mnt)
71076+{
71077+ __u32 retval = mode;
71078+ struct acl_subject_label *curracl;
71079+ struct acl_object_label *currobj;
71080+
71081+ if (unlikely(!(gr_status & GR_READY)))
71082+ return (mode & ~GR_AUDITS);
71083+
71084+ curracl = current->acl;
71085+
71086+ currobj = chk_obj_label(dentry, mnt, curracl);
71087+ retval = currobj->mode & mode;
71088+
71089+ /* if we're opening a specified transfer file for writing
71090+ (e.g. /dev/initctl), then transfer our role to init
71091+ */
71092+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
71093+ current->role->roletype & GR_ROLE_PERSIST)) {
71094+ struct task_struct *task = init_pid_ns.child_reaper;
71095+
71096+ if (task->role != current->role) {
71097+ struct acl_subject_label *subj;
71098+
71099+ task->acl_sp_role = 0;
71100+ task->acl_role_id = current->acl_role_id;
71101+ task->role = current->role;
71102+ rcu_read_lock();
71103+ read_lock(&grsec_exec_file_lock);
71104+ subj = gr_get_subject_for_task(task, NULL, 1);
71105+ gr_apply_subject_to_task(task, subj);
71106+ read_unlock(&grsec_exec_file_lock);
71107+ rcu_read_unlock();
71108+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
71109+ }
71110+ }
71111+
71112+ if (unlikely
71113+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
71114+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
71115+ __u32 new_mode = mode;
71116+
71117+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71118+
71119+ retval = new_mode;
71120+
71121+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
71122+ new_mode |= GR_INHERIT;
71123+
71124+ if (!(mode & GR_NOLEARN))
71125+ gr_log_learn(dentry, mnt, new_mode);
71126+ }
71127+
71128+ return retval;
71129+}
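
The return-value contract of gr_search_file() in miniature: the result is the intersection of the requested mode with the object's mode, and a caller detects denial by checking whether any requested bit (audit and suppress bits aside) failed to survive. A sketch with hypothetical flag values:

#include <assert.h>

/* hypothetical flag values, for illustration only */
#define GRY_READ     0x1u
#define GRY_WRITE    0x2u
#define GRY_AUDITS   0x4u	/* stands in for all audit bits */
#define GRY_SUPPRESS 0x8u

int main(void)
{
	unsigned int objmode = GRY_READ;		/* what the policy grants */
	unsigned int reqmode = GRY_READ | GRY_WRITE;	/* what the caller asked for */
	unsigned int retval = objmode & reqmode;	/* the intersection, as above */

	/* WRITE did not survive the intersection -> the caller treats this as denial */
	assert(retval != (reqmode & ~(GRY_AUDITS | GRY_SUPPRESS)));
	return 0;
}
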
71130+
71131+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
71132+ const struct dentry *parent,
71133+ const struct vfsmount *mnt)
71134+{
71135+ struct name_entry *match;
71136+ struct acl_object_label *matchpo;
71137+ struct acl_subject_label *curracl;
71138+ char *path;
71139+
71140+ if (unlikely(!(gr_status & GR_READY)))
71141+ return NULL;
71142+
71143+ preempt_disable();
71144+ path = gr_to_filename_rbac(new_dentry, mnt);
71145+ match = lookup_name_entry_create(path);
71146+
71147+ curracl = current->acl;
71148+
71149+ if (match) {
71150+ read_lock(&gr_inode_lock);
71151+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
71152+ read_unlock(&gr_inode_lock);
71153+
71154+ if (matchpo) {
71155+ preempt_enable();
71156+ return matchpo;
71157+ }
71158+ }
71159+
71160+	// no exact match for the new name; fall back to the parent directory's create rules
71161+
71162+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
71163+
71164+ preempt_enable();
71165+ return matchpo;
71166+}
71167+
71168+__u32
71169+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
71170+ const struct vfsmount * mnt, const __u32 mode)
71171+{
71172+ struct acl_object_label *matchpo;
71173+ __u32 retval;
71174+
71175+ if (unlikely(!(gr_status & GR_READY)))
71176+ return (mode & ~GR_AUDITS);
71177+
71178+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
71179+
71180+ retval = matchpo->mode & mode;
71181+
71182+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
71183+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71184+ __u32 new_mode = mode;
71185+
71186+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71187+
71188+ gr_log_learn(new_dentry, mnt, new_mode);
71189+ return new_mode;
71190+ }
71191+
71192+ return retval;
71193+}
71194+
71195+__u32
71196+gr_check_link(const struct dentry * new_dentry,
71197+ const struct dentry * parent_dentry,
71198+ const struct vfsmount * parent_mnt,
71199+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
71200+{
71201+ struct acl_object_label *obj;
71202+ __u32 oldmode, newmode;
71203+ __u32 needmode;
71204+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
71205+ GR_DELETE | GR_INHERIT;
71206+
71207+ if (unlikely(!(gr_status & GR_READY)))
71208+ return (GR_CREATE | GR_LINK);
71209+
71210+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
71211+ oldmode = obj->mode;
71212+
71213+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
71214+ newmode = obj->mode;
71215+
71216+ needmode = newmode & checkmodes;
71217+
71218+ // old name for hardlink must have at least the permissions of the new name
71219+ if ((oldmode & needmode) != needmode)
71220+ goto bad;
71221+
71222+ // if old name had restrictions/auditing, make sure the new name does as well
71223+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
71224+
71225+ // don't allow hardlinking of suid/sgid/fcapped files without permission
71226+ if (is_privileged_binary(old_dentry))
71227+ needmode |= GR_SETID;
71228+
71229+ if ((newmode & needmode) != needmode)
71230+ goto bad;
71231+
71232+ // enforce minimum permissions
71233+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
71234+ return newmode;
71235+bad:
71236+ needmode = oldmode;
71237+ if (is_privileged_binary(old_dentry))
71238+ needmode |= GR_SETID;
71239+
71240+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
71241+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
71242+ return (GR_CREATE | GR_LINK);
71243+ } else if (newmode & GR_SUPPRESS)
71244+ return GR_SUPPRESS;
71245+ else
71246+ return 0;
71247+}
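
A worked example of gr_check_link()'s first test, with hypothetical GRX_* flag values standing in for the real GR_* constants: the old name must already grant every permission the new link's rule would confer, otherwise the hardlink is refused (it would escalate access to the inode).

#include <assert.h>

/* hypothetical flag values, for illustration only */
#define GRX_READ   0x01u
#define GRX_WRITE  0x02u
#define GRX_SETID  0x04u

int main(void)
{
	unsigned int checkmodes = GRX_READ | GRX_WRITE | GRX_SETID;
	unsigned int oldmode = GRX_READ;		/* rule on the existing file */
	unsigned int newmode = GRX_READ | GRX_WRITE;	/* rule on the new link's name */
	unsigned int needmode = newmode & checkmodes;

	/* the old name lacks WRITE, so this link would escalate access: refused */
	assert((oldmode & needmode) != needmode);
	return 0;
}
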
71248+
71249+int
71250+gr_check_hidden_task(const struct task_struct *task)
71251+{
71252+ if (unlikely(!(gr_status & GR_READY)))
71253+ return 0;
71254+
71255+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
71256+ return 1;
71257+
71258+ return 0;
71259+}
71260+
71261+int
71262+gr_check_protected_task(const struct task_struct *task)
71263+{
71264+ if (unlikely(!(gr_status & GR_READY) || !task))
71265+ return 0;
71266+
71267+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71268+ task->acl != current->acl)
71269+ return 1;
71270+
71271+ return 0;
71272+}
71273+
71274+int
71275+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
71276+{
71277+ struct task_struct *p;
71278+ int ret = 0;
71279+
71280+ if (unlikely(!(gr_status & GR_READY) || !pid))
71281+ return ret;
71282+
71283+ read_lock(&tasklist_lock);
71284+ do_each_pid_task(pid, type, p) {
71285+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71286+ p->acl != current->acl) {
71287+ ret = 1;
71288+ goto out;
71289+ }
71290+ } while_each_pid_task(pid, type, p);
71291+out:
71292+ read_unlock(&tasklist_lock);
71293+
71294+ return ret;
71295+}
71296+
71297+void
71298+gr_copy_label(struct task_struct *tsk)
71299+{
71300+ struct task_struct *p = current;
71301+
71302+ tsk->inherited = p->inherited;
71303+ tsk->acl_sp_role = 0;
71304+ tsk->acl_role_id = p->acl_role_id;
71305+ tsk->acl = p->acl;
71306+ tsk->role = p->role;
71307+ tsk->signal->used_accept = 0;
71308+ tsk->signal->curr_ip = p->signal->curr_ip;
71309+ tsk->signal->saved_ip = p->signal->saved_ip;
71310+ if (p->exec_file)
71311+ get_file(p->exec_file);
71312+ tsk->exec_file = p->exec_file;
71313+ tsk->is_writable = p->is_writable;
71314+ if (unlikely(p->signal->used_accept)) {
71315+ p->signal->curr_ip = 0;
71316+ p->signal->saved_ip = 0;
71317+ }
71318+
71319+ return;
71320+}
71321+
71322+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71323+
71324+int
71325+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71326+{
71327+ unsigned int i;
71328+ __u16 num;
71329+ uid_t *uidlist;
71330+ uid_t curuid;
71331+ int realok = 0;
71332+ int effectiveok = 0;
71333+ int fsok = 0;
71334+ uid_t globalreal, globaleffective, globalfs;
71335+
71336+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71337+ struct user_struct *user;
71338+
71339+ if (!uid_valid(real))
71340+ goto skipit;
71341+
71342+ /* find user based on global namespace */
71343+
71344+ globalreal = GR_GLOBAL_UID(real);
71345+
71346+ user = find_user(make_kuid(&init_user_ns, globalreal));
71347+ if (user == NULL)
71348+ goto skipit;
71349+
71350+ if (gr_process_kernel_setuid_ban(user)) {
71351+		/* drop the reference taken by find_user() */
71352+ free_uid(user);
71353+ return 1;
71354+ }
71355+
71356+	/* drop the reference taken by find_user() */
71357+ free_uid(user);
71358+
71359+skipit:
71360+#endif
71361+
71362+ if (unlikely(!(gr_status & GR_READY)))
71363+ return 0;
71364+
71365+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71366+ gr_log_learn_uid_change(real, effective, fs);
71367+
71368+ num = current->acl->user_trans_num;
71369+ uidlist = current->acl->user_transitions;
71370+
71371+ if (uidlist == NULL)
71372+ return 0;
71373+
71374+ if (!uid_valid(real)) {
71375+ realok = 1;
71376+ globalreal = (uid_t)-1;
71377+ } else {
71378+ globalreal = GR_GLOBAL_UID(real);
71379+ }
71380+ if (!uid_valid(effective)) {
71381+ effectiveok = 1;
71382+ globaleffective = (uid_t)-1;
71383+ } else {
71384+ globaleffective = GR_GLOBAL_UID(effective);
71385+ }
71386+ if (!uid_valid(fs)) {
71387+ fsok = 1;
71388+ globalfs = (uid_t)-1;
71389+ } else {
71390+ globalfs = GR_GLOBAL_UID(fs);
71391+ }
71392+
71393+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71394+ for (i = 0; i < num; i++) {
71395+ curuid = uidlist[i];
71396+ if (globalreal == curuid)
71397+ realok = 1;
71398+ if (globaleffective == curuid)
71399+ effectiveok = 1;
71400+ if (globalfs == curuid)
71401+ fsok = 1;
71402+ }
71403+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71404+ for (i = 0; i < num; i++) {
71405+ curuid = uidlist[i];
71406+ if (globalreal == curuid)
71407+ break;
71408+ if (globaleffective == curuid)
71409+ break;
71410+ if (globalfs == curuid)
71411+ break;
71412+ }
71413+ /* not in deny list */
71414+ if (i == num) {
71415+ realok = 1;
71416+ effectiveok = 1;
71417+ fsok = 1;
71418+ }
71419+ }
71420+
71421+ if (realok && effectiveok && fsok)
71422+ return 0;
71423+ else {
71424+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71425+ return 1;
71426+ }
71427+}
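
The uid transition check above implements two list semantics: with GR_ID_ALLOW each of the real/effective/fs target uids must appear in user_transitions, while with GR_ID_DENY the change fails if any of them does. A minimal userspace model (types illustrative):

#include <stdbool.h>
#include <assert.h>

static bool uid_in_list(const unsigned int *list, unsigned int n, unsigned int uid)
{
	for (unsigned int i = 0; i < n; i++)
		if (list[i] == uid)
			return true;
	return false;
}

int main(void)
{
	const unsigned int allow[] = { 0, 33 };	/* e.g. root and www-data */

	/* allow-list semantics: setuid to 33 passes, setuid to 1000 is refused */
	assert(uid_in_list(allow, 2, 33));
	assert(!uid_in_list(allow, 2, 1000));
	return 0;
}
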
71428+
71429+int
71430+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71431+{
71432+ unsigned int i;
71433+ __u16 num;
71434+ gid_t *gidlist;
71435+ gid_t curgid;
71436+ int realok = 0;
71437+ int effectiveok = 0;
71438+ int fsok = 0;
71439+ gid_t globalreal, globaleffective, globalfs;
71440+
71441+ if (unlikely(!(gr_status & GR_READY)))
71442+ return 0;
71443+
71444+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71445+ gr_log_learn_gid_change(real, effective, fs);
71446+
71447+ num = current->acl->group_trans_num;
71448+ gidlist = current->acl->group_transitions;
71449+
71450+ if (gidlist == NULL)
71451+ return 0;
71452+
71453+ if (!gid_valid(real)) {
71454+ realok = 1;
71455+ globalreal = (gid_t)-1;
71456+ } else {
71457+ globalreal = GR_GLOBAL_GID(real);
71458+ }
71459+ if (!gid_valid(effective)) {
71460+ effectiveok = 1;
71461+ globaleffective = (gid_t)-1;
71462+ } else {
71463+ globaleffective = GR_GLOBAL_GID(effective);
71464+ }
71465+ if (!gid_valid(fs)) {
71466+ fsok = 1;
71467+ globalfs = (gid_t)-1;
71468+ } else {
71469+ globalfs = GR_GLOBAL_GID(fs);
71470+ }
71471+
71472+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71473+ for (i = 0; i < num; i++) {
71474+ curgid = gidlist[i];
71475+ if (globalreal == curgid)
71476+ realok = 1;
71477+ if (globaleffective == curgid)
71478+ effectiveok = 1;
71479+ if (globalfs == curgid)
71480+ fsok = 1;
71481+ }
71482+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71483+ for (i = 0; i < num; i++) {
71484+ curgid = gidlist[i];
71485+ if (globalreal == curgid)
71486+ break;
71487+ if (globaleffective == curgid)
71488+ break;
71489+ if (globalfs == curgid)
71490+ break;
71491+ }
71492+ /* not in deny list */
71493+ if (i == num) {
71494+ realok = 1;
71495+ effectiveok = 1;
71496+ fsok = 1;
71497+ }
71498+ }
71499+
71500+ if (realok && effectiveok && fsok)
71501+ return 0;
71502+ else {
71503+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71504+ return 1;
71505+ }
71506+}
71507+
71508+extern int gr_acl_is_capable(const int cap);
71509+
71510+void
71511+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71512+{
71513+ struct acl_role_label *role = task->role;
71514+ struct acl_role_label *origrole = role;
71515+ struct acl_subject_label *subj = NULL;
71516+ struct acl_object_label *obj;
71517+ struct file *filp;
71518+ uid_t uid;
71519+ gid_t gid;
71520+
71521+ if (unlikely(!(gr_status & GR_READY)))
71522+ return;
71523+
71524+ uid = GR_GLOBAL_UID(kuid);
71525+ gid = GR_GLOBAL_GID(kgid);
71526+
71527+ filp = task->exec_file;
71528+
71529+ /* kernel process, we'll give them the kernel role */
71530+ if (unlikely(!filp)) {
71531+ task->role = running_polstate.kernel_role;
71532+ task->acl = running_polstate.kernel_role->root_label;
71533+ return;
71534+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71535+ /* save the current ip at time of role lookup so that the proper
71536+ IP will be learned for role_allowed_ip */
71537+ task->signal->saved_ip = task->signal->curr_ip;
71538+ role = lookup_acl_role_label(task, uid, gid);
71539+ }
71540+
71541+ /* don't change the role if we're not a privileged process */
71542+ if (role && task->role != role &&
71543+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71544+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71545+ return;
71546+
71547+ task->role = role;
71548+
71549+ if (task->inherited) {
71550+ /* if we reached our subject through inheritance, then first see
71551+ if there's a subject of the same name in the new role that has
71552+ an object that would result in the same inherited subject
71553+ */
71554+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
71555+ if (subj) {
71556+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
71557+ if (!(obj->mode & GR_INHERIT))
71558+ subj = NULL;
71559+ }
71560+
71561+ }
71562+ if (subj == NULL) {
71563+		/* otherwise, perform the subject lookup in the (possibly new) role;
71564+		   we can also use this result below in the case where
71565+		   role == task->role
71566+		*/
71567+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71568+ }
71569+
71570+	/* if we changed uid/gid but ended up in the same role while using
71571+	   inheritance, don't lose the inherited subject: if the current
71572+	   subject differs from what a normal lookup would yield, we
71573+	   arrived at it via inheritance, so keep that subject rather
71574+	   than replacing it
71575+	*/
71576+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
71577+ (subj == task->acl)))
71578+ task->acl = subj;
71579+
71580+ /* leave task->inherited unaffected */
71581+
71582+ task->is_writable = 0;
71583+
71584+ /* ignore additional mmap checks for processes that are writable
71585+ by the default ACL */
71586+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71587+ if (unlikely(obj->mode & GR_WRITE))
71588+ task->is_writable = 1;
71589+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71590+ if (unlikely(obj->mode & GR_WRITE))
71591+ task->is_writable = 1;
71592+
71593+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71594+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71595+#endif
71596+
71597+ gr_set_proc_res(task);
71598+
71599+ return;
71600+}
71601+
71602+int
71603+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71604+ const int unsafe_flags)
71605+{
71606+ struct task_struct *task = current;
71607+ struct acl_subject_label *newacl;
71608+ struct acl_object_label *obj;
71609+ __u32 retmode;
71610+
71611+ if (unlikely(!(gr_status & GR_READY)))
71612+ return 0;
71613+
71614+ newacl = chk_subj_label(dentry, mnt, task->role);
71615+
71616+	/* special handling for the case where an admin role ran
71617+	   strace -f -p <pid> and the traced pid then did an exec
71618+	*/
71619+ rcu_read_lock();
71620+ read_lock(&tasklist_lock);
71621+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71622+ (task->parent->acl->mode & GR_POVERRIDE))) {
71623+ read_unlock(&tasklist_lock);
71624+ rcu_read_unlock();
71625+ goto skip_check;
71626+ }
71627+ read_unlock(&tasklist_lock);
71628+ rcu_read_unlock();
71629+
71630+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71631+ !(task->role->roletype & GR_ROLE_GOD) &&
71632+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71633+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71634+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71635+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71636+ else
71637+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71638+ return -EACCES;
71639+ }
71640+
71641+skip_check:
71642+
71643+ obj = chk_obj_label(dentry, mnt, task->acl);
71644+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71645+
71646+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71647+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71648+ if (obj->nested)
71649+ task->acl = obj->nested;
71650+ else
71651+ task->acl = newacl;
71652+ task->inherited = 0;
71653+ } else {
71654+ task->inherited = 1;
71655+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71656+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71657+ }
71658+
71659+ task->is_writable = 0;
71660+
71661+ /* ignore additional mmap checks for processes that are writable
71662+ by the default ACL */
71663+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71664+ if (unlikely(obj->mode & GR_WRITE))
71665+ task->is_writable = 1;
71666+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71667+ if (unlikely(obj->mode & GR_WRITE))
71668+ task->is_writable = 1;
71669+
71670+ gr_set_proc_res(task);
71671+
71672+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71673+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71674+#endif
71675+ return 0;
71676+}
71677+
71678+/* always called with valid inodev ptr */
71679+static void
71680+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71681+{
71682+ struct acl_object_label *matchpo;
71683+ struct acl_subject_label *matchps;
71684+ struct acl_subject_label *subj;
71685+ struct acl_role_label *role;
71686+ unsigned int x;
71687+
71688+ FOR_EACH_ROLE_START(role)
71689+ FOR_EACH_SUBJECT_START(role, subj, x)
71690+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71691+ matchpo->mode |= GR_DELETED;
71692+ FOR_EACH_SUBJECT_END(subj,x)
71693+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71694+ /* nested subjects aren't in the role's subj_hash table */
71695+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71696+ matchpo->mode |= GR_DELETED;
71697+ FOR_EACH_NESTED_SUBJECT_END(subj)
71698+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71699+ matchps->mode |= GR_DELETED;
71700+ FOR_EACH_ROLE_END(role)
71701+
71702+ inodev->nentry->deleted = 1;
71703+
71704+ return;
71705+}
71706+
71707+void
71708+gr_handle_delete(const u64 ino, const dev_t dev)
71709+{
71710+ struct inodev_entry *inodev;
71711+
71712+ if (unlikely(!(gr_status & GR_READY)))
71713+ return;
71714+
71715+ write_lock(&gr_inode_lock);
71716+ inodev = lookup_inodev_entry(ino, dev);
71717+ if (inodev != NULL)
71718+ do_handle_delete(inodev, ino, dev);
71719+ write_unlock(&gr_inode_lock);
71720+
71721+ return;
71722+}
71723+
71724+static void
71725+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71726+ const u64 newinode, const dev_t newdevice,
71727+ struct acl_subject_label *subj)
71728+{
71729+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71730+ struct acl_object_label *match;
71731+
71732+ match = subj->obj_hash[index];
71733+
71734+ while (match && (match->inode != oldinode ||
71735+ match->device != olddevice ||
71736+ !(match->mode & GR_DELETED)))
71737+ match = match->next;
71738+
71739+ if (match && (match->inode == oldinode)
71740+ && (match->device == olddevice)
71741+ && (match->mode & GR_DELETED)) {
71742+ if (match->prev == NULL) {
71743+ subj->obj_hash[index] = match->next;
71744+ if (match->next != NULL)
71745+ match->next->prev = NULL;
71746+ } else {
71747+ match->prev->next = match->next;
71748+ if (match->next != NULL)
71749+ match->next->prev = match->prev;
71750+ }
71751+ match->prev = NULL;
71752+ match->next = NULL;
71753+ match->inode = newinode;
71754+ match->device = newdevice;
71755+ match->mode &= ~GR_DELETED;
71756+
71757+ insert_acl_obj_label(match, subj);
71758+ }
71759+
71760+ return;
71761+}
71762+
71763+static void
71764+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71765+ const u64 newinode, const dev_t newdevice,
71766+ struct acl_role_label *role)
71767+{
71768+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71769+ struct acl_subject_label *match;
71770+
71771+ match = role->subj_hash[index];
71772+
71773+ while (match && (match->inode != oldinode ||
71774+ match->device != olddevice ||
71775+ !(match->mode & GR_DELETED)))
71776+ match = match->next;
71777+
71778+ if (match && (match->inode == oldinode)
71779+ && (match->device == olddevice)
71780+ && (match->mode & GR_DELETED)) {
71781+ if (match->prev == NULL) {
71782+ role->subj_hash[index] = match->next;
71783+ if (match->next != NULL)
71784+ match->next->prev = NULL;
71785+ } else {
71786+ match->prev->next = match->next;
71787+ if (match->next != NULL)
71788+ match->next->prev = match->prev;
71789+ }
71790+ match->prev = NULL;
71791+ match->next = NULL;
71792+ match->inode = newinode;
71793+ match->device = newdevice;
71794+ match->mode &= ~GR_DELETED;
71795+
71796+ insert_acl_subj_label(match, role);
71797+ }
71798+
71799+ return;
71800+}
71801+
71802+static void
71803+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71804+ const u64 newinode, const dev_t newdevice)
71805+{
71806+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71807+ struct inodev_entry *match;
71808+
71809+ match = running_polstate.inodev_set.i_hash[index];
71810+
71811+ while (match && (match->nentry->inode != oldinode ||
71812+ match->nentry->device != olddevice || !match->nentry->deleted))
71813+ match = match->next;
71814+
71815+ if (match && (match->nentry->inode == oldinode)
71816+ && (match->nentry->device == olddevice) &&
71817+ match->nentry->deleted) {
71818+ if (match->prev == NULL) {
71819+ running_polstate.inodev_set.i_hash[index] = match->next;
71820+ if (match->next != NULL)
71821+ match->next->prev = NULL;
71822+ } else {
71823+ match->prev->next = match->next;
71824+ if (match->next != NULL)
71825+ match->next->prev = match->prev;
71826+ }
71827+ match->prev = NULL;
71828+ match->next = NULL;
71829+ match->nentry->inode = newinode;
71830+ match->nentry->device = newdevice;
71831+ match->nentry->deleted = 0;
71832+
71833+ insert_inodev_entry(match);
71834+ }
71835+
71836+ return;
71837+}
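
update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry() all perform the same move: unlink the matching node from its doubly linked hash chain, re-key it to the new inode/device, clear its deleted state, and re-insert it under the new key. A generic userspace sketch of that re-key (structure and bucket count are illustrative):

#include <stdio.h>

struct node {
	unsigned long key;
	struct node *prev, *next;
};

#define NBUCKETS 8u
static struct node *hash[NBUCKETS];

static void insert(struct node *n)
{
	struct node **head = &hash[n->key % NBUCKETS];

	n->prev = NULL;
	if (*head)
		(*head)->prev = n;
	n->next = *head;
	*head = n;
}

static void rekey(struct node *n, unsigned long newkey)
{
	/* unlink from the old chain, exactly as update_acl_obj_label() does */
	if (n->prev == NULL) {
		hash[n->key % NBUCKETS] = n->next;
		if (n->next)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next)
			n->next->prev = n->prev;
	}
	n->prev = n->next = NULL;
	n->key = newkey;
	insert(n);
}

int main(void)
{
	struct node a = { .key = 3 };

	insert(&a);
	rekey(&a, 11);
	printf("%d\n", hash[11 % NBUCKETS] == &a);	/* 1 */
	return 0;
}
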
71838+
71839+static void
71840+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71841+{
71842+ struct acl_subject_label *subj;
71843+ struct acl_role_label *role;
71844+ unsigned int x;
71845+
71846+ FOR_EACH_ROLE_START(role)
71847+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71848+
71849+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71850+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
71851+ subj->inode = ino;
71852+ subj->device = dev;
71853+ }
71854+ /* nested subjects aren't in the role's subj_hash table */
71855+ update_acl_obj_label(matchn->inode, matchn->device,
71856+ ino, dev, subj);
71857+ FOR_EACH_NESTED_SUBJECT_END(subj)
71858+ FOR_EACH_SUBJECT_START(role, subj, x)
71859+ update_acl_obj_label(matchn->inode, matchn->device,
71860+ ino, dev, subj);
71861+ FOR_EACH_SUBJECT_END(subj,x)
71862+ FOR_EACH_ROLE_END(role)
71863+
71864+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71865+
71866+ return;
71867+}
71868+
71869+static void
71870+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71871+ const struct vfsmount *mnt)
71872+{
71873+ u64 ino = __get_ino(dentry);
71874+ dev_t dev = __get_dev(dentry);
71875+
71876+ __do_handle_create(matchn, ino, dev);
71877+
71878+ return;
71879+}
71880+
71881+void
71882+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71883+{
71884+ struct name_entry *matchn;
71885+
71886+ if (unlikely(!(gr_status & GR_READY)))
71887+ return;
71888+
71889+ preempt_disable();
71890+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71891+
71892+	if (unlikely(matchn != NULL)) {
71893+ write_lock(&gr_inode_lock);
71894+ do_handle_create(matchn, dentry, mnt);
71895+ write_unlock(&gr_inode_lock);
71896+ }
71897+ preempt_enable();
71898+
71899+ return;
71900+}
71901+
71902+void
71903+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71904+{
71905+ struct name_entry *matchn;
71906+
71907+ if (unlikely(!(gr_status & GR_READY)))
71908+ return;
71909+
71910+ preempt_disable();
71911+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71912+
71913+	if (unlikely(matchn != NULL)) {
71914+ write_lock(&gr_inode_lock);
71915+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71916+ write_unlock(&gr_inode_lock);
71917+ }
71918+ preempt_enable();
71919+
71920+ return;
71921+}
71922+
71923+void
71924+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71925+ struct dentry *old_dentry,
71926+ struct dentry *new_dentry,
71927+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71928+{
71929+ struct name_entry *matchn;
71930+ struct name_entry *matchn2 = NULL;
71931+ struct inodev_entry *inodev;
71932+ struct inode *inode = new_dentry->d_inode;
71933+ u64 old_ino = __get_ino(old_dentry);
71934+ dev_t old_dev = __get_dev(old_dentry);
71935+ unsigned int exchange = flags & RENAME_EXCHANGE;
71936+
71937+	/* vfs_rename swaps the name and parent link of old_dentry and
71938+	   new_dentry.
71939+	   At this point, old_dentry has the new name, parent link, and
71940+	   inode of the renamed file.
71941+	   If a file is being replaced by the rename, new_dentry has the
71942+	   inode and name of the replaced file.
71943+	*/
71944+
71945+ if (unlikely(!(gr_status & GR_READY)))
71946+ return;
71947+
71948+ preempt_disable();
71949+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71950+
71951+ /* exchange cases:
71952+ a filename exists for the source, but not dest
71953+ do a recreate on source
71954+ a filename exists for the dest, but not source
71955+ do a recreate on dest
71956+ a filename exists for both source and dest
71957+ delete source and dest, then create source and dest
71958+ a filename exists for neither source nor dest
71959+ no updates needed
71960+
71961+ the name entry lookups get us the old inode/dev associated with
71962+ each name, so do the deletes first (if possible) so that when
71963+ we do the create, we pick up on the right entries
71964+ */
71965+
71966+ if (exchange)
71967+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71968+
71969+ /* we wouldn't have to check d_inode if it weren't for
71970+ NFS silly-renaming
71971+ */
71972+
71973+ write_lock(&gr_inode_lock);
71974+ if (unlikely((replace || exchange) && inode)) {
71975+ u64 new_ino = __get_ino(new_dentry);
71976+ dev_t new_dev = __get_dev(new_dentry);
71977+
71978+ inodev = lookup_inodev_entry(new_ino, new_dev);
71979+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71980+ do_handle_delete(inodev, new_ino, new_dev);
71981+ }
71982+
71983+ inodev = lookup_inodev_entry(old_ino, old_dev);
71984+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71985+ do_handle_delete(inodev, old_ino, old_dev);
71986+
71987+ if (unlikely(matchn != NULL))
71988+ do_handle_create(matchn, old_dentry, mnt);
71989+
71990+ if (unlikely(matchn2 != NULL))
71991+ do_handle_create(matchn2, new_dentry, mnt);
71992+
71993+ write_unlock(&gr_inode_lock);
71994+ preempt_enable();
71995+
71996+ return;
71997+}
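+
+/* Worked example (illustrative paths): for an exchange of /bin/a and
+   /bin/b where policy names both files, matchn and matchn2 resolve to
+   the old inode/dev pairs; both inodev entries are deleted under
+   gr_inode_lock, and do_handle_create() then rebinds each name to the
+   inode it refers to after the swap. */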
71998+
71999+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
72000+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
72001+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
72002+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
72003+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
72004+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
72005+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
72006+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
72007+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
72008+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
72009+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
72010+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
72011+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
72012+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
72013+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
72014+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
72015+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
72016+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
72017+};
72018+
72019+void
72020+gr_learn_resource(const struct task_struct *task,
72021+ const int res, const unsigned long wanted, const int gt)
72022+{
72023+ struct acl_subject_label *acl;
72024+ const struct cred *cred;
72025+
72026+ if (unlikely((gr_status & GR_READY) &&
72027+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
72028+ goto skip_reslog;
72029+
72030+ gr_log_resource(task, res, wanted, gt);
72031+skip_reslog:
72032+
72033+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
72034+ return;
72035+
72036+ acl = task->acl;
72037+
72038+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
72039+ !(acl->resmask & (1U << (unsigned short) res))))
72040+ return;
72041+
72042+ if (wanted >= acl->res[res].rlim_cur) {
72043+ unsigned long res_add;
72044+
72045+ res_add = wanted + res_learn_bumps[res];
72046+
72047+ acl->res[res].rlim_cur = res_add;
72048+
72049+ if (wanted > acl->res[res].rlim_max)
72050+ acl->res[res].rlim_max = res_add;
72051+
72052+ /* only log the subject filename, since resource logging is supported for
72053+ single-subject learning only */
72054+ rcu_read_lock();
72055+ cred = __task_cred(task);
72056+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72057+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
72058+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
72059+ "", (unsigned long) res, &task->signal->saved_ip);
72060+ rcu_read_unlock();
72061+ }
72062+
72063+ return;
72064+}
72065+EXPORT_SYMBOL_GPL(gr_learn_resource);
72066+#endif
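+
+/* Worked example (illustrative numbers): with learning active on a
+   subject that covers RLIMIT_NOFILE, a request for wanted = 1024
+   descriptors against an rlim_cur of 256 raises rlim_cur to
+   1024 + GR_RLIM_NOFILE_BUMP, and raises rlim_max the same way when
+   wanted exceeded it, so repeated small overruns don't each generate
+   a new learning event. */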
72067+
72068+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
72069+void
72070+pax_set_initial_flags(struct linux_binprm *bprm)
72071+{
72072+ struct task_struct *task = current;
72073+ struct acl_subject_label *proc;
72074+ unsigned long flags;
72075+
72076+ if (unlikely(!(gr_status & GR_READY)))
72077+ return;
72078+
72079+ flags = pax_get_flags(task);
72080+
72081+ proc = task->acl;
72082+
72083+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
72084+ flags &= ~MF_PAX_PAGEEXEC;
72085+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
72086+ flags &= ~MF_PAX_SEGMEXEC;
72087+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
72088+ flags &= ~MF_PAX_RANDMMAP;
72089+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
72090+ flags &= ~MF_PAX_EMUTRAMP;
72091+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
72092+ flags &= ~MF_PAX_MPROTECT;
72093+
72094+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
72095+ flags |= MF_PAX_PAGEEXEC;
72096+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
72097+ flags |= MF_PAX_SEGMEXEC;
72098+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
72099+ flags |= MF_PAX_RANDMMAP;
72100+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
72101+ flags |= MF_PAX_EMUTRAMP;
72102+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
72103+ flags |= MF_PAX_MPROTECT;
72104+
72105+ pax_set_flags(task, flags);
72106+
72107+ return;
72108+}
72109+#endif
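+
+/* Example: a subject marked in policy with GR_PAX_DISABLE_MPROTECT
+   clears MF_PAX_MPROTECT from the flags computed at exec time, while
+   the GR_PAX_ENABLE_* bits force the corresponding MF_PAX_* flags on;
+   per-subject policy therefore overrides the binary's own PaX
+   markings. */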
72110+
72111+int
72112+gr_handle_proc_ptrace(struct task_struct *task)
72113+{
72114+ struct file *filp;
72115+ struct task_struct *tmp = task;
72116+ struct task_struct *curtemp = current;
72117+ __u32 retmode;
72118+
72119+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72120+ if (unlikely(!(gr_status & GR_READY)))
72121+ return 0;
72122+#endif
72123+
72124+ read_lock(&tasklist_lock);
72125+ read_lock(&grsec_exec_file_lock);
72126+ filp = task->exec_file;
72127+
72128+ while (task_pid_nr(tmp) > 0) {
72129+ if (tmp == curtemp)
72130+ break;
72131+ tmp = tmp->real_parent;
72132+ }
72133+
72134+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72135+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
72136+ read_unlock(&grsec_exec_file_lock);
72137+ read_unlock(&tasklist_lock);
72138+ return 1;
72139+ }
72140+
72141+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72142+ if (!(gr_status & GR_READY)) {
72143+ read_unlock(&grsec_exec_file_lock);
72144+ read_unlock(&tasklist_lock);
72145+ return 0;
72146+ }
72147+#endif
72148+
72149+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
72150+ read_unlock(&grsec_exec_file_lock);
72151+ read_unlock(&tasklist_lock);
72152+
72153+ if (retmode & GR_NOPTRACE)
72154+ return 1;
72155+
72156+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
72157+ && (current->acl != task->acl || (current->acl != current->role->root_label
72158+ && task_pid_nr(current) != task_pid_nr(task))))
72159+ return 1;
72160+
72161+ return 0;
72162+}
72163+
72164+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
72165+{
72166+ if (unlikely(!(gr_status & GR_READY)))
72167+ return;
72168+
72169+ if (!(current->role->roletype & GR_ROLE_GOD))
72170+ return;
72171+
72172+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
72173+ p->role->rolename, gr_task_roletype_to_char(p),
72174+ p->acl->filename);
72175+}
72176+
72177+int
72178+gr_handle_ptrace(struct task_struct *task, const long request)
72179+{
72180+ struct task_struct *tmp = task;
72181+ struct task_struct *curtemp = current;
72182+ __u32 retmode;
72183+
72184+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72185+ if (unlikely(!(gr_status & GR_READY)))
72186+ return 0;
72187+#endif
72188+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72189+ read_lock(&tasklist_lock);
72190+ while (task_pid_nr(tmp) > 0) {
72191+ if (tmp == curtemp)
72192+ break;
72193+ tmp = tmp->real_parent;
72194+ }
72195+
72196+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72197+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
72198+ read_unlock(&tasklist_lock);
72199+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72200+ return 1;
72201+ }
72202+ read_unlock(&tasklist_lock);
72203+ }
72204+
72205+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72206+ if (!(gr_status & GR_READY))
72207+ return 0;
72208+#endif
72209+
72210+ read_lock(&grsec_exec_file_lock);
72211+ if (unlikely(!task->exec_file)) {
72212+ read_unlock(&grsec_exec_file_lock);
72213+ return 0;
72214+ }
72215+
72216+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
72217+ read_unlock(&grsec_exec_file_lock);
72218+
72219+ if (retmode & GR_NOPTRACE) {
72220+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72221+ return 1;
72222+ }
72223+
72224+ if (retmode & GR_PTRACERD) {
72225+ switch (request) {
72226+ case PTRACE_SEIZE:
72227+ case PTRACE_POKETEXT:
72228+ case PTRACE_POKEDATA:
72229+ case PTRACE_POKEUSR:
72230+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
72231+ case PTRACE_SETREGS:
72232+ case PTRACE_SETFPREGS:
72233+#endif
72234+#ifdef CONFIG_X86
72235+ case PTRACE_SETFPXREGS:
72236+#endif
72237+#ifdef CONFIG_ALTIVEC
72238+ case PTRACE_SETVRREGS:
72239+#endif
72240+ return 1;
72241+ default:
72242+ return 0;
72243+ }
72244+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
72245+ !(current->role->roletype & GR_ROLE_GOD) &&
72246+ (current->acl != task->acl)) {
72247+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72248+ return 1;
72249+ }
72250+
72251+ return 0;
72252+}
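+
+/* Example: if the object covering the tracee's binary carries
+   GR_PTRACERD, read-only requests such as PTRACE_PEEKDATA take the
+   default: branch above and are allowed, while write-style requests
+   (PTRACE_POKETEXT, PTRACE_SETREGS, ...) and PTRACE_SEIZE return 1
+   (denied). */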
72253+
72254+static int is_writable_mmap(const struct file *filp)
72255+{
72256+ struct task_struct *task = current;
72257+ struct acl_object_label *obj, *obj2;
72258+
72259+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
72260+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
72261+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72262+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
72263+ task->role->root_label);
72264+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
72265+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
72266+ return 1;
72267+ }
72268+ }
72269+ return 0;
72270+}
72271+
72272+int
72273+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
72274+{
72275+ __u32 mode;
72276+
72277+ if (unlikely(!file || !(prot & PROT_EXEC)))
72278+ return 1;
72279+
72280+ if (is_writable_mmap(file))
72281+ return 0;
72282+
72283+ mode =
72284+ gr_search_file(file->f_path.dentry,
72285+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72286+ file->f_path.mnt);
72287+
72288+ if (!gr_tpe_allow(file))
72289+ return 0;
72290+
72291+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72292+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72293+ return 0;
72294+ } else if (unlikely(!(mode & GR_EXEC))) {
72295+ return 0;
72296+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72297+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72298+ return 1;
72299+ }
72300+
72301+ return 1;
72302+}
72303+
72304+int
72305+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72306+{
72307+ __u32 mode;
72308+
72309+ if (unlikely(!file || !(prot & PROT_EXEC)))
72310+ return 1;
72311+
72312+ if (is_writable_mmap(file))
72313+ return 0;
72314+
72315+ mode =
72316+ gr_search_file(file->f_path.dentry,
72317+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72318+ file->f_path.mnt);
72319+
72320+ if (!gr_tpe_allow(file))
72321+ return 0;
72322+
72323+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72324+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72325+ return 0;
72326+ } else if (unlikely(!(mode & GR_EXEC))) {
72327+ return 0;
72328+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72329+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72330+ return 1;
72331+ }
72332+
72333+ return 1;
72334+}
72335+
72336+void
72337+gr_acl_handle_psacct(struct task_struct *task, const long code)
72338+{
72339+ unsigned long runtime, cputime;
72340+ cputime_t utime, stime;
72341+ unsigned int wday, cday;
72342+ __u8 whr, chr;
72343+ __u8 wmin, cmin;
72344+ __u8 wsec, csec;
72345+ struct timespec curtime, starttime;
72346+
72347+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72348+ !(task->acl->mode & GR_PROCACCT)))
72349+ return;
72350+
72351+ curtime = ns_to_timespec(ktime_get_ns());
72352+ starttime = ns_to_timespec(task->start_time);
72353+ runtime = curtime.tv_sec - starttime.tv_sec;
72354+ wday = runtime / (60 * 60 * 24);
72355+ runtime -= wday * (60 * 60 * 24);
72356+ whr = runtime / (60 * 60);
72357+ runtime -= whr * (60 * 60);
72358+ wmin = runtime / 60;
72359+ runtime -= wmin * 60;
72360+ wsec = runtime;
72361+
72362+ task_cputime(task, &utime, &stime);
72363+ cputime = cputime_to_secs(utime + stime);
72364+ cday = cputime / (60 * 60 * 24);
72365+ cputime -= cday * (60 * 60 * 24);
72366+ chr = cputime / (60 * 60);
72367+ cputime -= chr * (60 * 60);
72368+ cmin = cputime / 60;
72369+ cputime -= cmin * 60;
72370+ csec = cputime;
72371+
72372+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72373+
72374+ return;
72375+}
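+
+/* Worked example of the decomposition above: a wall-clock runtime of
+   90061 seconds splits into 1 day (86400s), 1 hour, 1 minute, and
+   1 second; the identical arithmetic is then applied to the combined
+   user+system CPU time. */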
72376+
72377+#ifdef CONFIG_TASKSTATS
72378+int gr_is_taskstats_denied(int pid)
72379+{
72380+ struct task_struct *task;
72381+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72382+ const struct cred *cred;
72383+#endif
72384+ int ret = 0;
72385+
72386+	/* restrict taskstats viewing to un-chrooted root users; additionally
72387+	   require the 'view' subject flag if the RBAC system is enabled
72388+	*/
72389+
72390+ rcu_read_lock();
72391+ read_lock(&tasklist_lock);
72392+ task = find_task_by_vpid(pid);
72393+ if (task) {
72394+#ifdef CONFIG_GRKERNSEC_CHROOT
72395+ if (proc_is_chrooted(task))
72396+ ret = -EACCES;
72397+#endif
72398+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72399+ cred = __task_cred(task);
72400+#ifdef CONFIG_GRKERNSEC_PROC_USER
72401+ if (gr_is_global_nonroot(cred->uid))
72402+ ret = -EACCES;
72403+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72404+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72405+ ret = -EACCES;
72406+#endif
72407+#endif
72408+ if (gr_status & GR_READY) {
72409+ if (!(task->acl->mode & GR_VIEW))
72410+ ret = -EACCES;
72411+ }
72412+ } else
72413+ ret = -ENOENT;
72414+
72415+ read_unlock(&tasklist_lock);
72416+ rcu_read_unlock();
72417+
72418+ return ret;
72419+}
72420+#endif
72421+
72422+/* AUXV entries are filled via a descendant of search_binary_handler
72423+ after we've already applied the subject for the target
72424+*/
72425+int gr_acl_enable_at_secure(void)
72426+{
72427+ if (unlikely(!(gr_status & GR_READY)))
72428+ return 0;
72429+
72430+ if (current->acl->mode & GR_ATSECURE)
72431+ return 1;
72432+
72433+ return 0;
72434+}
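+
+/* Note: a return of 1 here results in AT_SECURE=1 in the new image's
+   auxiliary vector, which libc interprets as secure-execution mode
+   (glibc, for instance, ignores LD_PRELOAD and similar environment
+   variables). */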
72435+
72436+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
72437+{
72438+ struct task_struct *task = current;
72439+ struct dentry *dentry = file->f_path.dentry;
72440+ struct vfsmount *mnt = file->f_path.mnt;
72441+ struct acl_object_label *obj, *tmp;
72442+ struct acl_subject_label *subj;
72443+ unsigned int bufsize;
72444+ int is_not_root;
72445+ char *path;
72446+ dev_t dev = __get_dev(dentry);
72447+
72448+ if (unlikely(!(gr_status & GR_READY)))
72449+ return 1;
72450+
72451+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72452+ return 1;
72453+
72454+	/* ignore private (S_PRIVATE) inodes */
72455+ if (IS_PRIVATE(dentry->d_inode))
72456+ return 1;
72457+
72458+ subj = task->acl;
72459+ read_lock(&gr_inode_lock);
72460+ do {
72461+ obj = lookup_acl_obj_label(ino, dev, subj);
72462+ if (obj != NULL) {
72463+ read_unlock(&gr_inode_lock);
72464+ return (obj->mode & GR_FIND) ? 1 : 0;
72465+ }
72466+ } while ((subj = subj->parent_subject));
72467+ read_unlock(&gr_inode_lock);
72468+
72469+	/* this is purely an optimization, since we're looking for an object
72470+	   for the directory we're doing a readdir on.  If it's possible for
72471+	   any globbed object to match the entry we're filling into the
72472+	   directory, then the object found here will be an anchor point
72473+	   with the globbed objects attached.
72474+	*/
72475+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72476+ if (obj->globbed == NULL)
72477+ return (obj->mode & GR_FIND) ? 1 : 0;
72478+
72479+ is_not_root = ((obj->filename[0] == '/') &&
72480+ (obj->filename[1] == '\0')) ? 0 : 1;
72481+ bufsize = PAGE_SIZE - namelen - is_not_root;
72482+
72483+	/* unsigned-wraparound check for bufsize == 0 || bufsize > PAGE_SIZE */
72484+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72485+ return 1;
72486+
72487+ preempt_disable();
72488+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72489+ bufsize);
72490+
72491+ bufsize = strlen(path);
72492+
72493+ /* if base is "/", don't append an additional slash */
72494+ if (is_not_root)
72495+ *(path + bufsize) = '/';
72496+ memcpy(path + bufsize + is_not_root, name, namelen);
72497+ *(path + bufsize + namelen + is_not_root) = '\0';
72498+
72499+ tmp = obj->globbed;
72500+ while (tmp) {
72501+ if (!glob_match(tmp->filename, path)) {
72502+ preempt_enable();
72503+ return (tmp->mode & GR_FIND) ? 1 : 0;
72504+ }
72505+ tmp = tmp->next;
72506+ }
72507+ preempt_enable();
72508+ return (obj->mode & GR_FIND) ? 1 : 0;
72509+}
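+
+/* Worked example (hypothetical policy): given a globbed object
+   "/home/user/*.log" anchored at a "/home/user" object, a readdir of
+   /home/user filling in the entry "a.log" builds the candidate path
+   "/home/user/a.log"; the first globbed pattern that matches decides
+   visibility via its GR_FIND bit. */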
72510+
72511+void gr_put_exec_file(struct task_struct *task)
72512+{
72513+ struct file *filp;
72514+
72515+ write_lock(&grsec_exec_file_lock);
72516+ filp = task->exec_file;
72517+ task->exec_file = NULL;
72518+ write_unlock(&grsec_exec_file_lock);
72519+
72520+ if (filp)
72521+ fput(filp);
72522+
72523+ return;
72524+}
72525+
72527+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72528+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72529+#endif
72530+#ifdef CONFIG_SECURITY
72531+EXPORT_SYMBOL_GPL(gr_check_user_change);
72532+EXPORT_SYMBOL_GPL(gr_check_group_change);
72533+#endif
72534+
72535diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72536new file mode 100644
72537index 0000000..18ffbbd
72538--- /dev/null
72539+++ b/grsecurity/gracl_alloc.c
72540@@ -0,0 +1,105 @@
72541+#include <linux/kernel.h>
72542+#include <linux/mm.h>
72543+#include <linux/slab.h>
72544+#include <linux/vmalloc.h>
72545+#include <linux/gracl.h>
72546+#include <linux/grsecurity.h>
72547+
72548+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72549+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72550+
72551+static __inline__ int
72552+alloc_pop(void)
72553+{
72554+ if (current_alloc_state->alloc_stack_next == 1)
72555+ return 0;
72556+
72557+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72558+
72559+ current_alloc_state->alloc_stack_next--;
72560+
72561+ return 1;
72562+}
72563+
72564+static __inline__ int
72565+alloc_push(void *buf)
72566+{
72567+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72568+ return 1;
72569+
72570+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72571+
72572+ current_alloc_state->alloc_stack_next++;
72573+
72574+ return 0;
72575+}
72576+
72577+void *
72578+acl_alloc(unsigned long len)
72579+{
72580+ void *ret = NULL;
72581+
72582+ if (!len || len > PAGE_SIZE)
72583+ goto out;
72584+
72585+ ret = kmalloc(len, GFP_KERNEL);
72586+
72587+ if (ret) {
72588+ if (alloc_push(ret)) {
72589+ kfree(ret);
72590+ ret = NULL;
72591+ }
72592+ }
72593+
72594+out:
72595+ return ret;
72596+}
72597+
72598+void *
72599+acl_alloc_num(unsigned long num, unsigned long len)
72600+{
72601+ if (!len || (num > (PAGE_SIZE / len)))
72602+ return NULL;
72603+
72604+ return acl_alloc(num * len);
72605+}
72606+
72607+void
72608+acl_free_all(void)
72609+{
72610+ if (!current_alloc_state->alloc_stack)
72611+ return;
72612+
72613+ while (alloc_pop()) ;
72614+
72615+ if (current_alloc_state->alloc_stack) {
72616+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72617+ kfree(current_alloc_state->alloc_stack);
72618+ else
72619+ vfree(current_alloc_state->alloc_stack);
72620+ }
72621+
72622+ current_alloc_state->alloc_stack = NULL;
72623+ current_alloc_state->alloc_stack_size = 1;
72624+ current_alloc_state->alloc_stack_next = 1;
72625+
72626+ return;
72627+}
72628+
72629+int
72630+acl_alloc_stack_init(unsigned long size)
72631+{
72632+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72633+ current_alloc_state->alloc_stack =
72634+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72635+ else
72636+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72637+
72638+ current_alloc_state->alloc_stack_size = size;
72639+ current_alloc_state->alloc_stack_next = 1;
72640+
72641+ if (!current_alloc_state->alloc_stack)
72642+ return 0;
72643+ else
72644+ return 1;
72645+}
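+
+/* Typical lifecycle (sketch): a policy load calls
+   acl_alloc_stack_init() once with the expected number of allocations
+   (note it returns 1 on success and 0 on failure, unlike most kernel
+   APIs), builds the policy via acl_alloc()/acl_alloc_num(), and on
+   error or teardown unwinds every allocation with one acl_free_all()
+   call. */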
72646diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72647new file mode 100644
72648index 0000000..1a94c11
72649--- /dev/null
72650+++ b/grsecurity/gracl_cap.c
72651@@ -0,0 +1,127 @@
72652+#include <linux/kernel.h>
72653+#include <linux/module.h>
72654+#include <linux/sched.h>
72655+#include <linux/gracl.h>
72656+#include <linux/grsecurity.h>
72657+#include <linux/grinternal.h>
72658+
72659+extern const char *captab_log[];
72660+extern int captab_log_entries;
72661+
72662+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72663+{
72664+ struct acl_subject_label *curracl;
72665+
72666+ if (!gr_acl_is_enabled())
72667+ return 1;
72668+
72669+ curracl = task->acl;
72670+
72671+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72672+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72673+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72674+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72675+ gr_to_filename(task->exec_file->f_path.dentry,
72676+ task->exec_file->f_path.mnt) : curracl->filename,
72677+ curracl->filename, 0UL,
72678+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72679+ return 1;
72680+ }
72681+
72682+ return 0;
72683+}
72684+
72685+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72686+{
72687+ struct acl_subject_label *curracl;
72688+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72689+ kernel_cap_t cap_audit = __cap_empty_set;
72690+
72691+ if (!gr_acl_is_enabled())
72692+ return 1;
72693+
72694+ curracl = task->acl;
72695+
72696+ cap_drop = curracl->cap_lower;
72697+ cap_mask = curracl->cap_mask;
72698+ cap_audit = curracl->cap_invert_audit;
72699+
72700+ while ((curracl = curracl->parent_subject)) {
72701+		/* if the cap isn't specified in the current computed mask but is
72702+		   specified in the current level subject, and is lowered in the
72703+		   current level subject, then add it to the set of dropped
72704+		   capabilities; otherwise, fold the current level subject's mask
72705+		   into the current computed mask */
72706+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72707+ cap_raise(cap_mask, cap);
72708+ if (cap_raised(curracl->cap_lower, cap))
72709+ cap_raise(cap_drop, cap);
72710+ if (cap_raised(curracl->cap_invert_audit, cap))
72711+ cap_raise(cap_audit, cap);
72712+ }
72713+ }
72714+
72715+ if (!cap_raised(cap_drop, cap)) {
72716+ if (cap_raised(cap_audit, cap))
72717+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72718+ return 1;
72719+ }
72720+
72721+	/* in the general case, only learn the capability use if the process
72722+	   actually has the capability; the two uses of gr_learn_cap in sys.c
72723+	   are an exception to this rule, ensuring any role transition involves
72724+	   what the fully-learned policy believes is a privileged process
72725+	*/
72726+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72727+ return 1;
72728+
72729+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72730+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72731+
72732+ return 0;
72733+}
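+
+/* Example of the parent walk above: if a nested subject says nothing
+   about CAP_NET_ADMIN but its parent both masks and lowers it, the loop
+   raises the cap in cap_mask and cap_drop, so the child inherits the
+   parent's drop; a subject whose own mask covers the cap shadows all of
+   its ancestors for that cap. */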
72734+
72735+int
72736+gr_acl_is_capable(const int cap)
72737+{
72738+ return gr_task_acl_is_capable(current, current_cred(), cap);
72739+}
72740+
72741+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72742+{
72743+ struct acl_subject_label *curracl;
72744+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72745+
72746+ if (!gr_acl_is_enabled())
72747+ return 1;
72748+
72749+ curracl = task->acl;
72750+
72751+ cap_drop = curracl->cap_lower;
72752+ cap_mask = curracl->cap_mask;
72753+
72754+ while ((curracl = curracl->parent_subject)) {
72755+		/* if the cap isn't specified in the current computed mask but is
72756+		   specified in the current level subject, and is lowered in the
72757+		   current level subject, then add it to the set of dropped
72758+		   capabilities; otherwise, fold the current level subject's mask
72759+		   into the current computed mask */
72760+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72761+ cap_raise(cap_mask, cap);
72762+ if (cap_raised(curracl->cap_lower, cap))
72763+ cap_raise(cap_drop, cap);
72764+ }
72765+ }
72766+
72767+ if (!cap_raised(cap_drop, cap))
72768+ return 1;
72769+
72770+ return 0;
72771+}
72772+
72773+int
72774+gr_acl_is_capable_nolog(const int cap)
72775+{
72776+ return gr_task_acl_is_capable_nolog(current, cap);
72777+}
72778+
72779diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72780new file mode 100644
72781index 0000000..a43dd06
72782--- /dev/null
72783+++ b/grsecurity/gracl_compat.c
72784@@ -0,0 +1,269 @@
72785+#include <linux/kernel.h>
72786+#include <linux/gracl.h>
72787+#include <linux/compat.h>
72788+#include <linux/gracl_compat.h>
72789+
72790+#include <asm/uaccess.h>
72791+
72792+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72793+{
72794+ struct gr_arg_wrapper_compat uwrapcompat;
72795+
72796+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72797+ return -EFAULT;
72798+
72799+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72800+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72801+ return -EINVAL;
72802+
72803+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72804+ uwrap->version = uwrapcompat.version;
72805+ uwrap->size = sizeof(struct gr_arg);
72806+
72807+ return 0;
72808+}
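+
+/* The copy_*_compat() helpers below all follow the shape above: copy
+   the packed 32-bit layout in from userland, widen each compat_uptr_t
+   with compat_ptr(), and assign the scalar fields across, so the rest
+   of the RBAC loader only ever sees native-width structures. */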
72809+
72810+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72811+{
72812+ struct gr_arg_compat argcompat;
72813+
72814+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72815+ return -EFAULT;
72816+
72817+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72818+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72819+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72820+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72821+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72822+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72823+
72824+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72825+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72826+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72827+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72828+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72829+ arg->segv_device = argcompat.segv_device;
72830+ arg->segv_inode = argcompat.segv_inode;
72831+ arg->segv_uid = argcompat.segv_uid;
72832+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72833+ arg->mode = argcompat.mode;
72834+
72835+ return 0;
72836+}
72837+
72838+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72839+{
72840+ struct acl_object_label_compat objcompat;
72841+
72842+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72843+ return -EFAULT;
72844+
72845+ obj->filename = compat_ptr(objcompat.filename);
72846+ obj->inode = objcompat.inode;
72847+ obj->device = objcompat.device;
72848+ obj->mode = objcompat.mode;
72849+
72850+ obj->nested = compat_ptr(objcompat.nested);
72851+ obj->globbed = compat_ptr(objcompat.globbed);
72852+
72853+ obj->prev = compat_ptr(objcompat.prev);
72854+ obj->next = compat_ptr(objcompat.next);
72855+
72856+ return 0;
72857+}
72858+
72859+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72860+{
72861+ unsigned int i;
72862+ struct acl_subject_label_compat subjcompat;
72863+
72864+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72865+ return -EFAULT;
72866+
72867+ subj->filename = compat_ptr(subjcompat.filename);
72868+ subj->inode = subjcompat.inode;
72869+ subj->device = subjcompat.device;
72870+ subj->mode = subjcompat.mode;
72871+ subj->cap_mask = subjcompat.cap_mask;
72872+ subj->cap_lower = subjcompat.cap_lower;
72873+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72874+
72875+ for (i = 0; i < GR_NLIMITS; i++) {
72876+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72877+ subj->res[i].rlim_cur = RLIM_INFINITY;
72878+ else
72879+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72880+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72881+ subj->res[i].rlim_max = RLIM_INFINITY;
72882+ else
72883+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72884+ }
72885+ subj->resmask = subjcompat.resmask;
72886+
72887+ subj->user_trans_type = subjcompat.user_trans_type;
72888+ subj->group_trans_type = subjcompat.group_trans_type;
72889+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72890+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72891+ subj->user_trans_num = subjcompat.user_trans_num;
72892+ subj->group_trans_num = subjcompat.group_trans_num;
72893+
72894+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72895+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72896+ subj->ip_type = subjcompat.ip_type;
72897+ subj->ips = compat_ptr(subjcompat.ips);
72898+ subj->ip_num = subjcompat.ip_num;
72899+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72900+
72901+ subj->crashes = subjcompat.crashes;
72902+ subj->expires = subjcompat.expires;
72903+
72904+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72905+ subj->hash = compat_ptr(subjcompat.hash);
72906+ subj->prev = compat_ptr(subjcompat.prev);
72907+ subj->next = compat_ptr(subjcompat.next);
72908+
72909+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72910+ subj->obj_hash_size = subjcompat.obj_hash_size;
72911+ subj->pax_flags = subjcompat.pax_flags;
72912+
72913+ return 0;
72914+}
72915+
72916+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72917+{
72918+ struct acl_role_label_compat rolecompat;
72919+
72920+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72921+ return -EFAULT;
72922+
72923+ role->rolename = compat_ptr(rolecompat.rolename);
72924+ role->uidgid = rolecompat.uidgid;
72925+ role->roletype = rolecompat.roletype;
72926+
72927+ role->auth_attempts = rolecompat.auth_attempts;
72928+ role->expires = rolecompat.expires;
72929+
72930+ role->root_label = compat_ptr(rolecompat.root_label);
72931+ role->hash = compat_ptr(rolecompat.hash);
72932+
72933+ role->prev = compat_ptr(rolecompat.prev);
72934+ role->next = compat_ptr(rolecompat.next);
72935+
72936+ role->transitions = compat_ptr(rolecompat.transitions);
72937+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72938+ role->domain_children = compat_ptr(rolecompat.domain_children);
72939+ role->domain_child_num = rolecompat.domain_child_num;
72940+
72941+ role->umask = rolecompat.umask;
72942+
72943+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72944+ role->subj_hash_size = rolecompat.subj_hash_size;
72945+
72946+ return 0;
72947+}
72948+
72949+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72950+{
72951+ struct role_allowed_ip_compat roleip_compat;
72952+
72953+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72954+ return -EFAULT;
72955+
72956+ roleip->addr = roleip_compat.addr;
72957+ roleip->netmask = roleip_compat.netmask;
72958+
72959+ roleip->prev = compat_ptr(roleip_compat.prev);
72960+ roleip->next = compat_ptr(roleip_compat.next);
72961+
72962+ return 0;
72963+}
72964+
72965+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72966+{
72967+ struct role_transition_compat trans_compat;
72968+
72969+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72970+ return -EFAULT;
72971+
72972+ trans->rolename = compat_ptr(trans_compat.rolename);
72973+
72974+ trans->prev = compat_ptr(trans_compat.prev);
72975+ trans->next = compat_ptr(trans_compat.next);
72976+
72977+ return 0;
72979+}
72980+
72981+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72982+{
72983+ struct gr_hash_struct_compat hash_compat;
72984+
72985+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72986+ return -EFAULT;
72987+
72988+ hash->table = compat_ptr(hash_compat.table);
72989+ hash->nametable = compat_ptr(hash_compat.nametable);
72990+ hash->first = compat_ptr(hash_compat.first);
72991+
72992+ hash->table_size = hash_compat.table_size;
72993+ hash->used_size = hash_compat.used_size;
72994+
72995+ hash->type = hash_compat.type;
72996+
72997+ return 0;
72998+}
72999+
73000+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
73001+{
73002+ compat_uptr_t ptrcompat;
73003+
73004+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
73005+ return -EFAULT;
73006+
73007+ *(void **)ptr = compat_ptr(ptrcompat);
73008+
73009+ return 0;
73010+}
73011+
73012+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73013+{
73014+ struct acl_ip_label_compat ip_compat;
73015+
73016+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
73017+ return -EFAULT;
73018+
73019+ ip->iface = compat_ptr(ip_compat.iface);
73020+ ip->addr = ip_compat.addr;
73021+ ip->netmask = ip_compat.netmask;
73022+ ip->low = ip_compat.low;
73023+ ip->high = ip_compat.high;
73024+ ip->mode = ip_compat.mode;
73025+ ip->type = ip_compat.type;
73026+
73027+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
73028+
73029+ ip->prev = compat_ptr(ip_compat.prev);
73030+ ip->next = compat_ptr(ip_compat.next);
73031+
73032+ return 0;
73033+}
73034+
73035+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73036+{
73037+ struct sprole_pw_compat pw_compat;
73038+
73039+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
73040+ return -EFAULT;
73041+
73042+ pw->rolename = compat_ptr(pw_compat.rolename);
73043+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
73044+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
73045+
73046+ return 0;
73047+}
73048+
73049+size_t get_gr_arg_wrapper_size_compat(void)
73050+{
73051+ return sizeof(struct gr_arg_wrapper_compat);
73052+}
73053+
73054diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
73055new file mode 100644
73056index 0000000..8ee8e4f
73057--- /dev/null
73058+++ b/grsecurity/gracl_fs.c
73059@@ -0,0 +1,447 @@
73060+#include <linux/kernel.h>
73061+#include <linux/sched.h>
73062+#include <linux/types.h>
73063+#include <linux/fs.h>
73064+#include <linux/file.h>
73065+#include <linux/stat.h>
73066+#include <linux/grsecurity.h>
73067+#include <linux/grinternal.h>
73068+#include <linux/gracl.h>
73069+
73070+umode_t
73071+gr_acl_umask(void)
73072+{
73073+ if (unlikely(!gr_acl_is_enabled()))
73074+ return 0;
73075+
73076+ return current->role->umask;
73077+}
73078+
73079+__u32
73080+gr_acl_handle_hidden_file(const struct dentry * dentry,
73081+ const struct vfsmount * mnt)
73082+{
73083+ __u32 mode;
73084+
73085+ if (unlikely(d_is_negative(dentry)))
73086+ return GR_FIND;
73087+
73088+ mode =
73089+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
73090+
73091+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
73092+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73093+ return mode;
73094+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
73095+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73096+ return 0;
73097+ } else if (unlikely(!(mode & GR_FIND)))
73098+ return 0;
73099+
73100+ return GR_FIND;
73101+}
73102+
73103+__u32
73104+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73105+ int acc_mode)
73106+{
73107+ __u32 reqmode = GR_FIND;
73108+ __u32 mode;
73109+
73110+ if (unlikely(d_is_negative(dentry)))
73111+ return reqmode;
73112+
73113+ if (acc_mode & MAY_APPEND)
73114+ reqmode |= GR_APPEND;
73115+ else if (acc_mode & MAY_WRITE)
73116+ reqmode |= GR_WRITE;
73117+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
73118+ reqmode |= GR_READ;
73119+
73120+ mode =
73121+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73122+ mnt);
73123+
73124+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73125+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73126+ reqmode & GR_READ ? " reading" : "",
73127+ reqmode & GR_WRITE ? " writing" : reqmode &
73128+ GR_APPEND ? " appending" : "");
73129+ return reqmode;
73130+	} else if (unlikely((mode & reqmode) != reqmode &&
73131+			    !(mode & GR_SUPPRESS))) {
73133+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73134+ reqmode & GR_READ ? " reading" : "",
73135+ reqmode & GR_WRITE ? " writing" : reqmode &
73136+ GR_APPEND ? " appending" : "");
73137+ return 0;
73138+ } else if (unlikely((mode & reqmode) != reqmode))
73139+ return 0;
73140+
73141+ return reqmode;
73142+}
73143+
73144+__u32
73145+gr_acl_handle_creat(const struct dentry * dentry,
73146+ const struct dentry * p_dentry,
73147+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73148+ const int imode)
73149+{
73150+ __u32 reqmode = GR_WRITE | GR_CREATE;
73151+ __u32 mode;
73152+
73153+ if (acc_mode & MAY_APPEND)
73154+ reqmode |= GR_APPEND;
73155+ // if a directory was required or the directory already exists, then
73156+ // don't count this open as a read
73157+ if ((acc_mode & MAY_READ) &&
73158+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
73159+ reqmode |= GR_READ;
73160+ if ((open_flags & O_CREAT) &&
73161+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73162+ reqmode |= GR_SETID;
73163+
73164+ mode =
73165+ gr_check_create(dentry, p_dentry, p_mnt,
73166+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73167+
73168+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73169+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73170+ reqmode & GR_READ ? " reading" : "",
73171+ reqmode & GR_WRITE ? " writing" : reqmode &
73172+ GR_APPEND ? " appending" : "");
73173+ return reqmode;
73174+	} else if (unlikely((mode & reqmode) != reqmode &&
73175+			    !(mode & GR_SUPPRESS))) {
73177+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73178+ reqmode & GR_READ ? " reading" : "",
73179+ reqmode & GR_WRITE ? " writing" : reqmode &
73180+ GR_APPEND ? " appending" : "");
73181+ return 0;
73182+ } else if (unlikely((mode & reqmode) != reqmode))
73183+ return 0;
73184+
73185+ return reqmode;
73186+}
73187+
73188+__u32
73189+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
73190+ const int fmode)
73191+{
73192+ __u32 mode, reqmode = GR_FIND;
73193+
73194+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
73195+ reqmode |= GR_EXEC;
73196+ if (fmode & S_IWOTH)
73197+ reqmode |= GR_WRITE;
73198+ if (fmode & S_IROTH)
73199+ reqmode |= GR_READ;
73200+
73201+ mode =
73202+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73203+ mnt);
73204+
73205+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73206+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73207+ reqmode & GR_READ ? " reading" : "",
73208+ reqmode & GR_WRITE ? " writing" : "",
73209+ reqmode & GR_EXEC ? " executing" : "");
73210+ return reqmode;
73211+	} else if (unlikely((mode & reqmode) != reqmode &&
73212+			    !(mode & GR_SUPPRESS))) {
73214+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73215+ reqmode & GR_READ ? " reading" : "",
73216+ reqmode & GR_WRITE ? " writing" : "",
73217+ reqmode & GR_EXEC ? " executing" : "");
73218+ return 0;
73219+ } else if (unlikely((mode & reqmode) != reqmode))
73220+ return 0;
73221+
73222+ return reqmode;
73223+}
73224+
73225+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
73226+{
73227+ __u32 mode;
73228+
73229+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
73230+
73231+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73232+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
73233+ return mode;
73234+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73235+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
73236+ return 0;
73237+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73238+ return 0;
73239+
73240+ return (reqmode);
73241+}
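+
+/* generic_fs_handler() encodes the three-outcome pattern shared by the
+   wrappers below: grant and audit when all requested bits are present
+   along with an audit bit, deny and log when bits are missing and
+   GR_SUPPRESS is unset, and deny silently otherwise. */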
73242+
73243+__u32
73244+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73245+{
73246+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
73247+}
73248+
73249+__u32
73250+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
73251+{
73252+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
73253+}
73254+
73255+__u32
73256+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
73257+{
73258+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
73259+}
73260+
73261+__u32
73262+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
73263+{
73264+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
73265+}
73266+
73267+__u32
73268+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
73269+ umode_t *modeptr)
73270+{
73271+ umode_t mode;
73272+
73273+ *modeptr &= ~gr_acl_umask();
73274+ mode = *modeptr;
73275+
73276+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
73277+ return 1;
73278+
73279+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
73280+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
73281+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
73282+ GR_CHMOD_ACL_MSG);
73283+ } else {
73284+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
73285+ }
73286+}
73287+
73288+__u32
73289+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
73290+{
73291+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
73292+}
73293+
73294+__u32
73295+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
73296+{
73297+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
73298+}
73299+
73300+__u32
73301+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
73302+{
73303+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
73304+}
73305+
73306+__u32
73307+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
73308+{
73309+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
73310+}
73311+
73312+__u32
73313+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
73314+{
73315+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
73316+ GR_UNIXCONNECT_ACL_MSG);
73317+}
73318+
73319+/* hardlinks require at minimum create and link permission;
73320+ any additional privilege required is based on the
73321+ privilege of the file being linked to
73322+*/
73323+__u32
73324+gr_acl_handle_link(const struct dentry * new_dentry,
73325+ const struct dentry * parent_dentry,
73326+ const struct vfsmount * parent_mnt,
73327+ const struct dentry * old_dentry,
73328+ const struct vfsmount * old_mnt, const struct filename *to)
73329+{
73330+ __u32 mode;
73331+ __u32 needmode = GR_CREATE | GR_LINK;
73332+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73333+
73334+ mode =
73335+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73336+ old_mnt);
73337+
73338+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73339+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73340+ return mode;
73341+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73342+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73343+ return 0;
73344+ } else if (unlikely((mode & needmode) != needmode))
73345+ return 0;
73346+
73347+ return 1;
73348+}
73349+
73350+__u32
73351+gr_acl_handle_symlink(const struct dentry * new_dentry,
73352+ const struct dentry * parent_dentry,
73353+ const struct vfsmount * parent_mnt, const struct filename *from)
73354+{
73355+ __u32 needmode = GR_WRITE | GR_CREATE;
73356+ __u32 mode;
73357+
73358+ mode =
73359+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73360+ GR_CREATE | GR_AUDIT_CREATE |
73361+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73362+
73363+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73364+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73365+ return mode;
73366+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73367+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73368+ return 0;
73369+ } else if (unlikely((mode & needmode) != needmode))
73370+ return 0;
73371+
73372+ return (GR_WRITE | GR_CREATE);
73373+}
73374+
73375+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73376+{
73377+ __u32 mode;
73378+
73379+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73380+
73381+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73382+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73383+ return mode;
73384+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73385+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73386+ return 0;
73387+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73388+ return 0;
73389+
73390+ return (reqmode);
73391+}
73392+
73393+__u32
73394+gr_acl_handle_mknod(const struct dentry * new_dentry,
73395+ const struct dentry * parent_dentry,
73396+ const struct vfsmount * parent_mnt,
73397+ const int mode)
73398+{
73399+ __u32 reqmode = GR_WRITE | GR_CREATE;
73400+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73401+ reqmode |= GR_SETID;
73402+
73403+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73404+ reqmode, GR_MKNOD_ACL_MSG);
73405+}
73406+
73407+__u32
73408+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73409+ const struct dentry *parent_dentry,
73410+ const struct vfsmount *parent_mnt)
73411+{
73412+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73413+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73414+}
73415+
73416+#define RENAME_CHECK_SUCCESS(old, new) \
73417+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73418+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73419+
73420+int
73421+gr_acl_handle_rename(struct dentry *new_dentry,
73422+ struct dentry *parent_dentry,
73423+ const struct vfsmount *parent_mnt,
73424+ struct dentry *old_dentry,
73425+ struct inode *old_parent_inode,
73426+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73427+{
73428+ __u32 comp1, comp2;
73429+ int error = 0;
73430+
73431+ if (unlikely(!gr_acl_is_enabled()))
73432+ return 0;
73433+
73434+ if (flags & RENAME_EXCHANGE) {
73435+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73436+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73437+ GR_SUPPRESS, parent_mnt);
73438+ comp2 =
73439+ gr_search_file(old_dentry,
73440+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73441+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73442+ } else if (d_is_negative(new_dentry)) {
73443+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73444+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73445+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73446+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73447+ GR_DELETE | GR_AUDIT_DELETE |
73448+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73449+ GR_SUPPRESS, old_mnt);
73450+ } else {
73451+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73452+ GR_CREATE | GR_DELETE |
73453+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73454+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73455+ GR_SUPPRESS, parent_mnt);
73456+ comp2 =
73457+ gr_search_file(old_dentry,
73458+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73459+ GR_DELETE | GR_AUDIT_DELETE |
73460+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73461+ }
73462+
73463+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73464+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73465+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73466+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73467+ && !(comp2 & GR_SUPPRESS)) {
73468+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73469+ error = -EACCES;
73470+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73471+ error = -EACCES;
73472+
73473+ return error;
73474+}
73475+
73476+void
73477+gr_acl_handle_exit(void)
73478+{
73479+ u16 id;
73480+ char *rolename;
73481+
73482+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73483+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73484+ id = current->acl_role_id;
73485+ rolename = current->role->rolename;
73486+ gr_set_acls(1);
73487+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73488+ }
73489+
73490+ gr_put_exec_file(current);
73491+ return;
73492+}
73493+
73494+int
73495+gr_acl_handle_procpidmem(const struct task_struct *task)
73496+{
73497+ if (unlikely(!gr_acl_is_enabled()))
73498+ return 0;
73499+
73500+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
73501+ !(current->acl->mode & GR_POVERRIDE) &&
73502+ !(current->role->roletype & GR_ROLE_GOD))
73503+ return -EACCES;
73504+
73505+ return 0;
73506+}
73507diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73508new file mode 100644
73509index 0000000..f056b81
73510--- /dev/null
73511+++ b/grsecurity/gracl_ip.c
73512@@ -0,0 +1,386 @@
73513+#include <linux/kernel.h>
73514+#include <asm/uaccess.h>
73515+#include <asm/errno.h>
73516+#include <net/sock.h>
73517+#include <linux/file.h>
73518+#include <linux/fs.h>
73519+#include <linux/net.h>
73520+#include <linux/in.h>
73521+#include <linux/skbuff.h>
73522+#include <linux/ip.h>
73523+#include <linux/udp.h>
73524+#include <linux/types.h>
73525+#include <linux/sched.h>
73526+#include <linux/netdevice.h>
73527+#include <linux/inetdevice.h>
73528+#include <linux/gracl.h>
73529+#include <linux/grsecurity.h>
73530+#include <linux/grinternal.h>
73531+
73532+#define GR_BIND 0x01
73533+#define GR_CONNECT 0x02
73534+#define GR_INVERT 0x04
73535+#define GR_BINDOVERRIDE 0x08
73536+#define GR_CONNECTOVERRIDE 0x10
73537+#define GR_SOCK_FAMILY 0x20
73538+
73539+static const char * gr_protocols[IPPROTO_MAX] = {
73540+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73541+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73542+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73543+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73544+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73545+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73546+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73547+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73548+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73549+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73550+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73551+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73552+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73553+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73554+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73555+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73556+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
73557+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73558+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73559+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73560+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73561+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73562+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73563+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73564+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73565+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73566+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73567+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73568+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73569+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73570+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73571+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73572+ };
73573+
73574+static const char * gr_socktypes[SOCK_MAX] = {
73575+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73576+ "unknown:7", "unknown:8", "unknown:9", "packet"
73577+ };
73578+
73579+static const char * gr_sockfamilies[AF_MAX+1] = {
73580+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73581+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73582+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
73583+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
73584+ };
73585+
73586+const char *
73587+gr_proto_to_name(unsigned char proto)
73588+{
73589+ return gr_protocols[proto];
73590+}
73591+
73592+const char *
73593+gr_socktype_to_name(unsigned char type)
73594+{
73595+ return gr_socktypes[type];
73596+}
73597+
73598+const char *
73599+gr_sockfamily_to_name(unsigned char family)
73600+{
73601+ return gr_sockfamilies[family];
73602+}
73603+
73604+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73605+
73606+int
73607+gr_search_socket(const int domain, const int type, const int protocol)
73608+{
73609+ struct acl_subject_label *curr;
73610+ const struct cred *cred = current_cred();
73611+
73612+ if (unlikely(!gr_acl_is_enabled()))
73613+ goto exit;
73614+
73615+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73616+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73617+ goto exit; // let the kernel handle it
73618+
73619+ curr = current->acl;
73620+
73621+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73622+ /* the family is allowed, if this is PF_INET allow it only if
73623+ the extra sock type/protocol checks pass */
73624+ if (domain == PF_INET)
73625+ goto inet_check;
73626+ goto exit;
73627+ } else {
73628+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73629+ __u32 fakeip = 0;
73630+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73631+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73632+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73633+ gr_to_filename(current->exec_file->f_path.dentry,
73634+ current->exec_file->f_path.mnt) :
73635+ curr->filename, curr->filename,
73636+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73637+ &current->signal->saved_ip);
73638+ goto exit;
73639+ }
73640+ goto exit_fail;
73641+ }
73642+
73643+inet_check:
73644+ /* the rest of this checking is for IPv4 only */
73645+ if (!curr->ips)
73646+ goto exit;
73647+
73648+ if ((curr->ip_type & (1U << type)) &&
73649+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73650+ goto exit;
73651+
73652+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73653+		/* we don't place acls on raw sockets, and sometimes
73654+		   dgram/ip sockets are opened for ioctl and not
73655+		   bind/connect, so we'll fake a bind learn log */
73656+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73657+ __u32 fakeip = 0;
73658+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73659+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73660+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73661+ gr_to_filename(current->exec_file->f_path.dentry,
73662+ current->exec_file->f_path.mnt) :
73663+ curr->filename, curr->filename,
73664+ &fakeip, 0, type,
73665+ protocol, GR_CONNECT, &current->signal->saved_ip);
73666+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73667+ __u32 fakeip = 0;
73668+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73669+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73670+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73671+ gr_to_filename(current->exec_file->f_path.dentry,
73672+ current->exec_file->f_path.mnt) :
73673+ curr->filename, curr->filename,
73674+ &fakeip, 0, type,
73675+ protocol, GR_BIND, &current->signal->saved_ip);
73676+ }
73677+ /* we'll log when they use connect or bind */
73678+ goto exit;
73679+ }
73680+
73681+exit_fail:
73682+ if (domain == PF_INET)
73683+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73684+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73685+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73686+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73687+ gr_socktype_to_name(type), protocol);
73688+
73689+ return 0;
73690+exit:
73691+ return 1;
73692+}
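/*
 * A minimal userspace sketch of the bitmap test gr_search_socket()
 * performs on sock_families[] and ip_proto[]: one bit per family or
 * protocol number, packed into 32-bit words.  All names below are
 * illustrative, not taken from the kernel code.
 */
#include <stdio.h>

#define BITMAP_WORDS(nbits) (((nbits) + 31) / 32)

static void bitmap_set(unsigned int *map, unsigned int n)
{
	map[n / 32] |= 1U << (n % 32);
}

static int bitmap_test(const unsigned int *map, unsigned int n)
{
	return !!(map[n / 32] & (1U << (n % 32)));
}

int main(void)
{
	unsigned int families[BITMAP_WORDS(45)] = { 0 };

	bitmap_set(families, 2);	/* AF_INET */
	bitmap_set(families, 10);	/* AF_INET6 */

	printf("AF_INET allowed: %d\n", bitmap_test(families, 2));
	printf("AF_UNIX allowed: %d\n", bitmap_test(families, 1));
	return 0;
}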
73693+
73694+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73695+{
73696+ if ((ip->mode & mode) &&
73697+ (ip_port >= ip->low) &&
73698+ (ip_port <= ip->high) &&
73699+ ((ntohl(ip_addr) & our_netmask) ==
73700+ (ntohl(our_addr) & our_netmask))
73701+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73702+ && (ip->type & (1U << type))) {
73703+ if (ip->mode & GR_INVERT)
73704+ return 2; // specifically denied
73705+ else
73706+ return 1; // allowed
73707+ }
73708+
73709+ return 0; // not specifically allowed, may continue parsing
73710+}
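/*
 * A worked userspace example of the subnet comparison in
 * check_ip_policy() above: both addresses are converted to host order
 * and masked, so 192.168.1.77 matches a hypothetical 192.168.1.0/24
 * rule.  The addresses and mask here are invented for illustration.
 */
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr peer, rule;
	unsigned int netmask = 0xffffff00;	/* /24, in host order */

	inet_pton(AF_INET, "192.168.1.77", &peer);
	inet_pton(AF_INET, "192.168.1.0", &rule);

	if ((ntohl(peer.s_addr) & netmask) == (ntohl(rule.s_addr) & netmask))
		printf("peer falls inside the rule's subnet\n");
	return 0;
}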
73711+
73712+static int
73713+gr_search_connectbind(const int full_mode, struct sock *sk,
73714+ struct sockaddr_in *addr, const int type)
73715+{
73716+ char iface[IFNAMSIZ] = {0};
73717+ struct acl_subject_label *curr;
73718+ struct acl_ip_label *ip;
73719+ struct inet_sock *isk;
73720+ struct net_device *dev;
73721+ struct in_device *idev;
73722+ unsigned long i;
73723+ int ret;
73724+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73725+ __u32 ip_addr = 0;
73726+ __u32 our_addr;
73727+ __u32 our_netmask;
73728+ char *p;
73729+ __u16 ip_port = 0;
73730+ const struct cred *cred = current_cred();
73731+
73732+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73733+ return 0;
73734+
73735+ curr = current->acl;
73736+ isk = inet_sk(sk);
73737+
73738+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
73739+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73740+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73741+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73742+ struct sockaddr_in saddr;
73743+ int err;
73744+
73745+ saddr.sin_family = AF_INET;
73746+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73747+ saddr.sin_port = isk->inet_sport;
73748+
73749+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73750+ if (err)
73751+ return err;
73752+
73753+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73754+ if (err)
73755+ return err;
73756+ }
73757+
73758+ if (!curr->ips)
73759+ return 0;
73760+
73761+ ip_addr = addr->sin_addr.s_addr;
73762+ ip_port = ntohs(addr->sin_port);
73763+
73764+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73765+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73766+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73767+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73768+ gr_to_filename(current->exec_file->f_path.dentry,
73769+ current->exec_file->f_path.mnt) :
73770+ curr->filename, curr->filename,
73771+ &ip_addr, ip_port, type,
73772+ sk->sk_protocol, mode, &current->signal->saved_ip);
73773+ return 0;
73774+ }
73775+
73776+ for (i = 0; i < curr->ip_num; i++) {
73777+ ip = *(curr->ips + i);
73778+ if (ip->iface != NULL) {
73779+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73780+ p = strchr(iface, ':');
73781+ if (p != NULL)
73782+ *p = '\0';
73783+ dev = dev_get_by_name(sock_net(sk), iface);
73784+ if (dev == NULL)
73785+ continue;
73786+ idev = in_dev_get(dev);
73787+ if (idev == NULL) {
73788+ dev_put(dev);
73789+ continue;
73790+ }
73791+ rcu_read_lock();
73792+ for_ifa(idev) {
73793+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73794+ our_addr = ifa->ifa_address;
73795+ our_netmask = 0xffffffff;
73796+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73797+ if (ret == 1) {
73798+ rcu_read_unlock();
73799+ in_dev_put(idev);
73800+ dev_put(dev);
73801+ return 0;
73802+ } else if (ret == 2) {
73803+ rcu_read_unlock();
73804+ in_dev_put(idev);
73805+ dev_put(dev);
73806+ goto denied;
73807+ }
73808+ }
73809+ } endfor_ifa(idev);
73810+ rcu_read_unlock();
73811+ in_dev_put(idev);
73812+ dev_put(dev);
73813+ } else {
73814+ our_addr = ip->addr;
73815+ our_netmask = ip->netmask;
73816+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73817+ if (ret == 1)
73818+ return 0;
73819+ else if (ret == 2)
73820+ goto denied;
73821+ }
73822+ }
73823+
73824+denied:
73825+ if (mode == GR_BIND)
73826+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73827+ else if (mode == GR_CONNECT)
73828+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73829+
73830+ return -EACCES;
73831+}
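/*
 * The per-rule interface handling above strips an alias suffix
 * ("eth0:1" -> "eth0") before the device lookup, then matches the
 * full label against each address label on the device.  A tiny
 * standalone sketch of the suffix strip, using the same strchr
 * technique; the label value is made up.
 */
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

int main(void)
{
	char iface[IFNAMSIZ] = { 0 };
	const char *label = "eth0:1";
	char *p;

	strncpy(iface, label, IFNAMSIZ - 1);
	p = strchr(iface, ':');
	if (p != NULL)
		*p = '\0';
	printf("device to look up: %s (label kept for matching: %s)\n",
	       iface, label);
	return 0;
}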
73832+
73833+int
73834+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73835+{
73836+ /* always allow disconnection of dgram sockets with connect */
73837+ if (addr->sin_family == AF_UNSPEC)
73838+ return 0;
73839+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73840+}
73841+
73842+int
73843+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73844+{
73845+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73846+}
73847+
73848+int gr_search_listen(struct socket *sock)
73849+{
73850+ struct sock *sk = sock->sk;
73851+ struct sockaddr_in addr;
73852+
73853+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73854+ addr.sin_port = inet_sk(sk)->inet_sport;
73855+
73856+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73857+}
73858+
73859+int gr_search_accept(struct socket *sock)
73860+{
73861+ struct sock *sk = sock->sk;
73862+ struct sockaddr_in addr;
73863+
73864+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73865+ addr.sin_port = inet_sk(sk)->inet_sport;
73866+
73867+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73868+}
73869+
73870+int
73871+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73872+{
73873+ if (addr)
73874+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73875+ else {
73876+ struct sockaddr_in sin;
73877+ const struct inet_sock *inet = inet_sk(sk);
73878+
73879+ sin.sin_addr.s_addr = inet->inet_daddr;
73880+ sin.sin_port = inet->inet_dport;
73881+
73882+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73883+ }
73884+}
73885+
73886+int
73887+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73888+{
73889+ struct sockaddr_in sin;
73890+
73891+ if (unlikely(skb->len < sizeof (struct udphdr)))
73892+ return 0; // skip this packet
73893+
73894+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73895+ sin.sin_port = udp_hdr(skb)->source;
73896+
73897+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73898+}
73899diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73900new file mode 100644
73901index 0000000..25f54ef
73902--- /dev/null
73903+++ b/grsecurity/gracl_learn.c
73904@@ -0,0 +1,207 @@
73905+#include <linux/kernel.h>
73906+#include <linux/mm.h>
73907+#include <linux/sched.h>
73908+#include <linux/poll.h>
73909+#include <linux/string.h>
73910+#include <linux/file.h>
73911+#include <linux/types.h>
73912+#include <linux/vmalloc.h>
73913+#include <linux/grinternal.h>
73914+
73915+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73916+ size_t count, loff_t *ppos);
73917+extern int gr_acl_is_enabled(void);
73918+
73919+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73920+static int gr_learn_attached;
73921+
73922+/* use a 512k buffer */
73923+#define LEARN_BUFFER_SIZE (512 * 1024)
73924+
73925+static DEFINE_SPINLOCK(gr_learn_lock);
73926+static DEFINE_MUTEX(gr_learn_user_mutex);
73927+
73928+/* we need to maintain two buffers: the kernel context of grlearn
73929+   takes a mutex around the copy to userspace, while the other kernel
73930+   contexts take a spinlock when copying into the buffer, since they
73931+   cannot sleep */
73932+static char *learn_buffer;
73933+static char *learn_buffer_user;
73934+static int learn_buffer_len;
73935+static int learn_buffer_user_len;
73936+
73937+static ssize_t
73938+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73939+{
73940+ DECLARE_WAITQUEUE(wait, current);
73941+ ssize_t retval = 0;
73942+
73943+ add_wait_queue(&learn_wait, &wait);
73944+ set_current_state(TASK_INTERRUPTIBLE);
73945+ do {
73946+ mutex_lock(&gr_learn_user_mutex);
73947+ spin_lock(&gr_learn_lock);
73948+ if (learn_buffer_len)
73949+ break;
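			/* data is ready: fall out of the loop with both
			   locks still held; they are released below once
			   the buffer has been snapshotted */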
73950+ spin_unlock(&gr_learn_lock);
73951+ mutex_unlock(&gr_learn_user_mutex);
73952+ if (file->f_flags & O_NONBLOCK) {
73953+ retval = -EAGAIN;
73954+ goto out;
73955+ }
73956+ if (signal_pending(current)) {
73957+ retval = -ERESTARTSYS;
73958+ goto out;
73959+ }
73960+
73961+ schedule();
73962+ } while (1);
73963+
73964+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73965+ learn_buffer_user_len = learn_buffer_len;
73966+ retval = learn_buffer_len;
73967+ learn_buffer_len = 0;
73968+
73969+ spin_unlock(&gr_learn_lock);
73970+
73971+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73972+ retval = -EFAULT;
73973+
73974+ mutex_unlock(&gr_learn_user_mutex);
73975+out:
73976+ set_current_state(TASK_RUNNING);
73977+ remove_wait_queue(&learn_wait, &wait);
73978+ return retval;
73979+}
73980+
73981+static unsigned int
73982+poll_learn(struct file * file, poll_table * wait)
73983+{
73984+ poll_wait(file, &learn_wait, wait);
73985+
73986+ if (learn_buffer_len)
73987+ return (POLLIN | POLLRDNORM);
73988+
73989+ return 0;
73990+}
73991+
73992+void
73993+gr_clear_learn_entries(void)
73994+{
73995+ char *tmp;
73996+
73997+ mutex_lock(&gr_learn_user_mutex);
73998+ spin_lock(&gr_learn_lock);
73999+ tmp = learn_buffer;
74000+ learn_buffer = NULL;
74001+ spin_unlock(&gr_learn_lock);
74002+ if (tmp)
74003+ vfree(tmp);
74004+ if (learn_buffer_user != NULL) {
74005+ vfree(learn_buffer_user);
74006+ learn_buffer_user = NULL;
74007+ }
74008+ learn_buffer_len = 0;
74009+ mutex_unlock(&gr_learn_user_mutex);
74010+
74011+ return;
74012+}
74013+
74014+void
74015+gr_add_learn_entry(const char *fmt, ...)
74016+{
74017+ va_list args;
74018+ unsigned int len;
74019+
74020+ if (!gr_learn_attached)
74021+ return;
74022+
74023+ spin_lock(&gr_learn_lock);
74024+
74025+ /* leave a gap at the end so we know when it's "full" but don't have to
74026+ compute the exact length of the string we're trying to append
74027+ */
74028+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
74029+ spin_unlock(&gr_learn_lock);
74030+ wake_up_interruptible(&learn_wait);
74031+ return;
74032+ }
74033+ if (learn_buffer == NULL) {
74034+ spin_unlock(&gr_learn_lock);
74035+ return;
74036+ }
74037+
74038+ va_start(args, fmt);
74039+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
74040+ va_end(args);
74041+
74042+ learn_buffer_len += len + 1;
74043+
74044+ spin_unlock(&gr_learn_lock);
74045+ wake_up_interruptible(&learn_wait);
74046+
74047+ return;
74048+}
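/*
 * A compressed userspace model of the two-buffer scheme above,
 * assuming POSIX threads: producers append under a cheap lock and
 * never wait on the consumer, while the consumer snapshots the filled
 * buffer under both locks and then does its slow output with only its
 * own lock held.  Buffer sizes and all names are invented.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096

static char buf[BUF_SIZE];	/* producers append here */
static char snapshot[BUF_SIZE];	/* consumer drains from here */
static size_t buf_len;
static pthread_mutex_t append_lock = PTHREAD_MUTEX_INITIALIZER; /* "spinlock" */
static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER; /* "mutex" */

static void add_entry(const char *msg)	/* the cheap, non-sleeping side */
{
	size_t n = strlen(msg);

	pthread_mutex_lock(&append_lock);
	if (buf_len + n < BUF_SIZE) {
		memcpy(buf + buf_len, msg, n);
		buf_len += n;
	}
	pthread_mutex_unlock(&append_lock);
}

static void drain(void)			/* the slow consumer side */
{
	size_t n;

	pthread_mutex_lock(&reader_lock);
	pthread_mutex_lock(&append_lock);
	n = buf_len;			/* quick snapshot under both locks */
	memcpy(snapshot, buf, n);
	buf_len = 0;
	pthread_mutex_unlock(&append_lock);
	/* the slow work (copy_to_user in the kernel) runs here with only
	   reader_lock held, so producers are never blocked for long */
	fwrite(snapshot, 1, n, stdout);
	pthread_mutex_unlock(&reader_lock);
}

int main(void)
{
	add_entry("event one\n");
	add_entry("event two\n");
	drain();
	return 0;
}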
74049+
74050+static int
74051+open_learn(struct inode *inode, struct file *file)
74052+{
74053+ if (file->f_mode & FMODE_READ && gr_learn_attached)
74054+ return -EBUSY;
74055+ if (file->f_mode & FMODE_READ) {
74056+ int retval = 0;
74057+ mutex_lock(&gr_learn_user_mutex);
74058+ if (learn_buffer == NULL)
74059+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
74060+ if (learn_buffer_user == NULL)
74061+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
74062+ if (learn_buffer == NULL) {
74063+ retval = -ENOMEM;
74064+ goto out_error;
74065+ }
74066+ if (learn_buffer_user == NULL) {
74067+ retval = -ENOMEM;
74068+ goto out_error;
74069+ }
74070+ learn_buffer_len = 0;
74071+ learn_buffer_user_len = 0;
74072+ gr_learn_attached = 1;
74073+out_error:
74074+ mutex_unlock(&gr_learn_user_mutex);
74075+ return retval;
74076+ }
74077+ return 0;
74078+}
74079+
74080+static int
74081+close_learn(struct inode *inode, struct file *file)
74082+{
74083+ if (file->f_mode & FMODE_READ) {
74084+ char *tmp = NULL;
74085+ mutex_lock(&gr_learn_user_mutex);
74086+ spin_lock(&gr_learn_lock);
74087+ tmp = learn_buffer;
74088+ learn_buffer = NULL;
74089+ spin_unlock(&gr_learn_lock);
74090+ if (tmp)
74091+ vfree(tmp);
74092+ if (learn_buffer_user != NULL) {
74093+ vfree(learn_buffer_user);
74094+ learn_buffer_user = NULL;
74095+ }
74096+ learn_buffer_len = 0;
74097+ learn_buffer_user_len = 0;
74098+ gr_learn_attached = 0;
74099+ mutex_unlock(&gr_learn_user_mutex);
74100+ }
74101+
74102+ return 0;
74103+}
74104+
74105+const struct file_operations grsec_fops = {
74106+ .read = read_learn,
74107+ .write = write_grsec_handler,
74108+ .open = open_learn,
74109+ .release = close_learn,
74110+ .poll = poll_learn,
74111+};
74112diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
74113new file mode 100644
74114index 0000000..fd26052
74115--- /dev/null
74116+++ b/grsecurity/gracl_policy.c
74117@@ -0,0 +1,1781 @@
74118+#include <linux/kernel.h>
74119+#include <linux/module.h>
74120+#include <linux/sched.h>
74121+#include <linux/mm.h>
74122+#include <linux/file.h>
74123+#include <linux/fs.h>
74124+#include <linux/namei.h>
74125+#include <linux/mount.h>
74126+#include <linux/tty.h>
74127+#include <linux/proc_fs.h>
74128+#include <linux/lglock.h>
74129+#include <linux/slab.h>
74130+#include <linux/vmalloc.h>
74131+#include <linux/types.h>
74132+#include <linux/sysctl.h>
74133+#include <linux/netdevice.h>
74134+#include <linux/ptrace.h>
74135+#include <linux/gracl.h>
74136+#include <linux/gralloc.h>
74137+#include <linux/security.h>
74138+#include <linux/grinternal.h>
74139+#include <linux/pid_namespace.h>
74140+#include <linux/stop_machine.h>
74141+#include <linux/fdtable.h>
74142+#include <linux/percpu.h>
74143+#include <linux/lglock.h>
74144+#include <linux/hugetlb.h>
74145+#include <linux/posix-timers.h>
74146+#include "../fs/mount.h"
74147+
74148+#include <asm/uaccess.h>
74149+#include <asm/errno.h>
74150+#include <asm/mman.h>
74151+
74152+extern struct gr_policy_state *polstate;
74153+
74154+#define FOR_EACH_ROLE_START(role) \
74155+ role = polstate->role_list; \
74156+ while (role) {
74157+
74158+#define FOR_EACH_ROLE_END(role) \
74159+ role = role->prev; \
74160+ }
74161+
74162+struct path gr_real_root;
74163+
74164+extern struct gr_alloc_state *current_alloc_state;
74165+
74166+u16 acl_sp_role_value;
74167+
74168+static DEFINE_MUTEX(gr_dev_mutex);
74169+
74170+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74171+extern void gr_clear_learn_entries(void);
74172+
74173+struct gr_arg *gr_usermode __read_only;
74174+unsigned char *gr_system_salt __read_only;
74175+unsigned char *gr_system_sum __read_only;
74176+
74177+static unsigned int gr_auth_attempts = 0;
74178+static unsigned long gr_auth_expires = 0UL;
74179+
74180+struct acl_object_label *fakefs_obj_rw;
74181+struct acl_object_label *fakefs_obj_rwx;
74182+
74183+extern int gr_init_uidset(void);
74184+extern void gr_free_uidset(void);
74185+extern void gr_remove_uid(uid_t uid);
74186+extern int gr_find_uid(uid_t uid);
74187+
74188+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
74189+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
74190+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
74191+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
74192+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
74193+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
74194+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
74195+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
74196+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
74197+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
74198+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
74199+extern void assign_special_role(const char *rolename);
74200+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
74201+extern int gr_rbac_disable(void *unused);
74202+extern void gr_enable_rbac_system(void);
74203+
74204+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
74205+{
74206+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
74207+ return -EFAULT;
74208+
74209+ return 0;
74210+}
74211+
74212+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74213+{
74214+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
74215+ return -EFAULT;
74216+
74217+ return 0;
74218+}
74219+
74220+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
74221+{
74222+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
74223+ return -EFAULT;
74224+
74225+ return 0;
74226+}
74227+
74228+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
74229+{
74230+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
74231+ return -EFAULT;
74232+
74233+ return 0;
74234+}
74235+
74236+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74237+{
74238+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
74239+ return -EFAULT;
74240+
74241+ return 0;
74242+}
74243+
74244+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74245+{
74246+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
74247+ return -EFAULT;
74248+
74249+ return 0;
74250+}
74251+
74252+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
74253+{
74254+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
74255+ return -EFAULT;
74256+
74257+ return 0;
74258+}
74259+
74260+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
74261+{
74262+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
74263+ return -EFAULT;
74264+
74265+ return 0;
74266+}
74267+
74268+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
74269+{
74270+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
74271+ return -EFAULT;
74272+
74273+ return 0;
74274+}
74275+
74276+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
74277+{
74278+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
74279+ return -EFAULT;
74280+
74281+ if ((uwrap->version != GRSECURITY_VERSION) ||
74282+ (uwrap->size != sizeof(struct gr_arg)))
74283+ return -EINVAL;
74284+
74285+ return 0;
74286+}
74287+
74288+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
74289+{
74290+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
74291+ return -EFAULT;
74292+
74293+ return 0;
74294+}
74295+
74296+static size_t get_gr_arg_wrapper_size_normal(void)
74297+{
74298+ return sizeof(struct gr_arg_wrapper);
74299+}
74300+
74301+#ifdef CONFIG_COMPAT
74302+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
74303+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
74304+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
74305+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
74306+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
74307+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
74308+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
74309+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
74310+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
74311+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
74312+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
74313+extern size_t get_gr_arg_wrapper_size_compat(void);
74314+
74315+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
74316+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
74317+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
74318+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
74319+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74320+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74321+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74322+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74323+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74324+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74325+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74326+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74327+
74328+#else
74329+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74330+#define copy_gr_arg copy_gr_arg_normal
74331+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74332+#define copy_acl_object_label copy_acl_object_label_normal
74333+#define copy_acl_subject_label copy_acl_subject_label_normal
74334+#define copy_acl_role_label copy_acl_role_label_normal
74335+#define copy_acl_ip_label copy_acl_ip_label_normal
74336+#define copy_pointer_from_array copy_pointer_from_array_normal
74337+#define copy_sprole_pw copy_sprole_pw_normal
74338+#define copy_role_transition copy_role_transition_normal
74339+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74340+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74341+#endif
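/*
 * The block above reduces to a common pattern: when two user ABIs are
 * possible (CONFIG_COMPAT) the copy routines become function pointers
 * chosen at run time, and when only one is possible they collapse to
 * macros so calls stay direct.  A self-contained sketch with invented
 * reader functions and an invented SUPPORT_COMPAT switch:
 */
#include <stdio.h>

static int read_native(int *out) { *out = 64; return 0; }
static int read_compat(int *out) { *out = 32; return 0; }

#ifdef SUPPORT_COMPAT
static int (*read_value)(int *out) = read_native; /* swappable at run time */
#else
#define read_value read_native			  /* always a direct call */
#endif

int main(void)
{
	int width;

#ifdef SUPPORT_COMPAT
	read_value = read_compat;	/* e.g. a 32-bit caller was detected */
#endif
	read_value(&width);
	printf("caller word size: %d\n", width);
	return 0;
}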
74342+
74343+static struct acl_subject_label *
74344+lookup_subject_map(const struct acl_subject_label *userp)
74345+{
74346+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74347+ struct subject_map *match;
74348+
74349+ match = polstate->subj_map_set.s_hash[index];
74350+
74351+ while (match && match->user != userp)
74352+ match = match->next;
74353+
74354+ if (match != NULL)
74355+ return match->kernel;
74356+ else
74357+ return NULL;
74358+}
74359+
74360+static void
74361+insert_subj_map_entry(struct subject_map *subjmap)
74362+{
74363+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74364+ struct subject_map **curr;
74365+
74366+ subjmap->prev = NULL;
74367+
74368+ curr = &polstate->subj_map_set.s_hash[index];
74369+ if (*curr != NULL)
74370+ (*curr)->prev = subjmap;
74371+
74372+ subjmap->next = *curr;
74373+ *curr = subjmap;
74374+
74375+ return;
74376+}
74377+
74378+static void
74379+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74380+{
74381+ unsigned int index =
74382+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74383+ struct acl_role_label **curr;
74384+ struct acl_role_label *tmp, *tmp2;
74385+
74386+ curr = &polstate->acl_role_set.r_hash[index];
74387+
74388+ /* simple case, slot is empty, just set it to our role */
74389+ if (*curr == NULL) {
74390+ *curr = role;
74391+ } else {
74392+ /* example:
74393+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74394+ 2 -> 3
74395+ */
74396+ /* first check to see if we can already be reached via this slot */
74397+ tmp = *curr;
74398+ while (tmp && tmp != role)
74399+ tmp = tmp->next;
74400+ if (tmp == role) {
74401+ /* we don't need to add ourselves to this slot's chain */
74402+ return;
74403+ }
74404+ /* we need to add ourselves to this chain, two cases */
74405+ if (role->next == NULL) {
74406+ /* simple case, append the current chain to our role */
74407+ role->next = *curr;
74408+ *curr = role;
74409+ } else {
74410+ /* 1 -> 2 -> 3 -> 4
74411+ 2 -> 3 -> 4
74412+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74413+ */
74414+ /* trickier case: walk our role's chain until we find
74415+ the role for the start of the current slot's chain */
74416+ tmp = role;
74417+ tmp2 = *curr;
74418+ while (tmp->next && tmp->next != tmp2)
74419+ tmp = tmp->next;
74420+ if (tmp->next == tmp2) {
74421+ /* from example above, we found 3, so just
74422+ replace this slot's chain with ours */
74423+ *curr = role;
74424+ } else {
74425+ /* we didn't find a subset of our role's chain
74426+ in the current slot's chain, so append their
74427+ chain to ours, and set us as the first role in
74428+ the slot's chain
74429+
74430+ we could fold this case with the case above,
74431+ but making it explicit for clarity
74432+ */
74433+ tmp->next = tmp2;
74434+ *curr = role;
74435+ }
74436+ }
74437+ }
74438+
74439+ return;
74440+}
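/*
 * __insert_acl_role_label() allows hash buckets to share chain tails:
 * a domain role is inserted once per member id, so the same node can
 * sit on several bucket chains at once, and insertion must splice
 * without breaking the other buckets.  A small standalone sketch of
 * that idea, mirroring the walk/splice logic above with invented
 * role names:
 */
#include <stdio.h>

struct role { const char *name; struct role *next; };

static void insert(struct role **slot, struct role *role)
{
	struct role *tmp = *slot;

	while (tmp && tmp != role)		/* already reachable here? */
		tmp = tmp->next;
	if (tmp == role)
		return;
	if (role->next == NULL) {		/* append slot's chain to us */
		role->next = *slot;
		*slot = role;
		return;
	}
	/* walk our chain looking for the slot's current head; either our
	   chain already subsumes it, or we append it, then take over */
	for (tmp = role; tmp->next && tmp->next != *slot; tmp = tmp->next)
		;
	if (tmp->next != *slot)
		tmp->next = *slot;
	*slot = role;
}

int main(void)
{
	struct role c = { "c", NULL }, b = { "b", NULL }, a = { "a", NULL };
	struct role *bucket1 = NULL, *bucket2 = NULL;
	struct role *r;

	insert(&bucket1, &c);	/* bucket1: c */
	insert(&bucket1, &b);	/* bucket1: b -> c */
	insert(&bucket2, &c);	/* bucket2: c (tail shared with bucket1) */
	insert(&bucket2, &a);	/* bucket2: a -> c */

	for (r = bucket1; r; r = r->next)
		printf("bucket1: %s\n", r->name);
	for (r = bucket2; r; r = r->next)
		printf("bucket2: %s\n", r->name);
	return 0;
}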
74441+
74442+static void
74443+insert_acl_role_label(struct acl_role_label *role)
74444+{
74445+ int i;
74446+
74447+ if (polstate->role_list == NULL) {
74448+ polstate->role_list = role;
74449+ role->prev = NULL;
74450+ } else {
74451+ role->prev = polstate->role_list;
74452+ polstate->role_list = role;
74453+ }
74454+
74455+ /* used for hash chains */
74456+ role->next = NULL;
74457+
74458+ if (role->roletype & GR_ROLE_DOMAIN) {
74459+ for (i = 0; i < role->domain_child_num; i++)
74460+ __insert_acl_role_label(role, role->domain_children[i]);
74461+ } else
74462+ __insert_acl_role_label(role, role->uidgid);
74463+}
74464+
74465+static int
74466+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
74467+{
74468+ struct name_entry **curr, *nentry;
74469+ struct inodev_entry *ientry;
74470+ unsigned int len = strlen(name);
74471+ unsigned int key = full_name_hash(name, len);
74472+ unsigned int index = key % polstate->name_set.n_size;
74473+
74474+ curr = &polstate->name_set.n_hash[index];
74475+
74476+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74477+ curr = &((*curr)->next);
74478+
74479+ if (*curr != NULL)
74480+ return 1;
74481+
74482+ nentry = acl_alloc(sizeof (struct name_entry));
74483+ if (nentry == NULL)
74484+ return 0;
74485+ ientry = acl_alloc(sizeof (struct inodev_entry));
74486+ if (ientry == NULL)
74487+ return 0;
74488+ ientry->nentry = nentry;
74489+
74490+ nentry->key = key;
74491+ nentry->name = name;
74492+ nentry->inode = inode;
74493+ nentry->device = device;
74494+ nentry->len = len;
74495+ nentry->deleted = deleted;
74496+
74497+ nentry->prev = NULL;
74498+ curr = &polstate->name_set.n_hash[index];
74499+ if (*curr != NULL)
74500+ (*curr)->prev = nentry;
74501+ nentry->next = *curr;
74502+ *curr = nentry;
74503+
74504+ /* insert us into the table searchable by inode/dev */
74505+ __insert_inodev_entry(polstate, ientry);
74506+
74507+ return 1;
74508+}
74509+
74510+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
74511+
74512+static void *
74513+create_table(__u32 * len, int elementsize)
74514+{
74515+ unsigned int table_sizes[] = {
74516+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74517+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74518+ 4194301, 8388593, 16777213, 33554393, 67108859
74519+ };
74520+ void *newtable = NULL;
74521+ unsigned int pwr = 0;
74522+
74523+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74524+ table_sizes[pwr] <= *len)
74525+ pwr++;
74526+
74527+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74528+ return newtable;
74529+
74530+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74531+ newtable =
74532+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74533+ else
74534+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74535+
74536+ *len = table_sizes[pwr];
74537+
74538+ return newtable;
74539+}
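/*
 * create_table() above sizes a chained hash table by picking the
 * smallest prime from a fixed list that is strictly larger than the
 * requested element count, keeping the expected load factor (lambda)
 * at or below ~1.  A userspace sketch of the same size selection;
 * the function name is invented:
 */
#include <stdio.h>

static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
	32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
	4194301, 8388593, 16777213, 33554393, 67108859
};

static unsigned int pick_table_size(unsigned int want)
{
	unsigned int pwr = 0;

	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= want)
		pwr++;
	/* the kernel version instead fails if even the largest prime is
	   too small; this sketch just returns it */
	return table_sizes[pwr];
}

int main(void)
{
	printf("%u\n", pick_table_size(100));	/* 127 */
	printf("%u\n", pick_table_size(1021));	/* 2039 */
	return 0;
}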
74540+
74541+static int
74542+init_variables(const struct gr_arg *arg, bool reload)
74543+{
74544+ struct task_struct *reaper = init_pid_ns.child_reaper;
74545+ unsigned int stacksize;
74546+
74547+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74548+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74549+ polstate->name_set.n_size = arg->role_db.num_objects;
74550+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74551+
74552+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74553+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74554+ return 1;
74555+
74556+ if (!reload) {
74557+ if (!gr_init_uidset())
74558+ return 1;
74559+ }
74560+
74561+ /* set up the stack that holds allocation info */
74562+
74563+ stacksize = arg->role_db.num_pointers + 5;
74564+
74565+ if (!acl_alloc_stack_init(stacksize))
74566+ return 1;
74567+
74568+ if (!reload) {
74569+ /* grab reference for the real root dentry and vfsmount */
74570+ get_fs_root(reaper->fs, &gr_real_root);
74571+
74572+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74573+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74574+#endif
74575+
74576+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74577+ if (fakefs_obj_rw == NULL)
74578+ return 1;
74579+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74580+
74581+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74582+ if (fakefs_obj_rwx == NULL)
74583+ return 1;
74584+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74585+ }
74586+
74587+ polstate->subj_map_set.s_hash =
74588+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74589+ polstate->acl_role_set.r_hash =
74590+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74591+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74592+ polstate->inodev_set.i_hash =
74593+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74594+
74595+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74596+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74597+ return 1;
74598+
74599+ memset(polstate->subj_map_set.s_hash, 0,
74600+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74601+ memset(polstate->acl_role_set.r_hash, 0,
74602+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74603+ memset(polstate->name_set.n_hash, 0,
74604+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74605+ memset(polstate->inodev_set.i_hash, 0,
74606+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74607+
74608+ return 0;
74609+}
74610+
74611+/* free information not needed after startup;
74612+   currently this is only the user->kernel pointer mappings for subjects
74613+*/
74614+
74615+static void
74616+free_init_variables(void)
74617+{
74618+ __u32 i;
74619+
74620+ if (polstate->subj_map_set.s_hash) {
74621+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74622+ if (polstate->subj_map_set.s_hash[i]) {
74623+ kfree(polstate->subj_map_set.s_hash[i]);
74624+ polstate->subj_map_set.s_hash[i] = NULL;
74625+ }
74626+ }
74627+
74628+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74629+ PAGE_SIZE)
74630+ kfree(polstate->subj_map_set.s_hash);
74631+ else
74632+ vfree(polstate->subj_map_set.s_hash);
74633+ }
74634+
74635+ return;
74636+}
74637+
74638+static void
74639+free_variables(bool reload)
74640+{
74641+ struct acl_subject_label *s;
74642+ struct acl_role_label *r;
74643+ struct task_struct *task, *task2;
74644+ unsigned int x;
74645+
74646+ if (!reload) {
74647+ gr_clear_learn_entries();
74648+
74649+ read_lock(&tasklist_lock);
74650+ do_each_thread(task2, task) {
74651+ task->acl_sp_role = 0;
74652+ task->acl_role_id = 0;
74653+ task->inherited = 0;
74654+ task->acl = NULL;
74655+ task->role = NULL;
74656+ } while_each_thread(task2, task);
74657+ read_unlock(&tasklist_lock);
74658+
74659+ kfree(fakefs_obj_rw);
74660+ fakefs_obj_rw = NULL;
74661+ kfree(fakefs_obj_rwx);
74662+ fakefs_obj_rwx = NULL;
74663+
74664+ /* release the reference to the real root dentry and vfsmount */
74665+ path_put(&gr_real_root);
74666+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74667+ }
74668+
74669+ /* free all object hash tables */
74670+
74671+ FOR_EACH_ROLE_START(r)
74672+ if (r->subj_hash == NULL)
74673+ goto next_role;
74674+ FOR_EACH_SUBJECT_START(r, s, x)
74675+ if (s->obj_hash == NULL)
74676+ break;
74677+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74678+ kfree(s->obj_hash);
74679+ else
74680+ vfree(s->obj_hash);
74681+ FOR_EACH_SUBJECT_END(s, x)
74682+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74683+ if (s->obj_hash == NULL)
74684+ break;
74685+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74686+ kfree(s->obj_hash);
74687+ else
74688+ vfree(s->obj_hash);
74689+ FOR_EACH_NESTED_SUBJECT_END(s)
74690+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74691+ kfree(r->subj_hash);
74692+ else
74693+ vfree(r->subj_hash);
74694+ r->subj_hash = NULL;
74695+next_role:
74696+ FOR_EACH_ROLE_END(r)
74697+
74698+ acl_free_all();
74699+
74700+ if (polstate->acl_role_set.r_hash) {
74701+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74702+ PAGE_SIZE)
74703+ kfree(polstate->acl_role_set.r_hash);
74704+ else
74705+ vfree(polstate->acl_role_set.r_hash);
74706+ }
74707+ if (polstate->name_set.n_hash) {
74708+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74709+ PAGE_SIZE)
74710+ kfree(polstate->name_set.n_hash);
74711+ else
74712+ vfree(polstate->name_set.n_hash);
74713+ }
74714+
74715+ if (polstate->inodev_set.i_hash) {
74716+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74717+ PAGE_SIZE)
74718+ kfree(polstate->inodev_set.i_hash);
74719+ else
74720+ vfree(polstate->inodev_set.i_hash);
74721+ }
74722+
74723+ if (!reload)
74724+ gr_free_uidset();
74725+
74726+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74727+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74728+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74729+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74730+
74731+ polstate->default_role = NULL;
74732+ polstate->kernel_role = NULL;
74733+ polstate->role_list = NULL;
74734+
74735+ return;
74736+}
74737+
74738+static struct acl_subject_label *
74739+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74740+
74741+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74742+{
74743+ unsigned int len = strnlen_user(*name, maxlen);
74744+ char *tmp;
74745+
74746+ if (!len || len >= maxlen)
74747+ return -EINVAL;
74748+
74749+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74750+ return -ENOMEM;
74751+
74752+ if (copy_from_user(tmp, *name, len))
74753+ return -EFAULT;
74754+
74755+ tmp[len-1] = '\0';
74756+ *name = tmp;
74757+
74758+ return 0;
74759+}
74760+
74761+static int
74762+copy_user_glob(struct acl_object_label *obj)
74763+{
74764+ struct acl_object_label *g_tmp, **guser;
74765+ int error;
74766+
74767+ if (obj->globbed == NULL)
74768+ return 0;
74769+
74770+ guser = &obj->globbed;
74771+ while (*guser) {
74772+ g_tmp = (struct acl_object_label *)
74773+ acl_alloc(sizeof (struct acl_object_label));
74774+ if (g_tmp == NULL)
74775+ return -ENOMEM;
74776+
74777+ if (copy_acl_object_label(g_tmp, *guser))
74778+ return -EFAULT;
74779+
74780+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74781+ if (error)
74782+ return error;
74783+
74784+ *guser = g_tmp;
74785+ guser = &(g_tmp->next);
74786+ }
74787+
74788+ return 0;
74789+}
74790+
74791+static int
74792+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74793+ struct acl_role_label *role)
74794+{
74795+ struct acl_object_label *o_tmp;
74796+ int ret;
74797+
74798+ while (userp) {
74799+ if ((o_tmp = (struct acl_object_label *)
74800+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74801+ return -ENOMEM;
74802+
74803+ if (copy_acl_object_label(o_tmp, userp))
74804+ return -EFAULT;
74805+
74806+ userp = o_tmp->prev;
74807+
74808+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74809+ if (ret)
74810+ return ret;
74811+
74812+ insert_acl_obj_label(o_tmp, subj);
74813+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74814+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74815+ return -ENOMEM;
74816+
74817+ ret = copy_user_glob(o_tmp);
74818+ if (ret)
74819+ return ret;
74820+
74821+ if (o_tmp->nested) {
74822+ int already_copied;
74823+
74824+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74825+ if (IS_ERR(o_tmp->nested))
74826+ return PTR_ERR(o_tmp->nested);
74827+
74828+ /* insert into nested subject list if we haven't copied this one yet
74829+ to prevent duplicate entries */
74830+ if (!already_copied) {
74831+ o_tmp->nested->next = role->hash->first;
74832+ role->hash->first = o_tmp->nested;
74833+ }
74834+ }
74835+ }
74836+
74837+ return 0;
74838+}
74839+
74840+static __u32
74841+count_user_subjs(struct acl_subject_label *userp)
74842+{
74843+ struct acl_subject_label s_tmp;
74844+ __u32 num = 0;
74845+
74846+ while (userp) {
74847+ if (copy_acl_subject_label(&s_tmp, userp))
74848+ break;
74849+
74850+		userp = s_tmp.prev;
		num++;
74851+ }
74852+
74853+ return num;
74854+}
74855+
74856+static int
74857+copy_user_allowedips(struct acl_role_label *rolep)
74858+{
74859+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74860+
74861+ ruserip = rolep->allowed_ips;
74862+
74863+ while (ruserip) {
74864+ rlast = rtmp;
74865+
74866+ if ((rtmp = (struct role_allowed_ip *)
74867+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74868+ return -ENOMEM;
74869+
74870+ if (copy_role_allowed_ip(rtmp, ruserip))
74871+ return -EFAULT;
74872+
74873+ ruserip = rtmp->prev;
74874+
74875+ if (!rlast) {
74876+ rtmp->prev = NULL;
74877+ rolep->allowed_ips = rtmp;
74878+ } else {
74879+ rlast->next = rtmp;
74880+ rtmp->prev = rlast;
74881+ }
74882+
74883+ if (!ruserip)
74884+ rtmp->next = NULL;
74885+ }
74886+
74887+ return 0;
74888+}
74889+
74890+static int
74891+copy_user_transitions(struct acl_role_label *rolep)
74892+{
74893+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74894+ int error;
74895+
74896+ rusertp = rolep->transitions;
74897+
74898+ while (rusertp) {
74899+ rlast = rtmp;
74900+
74901+ if ((rtmp = (struct role_transition *)
74902+ acl_alloc(sizeof (struct role_transition))) == NULL)
74903+ return -ENOMEM;
74904+
74905+ if (copy_role_transition(rtmp, rusertp))
74906+ return -EFAULT;
74907+
74908+ rusertp = rtmp->prev;
74909+
74910+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74911+ if (error)
74912+ return error;
74913+
74914+ if (!rlast) {
74915+ rtmp->prev = NULL;
74916+ rolep->transitions = rtmp;
74917+ } else {
74918+ rlast->next = rtmp;
74919+ rtmp->prev = rlast;
74920+ }
74921+
74922+ if (!rusertp)
74923+ rtmp->next = NULL;
74924+ }
74925+
74926+ return 0;
74927+}
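/*
 * copy_user_allowedips() and copy_user_transitions() above share one
 * pattern: walk a userspace list linked through ->prev and rebuild it
 * in kernel memory in the same order, wiring ->prev/->next as each
 * element arrives.  A userspace sketch of that rebuild with an
 * invented item type (allocation-failure handling elided):
 */
#include <stdio.h>
#include <stdlib.h>

struct item { int v; struct item *prev, *next; };

/* rebuild a prev-linked source chain as a properly linked list */
static struct item *rebuild(const struct item *src)
{
	struct item *head = NULL, *last = NULL;

	for (; src; src = src->prev) {
		struct item *n = malloc(sizeof(*n));

		n->v = src->v;
		if (last == NULL) {
			n->prev = NULL;
			head = n;
		} else {
			last->next = n;
			n->prev = last;
		}
		if (src->prev == NULL)	/* last element terminates the list */
			n->next = NULL;
		last = n;
	}
	return head;
}

int main(void)
{
	struct item c = { 3, NULL, NULL };
	struct item b = { 2, &c, NULL };
	struct item a = { 1, &b, NULL };
	struct item *it;

	for (it = rebuild(&a); it; it = it->next)
		printf("%d\n", it->v);	/* prints 1 2 3 */
	return 0;
}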
74928+
74929+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74930+{
74931+ struct acl_object_label o_tmp;
74932+ __u32 num = 0;
74933+
74934+ while (userp) {
74935+ if (copy_acl_object_label(&o_tmp, userp))
74936+ break;
74937+
74938+ userp = o_tmp.prev;
74939+ num++;
74940+ }
74941+
74942+ return num;
74943+}
74944+
74945+static struct acl_subject_label *
74946+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74947+{
74948+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74949+ __u32 num_objs;
74950+ struct acl_ip_label **i_tmp, *i_utmp2;
74951+ struct gr_hash_struct ghash;
74952+ struct subject_map *subjmap;
74953+ unsigned int i_num;
74954+ int err;
74955+
74956+ if (already_copied != NULL)
74957+ *already_copied = 0;
74958+
74959+ s_tmp = lookup_subject_map(userp);
74960+
74961+	/* we've already copied this subject into the kernel; just return
74962+	   the reference to it rather than copying it again
74963+	*/
74964+ if (s_tmp) {
74965+ if (already_copied != NULL)
74966+ *already_copied = 1;
74967+ return(s_tmp);
74968+ }
74969+
74970+ if ((s_tmp = (struct acl_subject_label *)
74971+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74972+ return ERR_PTR(-ENOMEM);
74973+
74974+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74975+ if (subjmap == NULL)
74976+ return ERR_PTR(-ENOMEM);
74977+
74978+ subjmap->user = userp;
74979+ subjmap->kernel = s_tmp;
74980+ insert_subj_map_entry(subjmap);
74981+
74982+ if (copy_acl_subject_label(s_tmp, userp))
74983+ return ERR_PTR(-EFAULT);
74984+
74985+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74986+ if (err)
74987+ return ERR_PTR(err);
74988+
74989+ if (!strcmp(s_tmp->filename, "/"))
74990+ role->root_label = s_tmp;
74991+
74992+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74993+ return ERR_PTR(-EFAULT);
74994+
74995+ /* copy user and group transition tables */
74996+
74997+ if (s_tmp->user_trans_num) {
74998+ uid_t *uidlist;
74999+
75000+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
75001+ if (uidlist == NULL)
75002+ return ERR_PTR(-ENOMEM);
75003+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
75004+ return ERR_PTR(-EFAULT);
75005+
75006+ s_tmp->user_transitions = uidlist;
75007+ }
75008+
75009+ if (s_tmp->group_trans_num) {
75010+ gid_t *gidlist;
75011+
75012+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
75013+ if (gidlist == NULL)
75014+ return ERR_PTR(-ENOMEM);
75015+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
75016+ return ERR_PTR(-EFAULT);
75017+
75018+ s_tmp->group_transitions = gidlist;
75019+ }
75020+
75021+ /* set up object hash table */
75022+ num_objs = count_user_objs(ghash.first);
75023+
75024+ s_tmp->obj_hash_size = num_objs;
75025+ s_tmp->obj_hash =
75026+ (struct acl_object_label **)
75027+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
75028+
75029+ if (!s_tmp->obj_hash)
75030+ return ERR_PTR(-ENOMEM);
75031+
75032+ memset(s_tmp->obj_hash, 0,
75033+ s_tmp->obj_hash_size *
75034+ sizeof (struct acl_object_label *));
75035+
75036+ /* add in objects */
75037+ err = copy_user_objs(ghash.first, s_tmp, role);
75038+
75039+ if (err)
75040+ return ERR_PTR(err);
75041+
75042+ /* set pointer for parent subject */
75043+ if (s_tmp->parent_subject) {
75044+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
75045+
75046+ if (IS_ERR(s_tmp2))
75047+ return s_tmp2;
75048+
75049+ s_tmp->parent_subject = s_tmp2;
75050+ }
75051+
75052+ /* add in ip acls */
75053+
75054+ if (!s_tmp->ip_num) {
75055+ s_tmp->ips = NULL;
75056+ goto insert;
75057+ }
75058+
75059+ i_tmp =
75060+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
75061+ sizeof (struct acl_ip_label *));
75062+
75063+ if (!i_tmp)
75064+ return ERR_PTR(-ENOMEM);
75065+
75066+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
75067+ *(i_tmp + i_num) =
75068+ (struct acl_ip_label *)
75069+ acl_alloc(sizeof (struct acl_ip_label));
75070+ if (!*(i_tmp + i_num))
75071+ return ERR_PTR(-ENOMEM);
75072+
75073+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
75074+ return ERR_PTR(-EFAULT);
75075+
75076+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
75077+ return ERR_PTR(-EFAULT);
75078+
75079+ if ((*(i_tmp + i_num))->iface == NULL)
75080+ continue;
75081+
75082+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
75083+ if (err)
75084+ return ERR_PTR(err);
75085+ }
75086+
75087+ s_tmp->ips = i_tmp;
75088+
75089+insert:
75090+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
75091+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
75092+ return ERR_PTR(-ENOMEM);
75093+
75094+ return s_tmp;
75095+}
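/*
 * do_copy_user_subj() memoizes user->kernel pointers through the
 * subject_map hash, so a subject referenced from many objects (or
 * through parent_subject chains) is copied exactly once and every
 * later reference resolves to the same kernel copy; recording the
 * mapping before recursing also terminates reference cycles.  A
 * minimal sketch of that memoized deep copy, using a linear table
 * instead of a hash and an invented node type:
 */
#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *parent; };

static struct { const struct node *src; struct node *dst; } seen[64];
static int nseen;

static struct node *copy_node(const struct node *src)
{
	struct node *dst;
	int i;

	if (src == NULL)
		return NULL;
	for (i = 0; i < nseen; i++)	/* already copied?  reuse it */
		if (seen[i].src == src)
			return seen[i].dst;

	dst = malloc(sizeof(*dst));
	dst->value = src->value;
	seen[nseen].src = src;		/* record *before* recursing so */
	seen[nseen].dst = dst;		/* reference cycles terminate   */
	nseen++;
	dst->parent = copy_node(src->parent);
	return dst;
}

int main(void)
{
	struct node root = { 1, NULL }, child = { 2, &root };
	struct node *a = copy_node(&child);
	struct node *b = copy_node(&child);

	printf("same copy reused: %d\n", a == b);			/* 1 */
	printf("parent shared:    %d\n", a->parent == copy_node(&root));/* 1 */
	return 0;
}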
75096+
75097+static int
75098+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
75099+{
75100+ struct acl_subject_label s_pre;
75101+ struct acl_subject_label * ret;
75102+ int err;
75103+
75104+ while (userp) {
75105+ if (copy_acl_subject_label(&s_pre, userp))
75106+ return -EFAULT;
75107+
75108+ ret = do_copy_user_subj(userp, role, NULL);
75109+
75110+ err = PTR_ERR(ret);
75111+ if (IS_ERR(ret))
75112+ return err;
75113+
75114+ insert_acl_subj_label(ret, role);
75115+
75116+ userp = s_pre.prev;
75117+ }
75118+
75119+ return 0;
75120+}
75121+
75122+static int
75123+copy_user_acl(struct gr_arg *arg)
75124+{
75125+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
75126+ struct acl_subject_label *subj_list;
75127+ struct sprole_pw *sptmp;
75128+ struct gr_hash_struct *ghash;
75129+ uid_t *domainlist;
75130+ unsigned int r_num;
75131+ int err = 0;
75132+ __u16 i;
75133+ __u32 num_subjs;
75134+
75135+ /* we need a default and kernel role */
75136+ if (arg->role_db.num_roles < 2)
75137+ return -EINVAL;
75138+
75139+ /* copy special role authentication info from userspace */
75140+
75141+ polstate->num_sprole_pws = arg->num_sprole_pws;
75142+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
75143+
75144+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
75145+ return -ENOMEM;
75146+
75147+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75148+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
75149+ if (!sptmp)
75150+ return -ENOMEM;
75151+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
75152+ return -EFAULT;
75153+
75154+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
75155+ if (err)
75156+ return err;
75157+
75158+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75159+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
75160+#endif
75161+
75162+ polstate->acl_special_roles[i] = sptmp;
75163+ }
75164+
75165+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
75166+
75167+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
75168+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
75169+
75170+ if (!r_tmp)
75171+ return -ENOMEM;
75172+
75173+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
75174+ return -EFAULT;
75175+
75176+ if (copy_acl_role_label(r_tmp, r_utmp2))
75177+ return -EFAULT;
75178+
75179+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
75180+ if (err)
75181+ return err;
75182+
75183+ if (!strcmp(r_tmp->rolename, "default")
75184+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
75185+ polstate->default_role = r_tmp;
75186+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
75187+ polstate->kernel_role = r_tmp;
75188+ }
75189+
75190+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
75191+ return -ENOMEM;
75192+
75193+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
75194+ return -EFAULT;
75195+
75196+ r_tmp->hash = ghash;
75197+
75198+ num_subjs = count_user_subjs(r_tmp->hash->first);
75199+
75200+ r_tmp->subj_hash_size = num_subjs;
75201+ r_tmp->subj_hash =
75202+ (struct acl_subject_label **)
75203+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
75204+
75205+ if (!r_tmp->subj_hash)
75206+ return -ENOMEM;
75207+
75208+ err = copy_user_allowedips(r_tmp);
75209+ if (err)
75210+ return err;
75211+
75212+ /* copy domain info */
75213+ if (r_tmp->domain_children != NULL) {
75214+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
75215+ if (domainlist == NULL)
75216+ return -ENOMEM;
75217+
75218+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
75219+ return -EFAULT;
75220+
75221+ r_tmp->domain_children = domainlist;
75222+ }
75223+
75224+ err = copy_user_transitions(r_tmp);
75225+ if (err)
75226+ return err;
75227+
75228+ memset(r_tmp->subj_hash, 0,
75229+ r_tmp->subj_hash_size *
75230+ sizeof (struct acl_subject_label *));
75231+
75232+ /* acquire the list of subjects, then NULL out
75233+ the list prior to parsing the subjects for this role,
75234+ as during this parsing the list is replaced with a list
75235+ of *nested* subjects for the role
75236+ */
75237+ subj_list = r_tmp->hash->first;
75238+
75239+ /* set nested subject list to null */
75240+ r_tmp->hash->first = NULL;
75241+
75242+ err = copy_user_subjs(subj_list, r_tmp);
75243+
75244+ if (err)
75245+ return err;
75246+
75247+ insert_acl_role_label(r_tmp);
75248+ }
75249+
75250+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
75251+ return -EINVAL;
75252+
75253+ return err;
75254+}
75255+
75256+static int gracl_reload_apply_policies(void *reload)
75257+{
75258+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
75259+ struct task_struct *task, *task2;
75260+ struct acl_role_label *role, *rtmp;
75261+ struct acl_subject_label *subj;
75262+ const struct cred *cred;
75263+ int role_applied;
75264+ int ret = 0;
75265+
75266+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
75267+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
75268+
75269+ /* first make sure we'll be able to apply the new policy cleanly */
75270+ do_each_thread(task2, task) {
75271+ if (task->exec_file == NULL)
75272+ continue;
75273+ role_applied = 0;
75274+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75275+ /* preserve special roles */
75276+ FOR_EACH_ROLE_START(role)
75277+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75278+ rtmp = task->role;
75279+ task->role = role;
75280+ role_applied = 1;
75281+ break;
75282+ }
75283+ FOR_EACH_ROLE_END(role)
75284+ }
75285+ if (!role_applied) {
75286+ cred = __task_cred(task);
75287+ rtmp = task->role;
75288+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75289+ }
75290+		/* this handles non-nested inherited subjects; nested subjects will still
75291+		   be dropped currently */
75292+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
75293+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
75294+ /* change the role back so that we've made no modifications to the policy */
75295+ task->role = rtmp;
75296+
75297+ if (subj == NULL || task->tmpacl == NULL) {
75298+ ret = -EINVAL;
75299+ goto out;
75300+ }
75301+ } while_each_thread(task2, task);
75302+
75303+ /* now actually apply the policy */
75304+
75305+ do_each_thread(task2, task) {
75306+ if (task->exec_file) {
75307+ role_applied = 0;
75308+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75309+ /* preserve special roles */
75310+ FOR_EACH_ROLE_START(role)
75311+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75312+ task->role = role;
75313+ role_applied = 1;
75314+ break;
75315+ }
75316+ FOR_EACH_ROLE_END(role)
75317+ }
75318+ if (!role_applied) {
75319+ cred = __task_cred(task);
75320+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75321+ }
75322+			/* this handles non-nested inherited subjects; nested subjects will still
75323+			   be dropped currently */
75324+ if (!reload_state->oldmode && task->inherited)
75325+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
75326+ else {
75327+ /* looked up and tagged to the task previously */
75328+ subj = task->tmpacl;
75329+ }
75330+ /* subj will be non-null */
75331+ __gr_apply_subject_to_task(polstate, task, subj);
75332+ if (reload_state->oldmode) {
75333+ task->acl_role_id = 0;
75334+ task->acl_sp_role = 0;
75335+ task->inherited = 0;
75336+ }
75337+ } else {
75338+ // it's a kernel process
75339+ task->role = polstate->kernel_role;
75340+ task->acl = polstate->kernel_role->root_label;
75341+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75342+ task->acl->mode &= ~GR_PROCFIND;
75343+#endif
75344+ }
75345+ } while_each_thread(task2, task);
75346+
75347+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75348+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75349+
75350+out:
75351+
75352+ return ret;
75353+}
75354+
75355+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75356+{
75357+ struct gr_reload_state new_reload_state = { };
75358+ int err;
75359+
75360+ new_reload_state.oldpolicy_ptr = polstate;
75361+ new_reload_state.oldalloc_ptr = current_alloc_state;
75362+ new_reload_state.oldmode = oldmode;
75363+
75364+ current_alloc_state = &new_reload_state.newalloc;
75365+ polstate = &new_reload_state.newpolicy;
75366+
75367+ /* everything relevant is now saved off, copy in the new policy */
75368+ if (init_variables(args, true)) {
75369+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75370+ err = -ENOMEM;
75371+ goto error;
75372+ }
75373+
75374+ err = copy_user_acl(args);
75375+ free_init_variables();
75376+ if (err)
75377+ goto error;
75378+	/* the new policy is copied in, with the old policy available via saved_state.
75379+	   First go through applying roles, making sure to preserve special roles,
75380+	   then apply new subjects, making sure to preserve inherited and nested subjects,
75381+	   though currently only inherited subjects will be preserved
75382+	*/
75383+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75384+ if (err)
75385+ goto error;
75386+
75387+ /* we've now applied the new policy, so restore the old policy state to free it */
75388+ polstate = &new_reload_state.oldpolicy;
75389+ current_alloc_state = &new_reload_state.oldalloc;
75390+ free_variables(true);
75391+
75392+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75393+ to running_polstate/current_alloc_state inside stop_machine
75394+ */
75395+ err = 0;
75396+ goto out;
75397+error:
75398+	/* if loading the new policy fails, we simply keep the
75399+	   previous policy around
75400+	*/
75401+ free_variables(true);
75402+
75403+ /* doesn't affect runtime, but maintains consistent state */
75404+out:
75405+ polstate = new_reload_state.oldpolicy_ptr;
75406+ current_alloc_state = new_reload_state.oldalloc_ptr;
75407+
75408+ return err;
75409+}
75410+
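/*
 * Editor's note: a minimal, self-contained sketch (not part of the patch)
 * of the save/swap/restore idiom gracl_reload() applies to the global
 * polstate and current_alloc_state pointers; every name below is an
 * illustrative stand-in, not the kernel's.
 */
#include <stdio.h>

struct policy { int version; };

static struct policy *active_policy;	/* stands in for polstate */

static int reload(struct policy *candidate, int (*apply)(struct policy *))
{
	struct policy *saved = active_policy;	/* save the old pointer */

	active_policy = candidate;	/* helpers now build into the new state */
	if (apply(candidate) != 0) {
		active_policy = saved;	/* failure: keep the previous policy */
		return -1;
	}
	return 0;	/* success: caller may now free what 'saved' points to */
}

static int apply_ok(struct policy *p) { (void)p; return 0; }

int main(void)
{
	struct policy old = { 1 }, new = { 2 };

	active_policy = &old;
	reload(&new, apply_ok);
	printf("active version: %d\n", active_policy->version);	/* 2 */
	return 0;
}
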
75411+static int
75412+gracl_init(struct gr_arg *args)
75413+{
75414+ int error = 0;
75415+
75416+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75417+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75418+
75419+ if (init_variables(args, false)) {
75420+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75421+ error = -ENOMEM;
75422+ goto out;
75423+ }
75424+
75425+ error = copy_user_acl(args);
75426+ free_init_variables();
75427+ if (error)
75428+ goto out;
75429+
75430+ error = gr_set_acls(0);
75431+ if (error)
75432+ goto out;
75433+
75434+ gr_enable_rbac_system();
75435+
75436+ return 0;
75437+
75438+out:
75439+ free_variables(false);
75440+ return error;
75441+}
75442+
75443+static int
75444+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75445+ unsigned char **sum)
75446+{
75447+ struct acl_role_label *r;
75448+ struct role_allowed_ip *ipp;
75449+ struct role_transition *trans;
75450+ unsigned int i;
75451+ int found = 0;
75452+ u32 curr_ip = current->signal->curr_ip;
75453+
75454+ current->signal->saved_ip = curr_ip;
75455+
75456+ /* check transition table */
75457+
75458+ for (trans = current->role->transitions; trans; trans = trans->next) {
75459+ if (!strcmp(rolename, trans->rolename)) {
75460+ found = 1;
75461+ break;
75462+ }
75463+ }
75464+
75465+ if (!found)
75466+ return 0;
75467+
75468+ /* handle special roles that do not require authentication
75469+ and check ip */
75470+
75471+ FOR_EACH_ROLE_START(r)
75472+ if (!strcmp(rolename, r->rolename) &&
75473+ (r->roletype & GR_ROLE_SPECIAL)) {
75474+ found = 0;
75475+ if (r->allowed_ips != NULL) {
75476+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75477+ if ((ntohl(curr_ip) & ipp->netmask) ==
75478+ (ntohl(ipp->addr) & ipp->netmask))
75479+ found = 1;
75480+ }
75481+ } else
75482+ found = 2;
75483+ if (!found)
75484+ return 0;
75485+
75486+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75487+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75488+ *salt = NULL;
75489+ *sum = NULL;
75490+ return 1;
75491+ }
75492+ }
75493+ FOR_EACH_ROLE_END(r)
75494+
75495+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75496+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75497+ *salt = polstate->acl_special_roles[i]->salt;
75498+ *sum = polstate->acl_special_roles[i]->sum;
75499+ return 1;
75500+ }
75501+ }
75502+
75503+ return 0;
75504+}
75505+
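/*
 * Editor's note: a self-contained sketch (not from the patch) of the
 * allowed_ips netmask test used above: two addresses match when they
 * agree on every bit covered by the mask.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int same_subnet(const char *a, const char *b, uint32_t mask)
{
	struct in_addr ia, ib;

	inet_pton(AF_INET, a, &ia);
	inet_pton(AF_INET, b, &ib);
	/* mask both host-order addresses, as lookup_special_role_auth() does */
	return (ntohl(ia.s_addr) & mask) == (ntohl(ib.s_addr) & mask);
}

int main(void)
{
	uint32_t slash24 = 0xffffff00;	/* 255.255.255.0 */

	printf("%d\n", same_subnet("192.168.1.10", "192.168.1.200", slash24));	/* 1 */
	printf("%d\n", same_subnet("192.168.1.10", "192.168.2.10", slash24));	/* 0 */
	return 0;
}
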
75506+int gr_check_secure_terminal(struct task_struct *task)
75507+{
75508+ struct task_struct *p, *p2, *p3;
75509+ struct files_struct *files;
75510+ struct fdtable *fdt;
75511+ struct file *our_file = NULL, *file;
75512+ int i;
75513+
75514+ if (task->signal->tty == NULL)
75515+ return 1;
75516+
75517+ files = get_files_struct(task);
75518+ if (files != NULL) {
75519+ rcu_read_lock();
75520+ fdt = files_fdtable(files);
75521+		for (i = 0; i < fdt->max_fds; i++) {
75522+ file = fcheck_files(files, i);
75523+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75524+ get_file(file);
75525+ our_file = file;
75526+ }
75527+ }
75528+ rcu_read_unlock();
75529+ put_files_struct(files);
75530+ }
75531+
75532+ if (our_file == NULL)
75533+ return 1;
75534+
75535+ read_lock(&tasklist_lock);
75536+ do_each_thread(p2, p) {
75537+ files = get_files_struct(p);
75538+ if (files == NULL ||
75539+ (p->signal && p->signal->tty == task->signal->tty)) {
75540+ if (files != NULL)
75541+ put_files_struct(files);
75542+ continue;
75543+ }
75544+ rcu_read_lock();
75545+ fdt = files_fdtable(files);
75546+		for (i = 0; i < fdt->max_fds; i++) {
75547+ file = fcheck_files(files, i);
75548+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75549+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75550+ p3 = task;
75551+ while (task_pid_nr(p3) > 0) {
75552+ if (p3 == p)
75553+ break;
75554+ p3 = p3->real_parent;
75555+ }
75556+ if (p3 == p)
75557+ break;
75558+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75559+ gr_handle_alertkill(p);
75560+ rcu_read_unlock();
75561+ put_files_struct(files);
75562+ read_unlock(&tasklist_lock);
75563+ fput(our_file);
75564+ return 0;
75565+ }
75566+ }
75567+ rcu_read_unlock();
75568+ put_files_struct(files);
75569+ } while_each_thread(p2, p);
75570+ read_unlock(&tasklist_lock);
75571+
75572+ fput(our_file);
75573+ return 1;
75574+}
75575+
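/*
 * Editor's note: an illustrative userspace sketch (not kernel code) of the
 * ancestor walk gr_check_secure_terminal() performs to exempt the caller's
 * own parents from the tty-sniffing check: follow real_parent links until
 * the pid-0 idle task and see whether the candidate is hit.
 */
#include <stdio.h>

struct proc { int pid; struct proc *parent; };

static int is_ancestor(const struct proc *candidate, const struct proc *task)
{
	const struct proc *walk = task;

	while (walk->pid > 0) {	/* the pid-0 task terminates the chain */
		if (walk == candidate)
			return 1;
		walk = walk->parent;
	}
	return 0;
}

int main(void)
{
	struct proc idle = { 0, 0 };
	struct proc init = { 1, &idle }, shell = { 100, &init }, child = { 200, &shell };

	printf("%d %d\n", is_ancestor(&shell, &child),	/* 1: shell is child's parent */
	       is_ancestor(&child, &shell));		/* 0 */
	return 0;
}
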
75576+ssize_t
75577+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75578+{
75579+ struct gr_arg_wrapper uwrap;
75580+ unsigned char *sprole_salt = NULL;
75581+ unsigned char *sprole_sum = NULL;
75582+ int error = 0;
75583+ int error2 = 0;
75584+ size_t req_count = 0;
75585+ unsigned char oldmode = 0;
75586+
75587+ mutex_lock(&gr_dev_mutex);
75588+
75589+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75590+ error = -EPERM;
75591+ goto out;
75592+ }
75593+
75594+#ifdef CONFIG_COMPAT
75595+ pax_open_kernel();
75596+ if (is_compat_task()) {
75597+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75598+ copy_gr_arg = &copy_gr_arg_compat;
75599+ copy_acl_object_label = &copy_acl_object_label_compat;
75600+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75601+ copy_acl_role_label = &copy_acl_role_label_compat;
75602+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75603+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75604+ copy_role_transition = &copy_role_transition_compat;
75605+ copy_sprole_pw = &copy_sprole_pw_compat;
75606+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75607+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75608+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75609+ } else {
75610+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75611+ copy_gr_arg = &copy_gr_arg_normal;
75612+ copy_acl_object_label = &copy_acl_object_label_normal;
75613+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75614+ copy_acl_role_label = &copy_acl_role_label_normal;
75615+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75616+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75617+ copy_role_transition = &copy_role_transition_normal;
75618+ copy_sprole_pw = &copy_sprole_pw_normal;
75619+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75620+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75621+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75622+ }
75623+ pax_close_kernel();
75624+#endif
75625+
75626+ req_count = get_gr_arg_wrapper_size();
75627+
75628+ if (count != req_count) {
75629+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75630+ error = -EINVAL;
75631+ goto out;
75632+ }
75633+
75634+
75635+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75636+ gr_auth_expires = 0;
75637+ gr_auth_attempts = 0;
75638+ }
75639+
75640+ error = copy_gr_arg_wrapper(buf, &uwrap);
75641+ if (error)
75642+ goto out;
75643+
75644+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75645+ if (error)
75646+ goto out;
75647+
75648+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75649+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75650+ time_after(gr_auth_expires, get_seconds())) {
75651+ error = -EBUSY;
75652+ goto out;
75653+ }
75654+
75655+	/* if a non-root user tries to do anything other than use a special
75656+	   role, do not attempt authentication and do not count the attempt
75657+	   toward authentication locking
75658+	*/
75659+
75660+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75661+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75662+ gr_is_global_nonroot(current_uid())) {
75663+ error = -EPERM;
75664+ goto out;
75665+ }
75666+
75667+ /* ensure pw and special role name are null terminated */
75668+
75669+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75670+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75671+
75672+	/* Okay.
75673+	 * We have enough of the argument structure (we have yet to
75674+	 * copy_from_user the tables themselves). Copy the tables
75675+	 * only if we need them, i.e. for loading operations. */
75676+
75677+ switch (gr_usermode->mode) {
75678+ case GR_STATUS:
75679+ if (gr_acl_is_enabled()) {
75680+ error = 1;
75681+ if (!gr_check_secure_terminal(current))
75682+ error = 3;
75683+ } else
75684+ error = 2;
75685+ goto out;
75686+ case GR_SHUTDOWN:
75687+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75688+ stop_machine(gr_rbac_disable, NULL, NULL);
75689+ free_variables(false);
75690+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75691+ memset(gr_system_salt, 0, GR_SALT_LEN);
75692+ memset(gr_system_sum, 0, GR_SHA_LEN);
75693+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75694+ } else if (gr_acl_is_enabled()) {
75695+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75696+ error = -EPERM;
75697+ } else {
75698+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75699+ error = -EAGAIN;
75700+ }
75701+ break;
75702+ case GR_ENABLE:
75703+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75704+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75705+ else {
75706+ if (gr_acl_is_enabled())
75707+ error = -EAGAIN;
75708+ else
75709+ error = error2;
75710+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75711+ }
75712+ break;
75713+	case GR_OLDRELOAD:
75714+		oldmode = 1;	/* fall through */
75715+	case GR_RELOAD:
75716+ if (!gr_acl_is_enabled()) {
75717+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75718+ error = -EAGAIN;
75719+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75720+ error2 = gracl_reload(gr_usermode, oldmode);
75721+ if (!error2)
75722+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75723+ else {
75724+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75725+ error = error2;
75726+ }
75727+ } else {
75728+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75729+ error = -EPERM;
75730+ }
75731+ break;
75732+ case GR_SEGVMOD:
75733+ if (unlikely(!gr_acl_is_enabled())) {
75734+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75735+ error = -EAGAIN;
75736+ break;
75737+ }
75738+
75739+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75740+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75741+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75742+ struct acl_subject_label *segvacl;
75743+ segvacl =
75744+ lookup_acl_subj_label(gr_usermode->segv_inode,
75745+ gr_usermode->segv_device,
75746+ current->role);
75747+ if (segvacl) {
75748+ segvacl->crashes = 0;
75749+ segvacl->expires = 0;
75750+ }
75751+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75752+ gr_remove_uid(gr_usermode->segv_uid);
75753+ }
75754+ } else {
75755+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75756+ error = -EPERM;
75757+ }
75758+ break;
75759+ case GR_SPROLE:
75760+ case GR_SPROLEPAM:
75761+ if (unlikely(!gr_acl_is_enabled())) {
75762+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75763+ error = -EAGAIN;
75764+ break;
75765+ }
75766+
75767+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75768+ current->role->expires = 0;
75769+ current->role->auth_attempts = 0;
75770+ }
75771+
75772+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75773+ time_after(current->role->expires, get_seconds())) {
75774+ error = -EBUSY;
75775+ goto out;
75776+ }
75777+
75778+ if (lookup_special_role_auth
75779+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75780+ && ((!sprole_salt && !sprole_sum)
75781+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75782+ char *p = "";
75783+ assign_special_role(gr_usermode->sp_role);
75784+ read_lock(&tasklist_lock);
75785+ if (current->real_parent)
75786+ p = current->real_parent->role->rolename;
75787+ read_unlock(&tasklist_lock);
75788+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75789+ p, acl_sp_role_value);
75790+ } else {
75791+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75792+ error = -EPERM;
75793+			if (!(current->role->auth_attempts++))
75794+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75795+
75796+ goto out;
75797+ }
75798+ break;
75799+ case GR_UNSPROLE:
75800+ if (unlikely(!gr_acl_is_enabled())) {
75801+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75802+ error = -EAGAIN;
75803+ break;
75804+ }
75805+
75806+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75807+ char *p = "";
75808+ int i = 0;
75809+
75810+ read_lock(&tasklist_lock);
75811+ if (current->real_parent) {
75812+ p = current->real_parent->role->rolename;
75813+ i = current->real_parent->acl_role_id;
75814+ }
75815+ read_unlock(&tasklist_lock);
75816+
75817+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75818+ gr_set_acls(1);
75819+ } else {
75820+ error = -EPERM;
75821+ goto out;
75822+ }
75823+ break;
75824+ default:
75825+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75826+ error = -EINVAL;
75827+ break;
75828+ }
75829+
75830+ if (error != -EPERM)
75831+ goto out;
75832+
75833+	if (!(gr_auth_attempts++))
75834+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75835+
75836+ out:
75837+ mutex_unlock(&gr_dev_mutex);
75838+
75839+ if (!error)
75840+ error = req_count;
75841+
75842+ return error;
75843+}
75844+
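/*
 * Editor's note: a hedged userspace sketch of how a control tool would
 * drive write_grsec_handler(): a single write() of exactly
 * sizeof(struct gr_arg_wrapper) bytes. The device path and the wrapper
 * layout shown here are assumptions for illustration (only the 'arg'
 * member is visible in this hunk); the real definitions live elsewhere
 * in the patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

struct gr_arg;				/* opaque here; filled in by the real tool */
struct gr_arg_wrapper {			/* assumed layout for illustration */
	struct gr_arg *arg;
	unsigned int version;
	unsigned int size;
};

int main(void)
{
	struct gr_arg_wrapper wrap = { 0 };	/* a real tool populates this */
	int fd = open("/dev/grsec", O_WRONLY);	/* assumed device node */

	if (fd < 0)
		return 1;
	/* any other length fails the count != req_count check with -EINVAL */
	if (write(fd, &wrap, sizeof(wrap)) != sizeof(wrap))
		perror("write");
	close(fd);
	return 0;
}
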
75845+int
75846+gr_set_acls(const int type)
75847+{
75848+ struct task_struct *task, *task2;
75849+ struct acl_role_label *role = current->role;
75850+ struct acl_subject_label *subj;
75851+ __u16 acl_role_id = current->acl_role_id;
75852+ const struct cred *cred;
75853+ int ret;
75854+
75855+ rcu_read_lock();
75856+ read_lock(&tasklist_lock);
75857+ read_lock(&grsec_exec_file_lock);
75858+ do_each_thread(task2, task) {
75859+		/* check whether we're called from the exit handler;
75860+		   if so, only replace ACLs that have inherited the admin
75861+		   ACL */
75862+
75863+ if (type && (task->role != role ||
75864+ task->acl_role_id != acl_role_id))
75865+ continue;
75866+
75867+ task->acl_role_id = 0;
75868+ task->acl_sp_role = 0;
75869+ task->inherited = 0;
75870+
75871+ if (task->exec_file) {
75872+ cred = __task_cred(task);
75873+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75874+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75875+ if (subj == NULL) {
75876+ ret = -EINVAL;
75877+ read_unlock(&grsec_exec_file_lock);
75878+ read_unlock(&tasklist_lock);
75879+ rcu_read_unlock();
75880+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75881+ return ret;
75882+ }
75883+ __gr_apply_subject_to_task(polstate, task, subj);
75884+ } else {
75885+ // it's a kernel process
75886+ task->role = polstate->kernel_role;
75887+ task->acl = polstate->kernel_role->root_label;
75888+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75889+ task->acl->mode &= ~GR_PROCFIND;
75890+#endif
75891+ }
75892+ } while_each_thread(task2, task);
75893+ read_unlock(&grsec_exec_file_lock);
75894+ read_unlock(&tasklist_lock);
75895+ rcu_read_unlock();
75896+
75897+ return 0;
75898+}
75899diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75900new file mode 100644
75901index 0000000..39645c9
75902--- /dev/null
75903+++ b/grsecurity/gracl_res.c
75904@@ -0,0 +1,68 @@
75905+#include <linux/kernel.h>
75906+#include <linux/sched.h>
75907+#include <linux/gracl.h>
75908+#include <linux/grinternal.h>
75909+
75910+static const char *restab_log[] = {
75911+ [RLIMIT_CPU] = "RLIMIT_CPU",
75912+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75913+ [RLIMIT_DATA] = "RLIMIT_DATA",
75914+ [RLIMIT_STACK] = "RLIMIT_STACK",
75915+ [RLIMIT_CORE] = "RLIMIT_CORE",
75916+ [RLIMIT_RSS] = "RLIMIT_RSS",
75917+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75918+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75919+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75920+ [RLIMIT_AS] = "RLIMIT_AS",
75921+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75922+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75923+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75924+ [RLIMIT_NICE] = "RLIMIT_NICE",
75925+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75926+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75927+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75928+};
75929+
75930+void
75931+gr_log_resource(const struct task_struct *task,
75932+ const int res, const unsigned long wanted, const int gt)
75933+{
75934+ const struct cred *cred;
75935+ unsigned long rlim;
75936+
75937+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75938+ return;
75939+
75940+	// resource not yet supported
75941+ if (unlikely(!restab_log[res]))
75942+ return;
75943+
75944+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75945+ rlim = task_rlimit_max(task, res);
75946+ else
75947+ rlim = task_rlimit(task, res);
75948+
75949+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75950+ return;
75951+
75952+ rcu_read_lock();
75953+ cred = __task_cred(task);
75954+
75955+ if (res == RLIMIT_NPROC &&
75956+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75957+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75958+ goto out_rcu_unlock;
75959+ else if (res == RLIMIT_MEMLOCK &&
75960+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75961+ goto out_rcu_unlock;
75962+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75963+ goto out_rcu_unlock;
75964+ rcu_read_unlock();
75965+
75966+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75967+
75968+ return;
75969+out_rcu_unlock:
75970+ rcu_read_unlock();
75971+ return;
75972+}
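
/*
 * Editor's note: a small sketch (not from the patch) of the 'gt' flag
 * semantics in gr_log_resource() above (the RLIM_INFINITY short-circuit
 * is omitted here): with gt set, only a request strictly above the limit
 * logs; without it, reaching the limit already logs.
 */
#include <stdio.h>

static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	/* inverse of the early-return condition in gr_log_resource() */
	return !(gt ? wanted <= rlim : wanted < rlim);
}

int main(void)
{
	printf("%d\n", over_limit(100, 100, 1));	/* 0: at the limit is fine */
	printf("%d\n", over_limit(100, 100, 0));	/* 1: at the limit already logs */
	return 0;
}
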
75973diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75974new file mode 100644
75975index 0000000..218b66b
75976--- /dev/null
75977+++ b/grsecurity/gracl_segv.c
75978@@ -0,0 +1,324 @@
75979+#include <linux/kernel.h>
75980+#include <linux/mm.h>
75981+#include <asm/uaccess.h>
75982+#include <asm/errno.h>
75983+#include <asm/mman.h>
75984+#include <net/sock.h>
75985+#include <linux/file.h>
75986+#include <linux/fs.h>
75987+#include <linux/net.h>
75988+#include <linux/in.h>
75989+#include <linux/slab.h>
75990+#include <linux/types.h>
75991+#include <linux/sched.h>
75992+#include <linux/timer.h>
75993+#include <linux/gracl.h>
75994+#include <linux/grsecurity.h>
75995+#include <linux/grinternal.h>
75996+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75997+#include <linux/magic.h>
75998+#include <linux/pagemap.h>
75999+#include "../fs/btrfs/async-thread.h"
76000+#include "../fs/btrfs/ctree.h"
76001+#include "../fs/btrfs/btrfs_inode.h"
76002+#endif
76003+
76004+static struct crash_uid *uid_set;
76005+static unsigned short uid_used;
76006+static DEFINE_SPINLOCK(gr_uid_lock);
76007+extern rwlock_t gr_inode_lock;
76008+extern struct acl_subject_label *
76009+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
76010+ struct acl_role_label *role);
76011+
76012+static inline dev_t __get_dev(const struct dentry *dentry)
76013+{
76014+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76015+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76016+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
76017+ else
76018+#endif
76019+ return dentry->d_sb->s_dev;
76020+}
76021+
76022+static inline u64 __get_ino(const struct dentry *dentry)
76023+{
76024+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76025+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76026+ return btrfs_ino(dentry->d_inode);
76027+ else
76028+#endif
76029+ return dentry->d_inode->i_ino;
76030+}
76031+
76032+int
76033+gr_init_uidset(void)
76034+{
76035+ uid_set =
76036+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
76037+ uid_used = 0;
76038+
76039+ return uid_set ? 1 : 0;
76040+}
76041+
76042+void
76043+gr_free_uidset(void)
76044+{
76045+ if (uid_set) {
76046+ struct crash_uid *tmpset;
76047+ spin_lock(&gr_uid_lock);
76048+ tmpset = uid_set;
76049+ uid_set = NULL;
76050+ uid_used = 0;
76051+ spin_unlock(&gr_uid_lock);
76052+		/* tmpset is the old uid_set, known non-NULL here */
76053+		kfree(tmpset);
76054+ }
76055+
76056+ return;
76057+}
76058+
76059+int
76060+gr_find_uid(const uid_t uid)
76061+{
76062+ struct crash_uid *tmp = uid_set;
76063+ uid_t buid;
76064+ int low = 0, high = uid_used - 1, mid;
76065+
76066+ while (high >= low) {
76067+ mid = (low + high) >> 1;
76068+ buid = tmp[mid].uid;
76069+ if (buid == uid)
76070+ return mid;
76071+ if (buid > uid)
76072+ high = mid - 1;
76073+ if (buid < uid)
76074+ low = mid + 1;
76075+ }
76076+
76077+ return -1;
76078+}
76079+
76080+static __inline__ void
76081+gr_insertsort(void)
76082+{
76083+ unsigned short i, j;
76084+ struct crash_uid index;
76085+
76086+ for (i = 1; i < uid_used; i++) {
76087+ index = uid_set[i];
76088+ j = i;
76089+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
76090+ uid_set[j] = uid_set[j - 1];
76091+ j--;
76092+ }
76093+ uid_set[j] = index;
76094+ }
76095+
76096+ return;
76097+}
76098+
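/*
 * Editor's note: a self-contained sketch (not from the patch) showing why
 * gr_insertsort() keeps uid_set ordered: gr_find_uid() above is a binary
 * search, which only works on a sorted table.
 */
#include <stdio.h>

struct crash_entry { unsigned int uid; unsigned long expires; };

static void insertsort(struct crash_entry *set, unsigned short used)
{
	unsigned short i, j;
	struct crash_entry index;

	for (i = 1; i < used; i++) {
		index = set[i];
		for (j = i; j > 0 && set[j - 1].uid > index.uid; j--)
			set[j] = set[j - 1];
		set[j] = index;
	}
}

static int find_uid(const struct crash_entry *set, int used, unsigned int uid)
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (set[mid].uid == uid)
			return mid;
		if (set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

int main(void)
{
	struct crash_entry set[] = { {1000, 0}, {33, 0}, {500, 0} };

	insertsort(set, 3);
	printf("%d %d\n", find_uid(set, 3, 500), find_uid(set, 3, 42));	/* 1 -1 */
	return 0;
}
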
76099+static __inline__ void
76100+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
76101+{
76102+ int loc;
76103+ uid_t uid = GR_GLOBAL_UID(kuid);
76104+
76105+ if (uid_used == GR_UIDTABLE_MAX)
76106+ return;
76107+
76108+ loc = gr_find_uid(uid);
76109+
76110+ if (loc >= 0) {
76111+ uid_set[loc].expires = expires;
76112+ return;
76113+ }
76114+
76115+ uid_set[uid_used].uid = uid;
76116+ uid_set[uid_used].expires = expires;
76117+ uid_used++;
76118+
76119+ gr_insertsort();
76120+
76121+ return;
76122+}
76123+
76124+void
76125+gr_remove_uid(const unsigned short loc)
76126+{
76127+ unsigned short i;
76128+
76129+ for (i = loc + 1; i < uid_used; i++)
76130+ uid_set[i - 1] = uid_set[i];
76131+
76132+ uid_used--;
76133+
76134+ return;
76135+}
76136+
76137+int
76138+gr_check_crash_uid(const kuid_t kuid)
76139+{
76140+ int loc;
76141+ int ret = 0;
76142+ uid_t uid;
76143+
76144+ if (unlikely(!gr_acl_is_enabled()))
76145+ return 0;
76146+
76147+ uid = GR_GLOBAL_UID(kuid);
76148+
76149+ spin_lock(&gr_uid_lock);
76150+ loc = gr_find_uid(uid);
76151+
76152+ if (loc < 0)
76153+ goto out_unlock;
76154+
76155+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
76156+ gr_remove_uid(loc);
76157+ else
76158+ ret = 1;
76159+
76160+out_unlock:
76161+ spin_unlock(&gr_uid_lock);
76162+ return ret;
76163+}
76164+
76165+static __inline__ int
76166+proc_is_setxid(const struct cred *cred)
76167+{
76168+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
76169+ !uid_eq(cred->uid, cred->fsuid))
76170+ return 1;
76171+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
76172+ !gid_eq(cred->gid, cred->fsgid))
76173+ return 1;
76174+
76175+ return 0;
76176+}
76177+
76178+extern int gr_fake_force_sig(int sig, struct task_struct *t);
76179+
76180+void
76181+gr_handle_crash(struct task_struct *task, const int sig)
76182+{
76183+ struct acl_subject_label *curr;
76184+ struct task_struct *tsk, *tsk2;
76185+ const struct cred *cred;
76186+ const struct cred *cred2;
76187+
76188+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
76189+ return;
76190+
76191+ if (unlikely(!gr_acl_is_enabled()))
76192+ return;
76193+
76194+ curr = task->acl;
76195+
76196+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
76197+ return;
76198+
76199+ if (time_before_eq(curr->expires, get_seconds())) {
76200+ curr->expires = 0;
76201+ curr->crashes = 0;
76202+ }
76203+
76204+ curr->crashes++;
76205+
76206+ if (!curr->expires)
76207+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
76208+
76209+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76210+ time_after(curr->expires, get_seconds())) {
76211+ rcu_read_lock();
76212+ cred = __task_cred(task);
76213+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
76214+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76215+ spin_lock(&gr_uid_lock);
76216+ gr_insert_uid(cred->uid, curr->expires);
76217+ spin_unlock(&gr_uid_lock);
76218+ curr->expires = 0;
76219+ curr->crashes = 0;
76220+ read_lock(&tasklist_lock);
76221+ do_each_thread(tsk2, tsk) {
76222+ cred2 = __task_cred(tsk);
76223+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
76224+ gr_fake_force_sig(SIGKILL, tsk);
76225+ } while_each_thread(tsk2, tsk);
76226+ read_unlock(&tasklist_lock);
76227+ } else {
76228+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76229+ read_lock(&tasklist_lock);
76230+ read_lock(&grsec_exec_file_lock);
76231+ do_each_thread(tsk2, tsk) {
76232+ if (likely(tsk != task)) {
76233+ // if this thread has the same subject as the one that triggered
76234+ // RES_CRASH and it's the same binary, kill it
76235+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
76236+ gr_fake_force_sig(SIGKILL, tsk);
76237+ }
76238+ } while_each_thread(tsk2, tsk);
76239+ read_unlock(&grsec_exec_file_lock);
76240+ read_unlock(&tasklist_lock);
76241+ }
76242+ rcu_read_unlock();
76243+ }
76244+
76245+ return;
76246+}
76247+
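/*
 * Editor's note: a worked sketch (not from the patch) of the RES_CRASH
 * bookkeeping in gr_handle_crash() above: with a policy of rlim_cur
 * crashes per rlim_max seconds (e.g. 2 per 30s), the lockout trips once
 * the counter reaches rlim_cur inside the window.
 */
#include <stdio.h>

struct crash_state { unsigned long crashes, expires; };

static int crash_trips(struct crash_state *s, unsigned long now,
		       unsigned long per, unsigned long secs)
{
	if (s->expires && s->expires <= now) {	/* window elapsed: reset */
		s->expires = 0;
		s->crashes = 0;
	}
	s->crashes++;
	if (!s->expires)
		s->expires = now + secs;	/* open a new window */
	return s->crashes >= per && s->expires > now;
}

int main(void)
{
	struct crash_state s = { 0, 0 };

	/* prints 010: the second crash inside the 30s window trips the
	   limit; by t=40 the window has expired and counting restarts */
	printf("%d%d%d\n", crash_trips(&s, 0, 2, 30),
	       crash_trips(&s, 5, 2, 30), crash_trips(&s, 40, 2, 30));
	return 0;
}
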
76248+int
76249+gr_check_crash_exec(const struct file *filp)
76250+{
76251+ struct acl_subject_label *curr;
76252+ struct dentry *dentry;
76253+
76254+ if (unlikely(!gr_acl_is_enabled()))
76255+ return 0;
76256+
76257+ read_lock(&gr_inode_lock);
76258+ dentry = filp->f_path.dentry;
76259+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
76260+ current->role);
76261+ read_unlock(&gr_inode_lock);
76262+
76263+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
76264+ (!curr->crashes && !curr->expires))
76265+ return 0;
76266+
76267+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76268+ time_after(curr->expires, get_seconds()))
76269+ return 1;
76270+ else if (time_before_eq(curr->expires, get_seconds())) {
76271+ curr->crashes = 0;
76272+ curr->expires = 0;
76273+ }
76274+
76275+ return 0;
76276+}
76277+
76278+void
76279+gr_handle_alertkill(struct task_struct *task)
76280+{
76281+ struct acl_subject_label *curracl;
76282+ __u32 curr_ip;
76283+ struct task_struct *p, *p2;
76284+
76285+ if (unlikely(!gr_acl_is_enabled()))
76286+ return;
76287+
76288+ curracl = task->acl;
76289+ curr_ip = task->signal->curr_ip;
76290+
76291+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
76292+ read_lock(&tasklist_lock);
76293+ do_each_thread(p2, p) {
76294+ if (p->signal->curr_ip == curr_ip)
76295+ gr_fake_force_sig(SIGKILL, p);
76296+ } while_each_thread(p2, p);
76297+ read_unlock(&tasklist_lock);
76298+ } else if (curracl->mode & GR_KILLPROC)
76299+ gr_fake_force_sig(SIGKILL, task);
76300+
76301+ return;
76302+}
76303diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
76304new file mode 100644
76305index 0000000..6b0c9cc
76306--- /dev/null
76307+++ b/grsecurity/gracl_shm.c
76308@@ -0,0 +1,40 @@
76309+#include <linux/kernel.h>
76310+#include <linux/mm.h>
76311+#include <linux/sched.h>
76312+#include <linux/file.h>
76313+#include <linux/ipc.h>
76314+#include <linux/gracl.h>
76315+#include <linux/grsecurity.h>
76316+#include <linux/grinternal.h>
76317+
76318+int
76319+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76320+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76321+{
76322+ struct task_struct *task;
76323+
76324+ if (!gr_acl_is_enabled())
76325+ return 1;
76326+
76327+ rcu_read_lock();
76328+ read_lock(&tasklist_lock);
76329+
76330+ task = find_task_by_vpid(shm_cprid);
76331+
76332+ if (unlikely(!task))
76333+ task = find_task_by_vpid(shm_lapid);
76334+
76335+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
76336+ (task_pid_nr(task) == shm_lapid)) &&
76337+ (task->acl->mode & GR_PROTSHM) &&
76338+ (task->acl != current->acl))) {
76339+ read_unlock(&tasklist_lock);
76340+ rcu_read_unlock();
76341+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76342+ return 0;
76343+ }
76344+ read_unlock(&tasklist_lock);
76345+ rcu_read_unlock();
76346+
76347+ return 1;
76348+}
76349diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76350new file mode 100644
76351index 0000000..bc0be01
76352--- /dev/null
76353+++ b/grsecurity/grsec_chdir.c
76354@@ -0,0 +1,19 @@
76355+#include <linux/kernel.h>
76356+#include <linux/sched.h>
76357+#include <linux/fs.h>
76358+#include <linux/file.h>
76359+#include <linux/grsecurity.h>
76360+#include <linux/grinternal.h>
76361+
76362+void
76363+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76364+{
76365+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76366+ if ((grsec_enable_chdir && grsec_enable_group &&
76367+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76368+ !grsec_enable_group)) {
76369+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76370+ }
76371+#endif
76372+ return;
76373+}
76374diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76375new file mode 100644
76376index 0000000..114ea4f
76377--- /dev/null
76378+++ b/grsecurity/grsec_chroot.c
76379@@ -0,0 +1,467 @@
76380+#include <linux/kernel.h>
76381+#include <linux/module.h>
76382+#include <linux/sched.h>
76383+#include <linux/file.h>
76384+#include <linux/fs.h>
76385+#include <linux/mount.h>
76386+#include <linux/types.h>
76387+#include "../fs/mount.h"
76388+#include <linux/grsecurity.h>
76389+#include <linux/grinternal.h>
76390+
76391+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76392+int gr_init_ran;
76393+#endif
76394+
76395+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
76396+{
76397+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76398+ struct dentry *tmpd = dentry;
76399+
76400+ read_seqlock_excl(&mount_lock);
76401+ write_seqlock(&rename_lock);
76402+
76403+ while (tmpd != mnt->mnt_root) {
76404+ atomic_inc(&tmpd->chroot_refcnt);
76405+ tmpd = tmpd->d_parent;
76406+ }
76407+ atomic_inc(&tmpd->chroot_refcnt);
76408+
76409+ write_sequnlock(&rename_lock);
76410+ read_sequnlock_excl(&mount_lock);
76411+#endif
76412+}
76413+
76414+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
76415+{
76416+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76417+ struct dentry *tmpd = dentry;
76418+
76419+ read_seqlock_excl(&mount_lock);
76420+ write_seqlock(&rename_lock);
76421+
76422+ while (tmpd != mnt->mnt_root) {
76423+ atomic_dec(&tmpd->chroot_refcnt);
76424+ tmpd = tmpd->d_parent;
76425+ }
76426+ atomic_dec(&tmpd->chroot_refcnt);
76427+
76428+ write_sequnlock(&rename_lock);
76429+ read_sequnlock_excl(&mount_lock);
76430+#endif
76431+}
76432+
76433+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76434+static struct dentry *get_closest_chroot(struct dentry *dentry)
76435+{
76436+ write_seqlock(&rename_lock);
76437+ do {
76438+ if (atomic_read(&dentry->chroot_refcnt)) {
76439+ write_sequnlock(&rename_lock);
76440+ return dentry;
76441+ }
76442+ dentry = dentry->d_parent;
76443+ } while (!IS_ROOT(dentry));
76444+ write_sequnlock(&rename_lock);
76445+ return NULL;
76446+}
76447+#endif
76448+
76449+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
76450+ struct dentry *newdentry, struct vfsmount *newmnt)
76451+{
76452+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76453+ struct dentry *chroot;
76454+
76455+ if (unlikely(!grsec_enable_chroot_rename))
76456+ return 0;
76457+
76458+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
76459+ return 0;
76460+
76461+ chroot = get_closest_chroot(olddentry);
76462+
76463+ if (chroot == NULL)
76464+ return 0;
76465+
76466+ if (is_subdir(newdentry, chroot))
76467+ return 0;
76468+
76469+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
76470+
76471+ return 1;
76472+#else
76473+ return 0;
76474+#endif
76475+}
76476+
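/*
 * Editor's note: an illustrative sketch (not kernel code) of the
 * chroot_refcnt scheme above: every directory on the path to an active
 * chroot root is tagged, and a rename whose source sits below a tagged
 * directory is only allowed to stay inside it.
 */
#include <stdio.h>

struct dir { struct dir *parent; int chroot_refcnt; };

static struct dir *closest_chroot(struct dir *d)
{
	do {
		if (d->chroot_refcnt)
			return d;
		d = d->parent;
	} while (d->parent != d);	/* stop at the fs root */
	return NULL;
}

static int is_subdir(const struct dir *d, const struct dir *ancestor)
{
	for (;; d = d->parent) {
		if (d == ancestor)
			return 1;
		if (d->parent == d)
			return 0;
	}
}

int main(void)
{
	struct dir root = { &root, 0 }, jail = { &root, 1 };
	struct dir inside = { &jail, 0 }, outside = { &root, 0 };

	/* moving 'inside' under 'outside' would cross the jail: denied */
	printf("%d\n", closest_chroot(&inside) == &jail &&
	       !is_subdir(&outside, &jail));	/* 1 */
	return 0;
}
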
76477+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76478+{
76479+#ifdef CONFIG_GRKERNSEC
76480+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76481+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76482+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76483+ && gr_init_ran
76484+#endif
76485+ )
76486+ task->gr_is_chrooted = 1;
76487+ else {
76488+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76489+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76490+ gr_init_ran = 1;
76491+#endif
76492+ task->gr_is_chrooted = 0;
76493+ }
76494+
76495+ task->gr_chroot_dentry = path->dentry;
76496+#endif
76497+ return;
76498+}
76499+
76500+void gr_clear_chroot_entries(struct task_struct *task)
76501+{
76502+#ifdef CONFIG_GRKERNSEC
76503+ task->gr_is_chrooted = 0;
76504+ task->gr_chroot_dentry = NULL;
76505+#endif
76506+ return;
76507+}
76508+
76509+int
76510+gr_handle_chroot_unix(const pid_t pid)
76511+{
76512+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76513+ struct task_struct *p;
76514+
76515+ if (unlikely(!grsec_enable_chroot_unix))
76516+ return 1;
76517+
76518+ if (likely(!proc_is_chrooted(current)))
76519+ return 1;
76520+
76521+ rcu_read_lock();
76522+ read_lock(&tasklist_lock);
76523+ p = find_task_by_vpid_unrestricted(pid);
76524+ if (unlikely(p && !have_same_root(current, p))) {
76525+ read_unlock(&tasklist_lock);
76526+ rcu_read_unlock();
76527+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76528+ return 0;
76529+ }
76530+ read_unlock(&tasklist_lock);
76531+ rcu_read_unlock();
76532+#endif
76533+ return 1;
76534+}
76535+
76536+int
76537+gr_handle_chroot_nice(void)
76538+{
76539+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76540+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76541+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76542+ return -EPERM;
76543+ }
76544+#endif
76545+ return 0;
76546+}
76547+
76548+int
76549+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76550+{
76551+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76552+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76553+ && proc_is_chrooted(current)) {
76554+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76555+ return -EACCES;
76556+ }
76557+#endif
76558+ return 0;
76559+}
76560+
76561+int
76562+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76563+{
76564+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76565+ struct task_struct *p;
76566+ int ret = 0;
76567+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76568+ return ret;
76569+
76570+ read_lock(&tasklist_lock);
76571+ do_each_pid_task(pid, type, p) {
76572+ if (!have_same_root(current, p)) {
76573+ ret = 1;
76574+ goto out;
76575+ }
76576+ } while_each_pid_task(pid, type, p);
76577+out:
76578+ read_unlock(&tasklist_lock);
76579+ return ret;
76580+#endif
76581+ return 0;
76582+}
76583+
76584+int
76585+gr_pid_is_chrooted(struct task_struct *p)
76586+{
76587+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76588+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76589+ return 0;
76590+
76591+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76592+ !have_same_root(current, p)) {
76593+ return 1;
76594+ }
76595+#endif
76596+ return 0;
76597+}
76598+
76599+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76600+
76601+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76602+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76603+{
76604+ struct path path, currentroot;
76605+ int ret = 0;
76606+
76607+ path.dentry = (struct dentry *)u_dentry;
76608+ path.mnt = (struct vfsmount *)u_mnt;
76609+ get_fs_root(current->fs, &currentroot);
76610+ if (path_is_under(&path, &currentroot))
76611+ ret = 1;
76612+ path_put(&currentroot);
76613+
76614+ return ret;
76615+}
76616+#endif
76617+
76618+int
76619+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76620+{
76621+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76622+ if (!grsec_enable_chroot_fchdir)
76623+ return 1;
76624+
76625+ if (!proc_is_chrooted(current))
76626+ return 1;
76627+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76628+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76629+ return 0;
76630+ }
76631+#endif
76632+ return 1;
76633+}
76634+
76635+int
76636+gr_chroot_fhandle(void)
76637+{
76638+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76639+ if (!grsec_enable_chroot_fchdir)
76640+ return 1;
76641+
76642+ if (!proc_is_chrooted(current))
76643+ return 1;
76644+ else {
76645+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76646+ return 0;
76647+ }
76648+#endif
76649+ return 1;
76650+}
76651+
76652+int
76653+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76654+ const u64 shm_createtime)
76655+{
76656+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76657+ struct task_struct *p;
76658+
76659+ if (unlikely(!grsec_enable_chroot_shmat))
76660+ return 1;
76661+
76662+ if (likely(!proc_is_chrooted(current)))
76663+ return 1;
76664+
76665+ rcu_read_lock();
76666+ read_lock(&tasklist_lock);
76667+
76668+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76669+ if (time_before_eq64(p->start_time, shm_createtime)) {
76670+ if (have_same_root(current, p)) {
76671+ goto allow;
76672+ } else {
76673+ read_unlock(&tasklist_lock);
76674+ rcu_read_unlock();
76675+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76676+ return 0;
76677+ }
76678+ }
76679+ /* creator exited, pid reuse, fall through to next check */
76680+ }
76681+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76682+ if (unlikely(!have_same_root(current, p))) {
76683+ read_unlock(&tasklist_lock);
76684+ rcu_read_unlock();
76685+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76686+ return 0;
76687+ }
76688+ }
76689+
76690+allow:
76691+ read_unlock(&tasklist_lock);
76692+ rcu_read_unlock();
76693+#endif
76694+ return 1;
76695+}
76696+
76697+void
76698+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76699+{
76700+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76701+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76702+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76703+#endif
76704+ return;
76705+}
76706+
76707+int
76708+gr_handle_chroot_mknod(const struct dentry *dentry,
76709+ const struct vfsmount *mnt, const int mode)
76710+{
76711+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76712+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76713+ proc_is_chrooted(current)) {
76714+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76715+ return -EPERM;
76716+ }
76717+#endif
76718+ return 0;
76719+}
76720+
76721+int
76722+gr_handle_chroot_mount(const struct dentry *dentry,
76723+ const struct vfsmount *mnt, const char *dev_name)
76724+{
76725+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76726+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76727+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76728+ return -EPERM;
76729+ }
76730+#endif
76731+ return 0;
76732+}
76733+
76734+int
76735+gr_handle_chroot_pivot(void)
76736+{
76737+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76738+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76739+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76740+ return -EPERM;
76741+ }
76742+#endif
76743+ return 0;
76744+}
76745+
76746+int
76747+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76748+{
76749+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76750+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76751+ !gr_is_outside_chroot(dentry, mnt)) {
76752+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76753+ return -EPERM;
76754+ }
76755+#endif
76756+ return 0;
76757+}
76758+
76759+extern const char *captab_log[];
76760+extern int captab_log_entries;
76761+
76762+int
76763+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76764+{
76765+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76766+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76767+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76768+ if (cap_raised(chroot_caps, cap)) {
76769+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76770+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76771+ }
76772+ return 0;
76773+ }
76774+ }
76775+#endif
76776+ return 1;
76777+}
76778+
76779+int
76780+gr_chroot_is_capable(const int cap)
76781+{
76782+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76783+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76784+#endif
76785+ return 1;
76786+}
76787+
76788+int
76789+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76790+{
76791+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76792+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76793+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76794+ if (cap_raised(chroot_caps, cap)) {
76795+ return 0;
76796+ }
76797+ }
76798+#endif
76799+ return 1;
76800+}
76801+
76802+int
76803+gr_chroot_is_capable_nolog(const int cap)
76804+{
76805+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76806+ return gr_task_chroot_is_capable_nolog(current, cap);
76807+#endif
76808+ return 1;
76809+}
76810+
76811+int
76812+gr_handle_chroot_sysctl(const int op)
76813+{
76814+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76815+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76816+ proc_is_chrooted(current))
76817+ return -EACCES;
76818+#endif
76819+ return 0;
76820+}
76821+
76822+void
76823+gr_handle_chroot_chdir(const struct path *path)
76824+{
76825+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76826+ if (grsec_enable_chroot_chdir)
76827+ set_fs_pwd(current->fs, path);
76828+#endif
76829+ return;
76830+}
76831+
76832+int
76833+gr_handle_chroot_chmod(const struct dentry *dentry,
76834+ const struct vfsmount *mnt, const int mode)
76835+{
76836+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76837+ /* allow chmod +s on directories, but not files */
76838+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76839+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76840+ proc_is_chrooted(current)) {
76841+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76842+ return -EPERM;
76843+ }
76844+#endif
76845+ return 0;
76846+}
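
/*
 * Editor's note: a sketch (not from the patch) of the mode test in
 * gr_handle_chroot_chmod() above: setuid, or setgid combined with
 * group-exec (plain setgid without group-exec denotes mandatory locking,
 * not privilege), is what gets refused on non-directories in a chroot.
 */
#include <stdio.h>
#include <sys/stat.h>

static int dangerous_mode(int mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d%d%d\n", dangerous_mode(04755),	/* setuid: 1 */
	       dangerous_mode(02755),			/* setgid + group-exec: 1 */
	       dangerous_mode(02644));			/* setgid w/o exec (locking): 0 */
	return 0;
}
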
76847diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76848new file mode 100644
76849index 0000000..946f750
76850--- /dev/null
76851+++ b/grsecurity/grsec_disabled.c
76852@@ -0,0 +1,445 @@
76853+#include <linux/kernel.h>
76854+#include <linux/module.h>
76855+#include <linux/sched.h>
76856+#include <linux/file.h>
76857+#include <linux/fs.h>
76858+#include <linux/kdev_t.h>
76859+#include <linux/net.h>
76860+#include <linux/in.h>
76861+#include <linux/ip.h>
76862+#include <linux/skbuff.h>
76863+#include <linux/sysctl.h>
76864+
76865+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76866+void
76867+pax_set_initial_flags(struct linux_binprm *bprm)
76868+{
76869+ return;
76870+}
76871+#endif
76872+
76873+#ifdef CONFIG_SYSCTL
76874+__u32
76875+gr_handle_sysctl(const struct ctl_table * table, const int op)
76876+{
76877+ return 0;
76878+}
76879+#endif
76880+
76881+#ifdef CONFIG_TASKSTATS
76882+int gr_is_taskstats_denied(int pid)
76883+{
76884+ return 0;
76885+}
76886+#endif
76887+
76888+int
76889+gr_acl_is_enabled(void)
76890+{
76891+ return 0;
76892+}
76893+
76894+int
76895+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76896+{
76897+ return 0;
76898+}
76899+
76900+void
76901+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76902+{
76903+ return;
76904+}
76905+
76906+int
76907+gr_handle_rawio(const struct inode *inode)
76908+{
76909+ return 0;
76910+}
76911+
76912+void
76913+gr_acl_handle_psacct(struct task_struct *task, const long code)
76914+{
76915+ return;
76916+}
76917+
76918+int
76919+gr_handle_ptrace(struct task_struct *task, const long request)
76920+{
76921+ return 0;
76922+}
76923+
76924+int
76925+gr_handle_proc_ptrace(struct task_struct *task)
76926+{
76927+ return 0;
76928+}
76929+
76930+int
76931+gr_set_acls(const int type)
76932+{
76933+ return 0;
76934+}
76935+
76936+int
76937+gr_check_hidden_task(const struct task_struct *tsk)
76938+{
76939+ return 0;
76940+}
76941+
76942+int
76943+gr_check_protected_task(const struct task_struct *task)
76944+{
76945+ return 0;
76946+}
76947+
76948+int
76949+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76950+{
76951+ return 0;
76952+}
76953+
76954+void
76955+gr_copy_label(struct task_struct *tsk)
76956+{
76957+ return;
76958+}
76959+
76960+void
76961+gr_set_pax_flags(struct task_struct *task)
76962+{
76963+ return;
76964+}
76965+
76966+int
76967+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76968+ const int unsafe_share)
76969+{
76970+ return 0;
76971+}
76972+
76973+void
76974+gr_handle_delete(const u64 ino, const dev_t dev)
76975+{
76976+ return;
76977+}
76978+
76979+void
76980+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76981+{
76982+ return;
76983+}
76984+
76985+void
76986+gr_handle_crash(struct task_struct *task, const int sig)
76987+{
76988+ return;
76989+}
76990+
76991+int
76992+gr_check_crash_exec(const struct file *filp)
76993+{
76994+ return 0;
76995+}
76996+
76997+int
76998+gr_check_crash_uid(const kuid_t uid)
76999+{
77000+ return 0;
77001+}
77002+
77003+void
77004+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77005+ struct dentry *old_dentry,
77006+ struct dentry *new_dentry,
77007+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
77008+{
77009+ return;
77010+}
77011+
77012+int
77013+gr_search_socket(const int family, const int type, const int protocol)
77014+{
77015+ return 1;
77016+}
77017+
77018+int
77019+gr_search_connectbind(const int mode, const struct socket *sock,
77020+ const struct sockaddr_in *addr)
77021+{
77022+ return 0;
77023+}
77024+
77025+void
77026+gr_handle_alertkill(struct task_struct *task)
77027+{
77028+ return;
77029+}
77030+
77031+__u32
77032+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
77033+{
77034+ return 1;
77035+}
77036+
77037+__u32
77038+gr_acl_handle_hidden_file(const struct dentry * dentry,
77039+ const struct vfsmount * mnt)
77040+{
77041+ return 1;
77042+}
77043+
77044+__u32
77045+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
77046+ int acc_mode)
77047+{
77048+ return 1;
77049+}
77050+
77051+__u32
77052+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
77053+{
77054+ return 1;
77055+}
77056+
77057+__u32
77058+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
77059+{
77060+ return 1;
77061+}
77062+
77063+int
77064+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
77065+ unsigned int *vm_flags)
77066+{
77067+ return 1;
77068+}
77069+
77070+__u32
77071+gr_acl_handle_truncate(const struct dentry * dentry,
77072+ const struct vfsmount * mnt)
77073+{
77074+ return 1;
77075+}
77076+
77077+__u32
77078+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
77079+{
77080+ return 1;
77081+}
77082+
77083+__u32
77084+gr_acl_handle_access(const struct dentry * dentry,
77085+ const struct vfsmount * mnt, const int fmode)
77086+{
77087+ return 1;
77088+}
77089+
77090+__u32
77091+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
77092+ umode_t *mode)
77093+{
77094+ return 1;
77095+}
77096+
77097+__u32
77098+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
77099+{
77100+ return 1;
77101+}
77102+
77103+__u32
77104+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
77105+{
77106+ return 1;
77107+}
77108+
77109+__u32
77110+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
77111+{
77112+ return 1;
77113+}
77114+
77115+void
77116+grsecurity_init(void)
77117+{
77118+ return;
77119+}
77120+
77121+umode_t gr_acl_umask(void)
77122+{
77123+ return 0;
77124+}
77125+
77126+__u32
77127+gr_acl_handle_mknod(const struct dentry * new_dentry,
77128+ const struct dentry * parent_dentry,
77129+ const struct vfsmount * parent_mnt,
77130+ const int mode)
77131+{
77132+ return 1;
77133+}
77134+
77135+__u32
77136+gr_acl_handle_mkdir(const struct dentry * new_dentry,
77137+ const struct dentry * parent_dentry,
77138+ const struct vfsmount * parent_mnt)
77139+{
77140+ return 1;
77141+}
77142+
77143+__u32
77144+gr_acl_handle_symlink(const struct dentry * new_dentry,
77145+ const struct dentry * parent_dentry,
77146+ const struct vfsmount * parent_mnt, const struct filename *from)
77147+{
77148+ return 1;
77149+}
77150+
77151+__u32
77152+gr_acl_handle_link(const struct dentry * new_dentry,
77153+ const struct dentry * parent_dentry,
77154+ const struct vfsmount * parent_mnt,
77155+ const struct dentry * old_dentry,
77156+ const struct vfsmount * old_mnt, const struct filename *to)
77157+{
77158+ return 1;
77159+}
77160+
77161+int
77162+gr_acl_handle_rename(const struct dentry *new_dentry,
77163+ const struct dentry *parent_dentry,
77164+ const struct vfsmount *parent_mnt,
77165+ const struct dentry *old_dentry,
77166+ const struct inode *old_parent_inode,
77167+ const struct vfsmount *old_mnt, const struct filename *newname,
77168+ unsigned int flags)
77169+{
77170+ return 0;
77171+}
77172+
77173+int
77174+gr_acl_handle_filldir(const struct file *file, const char *name,
77175+ const int namelen, const u64 ino)
77176+{
77177+ return 1;
77178+}
77179+
77180+int
77181+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77182+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
77183+{
77184+ return 1;
77185+}
77186+
77187+int
77188+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
77189+{
77190+ return 0;
77191+}
77192+
77193+int
77194+gr_search_accept(const struct socket *sock)
77195+{
77196+ return 0;
77197+}
77198+
77199+int
77200+gr_search_listen(const struct socket *sock)
77201+{
77202+ return 0;
77203+}
77204+
77205+int
77206+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
77207+{
77208+ return 0;
77209+}
77210+
77211+__u32
77212+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
77213+{
77214+ return 1;
77215+}
77216+
77217+__u32
77218+gr_acl_handle_creat(const struct dentry * dentry,
77219+ const struct dentry * p_dentry,
77220+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
77221+ const int imode)
77222+{
77223+ return 1;
77224+}
77225+
77226+void
77227+gr_acl_handle_exit(void)
77228+{
77229+ return;
77230+}
77231+
77232+int
77233+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
77234+{
77235+ return 1;
77236+}
77237+
77238+void
77239+gr_set_role_label(const kuid_t uid, const kgid_t gid)
77240+{
77241+ return;
77242+}
77243+
77244+int
77245+gr_acl_handle_procpidmem(const struct task_struct *task)
77246+{
77247+ return 0;
77248+}
77249+
77250+int
77251+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
77252+{
77253+ return 0;
77254+}
77255+
77256+int
77257+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
77258+{
77259+ return 0;
77260+}
77261+
77262+int
77263+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
77264+{
77265+ return 0;
77266+}
77267+
77268+int
77269+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
77270+{
77271+ return 0;
77272+}
77273+
77274+int gr_acl_enable_at_secure(void)
77275+{
77276+ return 0;
77277+}
77278+
77279+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77280+{
77281+ return dentry->d_sb->s_dev;
77282+}
77283+
77284+u64 gr_get_ino_from_dentry(struct dentry *dentry)
77285+{
77286+ return dentry->d_inode->i_ino;
77287+}
77288+
77289+void gr_put_exec_file(struct task_struct *task)
77290+{
77291+ return;
77292+}
77293+
77294+#ifdef CONFIG_SECURITY
77295+EXPORT_SYMBOL_GPL(gr_check_user_change);
77296+EXPORT_SYMBOL_GPL(gr_check_group_change);
77297+#endif
77298diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
77299new file mode 100644
77300index 0000000..fb7531e
77301--- /dev/null
77302+++ b/grsecurity/grsec_exec.c
77303@@ -0,0 +1,189 @@
77304+#include <linux/kernel.h>
77305+#include <linux/sched.h>
77306+#include <linux/file.h>
77307+#include <linux/binfmts.h>
77308+#include <linux/fs.h>
77309+#include <linux/types.h>
77310+#include <linux/grdefs.h>
77311+#include <linux/grsecurity.h>
77312+#include <linux/grinternal.h>
77313+#include <linux/capability.h>
77314+#include <linux/module.h>
77315+#include <linux/compat.h>
77316+
77317+#include <asm/uaccess.h>
77318+
77319+#ifdef CONFIG_GRKERNSEC_EXECLOG
77320+static char gr_exec_arg_buf[132];
77321+static DEFINE_MUTEX(gr_exec_arg_mutex);
77322+#endif
77323+
77324+struct user_arg_ptr {
77325+#ifdef CONFIG_COMPAT
77326+ bool is_compat;
77327+#endif
77328+ union {
77329+ const char __user *const __user *native;
77330+#ifdef CONFIG_COMPAT
77331+ const compat_uptr_t __user *compat;
77332+#endif
77333+ } ptr;
77334+};
77335+
77336+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
77337+
77338+void
77339+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
77340+{
77341+#ifdef CONFIG_GRKERNSEC_EXECLOG
77342+ char *grarg = gr_exec_arg_buf;
77343+ unsigned int i, x, execlen = 0;
77344+ char c;
77345+
77346+ if (!((grsec_enable_execlog && grsec_enable_group &&
77347+ in_group_p(grsec_audit_gid))
77348+ || (grsec_enable_execlog && !grsec_enable_group)))
77349+ return;
77350+
77351+ mutex_lock(&gr_exec_arg_mutex);
77352+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
77353+
77354+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
77355+ const char __user *p;
77356+ unsigned int len;
77357+
77358+ p = get_user_arg_ptr(argv, i);
77359+ if (IS_ERR(p))
77360+ goto log;
77361+
77362+ len = strnlen_user(p, 128 - execlen);
77363+ if (len > 128 - execlen)
77364+ len = 128 - execlen;
77365+ else if (len > 0)
77366+ len--;
77367+ if (copy_from_user(grarg + execlen, p, len))
77368+ goto log;
77369+
77370+ /* rewrite unprintable characters */
77371+ for (x = 0; x < len; x++) {
77372+ c = *(grarg + execlen + x);
77373+ if (c < 32 || c > 126)
77374+ *(grarg + execlen + x) = ' ';
77375+ }
77376+
77377+ execlen += len;
77378+ *(grarg + execlen) = ' ';
77379+ *(grarg + execlen + 1) = '\0';
77380+ execlen++;
77381+ }
77382+
77383+ log:
77384+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
77385+ bprm->file->f_path.mnt, grarg);
77386+ mutex_unlock(&gr_exec_arg_mutex);
77387+#endif
77388+ return;
77389+}
77390+
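/*
 * Editor's note: a userspace sketch (not from the patch) of the argument
 * sanitizer above: arguments are packed space-separated into a roughly
 * 128-byte buffer and non-printable bytes are rewritten before logging.
 */
#include <stdio.h>
#include <string.h>

#define LOGCAP 128

static void build_exec_log(char *out, int argc, char **argv)
{
	unsigned int execlen = 0, i, x;

	out[0] = '\0';
	for (i = 0; i < (unsigned int)argc && execlen < LOGCAP; i++) {
		unsigned int len = strlen(argv[i]);

		if (len > LOGCAP - execlen)
			len = LOGCAP - execlen;
		memcpy(out + execlen, argv[i], len);
		for (x = 0; x < len; x++)	/* rewrite unprintable characters */
			if (out[execlen + x] < 32 || out[execlen + x] > 126)
				out[execlen + x] = ' ';
		execlen += len;
		out[execlen++] = ' ';
		out[execlen] = '\0';
	}
}

int main(void)
{
	char buf[LOGCAP + 2];
	char *argv[] = { "cat", "/etc/\tpasswd" };

	build_exec_log(buf, 2, argv);
	printf("%s\n", buf);	/* "cat /etc/ passwd " */
	return 0;
}
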
77391+#ifdef CONFIG_GRKERNSEC
77392+extern int gr_acl_is_capable(const int cap);
77393+extern int gr_acl_is_capable_nolog(const int cap);
77394+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77395+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
77396+extern int gr_chroot_is_capable(const int cap);
77397+extern int gr_chroot_is_capable_nolog(const int cap);
77398+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77399+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77400+#endif
77401+
77402+const char *captab_log[] = {
77403+ "CAP_CHOWN",
77404+ "CAP_DAC_OVERRIDE",
77405+ "CAP_DAC_READ_SEARCH",
77406+ "CAP_FOWNER",
77407+ "CAP_FSETID",
77408+ "CAP_KILL",
77409+ "CAP_SETGID",
77410+ "CAP_SETUID",
77411+ "CAP_SETPCAP",
77412+ "CAP_LINUX_IMMUTABLE",
77413+ "CAP_NET_BIND_SERVICE",
77414+ "CAP_NET_BROADCAST",
77415+ "CAP_NET_ADMIN",
77416+ "CAP_NET_RAW",
77417+ "CAP_IPC_LOCK",
77418+ "CAP_IPC_OWNER",
77419+ "CAP_SYS_MODULE",
77420+ "CAP_SYS_RAWIO",
77421+ "CAP_SYS_CHROOT",
77422+ "CAP_SYS_PTRACE",
77423+ "CAP_SYS_PACCT",
77424+ "CAP_SYS_ADMIN",
77425+ "CAP_SYS_BOOT",
77426+ "CAP_SYS_NICE",
77427+ "CAP_SYS_RESOURCE",
77428+ "CAP_SYS_TIME",
77429+ "CAP_SYS_TTY_CONFIG",
77430+ "CAP_MKNOD",
77431+ "CAP_LEASE",
77432+ "CAP_AUDIT_WRITE",
77433+ "CAP_AUDIT_CONTROL",
77434+ "CAP_SETFCAP",
77435+ "CAP_MAC_OVERRIDE",
77436+ "CAP_MAC_ADMIN",
77437+ "CAP_SYSLOG",
77438+ "CAP_WAKE_ALARM",
77439+ "CAP_BLOCK_SUSPEND",
77440+ "CAP_AUDIT_READ"
77441+};
77442+
77443+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
77444+
77445+int gr_is_capable(const int cap)
77446+{
77447+#ifdef CONFIG_GRKERNSEC
77448+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77449+ return 1;
77450+ return 0;
77451+#else
77452+ return 1;
77453+#endif
77454+}
77455+
77456+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77457+{
77458+#ifdef CONFIG_GRKERNSEC
77459+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77460+ return 1;
77461+ return 0;
77462+#else
77463+ return 1;
77464+#endif
77465+}
77466+
77467+int gr_is_capable_nolog(const int cap)
77468+{
77469+#ifdef CONFIG_GRKERNSEC
77470+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77471+ return 1;
77472+ return 0;
77473+#else
77474+ return 1;
77475+#endif
77476+}
77477+
77478+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77479+{
77480+#ifdef CONFIG_GRKERNSEC
77481+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77482+ return 1;
77483+ return 0;
77484+#else
77485+ return 1;
77486+#endif
77487+}
77488+
77489+EXPORT_SYMBOL_GPL(gr_is_capable);
77490+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77491+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77492+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
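gr_is_capable() and its variants AND-compose two independent policy checks (the RBAC ACL check and the chroot check), and captab_log maps a capability number to its name, bounds-checked against captab_log_entries before logging. A minimal userspace model of both patterns; the two stub policies are hypothetical stand-ins for the real checks:

#include <stdio.h>

static const char *captab_log[] = {
	"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH"
};
static const int captab_log_entries = sizeof(captab_log) / sizeof(captab_log[0]);

static int acl_is_capable(int cap)    { return cap != 1; }	/* stub: deny CAP_DAC_OVERRIDE */
static int chroot_is_capable(int cap) { (void)cap; return 1; }	/* stub: allow all */

static int is_capable(int cap)
{
	/* both policies must agree, exactly as in gr_is_capable() */
	return acl_is_capable(cap) && chroot_is_capable(cap);
}

int main(void)
{
	for (int cap = 0; cap < captab_log_entries; cap++)
		printf("%s: %s\n", captab_log[cap],
		       is_capable(cap) ? "granted" : "denied");
	return 0;
}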
77493diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77494new file mode 100644
77495index 0000000..06cc6ea
77496--- /dev/null
77497+++ b/grsecurity/grsec_fifo.c
77498@@ -0,0 +1,24 @@
77499+#include <linux/kernel.h>
77500+#include <linux/sched.h>
77501+#include <linux/fs.h>
77502+#include <linux/file.h>
77503+#include <linux/grinternal.h>
77504+
77505+int
77506+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77507+ const struct dentry *dir, const int flag, const int acc_mode)
77508+{
77509+#ifdef CONFIG_GRKERNSEC_FIFO
77510+ const struct cred *cred = current_cred();
77511+
77512+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77513+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77514+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77515+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77516+ if (!inode_permission(dentry->d_inode, acc_mode))
77517+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77518+ return -EACCES;
77519+ }
77520+#endif
77521+ return 0;
77522+}
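gr_handle_fifo() refuses to open a FIFO sitting in a sticky directory (e.g. /tmp) when the FIFO is owned by neither the directory owner nor the opening user — the classic planted-FIFO attack shape. A userspace model of the predicate, with made-up uid values:

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

static int fifo_denied(mode_t fifo_mode, uid_t fifo_uid,
		       mode_t dir_mode, uid_t dir_uid,
		       uid_t fsuid, int o_excl)
{
	return S_ISFIFO(fifo_mode) && !o_excl &&
	       (dir_mode & S_ISVTX) &&	/* sticky directory */
	       fifo_uid != dir_uid &&
	       fsuid != fifo_uid;
}

int main(void)
{
	/* attacker (uid 1001) planted a FIFO in root-owned sticky /tmp;
	   victim (uid 1000) tries to open it */
	printf("open denied: %d\n",
	       fifo_denied(S_IFIFO | 0666, 1001, S_IFDIR | 01777, 0, 1000, 0));
	return 0;
}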
77523diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77524new file mode 100644
77525index 0000000..8ca18bf
77526--- /dev/null
77527+++ b/grsecurity/grsec_fork.c
77528@@ -0,0 +1,23 @@
77529+#include <linux/kernel.h>
77530+#include <linux/sched.h>
77531+#include <linux/grsecurity.h>
77532+#include <linux/grinternal.h>
77533+#include <linux/errno.h>
77534+
77535+void
77536+gr_log_forkfail(const int retval)
77537+{
77538+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77539+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77540+ switch (retval) {
77541+ case -EAGAIN:
77542+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77543+ break;
77544+ case -ENOMEM:
77545+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77546+ break;
77547+ }
77548+ }
77549+#endif
77550+ return;
77551+}
77552diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77553new file mode 100644
77554index 0000000..4ed9e7d
77555--- /dev/null
77556+++ b/grsecurity/grsec_init.c
77557@@ -0,0 +1,290 @@
77558+#include <linux/kernel.h>
77559+#include <linux/sched.h>
77560+#include <linux/mm.h>
77561+#include <linux/gracl.h>
77562+#include <linux/slab.h>
77563+#include <linux/vmalloc.h>
77564+#include <linux/percpu.h>
77565+#include <linux/module.h>
77566+
77567+int grsec_enable_ptrace_readexec;
77568+int grsec_enable_setxid;
77569+int grsec_enable_symlinkown;
77570+kgid_t grsec_symlinkown_gid;
77571+int grsec_enable_brute;
77572+int grsec_enable_link;
77573+int grsec_enable_dmesg;
77574+int grsec_enable_harden_ptrace;
77575+int grsec_enable_harden_ipc;
77576+int grsec_enable_fifo;
77577+int grsec_enable_execlog;
77578+int grsec_enable_signal;
77579+int grsec_enable_forkfail;
77580+int grsec_enable_audit_ptrace;
77581+int grsec_enable_time;
77582+int grsec_enable_group;
77583+kgid_t grsec_audit_gid;
77584+int grsec_enable_chdir;
77585+int grsec_enable_mount;
77586+int grsec_enable_rofs;
77587+int grsec_deny_new_usb;
77588+int grsec_enable_chroot_findtask;
77589+int grsec_enable_chroot_mount;
77590+int grsec_enable_chroot_shmat;
77591+int grsec_enable_chroot_fchdir;
77592+int grsec_enable_chroot_double;
77593+int grsec_enable_chroot_pivot;
77594+int grsec_enable_chroot_chdir;
77595+int grsec_enable_chroot_chmod;
77596+int grsec_enable_chroot_mknod;
77597+int grsec_enable_chroot_nice;
77598+int grsec_enable_chroot_execlog;
77599+int grsec_enable_chroot_caps;
77600+int grsec_enable_chroot_rename;
77601+int grsec_enable_chroot_sysctl;
77602+int grsec_enable_chroot_unix;
77603+int grsec_enable_tpe;
77604+kgid_t grsec_tpe_gid;
77605+int grsec_enable_blackhole;
77606+#ifdef CONFIG_IPV6_MODULE
77607+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77608+#endif
77609+int grsec_lastack_retries;
77610+int grsec_enable_tpe_all;
77611+int grsec_enable_tpe_invert;
77612+int grsec_enable_socket_all;
77613+kgid_t grsec_socket_all_gid;
77614+int grsec_enable_socket_client;
77615+kgid_t grsec_socket_client_gid;
77616+int grsec_enable_socket_server;
77617+kgid_t grsec_socket_server_gid;
77618+int grsec_resource_logging;
77619+int grsec_disable_privio;
77620+int grsec_enable_log_rwxmaps;
77621+int grsec_lock;
77622+
77623+DEFINE_SPINLOCK(grsec_alert_lock);
77624+unsigned long grsec_alert_wtime = 0;
77625+unsigned long grsec_alert_fyet = 0;
77626+
77627+DEFINE_SPINLOCK(grsec_audit_lock);
77628+
77629+DEFINE_RWLOCK(grsec_exec_file_lock);
77630+
77631+char *gr_shared_page[4];
77632+
77633+char *gr_alert_log_fmt;
77634+char *gr_audit_log_fmt;
77635+char *gr_alert_log_buf;
77636+char *gr_audit_log_buf;
77637+
77638+extern struct gr_arg *gr_usermode;
77639+extern unsigned char *gr_system_salt;
77640+extern unsigned char *gr_system_sum;
77641+
77642+void __init
77643+grsecurity_init(void)
77644+{
77645+ int j;
77646+ /* create the per-cpu shared pages */
77647+
77648+#ifdef CONFIG_X86
77649+	memset((char *)(0x41a + PAGE_OFFSET), 0, 36);	/* wipe the BIOS keyboard buffer in the BDA */
77650+#endif
77651+
77652+ for (j = 0; j < 4; j++) {
77653+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77654+ if (gr_shared_page[j] == NULL) {
77655+ panic("Unable to allocate grsecurity shared page");
77656+ return;
77657+ }
77658+ }
77659+
77660+ /* allocate log buffers */
77661+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77662+ if (!gr_alert_log_fmt) {
77663+ panic("Unable to allocate grsecurity alert log format buffer");
77664+ return;
77665+ }
77666+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77667+ if (!gr_audit_log_fmt) {
77668+ panic("Unable to allocate grsecurity audit log format buffer");
77669+ return;
77670+ }
77671+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77672+ if (!gr_alert_log_buf) {
77673+ panic("Unable to allocate grsecurity alert log buffer");
77674+ return;
77675+ }
77676+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77677+ if (!gr_audit_log_buf) {
77678+ panic("Unable to allocate grsecurity audit log buffer");
77679+ return;
77680+ }
77681+
77682+ /* allocate memory for authentication structure */
77683+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77684+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77685+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77686+
77687+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77688+ panic("Unable to allocate grsecurity authentication structure");
77689+ return;
77690+ }
77691+
77692+#ifdef CONFIG_GRKERNSEC_IO
77693+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77694+ grsec_disable_privio = 1;
77695+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77696+ grsec_disable_privio = 1;
77697+#else
77698+ grsec_disable_privio = 0;
77699+#endif
77700+#endif
77701+
77702+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77703+ /* for backward compatibility, tpe_invert always defaults to on if
77704+ enabled in the kernel
77705+ */
77706+ grsec_enable_tpe_invert = 1;
77707+#endif
77708+
77709+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77710+#ifndef CONFIG_GRKERNSEC_SYSCTL
77711+ grsec_lock = 1;
77712+#endif
77713+
77714+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77715+ grsec_enable_log_rwxmaps = 1;
77716+#endif
77717+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77718+ grsec_enable_group = 1;
77719+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77720+#endif
77721+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77722+ grsec_enable_ptrace_readexec = 1;
77723+#endif
77724+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77725+ grsec_enable_chdir = 1;
77726+#endif
77727+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77728+ grsec_enable_harden_ptrace = 1;
77729+#endif
77730+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77731+ grsec_enable_harden_ipc = 1;
77732+#endif
77733+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77734+ grsec_enable_mount = 1;
77735+#endif
77736+#ifdef CONFIG_GRKERNSEC_LINK
77737+ grsec_enable_link = 1;
77738+#endif
77739+#ifdef CONFIG_GRKERNSEC_BRUTE
77740+ grsec_enable_brute = 1;
77741+#endif
77742+#ifdef CONFIG_GRKERNSEC_DMESG
77743+ grsec_enable_dmesg = 1;
77744+#endif
77745+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77746+ grsec_enable_blackhole = 1;
77747+ grsec_lastack_retries = 4;
77748+#endif
77749+#ifdef CONFIG_GRKERNSEC_FIFO
77750+ grsec_enable_fifo = 1;
77751+#endif
77752+#ifdef CONFIG_GRKERNSEC_EXECLOG
77753+ grsec_enable_execlog = 1;
77754+#endif
77755+#ifdef CONFIG_GRKERNSEC_SETXID
77756+ grsec_enable_setxid = 1;
77757+#endif
77758+#ifdef CONFIG_GRKERNSEC_SIGNAL
77759+ grsec_enable_signal = 1;
77760+#endif
77761+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77762+ grsec_enable_forkfail = 1;
77763+#endif
77764+#ifdef CONFIG_GRKERNSEC_TIME
77765+ grsec_enable_time = 1;
77766+#endif
77767+#ifdef CONFIG_GRKERNSEC_RESLOG
77768+ grsec_resource_logging = 1;
77769+#endif
77770+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77771+ grsec_enable_chroot_findtask = 1;
77772+#endif
77773+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77774+ grsec_enable_chroot_unix = 1;
77775+#endif
77776+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77777+ grsec_enable_chroot_mount = 1;
77778+#endif
77779+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77780+ grsec_enable_chroot_fchdir = 1;
77781+#endif
77782+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77783+ grsec_enable_chroot_shmat = 1;
77784+#endif
77785+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77786+ grsec_enable_audit_ptrace = 1;
77787+#endif
77788+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77789+ grsec_enable_chroot_double = 1;
77790+#endif
77791+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77792+ grsec_enable_chroot_pivot = 1;
77793+#endif
77794+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77795+ grsec_enable_chroot_chdir = 1;
77796+#endif
77797+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77798+ grsec_enable_chroot_chmod = 1;
77799+#endif
77800+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77801+ grsec_enable_chroot_mknod = 1;
77802+#endif
77803+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77804+ grsec_enable_chroot_nice = 1;
77805+#endif
77806+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77807+ grsec_enable_chroot_execlog = 1;
77808+#endif
77809+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77810+ grsec_enable_chroot_caps = 1;
77811+#endif
77812+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77813+ grsec_enable_chroot_rename = 1;
77814+#endif
77815+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77816+ grsec_enable_chroot_sysctl = 1;
77817+#endif
77818+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77819+ grsec_enable_symlinkown = 1;
77820+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77821+#endif
77822+#ifdef CONFIG_GRKERNSEC_TPE
77823+ grsec_enable_tpe = 1;
77824+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77825+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77826+ grsec_enable_tpe_all = 1;
77827+#endif
77828+#endif
77829+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77830+ grsec_enable_socket_all = 1;
77831+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77832+#endif
77833+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77834+ grsec_enable_socket_client = 1;
77835+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77836+#endif
77837+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77838+ grsec_enable_socket_server = 1;
77839+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77840+#endif
77841+#endif
77842+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77843+ grsec_deny_new_usb = 1;
77844+#endif
77845+
77846+ return;
77847+}
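The long block above is the compile-time defaulting pattern: each toggle defaults on at boot either when no sysctl interface is compiled in (grsec_lock = 1 then makes the policy immutable) or when the SYSCTL_ON variant is selected. A minimal sketch of the same pattern, using hypothetical FEATURE_* macros in place of the CONFIG_GRKERNSEC_* options:

#include <stdio.h>

#define FEATURE_SYSCTL     1	/* sysctl interface compiled in */
#define FEATURE_SYSCTL_ON  1	/* ... and toggles default to on */
#define FEATURE_DMESG      1

static int enable_dmesg;
static int lock;

static void feature_init(void)
{
#if !defined(FEATURE_SYSCTL) || defined(FEATURE_SYSCTL_ON)
#ifndef FEATURE_SYSCTL
	lock = 1;	/* no runtime interface: lock the policy */
#endif
#ifdef FEATURE_DMESG
	enable_dmesg = 1;
#endif
#endif
}

int main(void)
{
	feature_init();
	printf("dmesg restriction: %d, locked: %d\n", enable_dmesg, lock);
	return 0;
}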
77848diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77849new file mode 100644
77850index 0000000..1773300
77851--- /dev/null
77852+++ b/grsecurity/grsec_ipc.c
77853@@ -0,0 +1,48 @@
77854+#include <linux/kernel.h>
77855+#include <linux/mm.h>
77856+#include <linux/sched.h>
77857+#include <linux/file.h>
77858+#include <linux/ipc.h>
77859+#include <linux/ipc_namespace.h>
77860+#include <linux/grsecurity.h>
77861+#include <linux/grinternal.h>
77862+
77863+int
77864+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77865+{
77866+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77867+ int write;
77868+ int orig_granted_mode;
77869+ kuid_t euid;
77870+ kgid_t egid;
77871+
77872+ if (!grsec_enable_harden_ipc)
77873+ return 1;
77874+
77875+ euid = current_euid();
77876+ egid = current_egid();
77877+
77878+ write = requested_mode & 00002;
77879+ orig_granted_mode = ipcp->mode;
77880+
77881+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77882+ orig_granted_mode >>= 6;
77883+ else {
77884+ /* if likely wrong permissions, lock to user */
77885+ if (orig_granted_mode & 0007)
77886+ orig_granted_mode = 0;
77887+		/* otherwise do an egid-only check */
77888+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77889+ orig_granted_mode >>= 3;
77890+ /* otherwise, no access */
77891+ else
77892+ orig_granted_mode = 0;
77893+ }
77894+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77895+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77896+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77897+ return 0;
77898+ }
77899+#endif
77900+ return 1;
77901+}
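gr_ipc_permitted() selects the rwx permission class by shifting: mode >> 6 for the owner, mode >> 3 for the group, with the hardening twist that any "other" bits on the object force a non-owner's effective grant to zero. A worked userspace example of that selection (illustrative only):

#include <stdio.h>

static int effective_bits(int mode, int is_owner, int in_group)
{
	if (is_owner)
		return (mode >> 6) & 0007;
	if (mode & 0007)	/* likely-wrong perms: lock to the owner */
		return 0;
	if (in_group)
		return (mode >> 3) & 0007;
	return 0;
}

int main(void)
{
	/* a 0660 segment keeps group access; a 0666 segment loses it */
	printf("0660 as group member -> %o\n", effective_bits(0660, 0, 1));
	printf("0666 as group member -> %o\n", effective_bits(0666, 0, 1));
	return 0;
}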
77902diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77903new file mode 100644
77904index 0000000..5e05e20
77905--- /dev/null
77906+++ b/grsecurity/grsec_link.c
77907@@ -0,0 +1,58 @@
77908+#include <linux/kernel.h>
77909+#include <linux/sched.h>
77910+#include <linux/fs.h>
77911+#include <linux/file.h>
77912+#include <linux/grinternal.h>
77913+
77914+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77915+{
77916+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77917+ const struct inode *link_inode = link->dentry->d_inode;
77918+
77919+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77920+ /* ignore root-owned links, e.g. /proc/self */
77921+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77922+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77923+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77924+ return 1;
77925+ }
77926+#endif
77927+ return 0;
77928+}
77929+
77930+int
77931+gr_handle_follow_link(const struct inode *parent,
77932+ const struct inode *inode,
77933+ const struct dentry *dentry, const struct vfsmount *mnt)
77934+{
77935+#ifdef CONFIG_GRKERNSEC_LINK
77936+ const struct cred *cred = current_cred();
77937+
77938+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77939+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77940+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77941+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77942+ return -EACCES;
77943+ }
77944+#endif
77945+ return 0;
77946+}
77947+
77948+int
77949+gr_handle_hardlink(const struct dentry *dentry,
77950+ const struct vfsmount *mnt,
77951+ struct inode *inode, const int mode, const struct filename *to)
77952+{
77953+#ifdef CONFIG_GRKERNSEC_LINK
77954+ const struct cred *cred = current_cred();
77955+
77956+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77957+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77958+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77959+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77960+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77961+ return -EPERM;
77962+ }
77963+#endif
77964+ return 0;
77965+}
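gr_handle_follow_link() refuses to follow a symlink found in a sticky, world-writable directory when the link is owned by neither the directory owner nor the user doing the walk — the classic /tmp symlink-race shape. A userspace model of the predicate, with illustrative uids:

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

static int follow_denied(mode_t dir_mode, uid_t dir_uid,
			 mode_t link_mode, uid_t link_uid, uid_t fsuid)
{
	return S_ISLNK(link_mode) &&
	       (dir_mode & S_ISVTX) && dir_uid != link_uid &&
	       (dir_mode & S_IWOTH) && fsuid != link_uid;
}

int main(void)
{
	/* attacker (uid 1001) plants a symlink in root-owned /tmp (01777);
	   root (fsuid 0) walking through it would get -EACCES */
	printf("denied: %d\n",
	       follow_denied(S_IFDIR | 01777, 0, S_IFLNK | 0777, 1001, 0));
	return 0;
}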
77966diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77967new file mode 100644
77968index 0000000..dbe0a6b
77969--- /dev/null
77970+++ b/grsecurity/grsec_log.c
77971@@ -0,0 +1,341 @@
77972+#include <linux/kernel.h>
77973+#include <linux/sched.h>
77974+#include <linux/file.h>
77975+#include <linux/tty.h>
77976+#include <linux/fs.h>
77977+#include <linux/mm.h>
77978+#include <linux/grinternal.h>
77979+
77980+#ifdef CONFIG_TREE_PREEMPT_RCU
77981+#define DISABLE_PREEMPT() preempt_disable()
77982+#define ENABLE_PREEMPT() preempt_enable()
77983+#else
77984+#define DISABLE_PREEMPT()
77985+#define ENABLE_PREEMPT()
77986+#endif
77987+
77988+#define BEGIN_LOCKS(x) \
77989+ DISABLE_PREEMPT(); \
77990+ rcu_read_lock(); \
77991+ read_lock(&tasklist_lock); \
77992+ read_lock(&grsec_exec_file_lock); \
77993+ if (x != GR_DO_AUDIT) \
77994+ spin_lock(&grsec_alert_lock); \
77995+ else \
77996+ spin_lock(&grsec_audit_lock)
77997+
77998+#define END_LOCKS(x) \
77999+ if (x != GR_DO_AUDIT) \
78000+ spin_unlock(&grsec_alert_lock); \
78001+ else \
78002+ spin_unlock(&grsec_audit_lock); \
78003+ read_unlock(&grsec_exec_file_lock); \
78004+ read_unlock(&tasklist_lock); \
78005+ rcu_read_unlock(); \
78006+ ENABLE_PREEMPT(); \
78007+ if (x == GR_DONT_AUDIT) \
78008+ gr_handle_alertkill(current)
78009+
78010+enum {
78011+ FLOODING,
78012+ NO_FLOODING
78013+};
78014+
78015+extern char *gr_alert_log_fmt;
78016+extern char *gr_audit_log_fmt;
78017+extern char *gr_alert_log_buf;
78018+extern char *gr_audit_log_buf;
78019+
78020+static int gr_log_start(int audit)
78021+{
78022+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
78023+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
78024+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78025+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
78026+ unsigned long curr_secs = get_seconds();
78027+
78028+ if (audit == GR_DO_AUDIT)
78029+ goto set_fmt;
78030+
78031+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
78032+ grsec_alert_wtime = curr_secs;
78033+ grsec_alert_fyet = 0;
78034+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
78035+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
78036+ grsec_alert_fyet++;
78037+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
78038+ grsec_alert_wtime = curr_secs;
78039+ grsec_alert_fyet++;
78040+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
78041+ return FLOODING;
78042+	} else
78043+		return FLOODING;
78044+
78045+set_fmt:
78046+#endif
78047+ memset(buf, 0, PAGE_SIZE);
78048+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
78049+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
78050+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78051+ } else if (current->signal->curr_ip) {
78052+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
78053+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
78054+ } else if (gr_acl_is_enabled()) {
78055+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
78056+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78057+ } else {
78058+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
78059+ strcpy(buf, fmt);
78060+ }
78061+
78062+ return NO_FLOODING;
78063+}
78064+
78065+static void gr_log_middle(int audit, const char *msg, va_list ap)
78066+ __attribute__ ((format (printf, 2, 0)));
78067+
78068+static void gr_log_middle(int audit, const char *msg, va_list ap)
78069+{
78070+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78071+ unsigned int len = strlen(buf);
78072+
78073+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78074+
78075+ return;
78076+}
78077+
78078+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78079+ __attribute__ ((format (printf, 2, 3)));
78080+
78081+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78082+{
78083+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78084+ unsigned int len = strlen(buf);
78085+ va_list ap;
78086+
78087+ va_start(ap, msg);
78088+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78089+ va_end(ap);
78090+
78091+ return;
78092+}
78093+
78094+static void gr_log_end(int audit, int append_default)
78095+{
78096+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78097+ if (append_default) {
78098+ struct task_struct *task = current;
78099+ struct task_struct *parent = task->real_parent;
78100+ const struct cred *cred = __task_cred(task);
78101+ const struct cred *pcred = __task_cred(parent);
78102+ unsigned int len = strlen(buf);
78103+
78104+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78105+ }
78106+
78107+ printk("%s\n", buf);
78108+
78109+ return;
78110+}
78111+
78112+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
78113+{
78114+ int logtype;
78115+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
78116+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
78117+ void *voidptr = NULL;
78118+ int num1 = 0, num2 = 0;
78119+ unsigned long ulong1 = 0, ulong2 = 0;
78120+ struct dentry *dentry = NULL;
78121+ struct vfsmount *mnt = NULL;
78122+ struct file *file = NULL;
78123+ struct task_struct *task = NULL;
78124+ struct vm_area_struct *vma = NULL;
78125+ const struct cred *cred, *pcred;
78126+ va_list ap;
78127+
78128+ BEGIN_LOCKS(audit);
78129+ logtype = gr_log_start(audit);
78130+ if (logtype == FLOODING) {
78131+ END_LOCKS(audit);
78132+ return;
78133+ }
78134+ va_start(ap, argtypes);
78135+ switch (argtypes) {
78136+ case GR_TTYSNIFF:
78137+ task = va_arg(ap, struct task_struct *);
78138+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
78139+ break;
78140+ case GR_SYSCTL_HIDDEN:
78141+ str1 = va_arg(ap, char *);
78142+ gr_log_middle_varargs(audit, msg, result, str1);
78143+ break;
78144+ case GR_RBAC:
78145+ dentry = va_arg(ap, struct dentry *);
78146+ mnt = va_arg(ap, struct vfsmount *);
78147+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
78148+ break;
78149+ case GR_RBAC_STR:
78150+ dentry = va_arg(ap, struct dentry *);
78151+ mnt = va_arg(ap, struct vfsmount *);
78152+ str1 = va_arg(ap, char *);
78153+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
78154+ break;
78155+ case GR_STR_RBAC:
78156+ str1 = va_arg(ap, char *);
78157+ dentry = va_arg(ap, struct dentry *);
78158+ mnt = va_arg(ap, struct vfsmount *);
78159+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
78160+ break;
78161+ case GR_RBAC_MODE2:
78162+ dentry = va_arg(ap, struct dentry *);
78163+ mnt = va_arg(ap, struct vfsmount *);
78164+ str1 = va_arg(ap, char *);
78165+ str2 = va_arg(ap, char *);
78166+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
78167+ break;
78168+ case GR_RBAC_MODE3:
78169+ dentry = va_arg(ap, struct dentry *);
78170+ mnt = va_arg(ap, struct vfsmount *);
78171+ str1 = va_arg(ap, char *);
78172+ str2 = va_arg(ap, char *);
78173+ str3 = va_arg(ap, char *);
78174+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
78175+ break;
78176+ case GR_FILENAME:
78177+ dentry = va_arg(ap, struct dentry *);
78178+ mnt = va_arg(ap, struct vfsmount *);
78179+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
78180+ break;
78181+ case GR_STR_FILENAME:
78182+ str1 = va_arg(ap, char *);
78183+ dentry = va_arg(ap, struct dentry *);
78184+ mnt = va_arg(ap, struct vfsmount *);
78185+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
78186+ break;
78187+ case GR_FILENAME_STR:
78188+ dentry = va_arg(ap, struct dentry *);
78189+ mnt = va_arg(ap, struct vfsmount *);
78190+ str1 = va_arg(ap, char *);
78191+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
78192+ break;
78193+ case GR_FILENAME_TWO_INT:
78194+ dentry = va_arg(ap, struct dentry *);
78195+ mnt = va_arg(ap, struct vfsmount *);
78196+ num1 = va_arg(ap, int);
78197+ num2 = va_arg(ap, int);
78198+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
78199+ break;
78200+ case GR_FILENAME_TWO_INT_STR:
78201+ dentry = va_arg(ap, struct dentry *);
78202+ mnt = va_arg(ap, struct vfsmount *);
78203+ num1 = va_arg(ap, int);
78204+ num2 = va_arg(ap, int);
78205+ str1 = va_arg(ap, char *);
78206+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
78207+ break;
78208+ case GR_TEXTREL:
78209+ file = va_arg(ap, struct file *);
78210+ ulong1 = va_arg(ap, unsigned long);
78211+ ulong2 = va_arg(ap, unsigned long);
78212+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
78213+ break;
78214+ case GR_PTRACE:
78215+ task = va_arg(ap, struct task_struct *);
78216+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
78217+ break;
78218+ case GR_RESOURCE:
78219+ task = va_arg(ap, struct task_struct *);
78220+ cred = __task_cred(task);
78221+ pcred = __task_cred(task->real_parent);
78222+ ulong1 = va_arg(ap, unsigned long);
78223+ str1 = va_arg(ap, char *);
78224+ ulong2 = va_arg(ap, unsigned long);
78225+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78226+ break;
78227+ case GR_CAP:
78228+ task = va_arg(ap, struct task_struct *);
78229+ cred = __task_cred(task);
78230+ pcred = __task_cred(task->real_parent);
78231+ str1 = va_arg(ap, char *);
78232+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78233+ break;
78234+ case GR_SIG:
78235+ str1 = va_arg(ap, char *);
78236+ voidptr = va_arg(ap, void *);
78237+ gr_log_middle_varargs(audit, msg, str1, voidptr);
78238+ break;
78239+ case GR_SIG2:
78240+ task = va_arg(ap, struct task_struct *);
78241+ cred = __task_cred(task);
78242+ pcred = __task_cred(task->real_parent);
78243+ num1 = va_arg(ap, int);
78244+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78245+ break;
78246+ case GR_CRASH1:
78247+ task = va_arg(ap, struct task_struct *);
78248+ cred = __task_cred(task);
78249+ pcred = __task_cred(task->real_parent);
78250+ ulong1 = va_arg(ap, unsigned long);
78251+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
78252+ break;
78253+ case GR_CRASH2:
78254+ task = va_arg(ap, struct task_struct *);
78255+ cred = __task_cred(task);
78256+ pcred = __task_cred(task->real_parent);
78257+ ulong1 = va_arg(ap, unsigned long);
78258+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
78259+ break;
78260+ case GR_RWXMAP:
78261+ file = va_arg(ap, struct file *);
78262+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
78263+ break;
78264+ case GR_RWXMAPVMA:
78265+ vma = va_arg(ap, struct vm_area_struct *);
78266+ if (vma->vm_file)
78267+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
78268+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
78269+ str1 = "<stack>";
78270+ else if (vma->vm_start <= current->mm->brk &&
78271+ vma->vm_end >= current->mm->start_brk)
78272+ str1 = "<heap>";
78273+ else
78274+ str1 = "<anonymous mapping>";
78275+ gr_log_middle_varargs(audit, msg, str1);
78276+ break;
78277+ case GR_PSACCT:
78278+ {
78279+ unsigned int wday, cday;
78280+ __u8 whr, chr;
78281+ __u8 wmin, cmin;
78282+ __u8 wsec, csec;
78283+ char cur_tty[64] = { 0 };
78284+ char parent_tty[64] = { 0 };
78285+
78286+ task = va_arg(ap, struct task_struct *);
78287+ wday = va_arg(ap, unsigned int);
78288+ cday = va_arg(ap, unsigned int);
78289+ whr = va_arg(ap, int);
78290+ chr = va_arg(ap, int);
78291+ wmin = va_arg(ap, int);
78292+ cmin = va_arg(ap, int);
78293+ wsec = va_arg(ap, int);
78294+ csec = va_arg(ap, int);
78295+ ulong1 = va_arg(ap, unsigned long);
78296+ cred = __task_cred(task);
78297+ pcred = __task_cred(task->real_parent);
78298+
78299+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78300+ }
78301+ break;
78302+ default:
78303+ gr_log_middle(audit, msg, ap);
78304+ }
78305+ va_end(ap);
78306+ // these don't need DEFAULTSECARGS printed on the end
78307+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
78308+ gr_log_end(audit, 0);
78309+ else
78310+ gr_log_end(audit, 1);
78311+ END_LOCKS(audit);
78312+}
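gr_log_start() rate-limits alerts: up to CONFIG_GRKERNSEC_FLOODBURST messages per CONFIG_GRKERNSEC_FLOODTIME-second window, one "logging disabled" notice when the burst is exceeded, then silence until the window lapses. A standalone model of the same control flow (editor's sketch; time() stands in for get_seconds()):

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10
#define FLOODBURST 4

static unsigned long wtime;	/* window start */
static unsigned long fyet;	/* alerts seen in the current window */

static int allow_alert(unsigned long now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;		/* open a fresh window */
		fyet = 0;
		return 1;
	}
	if (fyet < FLOODBURST) {
		fyet++;
		return 1;
	}
	if (fyet == FLOODBURST) {
		wtime = now;		/* silence lasts a full window */
		fyet++;			/* notice is printed exactly once */
		printf("grsec: more alerts, logging disabled for %d seconds\n",
		       FLOODTIME);
	}
	return 0;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);

	for (int i = 0; i < 8; i++)
		printf("alert %d: %s\n", i,
		       allow_alert(now) ? "logged" : "dropped");
	return 0;
}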
78313diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
78314new file mode 100644
78315index 0000000..0e39d8c
78316--- /dev/null
78317+++ b/grsecurity/grsec_mem.c
78318@@ -0,0 +1,48 @@
78319+#include <linux/kernel.h>
78320+#include <linux/sched.h>
78321+#include <linux/mm.h>
78322+#include <linux/mman.h>
78323+#include <linux/module.h>
78324+#include <linux/grinternal.h>
78325+
78326+void gr_handle_msr_write(void)
78327+{
78328+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
78329+ return;
78330+}
78331+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
78332+
78333+void
78334+gr_handle_ioperm(void)
78335+{
78336+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
78337+ return;
78338+}
78339+
78340+void
78341+gr_handle_iopl(void)
78342+{
78343+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
78344+ return;
78345+}
78346+
78347+void
78348+gr_handle_mem_readwrite(u64 from, u64 to)
78349+{
78350+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
78351+ return;
78352+}
78353+
78354+void
78355+gr_handle_vm86(void)
78356+{
78357+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
78358+ return;
78359+}
78360+
78361+void
78362+gr_log_badprocpid(const char *entry)
78363+{
78364+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
78365+ return;
78366+}
78367diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
78368new file mode 100644
78369index 0000000..6f9eb73
78370--- /dev/null
78371+++ b/grsecurity/grsec_mount.c
78372@@ -0,0 +1,65 @@
78373+#include <linux/kernel.h>
78374+#include <linux/sched.h>
78375+#include <linux/mount.h>
78376+#include <linux/major.h>
78377+#include <linux/grsecurity.h>
78378+#include <linux/grinternal.h>
78379+
78380+void
78381+gr_log_remount(const char *devname, const int retval)
78382+{
78383+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78384+ if (grsec_enable_mount && (retval >= 0))
78385+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
78386+#endif
78387+ return;
78388+}
78389+
78390+void
78391+gr_log_unmount(const char *devname, const int retval)
78392+{
78393+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78394+ if (grsec_enable_mount && (retval >= 0))
78395+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
78396+#endif
78397+ return;
78398+}
78399+
78400+void
78401+gr_log_mount(const char *from, struct path *to, const int retval)
78402+{
78403+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78404+ if (grsec_enable_mount && (retval >= 0))
78405+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
78406+#endif
78407+ return;
78408+}
78409+
78410+int
78411+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78412+{
78413+#ifdef CONFIG_GRKERNSEC_ROFS
78414+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78415+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78416+ return -EPERM;
78417+ } else
78418+ return 0;
78419+#endif
78420+ return 0;
78421+}
78422+
78423+int
78424+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78425+{
78426+#ifdef CONFIG_GRKERNSEC_ROFS
78427+ struct inode *inode = dentry->d_inode;
78428+
78429+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78430+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78431+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78432+ return -EPERM;
78433+ } else
78434+ return 0;
78435+#endif
78436+ return 0;
78437+}
78438diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78439new file mode 100644
78440index 0000000..6ee9d50
78441--- /dev/null
78442+++ b/grsecurity/grsec_pax.c
78443@@ -0,0 +1,45 @@
78444+#include <linux/kernel.h>
78445+#include <linux/sched.h>
78446+#include <linux/mm.h>
78447+#include <linux/file.h>
78448+#include <linux/grinternal.h>
78449+#include <linux/grsecurity.h>
78450+
78451+void
78452+gr_log_textrel(struct vm_area_struct * vma)
78453+{
78454+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78455+ if (grsec_enable_log_rwxmaps)
78456+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78457+#endif
78458+ return;
78459+}
78460+
78461+void gr_log_ptgnustack(struct file *file)
78462+{
78463+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78464+ if (grsec_enable_log_rwxmaps)
78465+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78466+#endif
78467+ return;
78468+}
78469+
78470+void
78471+gr_log_rwxmmap(struct file *file)
78472+{
78473+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78474+ if (grsec_enable_log_rwxmaps)
78475+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78476+#endif
78477+ return;
78478+}
78479+
78480+void
78481+gr_log_rwxmprotect(struct vm_area_struct *vma)
78482+{
78483+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78484+ if (grsec_enable_log_rwxmaps)
78485+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78486+#endif
78487+ return;
78488+}
78489diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78490new file mode 100644
78491index 0000000..2005a3a
78492--- /dev/null
78493+++ b/grsecurity/grsec_proc.c
78494@@ -0,0 +1,20 @@
78495+#include <linux/kernel.h>
78496+#include <linux/sched.h>
78497+#include <linux/grsecurity.h>
78498+#include <linux/grinternal.h>
78499+
78500+int gr_proc_is_restricted(void)
78501+{
78502+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78503+ const struct cred *cred = current_cred();
78504+#endif
78505+
78506+#ifdef CONFIG_GRKERNSEC_PROC_USER
78507+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78508+ return -EACCES;
78509+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78510+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78511+ return -EACCES;
78512+#endif
78513+ return 0;
78514+}
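gr_proc_is_restricted() gates /proc visibility on an fsuid of root or, under PROC_USERGROUP, membership in a configured group. A tiny model of the check (uids and group membership are illustrative):

#include <stdio.h>

#define EACCES 13

static int proc_restricted(unsigned fsuid, int in_proc_group)
{
	if (fsuid != 0 && !in_proc_group)
		return -EACCES;
	return 0;
}

int main(void)
{
	printf("uid 1000, not in group: %d\n", proc_restricted(1000, 0));
	printf("uid 1000, in group:     %d\n", proc_restricted(1000, 1));
	return 0;
}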
78515diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78516new file mode 100644
78517index 0000000..f7f29aa
78518--- /dev/null
78519+++ b/grsecurity/grsec_ptrace.c
78520@@ -0,0 +1,30 @@
78521+#include <linux/kernel.h>
78522+#include <linux/sched.h>
78523+#include <linux/grinternal.h>
78524+#include <linux/security.h>
78525+
78526+void
78527+gr_audit_ptrace(struct task_struct *task)
78528+{
78529+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78530+ if (grsec_enable_audit_ptrace)
78531+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78532+#endif
78533+ return;
78534+}
78535+
78536+int
78537+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78538+{
78539+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78540+ const struct dentry *dentry = file->f_path.dentry;
78541+ const struct vfsmount *mnt = file->f_path.mnt;
78542+
78543+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78544+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78545+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78546+ return -EACCES;
78547+ }
78548+#endif
78549+ return 0;
78550+}
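gr_ptrace_readexec() refuses the exec of a binary that the ptraced process could not read(), closing the trick of ptracing across exec to dump an execute-only binary. A stub-based model (the LSM_UNSAFE_PTRACE value and the readability stub here are illustrative, not the kernel's):

#include <stdio.h>

#define EACCES 13
#define LSM_UNSAFE_PTRACE 0x1	/* value is illustrative */

static int can_read_binary(unsigned uid) { return uid == 0; }	/* stub */

static int ptrace_readexec_check(int unsafe_flags, unsigned uid)
{
	/* exec of an unreadable binary while being traced is refused */
	if ((unsafe_flags & LSM_UNSAFE_PTRACE) && !can_read_binary(uid))
		return -EACCES;
	return 0;
}

int main(void)
{
	printf("traced, uid 1000:   %d\n",
	       ptrace_readexec_check(LSM_UNSAFE_PTRACE, 1000));
	printf("untraced, uid 1000: %d\n", ptrace_readexec_check(0, 1000));
	return 0;
}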
78551diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78552new file mode 100644
78553index 0000000..3860c7e
78554--- /dev/null
78555+++ b/grsecurity/grsec_sig.c
78556@@ -0,0 +1,236 @@
78557+#include <linux/kernel.h>
78558+#include <linux/sched.h>
78559+#include <linux/fs.h>
78560+#include <linux/delay.h>
78561+#include <linux/grsecurity.h>
78562+#include <linux/grinternal.h>
78563+#include <linux/hardirq.h>
78564+
78565+char *signames[] = {
78566+ [SIGSEGV] = "Segmentation fault",
78567+ [SIGILL] = "Illegal instruction",
78568+ [SIGABRT] = "Abort",
78569+ [SIGBUS] = "Invalid alignment/Bus error"
78570+};
78571+
78572+void
78573+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78574+{
78575+#ifdef CONFIG_GRKERNSEC_SIGNAL
78576+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78577+ (sig == SIGABRT) || (sig == SIGBUS))) {
78578+ if (task_pid_nr(t) == task_pid_nr(current)) {
78579+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78580+ } else {
78581+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78582+ }
78583+ }
78584+#endif
78585+ return;
78586+}
78587+
78588+int
78589+gr_handle_signal(const struct task_struct *p, const int sig)
78590+{
78591+#ifdef CONFIG_GRKERNSEC
78592+ /* ignore the 0 signal for protected task checks */
78593+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78594+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78595+ return -EPERM;
78596+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78597+ return -EPERM;
78598+ }
78599+#endif
78600+ return 0;
78601+}
78602+
78603+#ifdef CONFIG_GRKERNSEC
78604+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78605+
78606+int gr_fake_force_sig(int sig, struct task_struct *t)
78607+{
78608+ unsigned long int flags;
78609+ int ret, blocked, ignored;
78610+ struct k_sigaction *action;
78611+
78612+ spin_lock_irqsave(&t->sighand->siglock, flags);
78613+ action = &t->sighand->action[sig-1];
78614+ ignored = action->sa.sa_handler == SIG_IGN;
78615+ blocked = sigismember(&t->blocked, sig);
78616+ if (blocked || ignored) {
78617+ action->sa.sa_handler = SIG_DFL;
78618+ if (blocked) {
78619+ sigdelset(&t->blocked, sig);
78620+ recalc_sigpending_and_wake(t);
78621+ }
78622+ }
78623+ if (action->sa.sa_handler == SIG_DFL)
78624+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78625+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78626+
78627+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78628+
78629+ return ret;
78630+}
78631+#endif
78632+
78633+#define GR_USER_BAN_TIME (15 * 60)
78634+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78635+
78636+void gr_handle_brute_attach(int dumpable)
78637+{
78638+#ifdef CONFIG_GRKERNSEC_BRUTE
78639+ struct task_struct *p = current;
78640+ kuid_t uid = GLOBAL_ROOT_UID;
78641+ int daemon = 0;
78642+
78643+ if (!grsec_enable_brute)
78644+ return;
78645+
78646+ rcu_read_lock();
78647+ read_lock(&tasklist_lock);
78648+ read_lock(&grsec_exec_file_lock);
78649+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78650+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78651+ p->real_parent->brute = 1;
78652+ daemon = 1;
78653+ } else {
78654+ const struct cred *cred = __task_cred(p), *cred2;
78655+ struct task_struct *tsk, *tsk2;
78656+
78657+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78658+ struct user_struct *user;
78659+
78660+ uid = cred->uid;
78661+
78662+			/* this reference is put (dropped) upon execution past expiration */
78663+ user = find_user(uid);
78664+ if (user == NULL)
78665+ goto unlock;
78666+ user->suid_banned = 1;
78667+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78668+ if (user->suid_ban_expires == ~0UL)
78669+ user->suid_ban_expires--;
78670+
78671+ /* only kill other threads of the same binary, from the same user */
78672+ do_each_thread(tsk2, tsk) {
78673+ cred2 = __task_cred(tsk);
78674+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78675+ gr_fake_force_sig(SIGKILL, tsk);
78676+ } while_each_thread(tsk2, tsk);
78677+ }
78678+ }
78679+unlock:
78680+ read_unlock(&grsec_exec_file_lock);
78681+ read_unlock(&tasklist_lock);
78682+ rcu_read_unlock();
78683+
78684+ if (gr_is_global_nonroot(uid))
78685+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78686+ else if (daemon)
78687+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78688+
78689+#endif
78690+ return;
78691+}
78692+
78693+void gr_handle_brute_check(void)
78694+{
78695+#ifdef CONFIG_GRKERNSEC_BRUTE
78696+ struct task_struct *p = current;
78697+
78698+ if (unlikely(p->brute)) {
78699+ if (!grsec_enable_brute)
78700+ p->brute = 0;
78701+ else if (time_before(get_seconds(), p->brute_expires))
78702+ msleep(30 * 1000);
78703+ }
78704+#endif
78705+ return;
78706+}
78707+
78708+void gr_handle_kernel_exploit(void)
78709+{
78710+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78711+ const struct cred *cred;
78712+ struct task_struct *tsk, *tsk2;
78713+ struct user_struct *user;
78714+ kuid_t uid;
78715+
78716+ if (in_irq() || in_serving_softirq() || in_nmi())
78717+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78718+
78719+ uid = current_uid();
78720+
78721+ if (gr_is_global_root(uid))
78722+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78723+ else {
78724+ /* kill all the processes of this user, hold a reference
78725+ to their creds struct, and prevent them from creating
78726+ another process until system reset
78727+ */
78728+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78729+ GR_GLOBAL_UID(uid));
78730+ /* we intentionally leak this ref */
78731+ user = get_uid(current->cred->user);
78732+ if (user)
78733+ user->kernel_banned = 1;
78734+
78735+ /* kill all processes of this user */
78736+ read_lock(&tasklist_lock);
78737+ do_each_thread(tsk2, tsk) {
78738+ cred = __task_cred(tsk);
78739+ if (uid_eq(cred->uid, uid))
78740+ gr_fake_force_sig(SIGKILL, tsk);
78741+ } while_each_thread(tsk2, tsk);
78742+ read_unlock(&tasklist_lock);
78743+ }
78744+#endif
78745+}
78746+
78747+#ifdef CONFIG_GRKERNSEC_BRUTE
78748+static bool suid_ban_expired(struct user_struct *user)
78749+{
78750+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78751+ user->suid_banned = 0;
78752+ user->suid_ban_expires = 0;
78753+ free_uid(user);
78754+ return true;
78755+ }
78756+
78757+ return false;
78758+}
78759+#endif
78760+
78761+int gr_process_kernel_exec_ban(void)
78762+{
78763+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78764+ if (unlikely(current->cred->user->kernel_banned))
78765+ return -EPERM;
78766+#endif
78767+ return 0;
78768+}
78769+
78770+int gr_process_kernel_setuid_ban(struct user_struct *user)
78771+{
78772+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78773+ if (unlikely(user->kernel_banned))
78774+ gr_fake_force_sig(SIGKILL, current);
78775+#endif
78776+ return 0;
78777+}
78778+
78779+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78780+{
78781+#ifdef CONFIG_GRKERNSEC_BRUTE
78782+ struct user_struct *user = current->cred->user;
78783+ if (unlikely(user->suid_banned)) {
78784+ if (suid_ban_expired(user))
78785+ return 0;
78786+ /* disallow execution of suid binaries only */
78787+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78788+ return -EPERM;
78789+ }
78790+#endif
78791+ return 0;
78792+}
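gr_handle_brute_attach() bans the owner of a crashing setuid binary for GR_USER_BAN_TIME seconds, reserving ~0UL as a "permanent" sentinel, and suid_ban_expired() lifts the ban on the next exec after expiry. A userspace sketch of that bookkeeping (editor's illustration; time() stands in for get_seconds()):

#include <stdio.h>
#include <time.h>

#define GR_USER_BAN_TIME (15 * 60)

struct user {
	int banned;
	unsigned long ban_expires;	/* ~0UL means permanent */
};

static void ban_user(struct user *u, unsigned long now)
{
	u->banned = 1;
	u->ban_expires = now + GR_USER_BAN_TIME;
	if (u->ban_expires == ~0UL)	/* don't collide with the sentinel */
		u->ban_expires--;
}

static int ban_active(struct user *u, unsigned long now)
{
	if (u->ban_expires != ~0UL && now >= u->ban_expires) {
		u->banned = 0;		/* ban lapses on this exec */
		u->ban_expires = 0;
	}
	return u->banned;
}

int main(void)
{
	struct user u = { 0, 0 };
	unsigned long now = (unsigned long)time(NULL);

	ban_user(&u, now);
	printf("just banned:      %d\n", ban_active(&u, now));
	printf("16 minutes later: %d\n", ban_active(&u, now + 16 * 60));
	return 0;
}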
78793diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78794new file mode 100644
78795index 0000000..e3650b6
78796--- /dev/null
78797+++ b/grsecurity/grsec_sock.c
78798@@ -0,0 +1,244 @@
78799+#include <linux/kernel.h>
78800+#include <linux/module.h>
78801+#include <linux/sched.h>
78802+#include <linux/file.h>
78803+#include <linux/net.h>
78804+#include <linux/in.h>
78805+#include <linux/ip.h>
78806+#include <net/sock.h>
78807+#include <net/inet_sock.h>
78808+#include <linux/grsecurity.h>
78809+#include <linux/grinternal.h>
78810+#include <linux/gracl.h>
78811+
78812+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78813+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78814+
78815+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78816+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78817+
78818+#ifdef CONFIG_UNIX_MODULE
78819+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78820+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78821+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78822+EXPORT_SYMBOL_GPL(gr_handle_create);
78823+#endif
78824+
78825+#ifdef CONFIG_GRKERNSEC
78826+#define gr_conn_table_size 32749
78827+struct conn_table_entry {
78828+ struct conn_table_entry *next;
78829+ struct signal_struct *sig;
78830+};
78831+
78832+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78833+DEFINE_SPINLOCK(gr_conn_table_lock);
78834+
78835+extern const char * gr_socktype_to_name(unsigned char type);
78836+extern const char * gr_proto_to_name(unsigned char proto);
78837+extern const char * gr_sockfamily_to_name(unsigned char family);
78838+
78839+static __inline__ int
78840+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78841+{
78842+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78843+}
78844+
78845+static __inline__ int
78846+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78847+ __u16 sport, __u16 dport)
78848+{
78849+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78850+ sig->gr_sport == sport && sig->gr_dport == dport))
78851+ return 1;
78852+ else
78853+ return 0;
78854+}
78855+
78856+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78857+{
78858+ struct conn_table_entry **match;
78859+ unsigned int index;
78860+
78861+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78862+ sig->gr_sport, sig->gr_dport,
78863+ gr_conn_table_size);
78864+
78865+ newent->sig = sig;
78866+
78867+ match = &gr_conn_table[index];
78868+ newent->next = *match;
78869+ *match = newent;
78870+
78871+ return;
78872+}
78873+
78874+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78875+{
78876+ struct conn_table_entry *match, *last = NULL;
78877+ unsigned int index;
78878+
78879+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78880+ sig->gr_sport, sig->gr_dport,
78881+ gr_conn_table_size);
78882+
78883+ match = gr_conn_table[index];
78884+ while (match && !conn_match(match->sig,
78885+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78886+ sig->gr_dport)) {
78887+ last = match;
78888+ match = match->next;
78889+ }
78890+
78891+ if (match) {
78892+ if (last)
78893+ last->next = match->next;
78894+ else
78895+ gr_conn_table[index] = NULL;
78896+ kfree(match);
78897+ }
78898+
78899+ return;
78900+}
78901+
78902+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78903+ __u16 sport, __u16 dport)
78904+{
78905+ struct conn_table_entry *match;
78906+ unsigned int index;
78907+
78908+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78909+
78910+ match = gr_conn_table[index];
78911+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78912+ match = match->next;
78913+
78914+ if (match)
78915+ return match->sig;
78916+ else
78917+ return NULL;
78918+}
78919+
78920+#endif
78921+
78922+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78923+{
78924+#ifdef CONFIG_GRKERNSEC
78925+ struct signal_struct *sig = current->signal;
78926+ struct conn_table_entry *newent;
78927+
78928+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78929+ if (newent == NULL)
78930+ return;
78931+ /* no bh lock needed since we are called with bh disabled */
78932+ spin_lock(&gr_conn_table_lock);
78933+ gr_del_task_from_ip_table_nolock(sig);
78934+ sig->gr_saddr = inet->inet_rcv_saddr;
78935+ sig->gr_daddr = inet->inet_daddr;
78936+ sig->gr_sport = inet->inet_sport;
78937+ sig->gr_dport = inet->inet_dport;
78938+ gr_add_to_task_ip_table_nolock(sig, newent);
78939+ spin_unlock(&gr_conn_table_lock);
78940+#endif
78941+ return;
78942+}
78943+
78944+void gr_del_task_from_ip_table(struct task_struct *task)
78945+{
78946+#ifdef CONFIG_GRKERNSEC
78947+ spin_lock_bh(&gr_conn_table_lock);
78948+ gr_del_task_from_ip_table_nolock(task->signal);
78949+ spin_unlock_bh(&gr_conn_table_lock);
78950+#endif
78951+ return;
78952+}
78953+
78954+void
78955+gr_attach_curr_ip(const struct sock *sk)
78956+{
78957+#ifdef CONFIG_GRKERNSEC
78958+ struct signal_struct *p, *set;
78959+ const struct inet_sock *inet = inet_sk(sk);
78960+
78961+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78962+ return;
78963+
78964+ set = current->signal;
78965+
78966+ spin_lock_bh(&gr_conn_table_lock);
78967+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78968+ inet->inet_dport, inet->inet_sport);
78969+ if (unlikely(p != NULL)) {
78970+ set->curr_ip = p->curr_ip;
78971+ set->used_accept = 1;
78972+ gr_del_task_from_ip_table_nolock(p);
78973+ spin_unlock_bh(&gr_conn_table_lock);
78974+ return;
78975+ }
78976+ spin_unlock_bh(&gr_conn_table_lock);
78977+
78978+ set->curr_ip = inet->inet_daddr;
78979+ set->used_accept = 1;
78980+#endif
78981+ return;
78982+}
78983+
78984+int
78985+gr_handle_sock_all(const int family, const int type, const int protocol)
78986+{
78987+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78988+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78989+ (family != AF_UNIX)) {
78990+ if (family == AF_INET)
78991+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78992+ else
78993+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78994+ return -EACCES;
78995+ }
78996+#endif
78997+ return 0;
78998+}
78999+
79000+int
79001+gr_handle_sock_server(const struct sockaddr *sck)
79002+{
79003+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79004+ if (grsec_enable_socket_server &&
79005+ in_group_p(grsec_socket_server_gid) &&
79006+ sck && (sck->sa_family != AF_UNIX) &&
79007+ (sck->sa_family != AF_LOCAL)) {
79008+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79009+ return -EACCES;
79010+ }
79011+#endif
79012+ return 0;
79013+}
79014+
79015+int
79016+gr_handle_sock_server_other(const struct sock *sck)
79017+{
79018+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79019+ if (grsec_enable_socket_server &&
79020+ in_group_p(grsec_socket_server_gid) &&
79021+ sck && (sck->sk_family != AF_UNIX) &&
79022+ (sck->sk_family != AF_LOCAL)) {
79023+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79024+ return -EACCES;
79025+ }
79026+#endif
79027+ return 0;
79028+}
79029+
79030+int
79031+gr_handle_sock_client(const struct sockaddr *sck)
79032+{
79033+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79034+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
79035+ sck && (sck->sa_family != AF_UNIX) &&
79036+ (sck->sa_family != AF_LOCAL)) {
79037+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
79038+ return -EACCES;
79039+ }
79040+#endif
79041+ return 0;
79042+}
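The connection table above hashes the TCP 4-tuple into 32749 chained buckets so gr_attach_curr_ip() can recover which accepting task a connection belongs to. A miniature userspace version with the same hash mixing (table size shrunk for the demo; an int stands in for the signal_struct pointer, and the unsigned cast is added here to keep the 16-bit shift well defined):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define TABLE_SIZE 127

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	int task_id;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	/* same mixing as the kernel's conn_hash(), modulo the demo size */
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void add_conn(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp, int id)
{
	struct entry *e = malloc(sizeof(*e));
	unsigned int i = conn_hash(s, d, sp, dp);

	e->saddr = s; e->daddr = d; e->sport = sp; e->dport = dp;
	e->task_id = id;
	e->next = table[i];	/* push onto the chain head */
	table[i] = e;
}

static int lookup_conn(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct entry *e = table[conn_hash(s, d, sp, dp)];

	for (; e; e = e->next)
		if (e->saddr == s && e->daddr == d &&
		    e->sport == sp && e->dport == dp)
			return e->task_id;
	return -1;
}

int main(void)
{
	add_conn(0x0a000001, 0x0a000002, 8080, 43210, 42);
	printf("task: %d\n", lookup_conn(0x0a000001, 0x0a000002, 8080, 43210));
	return 0;
}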
79043diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
79044new file mode 100644
79045index 0000000..cce889e
79046--- /dev/null
79047+++ b/grsecurity/grsec_sysctl.c
79048@@ -0,0 +1,488 @@
79049+#include <linux/kernel.h>
79050+#include <linux/sched.h>
79051+#include <linux/sysctl.h>
79052+#include <linux/grsecurity.h>
79053+#include <linux/grinternal.h>
79054+
79055+int
79056+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
79057+{
79058+#ifdef CONFIG_GRKERNSEC_SYSCTL
79059+ if (dirname == NULL || name == NULL)
79060+ return 0;
79061+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
79062+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
79063+ return -EACCES;
79064+ }
79065+#endif
79066+ return 0;
79067+}
79068+
79069+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
79070+static int __maybe_unused __read_only one = 1;
79071+#endif
79072+
79073+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
79074+ defined(CONFIG_GRKERNSEC_DENYUSB)
79075+struct ctl_table grsecurity_table[] = {
79076+#ifdef CONFIG_GRKERNSEC_SYSCTL
79077+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
79078+#ifdef CONFIG_GRKERNSEC_IO
79079+ {
79080+ .procname = "disable_priv_io",
79081+ .data = &grsec_disable_privio,
79082+ .maxlen = sizeof(int),
79083+ .mode = 0600,
79084+ .proc_handler = &proc_dointvec,
79085+ },
79086+#endif
79087+#endif
79088+#ifdef CONFIG_GRKERNSEC_LINK
79089+ {
79090+ .procname = "linking_restrictions",
79091+ .data = &grsec_enable_link,
79092+ .maxlen = sizeof(int),
79093+ .mode = 0600,
79094+ .proc_handler = &proc_dointvec,
79095+ },
79096+#endif
79097+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
79098+ {
79099+ .procname = "enforce_symlinksifowner",
79100+ .data = &grsec_enable_symlinkown,
79101+ .maxlen = sizeof(int),
79102+ .mode = 0600,
79103+ .proc_handler = &proc_dointvec,
79104+ },
79105+ {
79106+ .procname = "symlinkown_gid",
79107+ .data = &grsec_symlinkown_gid,
79108+ .maxlen = sizeof(int),
79109+ .mode = 0600,
79110+ .proc_handler = &proc_dointvec,
79111+ },
79112+#endif
79113+#ifdef CONFIG_GRKERNSEC_BRUTE
79114+ {
79115+ .procname = "deter_bruteforce",
79116+ .data = &grsec_enable_brute,
79117+ .maxlen = sizeof(int),
79118+ .mode = 0600,
79119+ .proc_handler = &proc_dointvec,
79120+ },
79121+#endif
79122+#ifdef CONFIG_GRKERNSEC_FIFO
79123+ {
79124+ .procname = "fifo_restrictions",
79125+ .data = &grsec_enable_fifo,
79126+ .maxlen = sizeof(int),
79127+ .mode = 0600,
79128+ .proc_handler = &proc_dointvec,
79129+ },
79130+#endif
79131+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79132+ {
79133+ .procname = "ptrace_readexec",
79134+ .data = &grsec_enable_ptrace_readexec,
79135+ .maxlen = sizeof(int),
79136+ .mode = 0600,
79137+ .proc_handler = &proc_dointvec,
79138+ },
79139+#endif
79140+#ifdef CONFIG_GRKERNSEC_SETXID
79141+ {
79142+ .procname = "consistent_setxid",
79143+ .data = &grsec_enable_setxid,
79144+ .maxlen = sizeof(int),
79145+ .mode = 0600,
79146+ .proc_handler = &proc_dointvec,
79147+ },
79148+#endif
79149+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79150+ {
79151+ .procname = "ip_blackhole",
79152+ .data = &grsec_enable_blackhole,
79153+ .maxlen = sizeof(int),
79154+ .mode = 0600,
79155+ .proc_handler = &proc_dointvec,
79156+ },
79157+ {
79158+ .procname = "lastack_retries",
79159+ .data = &grsec_lastack_retries,
79160+ .maxlen = sizeof(int),
79161+ .mode = 0600,
79162+ .proc_handler = &proc_dointvec,
79163+ },
79164+#endif
79165+#ifdef CONFIG_GRKERNSEC_EXECLOG
79166+ {
79167+ .procname = "exec_logging",
79168+ .data = &grsec_enable_execlog,
79169+ .maxlen = sizeof(int),
79170+ .mode = 0600,
79171+ .proc_handler = &proc_dointvec,
79172+ },
79173+#endif
79174+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79175+ {
79176+ .procname = "rwxmap_logging",
79177+ .data = &grsec_enable_log_rwxmaps,
79178+ .maxlen = sizeof(int),
79179+ .mode = 0600,
79180+ .proc_handler = &proc_dointvec,
79181+ },
79182+#endif
79183+#ifdef CONFIG_GRKERNSEC_SIGNAL
79184+ {
79185+ .procname = "signal_logging",
79186+ .data = &grsec_enable_signal,
79187+ .maxlen = sizeof(int),
79188+ .mode = 0600,
79189+ .proc_handler = &proc_dointvec,
79190+ },
79191+#endif
79192+#ifdef CONFIG_GRKERNSEC_FORKFAIL
79193+ {
79194+ .procname = "forkfail_logging",
79195+ .data = &grsec_enable_forkfail,
79196+ .maxlen = sizeof(int),
79197+ .mode = 0600,
79198+ .proc_handler = &proc_dointvec,
79199+ },
79200+#endif
79201+#ifdef CONFIG_GRKERNSEC_TIME
79202+ {
79203+ .procname = "timechange_logging",
79204+ .data = &grsec_enable_time,
79205+ .maxlen = sizeof(int),
79206+ .mode = 0600,
79207+ .proc_handler = &proc_dointvec,
79208+ },
79209+#endif
79210+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
79211+ {
79212+ .procname = "chroot_deny_shmat",
79213+ .data = &grsec_enable_chroot_shmat,
79214+ .maxlen = sizeof(int),
79215+ .mode = 0600,
79216+ .proc_handler = &proc_dointvec,
79217+ },
79218+#endif
79219+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
79220+ {
79221+ .procname = "chroot_deny_unix",
79222+ .data = &grsec_enable_chroot_unix,
79223+ .maxlen = sizeof(int),
79224+ .mode = 0600,
79225+ .proc_handler = &proc_dointvec,
79226+ },
79227+#endif
79228+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
79229+ {
79230+ .procname = "chroot_deny_mount",
79231+ .data = &grsec_enable_chroot_mount,
79232+ .maxlen = sizeof(int),
79233+ .mode = 0600,
79234+ .proc_handler = &proc_dointvec,
79235+ },
79236+#endif
79237+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
79238+ {
79239+ .procname = "chroot_deny_fchdir",
79240+ .data = &grsec_enable_chroot_fchdir,
79241+ .maxlen = sizeof(int),
79242+ .mode = 0600,
79243+ .proc_handler = &proc_dointvec,
79244+ },
79245+#endif
79246+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
79247+ {
79248+ .procname = "chroot_deny_chroot",
79249+ .data = &grsec_enable_chroot_double,
79250+ .maxlen = sizeof(int),
79251+ .mode = 0600,
79252+ .proc_handler = &proc_dointvec,
79253+ },
79254+#endif
79255+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
79256+ {
79257+ .procname = "chroot_deny_pivot",
79258+ .data = &grsec_enable_chroot_pivot,
79259+ .maxlen = sizeof(int),
79260+ .mode = 0600,
79261+ .proc_handler = &proc_dointvec,
79262+ },
79263+#endif
79264+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
79265+ {
79266+ .procname = "chroot_enforce_chdir",
79267+ .data = &grsec_enable_chroot_chdir,
79268+ .maxlen = sizeof(int),
79269+ .mode = 0600,
79270+ .proc_handler = &proc_dointvec,
79271+ },
79272+#endif
79273+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
79274+ {
79275+ .procname = "chroot_deny_chmod",
79276+ .data = &grsec_enable_chroot_chmod,
79277+ .maxlen = sizeof(int),
79278+ .mode = 0600,
79279+ .proc_handler = &proc_dointvec,
79280+ },
79281+#endif
79282+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
79283+ {
79284+ .procname = "chroot_deny_mknod",
79285+ .data = &grsec_enable_chroot_mknod,
79286+ .maxlen = sizeof(int),
79287+ .mode = 0600,
79288+ .proc_handler = &proc_dointvec,
79289+ },
79290+#endif
79291+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
79292+ {
79293+ .procname = "chroot_restrict_nice",
79294+ .data = &grsec_enable_chroot_nice,
79295+ .maxlen = sizeof(int),
79296+ .mode = 0600,
79297+ .proc_handler = &proc_dointvec,
79298+ },
79299+#endif
79300+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
79301+ {
79302+ .procname = "chroot_execlog",
79303+ .data = &grsec_enable_chroot_execlog,
79304+ .maxlen = sizeof(int),
79305+ .mode = 0600,
79306+ .proc_handler = &proc_dointvec,
79307+ },
79308+#endif
79309+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
79310+ {
79311+ .procname = "chroot_caps",
79312+ .data = &grsec_enable_chroot_caps,
79313+ .maxlen = sizeof(int),
79314+ .mode = 0600,
79315+ .proc_handler = &proc_dointvec,
79316+ },
79317+#endif
79318+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
79319+ {
79320+ .procname = "chroot_deny_bad_rename",
79321+ .data = &grsec_enable_chroot_rename,
79322+ .maxlen = sizeof(int),
79323+ .mode = 0600,
79324+ .proc_handler = &proc_dointvec,
79325+ },
79326+#endif
79327+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
79328+ {
79329+ .procname = "chroot_deny_sysctl",
79330+ .data = &grsec_enable_chroot_sysctl,
79331+ .maxlen = sizeof(int),
79332+ .mode = 0600,
79333+ .proc_handler = &proc_dointvec,
79334+ },
79335+#endif
79336+#ifdef CONFIG_GRKERNSEC_TPE
79337+ {
79338+ .procname = "tpe",
79339+ .data = &grsec_enable_tpe,
79340+ .maxlen = sizeof(int),
79341+ .mode = 0600,
79342+ .proc_handler = &proc_dointvec,
79343+ },
79344+ {
79345+ .procname = "tpe_gid",
79346+ .data = &grsec_tpe_gid,
79347+ .maxlen = sizeof(int),
79348+ .mode = 0600,
79349+ .proc_handler = &proc_dointvec,
79350+ },
79351+#endif
79352+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79353+ {
79354+ .procname = "tpe_invert",
79355+ .data = &grsec_enable_tpe_invert,
79356+ .maxlen = sizeof(int),
79357+ .mode = 0600,
79358+ .proc_handler = &proc_dointvec,
79359+ },
79360+#endif
79361+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79362+ {
79363+ .procname = "tpe_restrict_all",
79364+ .data = &grsec_enable_tpe_all,
79365+ .maxlen = sizeof(int),
79366+ .mode = 0600,
79367+ .proc_handler = &proc_dointvec,
79368+ },
79369+#endif
79370+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79371+ {
79372+ .procname = "socket_all",
79373+ .data = &grsec_enable_socket_all,
79374+ .maxlen = sizeof(int),
79375+ .mode = 0600,
79376+ .proc_handler = &proc_dointvec,
79377+ },
79378+ {
79379+ .procname = "socket_all_gid",
79380+ .data = &grsec_socket_all_gid,
79381+ .maxlen = sizeof(int),
79382+ .mode = 0600,
79383+ .proc_handler = &proc_dointvec,
79384+ },
79385+#endif
79386+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79387+ {
79388+ .procname = "socket_client",
79389+ .data = &grsec_enable_socket_client,
79390+ .maxlen = sizeof(int),
79391+ .mode = 0600,
79392+ .proc_handler = &proc_dointvec,
79393+ },
79394+ {
79395+ .procname = "socket_client_gid",
79396+ .data = &grsec_socket_client_gid,
79397+ .maxlen = sizeof(int),
79398+ .mode = 0600,
79399+ .proc_handler = &proc_dointvec,
79400+ },
79401+#endif
79402+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79403+ {
79404+ .procname = "socket_server",
79405+ .data = &grsec_enable_socket_server,
79406+ .maxlen = sizeof(int),
79407+ .mode = 0600,
79408+ .proc_handler = &proc_dointvec,
79409+ },
79410+ {
79411+ .procname = "socket_server_gid",
79412+ .data = &grsec_socket_server_gid,
79413+ .maxlen = sizeof(int),
79414+ .mode = 0600,
79415+ .proc_handler = &proc_dointvec,
79416+ },
79417+#endif
79418+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79419+ {
79420+ .procname = "audit_group",
79421+ .data = &grsec_enable_group,
79422+ .maxlen = sizeof(int),
79423+ .mode = 0600,
79424+ .proc_handler = &proc_dointvec,
79425+ },
79426+ {
79427+ .procname = "audit_gid",
79428+ .data = &grsec_audit_gid,
79429+ .maxlen = sizeof(int),
79430+ .mode = 0600,
79431+ .proc_handler = &proc_dointvec,
79432+ },
79433+#endif
79434+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79435+ {
79436+ .procname = "audit_chdir",
79437+ .data = &grsec_enable_chdir,
79438+ .maxlen = sizeof(int),
79439+ .mode = 0600,
79440+ .proc_handler = &proc_dointvec,
79441+ },
79442+#endif
79443+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79444+ {
79445+ .procname = "audit_mount",
79446+ .data = &grsec_enable_mount,
79447+ .maxlen = sizeof(int),
79448+ .mode = 0600,
79449+ .proc_handler = &proc_dointvec,
79450+ },
79451+#endif
79452+#ifdef CONFIG_GRKERNSEC_DMESG
79453+ {
79454+ .procname = "dmesg",
79455+ .data = &grsec_enable_dmesg,
79456+ .maxlen = sizeof(int),
79457+ .mode = 0600,
79458+ .proc_handler = &proc_dointvec,
79459+ },
79460+#endif
79461+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79462+ {
79463+ .procname = "chroot_findtask",
79464+ .data = &grsec_enable_chroot_findtask,
79465+ .maxlen = sizeof(int),
79466+ .mode = 0600,
79467+ .proc_handler = &proc_dointvec,
79468+ },
79469+#endif
79470+#ifdef CONFIG_GRKERNSEC_RESLOG
79471+ {
79472+ .procname = "resource_logging",
79473+ .data = &grsec_resource_logging,
79474+ .maxlen = sizeof(int),
79475+ .mode = 0600,
79476+ .proc_handler = &proc_dointvec,
79477+ },
79478+#endif
79479+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79480+ {
79481+ .procname = "audit_ptrace",
79482+ .data = &grsec_enable_audit_ptrace,
79483+ .maxlen = sizeof(int),
79484+ .mode = 0600,
79485+ .proc_handler = &proc_dointvec,
79486+ },
79487+#endif
79488+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79489+ {
79490+ .procname = "harden_ptrace",
79491+ .data = &grsec_enable_harden_ptrace,
79492+ .maxlen = sizeof(int),
79493+ .mode = 0600,
79494+ .proc_handler = &proc_dointvec,
79495+ },
79496+#endif
79497+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79498+ {
79499+ .procname = "harden_ipc",
79500+ .data = &grsec_enable_harden_ipc,
79501+ .maxlen = sizeof(int),
79502+ .mode = 0600,
79503+ .proc_handler = &proc_dointvec,
79504+ },
79505+#endif
79506+ {
79507+ .procname = "grsec_lock",
79508+ .data = &grsec_lock,
79509+ .maxlen = sizeof(int),
79510+ .mode = 0600,
79511+ .proc_handler = &proc_dointvec,
79512+ },
79513+#endif
79514+#ifdef CONFIG_GRKERNSEC_ROFS
79515+ {
79516+ .procname = "romount_protect",
79517+ .data = &grsec_enable_rofs,
79518+ .maxlen = sizeof(int),
79519+ .mode = 0600,
79520+ .proc_handler = &proc_dointvec_minmax,
79521+ .extra1 = &one,
79522+ .extra2 = &one,
79523+ },
79524+#endif
79525+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79526+ {
79527+ .procname = "deny_new_usb",
79528+ .data = &grsec_deny_new_usb,
79529+ .maxlen = sizeof(int),
79530+ .mode = 0600,
79531+ .proc_handler = &proc_dointvec,
79532+ },
79533+#endif
79534+ { }
79535+};
79536+#endif
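
Every tunable in the table above follows one pattern: an int flag behind proc_dointvec, mode 0600 so only root can read or write it, with grsec_lock (enforced by gr_handle_sysctl_mod() at the top of the file) freezing the whole directory once set. A minimal sketch of exposing such a table, assuming the 4.0-era register_sysctl() helper; the patch's real registration of grsecurity_table happens in kernel/sysctl.c, not here:

/* Minimal sketch, assuming the 4.0-era register_sysctl() helper; the
 * real hookup of grsecurity_table happens in kernel/sysctl.c. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sysctl.h>

static int demo_flag;
static struct ctl_table_header *demo_hdr;

static struct ctl_table demo_table[] = {
    {
        .procname     = "demo_flag",
        .data         = &demo_flag,
        .maxlen       = sizeof(int),
        .mode         = 0600,          /* root-only, like the entries above */
        .proc_handler = proc_dointvec,
    },
    { }                                /* empty entry terminates the table */
};

static int __init demo_sysctl_init(void)
{
    demo_hdr = register_sysctl("kernel/grsec_demo", demo_table);
    return demo_hdr ? 0 : -ENOMEM;
}
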
79537diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79538new file mode 100644
79539index 0000000..61b514e
79540--- /dev/null
79541+++ b/grsecurity/grsec_time.c
79542@@ -0,0 +1,16 @@
79543+#include <linux/kernel.h>
79544+#include <linux/sched.h>
79545+#include <linux/grinternal.h>
79546+#include <linux/module.h>
79547+
79548+void
79549+gr_log_timechange(void)
79550+{
79551+#ifdef CONFIG_GRKERNSEC_TIME
79552+ if (grsec_enable_time)
79553+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79554+#endif
79555+ return;
79556+}
79557+
79558+EXPORT_SYMBOL_GPL(gr_log_timechange);
79559diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79560new file mode 100644
79561index 0000000..d1953de
79562--- /dev/null
79563+++ b/grsecurity/grsec_tpe.c
79564@@ -0,0 +1,78 @@
79565+#include <linux/kernel.h>
79566+#include <linux/sched.h>
79567+#include <linux/file.h>
79568+#include <linux/fs.h>
79569+#include <linux/grinternal.h>
79570+
79571+extern int gr_acl_tpe_check(void);
79572+
79573+int
79574+gr_tpe_allow(const struct file *file)
79575+{
79576+#ifdef CONFIG_GRKERNSEC
79577+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79578+ struct inode *file_inode = file->f_path.dentry->d_inode;
79579+ const struct cred *cred = current_cred();
79580+ char *msg = NULL;
79581+ char *msg2 = NULL;
79582+
79583+ // never restrict root
79584+ if (gr_is_global_root(cred->uid))
79585+ return 1;
79586+
79587+ if (grsec_enable_tpe) {
79588+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79589+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79590+ msg = "not being in trusted group";
79591+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79592+ msg = "being in untrusted group";
79593+#else
79594+ if (in_group_p(grsec_tpe_gid))
79595+ msg = "being in untrusted group";
79596+#endif
79597+ }
79598+ if (!msg && gr_acl_tpe_check())
79599+ msg = "being in untrusted role";
79600+
79601+ // not in any affected group/role
79602+ if (!msg)
79603+ goto next_check;
79604+
79605+ if (gr_is_global_nonroot(inode->i_uid))
79606+ msg2 = "file in non-root-owned directory";
79607+ else if (inode->i_mode & S_IWOTH)
79608+ msg2 = "file in world-writable directory";
79609+ else if (inode->i_mode & S_IWGRP)
79610+ msg2 = "file in group-writable directory";
79611+ else if (file_inode->i_mode & S_IWOTH)
79612+ msg2 = "file is world-writable";
79613+
79614+ if (msg && msg2) {
79615+ char fullmsg[70] = {0};
79616+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79617+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79618+ return 0;
79619+ }
79620+ msg = NULL;
79621+next_check:
79622+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79623+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79624+ return 1;
79625+
79626+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79627+ msg = "directory not owned by user";
79628+ else if (inode->i_mode & S_IWOTH)
79629+ msg = "file in world-writable directory";
79630+ else if (inode->i_mode & S_IWGRP)
79631+ msg = "file in group-writable directory";
79632+ else if (file_inode->i_mode & S_IWOTH)
79633+ msg = "file is world-writable";
79634+
79635+ if (msg) {
79636+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79637+ return 0;
79638+ }
79639+#endif
79640+#endif
79641+ return 1;
79642+}
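
The TPE policy above boils down to ownership and mode bits on the executable and its parent directory. A runnable userspace approximation of those checks using stat(2); the helper name and the use of dirname(3) are illustrative, since the kernel works from the dentry rather than a path string:

/* Userspace approximation of the directory checks above: flag an
 * executable whose parent directory is non-root-owned or group/world-
 * writable, or which is itself world-writable. Illustrative only. */
#include <stdio.h>
#include <libgen.h>
#include <sys/stat.h>

static const char *tpe_reason(const char *path)
{
    char buf[4096];
    struct stat dir, file;

    if (stat(path, &file))
        return "stat failed";
    snprintf(buf, sizeof(buf), "%s", path);
    if (stat(dirname(buf), &dir))
        return "stat failed";

    if (dir.st_uid != 0)        return "file in non-root-owned directory";
    if (dir.st_mode & S_IWOTH)  return "file in world-writable directory";
    if (dir.st_mode & S_IWGRP)  return "file in group-writable directory";
    if (file.st_mode & S_IWOTH) return "file is world-writable";
    return NULL;
}

int main(int argc, char **argv)
{
    const char *why = tpe_reason(argc > 1 ? argv[1] : "/bin/ls");
    printf("%s\n", why ? why : "would be allowed");
    return 0;
}
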
79643diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79644new file mode 100644
79645index 0000000..ae02d8e
79646--- /dev/null
79647+++ b/grsecurity/grsec_usb.c
79648@@ -0,0 +1,15 @@
79649+#include <linux/kernel.h>
79650+#include <linux/grinternal.h>
79651+#include <linux/module.h>
79652+
79653+int gr_handle_new_usb(void)
79654+{
79655+#ifdef CONFIG_GRKERNSEC_DENYUSB
79656+ if (grsec_deny_new_usb) {
79657+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79658+ return 1;
79659+ }
79660+#endif
79661+ return 0;
79662+}
79663+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
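
A nonzero return from gr_handle_new_usb() tells the caller to refuse the device. The real call site is added to the USB hub code elsewhere in this patch; a hypothetical sketch of its shape:

/* Hypothetical call-site shape; the real caller is in drivers/usb
 * elsewhere in this patch, and this function name is illustrative. */
static int demo_usb_new_device(void)
{
    if (gr_handle_new_usb())
        return -EPERM;  /* enumeration refused while deny_new_usb is set */
    /* ... normal device enumeration ... */
    return 0;
}
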
79664diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79665new file mode 100644
79666index 0000000..158b330
79667--- /dev/null
79668+++ b/grsecurity/grsum.c
79669@@ -0,0 +1,64 @@
79670+#include <linux/err.h>
79671+#include <linux/kernel.h>
79672+#include <linux/sched.h>
79673+#include <linux/mm.h>
79674+#include <linux/scatterlist.h>
79675+#include <linux/crypto.h>
79676+#include <linux/gracl.h>
79677+
79678+
79679+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79680+#error "crypto and sha256 must be built into the kernel"
79681+#endif
79682+
79683+int
79684+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79685+{
79686+ struct crypto_hash *tfm;
79687+ struct hash_desc desc;
79688+ struct scatterlist sg[2];
79689+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79690+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79691+ unsigned long *sumptr = (unsigned long *)sum;
79692+ int cryptres;
79693+ int retval = 1;
79694+ volatile int mismatched = 0;
79695+ volatile int dummy = 0;
79696+ unsigned int i;
79697+
79698+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79699+ if (IS_ERR(tfm)) {
79700+ /* should never happen, since sha256 should be built in */
79701+ memset(entry->pw, 0, GR_PW_LEN);
79702+ return 1;
79703+ }
79704+
79705+ sg_init_table(sg, 2);
79706+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79707+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79708+
79709+ desc.tfm = tfm;
79710+ desc.flags = 0;
79711+
79712+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79713+ temp_sum);
79714+
79715+ memset(entry->pw, 0, GR_PW_LEN);
79716+
79717+ if (cryptres)
79718+ goto out;
79719+
79720+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79721+ if (sumptr[i] != tmpsumptr[i])
79722+ mismatched = 1;
79723+ else
79724+ dummy = 1; // waste a cycle
79725+
79726+ if (!mismatched)
79727+ retval = dummy - 1;
79728+
79729+out:
79730+ crypto_free_hash(tfm);
79731+
79732+ return retval;
79733+}
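
Note that the comparison loop above deliberately visits every word and accumulates the result in volatile flags instead of returning at the first mismatch, so the time taken does not reveal how many leading bytes of a guessed password were correct. A runnable userspace reduction of that idea:

/* Runnable reduction of the constant-time comparison used in chkpw():
 * scan the whole buffer, accumulate mismatches, never exit early. */
#include <stdio.h>
#include <stddef.h>

static int ct_memneq(const unsigned char *a, const unsigned char *b, size_t n)
{
    volatile unsigned char acc = 0;
    size_t i;

    for (i = 0; i < n; i++)
        acc |= a[i] ^ b[i];  /* control flow independent of the data */
    return acc != 0;         /* 0 means equal */
}

int main(void)
{
    unsigned char x[4] = "abcd", y[4] = "abce";
    printf("mismatch: %d\n", ct_memneq(x, y, sizeof(x)));
    return 0;
}
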
79734diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79735index 5bdab6b..9ae82fe 100644
79736--- a/include/asm-generic/4level-fixup.h
79737+++ b/include/asm-generic/4level-fixup.h
79738@@ -14,8 +14,10 @@
79739 #define pmd_alloc(mm, pud, address) \
79740 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79741 NULL: pmd_offset(pud, address))
79742+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79743
79744 #define pud_alloc(mm, pgd, address) (pgd)
79745+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79746 #define pud_offset(pgd, start) (pgd)
79747 #define pud_none(pud) 0
79748 #define pud_bad(pud) 0
79749diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79750index b7babf0..1e4b4f1 100644
79751--- a/include/asm-generic/atomic-long.h
79752+++ b/include/asm-generic/atomic-long.h
79753@@ -22,6 +22,12 @@
79754
79755 typedef atomic64_t atomic_long_t;
79756
79757+#ifdef CONFIG_PAX_REFCOUNT
79758+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79759+#else
79760+typedef atomic64_t atomic_long_unchecked_t;
79761+#endif
79762+
79763 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79764
79765 static inline long atomic_long_read(atomic_long_t *l)
79766@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79767 return (long)atomic64_read(v);
79768 }
79769
79770+#ifdef CONFIG_PAX_REFCOUNT
79771+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79772+{
79773+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79774+
79775+ return (long)atomic64_read_unchecked(v);
79776+}
79777+#endif
79778+
79779 static inline void atomic_long_set(atomic_long_t *l, long i)
79780 {
79781 atomic64_t *v = (atomic64_t *)l;
79782@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79783 atomic64_set(v, i);
79784 }
79785
79786+#ifdef CONFIG_PAX_REFCOUNT
79787+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79788+{
79789+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79790+
79791+ atomic64_set_unchecked(v, i);
79792+}
79793+#endif
79794+
79795 static inline void atomic_long_inc(atomic_long_t *l)
79796 {
79797 atomic64_t *v = (atomic64_t *)l;
79798@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79799 atomic64_inc(v);
79800 }
79801
79802+#ifdef CONFIG_PAX_REFCOUNT
79803+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79804+{
79805+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79806+
79807+ atomic64_inc_unchecked(v);
79808+}
79809+#endif
79810+
79811 static inline void atomic_long_dec(atomic_long_t *l)
79812 {
79813 atomic64_t *v = (atomic64_t *)l;
79814@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79815 atomic64_dec(v);
79816 }
79817
79818+#ifdef CONFIG_PAX_REFCOUNT
79819+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79820+{
79821+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79822+
79823+ atomic64_dec_unchecked(v);
79824+}
79825+#endif
79826+
79827 static inline void atomic_long_add(long i, atomic_long_t *l)
79828 {
79829 atomic64_t *v = (atomic64_t *)l;
79830@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79831 atomic64_add(i, v);
79832 }
79833
79834+#ifdef CONFIG_PAX_REFCOUNT
79835+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79836+{
79837+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79838+
79839+ atomic64_add_unchecked(i, v);
79840+}
79841+#endif
79842+
79843 static inline void atomic_long_sub(long i, atomic_long_t *l)
79844 {
79845 atomic64_t *v = (atomic64_t *)l;
79846@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79847 atomic64_sub(i, v);
79848 }
79849
79850+#ifdef CONFIG_PAX_REFCOUNT
79851+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79852+{
79853+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79854+
79855+ atomic64_sub_unchecked(i, v);
79856+}
79857+#endif
79858+
79859 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79860 {
79861 atomic64_t *v = (atomic64_t *)l;
79862@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79863 return atomic64_add_negative(i, v);
79864 }
79865
79866-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79867+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79868 {
79869 atomic64_t *v = (atomic64_t *)l;
79870
79871 return (long)atomic64_add_return(i, v);
79872 }
79873
79874+#ifdef CONFIG_PAX_REFCOUNT
79875+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79876+{
79877+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79878+
79879+ return (long)atomic64_add_return_unchecked(i, v);
79880+}
79881+#endif
79882+
79883 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79884 {
79885 atomic64_t *v = (atomic64_t *)l;
79886@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79887 return (long)atomic64_inc_return(v);
79888 }
79889
79890+#ifdef CONFIG_PAX_REFCOUNT
79891+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79892+{
79893+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79894+
79895+ return (long)atomic64_inc_return_unchecked(v);
79896+}
79897+#endif
79898+
79899 static inline long atomic_long_dec_return(atomic_long_t *l)
79900 {
79901 atomic64_t *v = (atomic64_t *)l;
79902@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79903
79904 typedef atomic_t atomic_long_t;
79905
79906+#ifdef CONFIG_PAX_REFCOUNT
79907+typedef atomic_unchecked_t atomic_long_unchecked_t;
79908+#else
79909+typedef atomic_t atomic_long_unchecked_t;
79910+#endif
79911+
79912 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79913 static inline long atomic_long_read(atomic_long_t *l)
79914 {
79915@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79916 return (long)atomic_read(v);
79917 }
79918
79919+#ifdef CONFIG_PAX_REFCOUNT
79920+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79921+{
79922+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79923+
79924+ return (long)atomic_read_unchecked(v);
79925+}
79926+#endif
79927+
79928 static inline void atomic_long_set(atomic_long_t *l, long i)
79929 {
79930 atomic_t *v = (atomic_t *)l;
79931@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79932 atomic_set(v, i);
79933 }
79934
79935+#ifdef CONFIG_PAX_REFCOUNT
79936+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79937+{
79938+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79939+
79940+ atomic_set_unchecked(v, i);
79941+}
79942+#endif
79943+
79944 static inline void atomic_long_inc(atomic_long_t *l)
79945 {
79946 atomic_t *v = (atomic_t *)l;
79947@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79948 atomic_inc(v);
79949 }
79950
79951+#ifdef CONFIG_PAX_REFCOUNT
79952+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79953+{
79954+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79955+
79956+ atomic_inc_unchecked(v);
79957+}
79958+#endif
79959+
79960 static inline void atomic_long_dec(atomic_long_t *l)
79961 {
79962 atomic_t *v = (atomic_t *)l;
79963@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79964 atomic_dec(v);
79965 }
79966
79967+#ifdef CONFIG_PAX_REFCOUNT
79968+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79969+{
79970+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79971+
79972+ atomic_dec_unchecked(v);
79973+}
79974+#endif
79975+
79976 static inline void atomic_long_add(long i, atomic_long_t *l)
79977 {
79978 atomic_t *v = (atomic_t *)l;
79979@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79980 atomic_add(i, v);
79981 }
79982
79983+#ifdef CONFIG_PAX_REFCOUNT
79984+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79985+{
79986+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79987+
79988+ atomic_add_unchecked(i, v);
79989+}
79990+#endif
79991+
79992 static inline void atomic_long_sub(long i, atomic_long_t *l)
79993 {
79994 atomic_t *v = (atomic_t *)l;
79995@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79996 atomic_sub(i, v);
79997 }
79998
79999+#ifdef CONFIG_PAX_REFCOUNT
80000+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80001+{
80002+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80003+
80004+ atomic_sub_unchecked(i, v);
80005+}
80006+#endif
80007+
80008 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80009 {
80010 atomic_t *v = (atomic_t *)l;
80011@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80012 return atomic_add_negative(i, v);
80013 }
80014
80015-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80016+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80017 {
80018 atomic_t *v = (atomic_t *)l;
80019
80020 return (long)atomic_add_return(i, v);
80021 }
80022
80023+#ifdef CONFIG_PAX_REFCOUNT
80024+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80025+{
80026+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80027+
80028+ return (long)atomic_add_return_unchecked(i, v);
80029+}
80030+
80031+#endif
80032+
80033 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80034 {
80035 atomic_t *v = (atomic_t *)l;
80036@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80037 return (long)atomic_inc_return(v);
80038 }
80039
80040+#ifdef CONFIG_PAX_REFCOUNT
80041+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80042+{
80043+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80044+
80045+ return (long)atomic_inc_return_unchecked(v);
80046+}
80047+#endif
80048+
80049 static inline long atomic_long_dec_return(atomic_long_t *l)
80050 {
80051 atomic_t *v = (atomic_t *)l;
80052@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80053
80054 #endif /* BITS_PER_LONG == 64 */
80055
80056+#ifdef CONFIG_PAX_REFCOUNT
80057+static inline void pax_refcount_needs_these_functions(void)
80058+{
80059+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
80060+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
80061+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
80062+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
80063+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
80064+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
80065+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
80066+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
80067+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
80068+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
80069+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
80070+#ifdef CONFIG_X86
80071+ atomic_clear_mask_unchecked(0, NULL);
80072+ atomic_set_mask_unchecked(0, NULL);
80073+#endif
80074+
80075+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
80076+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
80077+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
80078+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
80079+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
80080+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
80081+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
80082+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
80083+}
80084+#else
80085+#define atomic_read_unchecked(v) atomic_read(v)
80086+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
80087+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
80088+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
80089+#define atomic_inc_unchecked(v) atomic_inc(v)
80090+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
80091+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
80092+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
80093+#define atomic_dec_unchecked(v) atomic_dec(v)
80094+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
80095+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
80096+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
80097+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
80098+
80099+#define atomic_long_read_unchecked(v) atomic_long_read(v)
80100+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
80101+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
80102+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
80103+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
80104+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
80105+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
80106+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
80107+#endif
80108+
80109 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
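
The pattern throughout this header: under PAX_REFCOUNT the ordinary atomics trap on overflow, and the *_unchecked variants opt out for counters where wraparound is expected and harmless. A sketch of the intended split (struct and field names are illustrative):

/* Sketch of the checked/unchecked split under PAX_REFCOUNT: reference
 * counts keep the overflow-trapping type, pure statistics opt out. */
#include <linux/atomic.h>

struct demo_obj {
    atomic_long_t           refcnt;     /* overflow here is a bug: trap */
    atomic_long_unchecked_t rx_packets; /* wraparound is harmless */
};

static void demo_get(struct demo_obj *o)
{
    atomic_long_inc(&o->refcnt);               /* checked */
}

static void demo_count_rx(struct demo_obj *o)
{
    atomic_long_inc_unchecked(&o->rx_packets); /* never traps */
}
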
80110diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
80111index 30ad9c8..c70c170 100644
80112--- a/include/asm-generic/atomic64.h
80113+++ b/include/asm-generic/atomic64.h
80114@@ -16,6 +16,8 @@ typedef struct {
80115 long long counter;
80116 } atomic64_t;
80117
80118+typedef atomic64_t atomic64_unchecked_t;
80119+
80120 #define ATOMIC64_INIT(i) { (i) }
80121
80122 extern long long atomic64_read(const atomic64_t *v);
80123@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
80124 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
80125 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
80126
80127+#define atomic64_read_unchecked(v) atomic64_read(v)
80128+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
80129+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
80130+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
80131+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
80132+#define atomic64_inc_unchecked(v) atomic64_inc(v)
80133+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
80134+#define atomic64_dec_unchecked(v) atomic64_dec(v)
80135+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
80136+
80137 #endif /* _ASM_GENERIC_ATOMIC64_H */
80138diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
80139index f5c40b0..e902f9d 100644
80140--- a/include/asm-generic/barrier.h
80141+++ b/include/asm-generic/barrier.h
80142@@ -82,7 +82,7 @@
80143 do { \
80144 compiletime_assert_atomic_type(*p); \
80145 smp_mb(); \
80146- ACCESS_ONCE(*p) = (v); \
80147+ ACCESS_ONCE_RW(*p) = (v); \
80148 } while (0)
80149
80150 #define smp_load_acquire(p) \
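
ACCESS_ONCE_RW exists because this patch elsewhere (linux/compiler.h) redefines ACCESS_ONCE to go through a const volatile view, which is fine for loads but rejects stores at compile time. A reduction of the two macros, assuming those definitions:

/* Reduction of the ACCESS_ONCE/ACCESS_ONCE_RW pair as this patch
 * defines it: reads go through a const volatile view, stores need the
 * explicitly writable variant. DEMO_* names avoid clashing with the
 * real macros. */
#define DEMO_ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
#define DEMO_ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int flag;

static void demo(void)
{
    int v = DEMO_ACCESS_ONCE(flag);     /* load: const view is fine */

    DEMO_ACCESS_ONCE_RW(flag) = v + 1;  /* store: must use the RW form */
}
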
80151diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
80152index a60a7cc..0fe12f2 100644
80153--- a/include/asm-generic/bitops/__fls.h
80154+++ b/include/asm-generic/bitops/__fls.h
80155@@ -9,7 +9,7 @@
80156 *
80157 * Undefined if no set bit exists, so code should check against 0 first.
80158 */
80159-static __always_inline unsigned long __fls(unsigned long word)
80160+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
80161 {
80162 int num = BITS_PER_LONG - 1;
80163
80164diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
80165index 0576d1f..dad6c71 100644
80166--- a/include/asm-generic/bitops/fls.h
80167+++ b/include/asm-generic/bitops/fls.h
80168@@ -9,7 +9,7 @@
80169 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
80170 */
80171
80172-static __always_inline int fls(int x)
80173+static __always_inline int __intentional_overflow(-1) fls(int x)
80174 {
80175 int r = 32;
80176
80177diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
80178index b097cf8..3d40e14 100644
80179--- a/include/asm-generic/bitops/fls64.h
80180+++ b/include/asm-generic/bitops/fls64.h
80181@@ -15,7 +15,7 @@
80182 * at position 64.
80183 */
80184 #if BITS_PER_LONG == 32
80185-static __always_inline int fls64(__u64 x)
80186+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80187 {
80188 __u32 h = x >> 32;
80189 if (h)
80190@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
80191 return fls(x);
80192 }
80193 #elif BITS_PER_LONG == 64
80194-static __always_inline int fls64(__u64 x)
80195+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80196 {
80197 if (x == 0)
80198 return 0;
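
__intentional_overflow(-1) is an annotation for the size_overflow GCC plugin added by this patch: it marks these helpers as deliberately overflow-prone so the instrumentation skips them, leaving their behavior unchanged. A runnable check of the fls() contract quoted in the hunk above (fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32), using the same shift-based scan as the generic kernel version:

/* Runnable check of the documented fls() contract annotated above. */
#include <stdio.h>

static int demo_fls(unsigned int x)
{
    int r = 32;

    if (!x)
        return 0;
    if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
    if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
    if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
    if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
    if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
    return r;
}

int main(void)
{
    printf("%d %d %d\n", demo_fls(0), demo_fls(1), demo_fls(0x80000000u));
    return 0; /* prints: 0 1 32 */
}
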
80199diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
80200index 1bfcfe5..e04c5c9 100644
80201--- a/include/asm-generic/cache.h
80202+++ b/include/asm-generic/cache.h
80203@@ -6,7 +6,7 @@
80204 * cache lines need to provide their own cache.h.
80205 */
80206
80207-#define L1_CACHE_SHIFT 5
80208-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
80209+#define L1_CACHE_SHIFT 5UL
80210+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
80211
80212 #endif /* __ASM_GENERIC_CACHE_H */
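
The UL suffix keeps L1_CACHE_BYTES and everything derived from it in unsigned long arithmetic. A shift of 5 cannot overflow either way; the suffix matters for the general pattern, where a plain int literal overflows before any widening happens. A runnable illustration with a larger shift:

/* Why (1UL << SHIFT) is safer than (1 << SHIFT): with a plain int
 * literal the shift is done in 32-bit int first, which is the very
 * bug being demonstrated here. The cache code's shift of 5 is safe
 * either way; the UL suffix keeps derived expressions in unsigned
 * long arithmetic. */
#include <stdio.h>

int main(void)
{
    unsigned long bad  = 1 << 31;   /* int overflow, then sign-extends */
    unsigned long good = 1UL << 31; /* done in unsigned long: 0x80000000 */

    printf("bad=%lx good=%lx\n", bad, good);
    return 0;
}
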
80213diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
80214index 0d68a1e..b74a761 100644
80215--- a/include/asm-generic/emergency-restart.h
80216+++ b/include/asm-generic/emergency-restart.h
80217@@ -1,7 +1,7 @@
80218 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
80219 #define _ASM_GENERIC_EMERGENCY_RESTART_H
80220
80221-static inline void machine_emergency_restart(void)
80222+static inline __noreturn void machine_emergency_restart(void)
80223 {
80224 machine_restart(NULL);
80225 }
80226diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
80227index 90f99c7..00ce236 100644
80228--- a/include/asm-generic/kmap_types.h
80229+++ b/include/asm-generic/kmap_types.h
80230@@ -2,9 +2,9 @@
80231 #define _ASM_GENERIC_KMAP_TYPES_H
80232
80233 #ifdef __WITH_KM_FENCE
80234-# define KM_TYPE_NR 41
80235+# define KM_TYPE_NR 42
80236 #else
80237-# define KM_TYPE_NR 20
80238+# define KM_TYPE_NR 21
80239 #endif
80240
80241 #endif
80242diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
80243index 9ceb03b..62b0b8f 100644
80244--- a/include/asm-generic/local.h
80245+++ b/include/asm-generic/local.h
80246@@ -23,24 +23,37 @@ typedef struct
80247 atomic_long_t a;
80248 } local_t;
80249
80250+typedef struct {
80251+ atomic_long_unchecked_t a;
80252+} local_unchecked_t;
80253+
80254 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
80255
80256 #define local_read(l) atomic_long_read(&(l)->a)
80257+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
80258 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
80259+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
80260 #define local_inc(l) atomic_long_inc(&(l)->a)
80261+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
80262 #define local_dec(l) atomic_long_dec(&(l)->a)
80263+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
80264 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
80265+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
80266 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
80267+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
80268
80269 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
80270 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
80271 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
80272 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
80273 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
80274+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
80275 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
80276 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
80277+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
80278
80279 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80280+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80281 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
80282 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
80283 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
80284diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
80285index 725612b..9cc513a 100644
80286--- a/include/asm-generic/pgtable-nopmd.h
80287+++ b/include/asm-generic/pgtable-nopmd.h
80288@@ -1,14 +1,19 @@
80289 #ifndef _PGTABLE_NOPMD_H
80290 #define _PGTABLE_NOPMD_H
80291
80292-#ifndef __ASSEMBLY__
80293-
80294 #include <asm-generic/pgtable-nopud.h>
80295
80296-struct mm_struct;
80297-
80298 #define __PAGETABLE_PMD_FOLDED
80299
80300+#define PMD_SHIFT PUD_SHIFT
80301+#define PTRS_PER_PMD 1
80302+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
80303+#define PMD_MASK (~(PMD_SIZE-1))
80304+
80305+#ifndef __ASSEMBLY__
80306+
80307+struct mm_struct;
80308+
80309 /*
80310 * Having the pmd type consist of a pud gets the size right, and allows
80311 * us to conceptually access the pud entry that this pmd is folded into
80312@@ -16,11 +21,6 @@ struct mm_struct;
80313 */
80314 typedef struct { pud_t pud; } pmd_t;
80315
80316-#define PMD_SHIFT PUD_SHIFT
80317-#define PTRS_PER_PMD 1
80318-#define PMD_SIZE (1UL << PMD_SHIFT)
80319-#define PMD_MASK (~(PMD_SIZE-1))
80320-
80321 /*
80322 * The "pud_xxx()" functions here are trivial for a folded two-level
80323 * setup: the pmd is never bad, and a pmd always exists (as it's folded
80324diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
80325index 810431d..0ec4804f 100644
80326--- a/include/asm-generic/pgtable-nopud.h
80327+++ b/include/asm-generic/pgtable-nopud.h
80328@@ -1,10 +1,15 @@
80329 #ifndef _PGTABLE_NOPUD_H
80330 #define _PGTABLE_NOPUD_H
80331
80332-#ifndef __ASSEMBLY__
80333-
80334 #define __PAGETABLE_PUD_FOLDED
80335
80336+#define PUD_SHIFT PGDIR_SHIFT
80337+#define PTRS_PER_PUD 1
80338+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
80339+#define PUD_MASK (~(PUD_SIZE-1))
80340+
80341+#ifndef __ASSEMBLY__
80342+
80343 /*
80344 * Having the pud type consist of a pgd gets the size right, and allows
80345 * us to conceptually access the pgd entry that this pud is folded into
80346@@ -12,11 +17,6 @@
80347 */
80348 typedef struct { pgd_t pgd; } pud_t;
80349
80350-#define PUD_SHIFT PGDIR_SHIFT
80351-#define PTRS_PER_PUD 1
80352-#define PUD_SIZE (1UL << PUD_SHIFT)
80353-#define PUD_MASK (~(PUD_SIZE-1))
80354-
80355 /*
80356 * The "pgd_xxx()" functions here are trivial for a folded two-level
80357 * setup: the pud is never bad, and a pud always exists (as it's folded
80358@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
80359 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
80360
80361 #define pgd_populate(mm, pgd, pud) do { } while (0)
80362+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
80363 /*
80364 * (puds are folded into pgds so this doesn't get actually called,
80365 * but the define is needed for a generic inline function.)
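
Both pgtable hunks move the folded-level constants out of the #ifndef __ASSEMBLY__ guard and switch 1UL to _AC(1,UL), so the same #defines work from C and from assembler. A reduction of the _AC() trick from <uapi/linux/const.h>, with demo names but the same mechanics:

/* Reduction of _AC(): the UL suffix is token-pasted on only when not
 * assembling, so PMD_SIZE-style constants are usable in both worlds. */
#ifdef __ASSEMBLY__
#define DEMO_AC(X, Y)   X               /* assembler: bare constant */
#else
#define __DEMO_AC(X, Y) (X##Y)
#define DEMO_AC(X, Y)   __DEMO_AC(X, Y) /* C: expands to (1UL) */
#endif

#define DEMO_PMD_SHIFT  21
#define DEMO_PMD_SIZE   (DEMO_AC(1, UL) << DEMO_PMD_SHIFT)
#define DEMO_PMD_MASK   (~(DEMO_PMD_SIZE - 1))
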
80366diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
80367index 4d46085..f4e92ef 100644
80368--- a/include/asm-generic/pgtable.h
80369+++ b/include/asm-generic/pgtable.h
80370@@ -689,6 +689,22 @@ static inline int pmd_protnone(pmd_t pmd)
80371 }
80372 #endif /* CONFIG_NUMA_BALANCING */
80373
80374+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
80375+#ifdef CONFIG_PAX_KERNEXEC
80376+#error KERNEXEC requires pax_open_kernel
80377+#else
80378+static inline unsigned long pax_open_kernel(void) { return 0; }
80379+#endif
80380+#endif
80381+
80382+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
80383+#ifdef CONFIG_PAX_KERNEXEC
80384+#error KERNEXEC requires pax_close_kernel
80385+#else
80386+static inline unsigned long pax_close_kernel(void) { return 0; }
80387+#endif
80388+#endif
80389+
80390 #endif /* CONFIG_MMU */
80391
80392 #endif /* !__ASSEMBLY__ */
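
Architectures implementing KERNEXEC supply real pax_open_kernel()/pax_close_kernel() (on x86 they toggle CR0.WP); the stubs above make the bracket compile to nothing everywhere else. A sketch of the pattern as used at call sites in this patch, together with the __read_only attribute added in the linux/cache.h hunk later in this section:

/* Sketch of the open/close bracket; on configs without KERNEXEC both
 * calls are the no-op stubs defined above, and __read_only comes from
 * the linux/cache.h change further below. */
static int demo_value __read_only = 0;

static void demo_update(int v)
{
    pax_open_kernel();   /* e.g. clear CR0.WP on x86 */
    demo_value = v;      /* briefly-writable store to read-only data */
    pax_close_kernel();  /* restore write protection */
}
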
80393diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80394index 72d8803..cb9749c 100644
80395--- a/include/asm-generic/uaccess.h
80396+++ b/include/asm-generic/uaccess.h
80397@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80398 return __clear_user(to, n);
80399 }
80400
80401+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80402+#ifdef CONFIG_PAX_MEMORY_UDEREF
80403+#error UDEREF requires pax_open_userland
80404+#else
80405+static inline unsigned long pax_open_userland(void) { return 0; }
80406+#endif
80407+#endif
80408+
80409+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80410+#ifdef CONFIG_PAX_MEMORY_UDEREF
80411+#error UDEREF requires pax_close_userland
80412+#else
80413+static inline unsigned long pax_close_userland(void) { return 0; }
80414+#endif
80415+#endif
80416+
80417 #endif /* __ASM_GENERIC_UACCESS_H */
80418diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80419index ac78910..775a306 100644
80420--- a/include/asm-generic/vmlinux.lds.h
80421+++ b/include/asm-generic/vmlinux.lds.h
80422@@ -234,6 +234,7 @@
80423 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80424 VMLINUX_SYMBOL(__start_rodata) = .; \
80425 *(.rodata) *(.rodata.*) \
80426+ *(.data..read_only) \
80427 *(__vermagic) /* Kernel version magic */ \
80428 . = ALIGN(8); \
80429 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80430@@ -727,17 +728,18 @@
80431 * section in the linker script will go there too. @phdr should have
80432 * a leading colon.
80433 *
80434- * Note that this macros defines __per_cpu_load as an absolute symbol.
80435+ * Note that this macros defines per_cpu_load as an absolute symbol.
80436 * If there is no need to put the percpu section at a predetermined
80437 * address, use PERCPU_SECTION.
80438 */
80439 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80440- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80441- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80442+ per_cpu_load = .; \
80443+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80444 - LOAD_OFFSET) { \
80445+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80446 PERCPU_INPUT(cacheline) \
80447 } phdr \
80448- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80449+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80450
80451 /**
80452 * PERCPU_SECTION - define output section for percpu area, simple version
80453diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80454index 623a59c..1e79ab9 100644
80455--- a/include/crypto/algapi.h
80456+++ b/include/crypto/algapi.h
80457@@ -34,7 +34,7 @@ struct crypto_type {
80458 unsigned int maskclear;
80459 unsigned int maskset;
80460 unsigned int tfmsize;
80461-};
80462+} __do_const;
80463
80464 struct crypto_instance {
80465 struct crypto_alg alg;
80466diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80467index e928625..78c2c05 100644
80468--- a/include/drm/drmP.h
80469+++ b/include/drm/drmP.h
80470@@ -59,6 +59,7 @@
80471
80472 #include <asm/mman.h>
80473 #include <asm/pgalloc.h>
80474+#include <asm/local.h>
80475 #include <asm/uaccess.h>
80476
80477 #include <uapi/drm/drm.h>
80478@@ -224,10 +225,12 @@ void drm_err(const char *format, ...);
80479 * \param cmd command.
80480 * \param arg argument.
80481 */
80482-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80483+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80484+ struct drm_file *file_priv);
80485+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80486 struct drm_file *file_priv);
80487
80488-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80489+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80490 unsigned long arg);
80491
80492 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80493@@ -243,10 +246,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80494 struct drm_ioctl_desc {
80495 unsigned int cmd;
80496 int flags;
80497- drm_ioctl_t *func;
80498+ drm_ioctl_t func;
80499 unsigned int cmd_drv;
80500 const char *name;
80501-};
80502+} __do_const;
80503
80504 /**
80505 * Creates a driver or general drm_ioctl_desc array entry for the given
80506@@ -632,7 +635,8 @@ struct drm_info_list {
80507 int (*show)(struct seq_file*, void*); /** show callback */
80508 u32 driver_features; /**< Required driver features for this entry */
80509 void *data;
80510-};
80511+} __do_const;
80512+typedef struct drm_info_list __no_const drm_info_list_no_const;
80513
80514 /**
80515 * debugfs node structure. This structure represents a debugfs file.
80516@@ -716,7 +720,7 @@ struct drm_device {
80517
80518 /** \name Usage Counters */
80519 /*@{ */
80520- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80521+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80522 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80523 int buf_use; /**< Buffers in use -- cannot alloc */
80524 atomic_t buf_alloc; /**< Buffer allocation in progress */
80525diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80526index c250a22..59d2094 100644
80527--- a/include/drm/drm_crtc_helper.h
80528+++ b/include/drm/drm_crtc_helper.h
80529@@ -160,7 +160,7 @@ struct drm_encoder_helper_funcs {
80530 int (*atomic_check)(struct drm_encoder *encoder,
80531 struct drm_crtc_state *crtc_state,
80532 struct drm_connector_state *conn_state);
80533-};
80534+} __no_const;
80535
80536 /**
80537 * struct drm_connector_helper_funcs - helper operations for connectors
80538diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80539index d016dc5..3951fe0 100644
80540--- a/include/drm/i915_pciids.h
80541+++ b/include/drm/i915_pciids.h
80542@@ -37,7 +37,7 @@
80543 */
80544 #define INTEL_VGA_DEVICE(id, info) { \
80545 0x8086, id, \
80546- ~0, ~0, \
80547+ PCI_ANY_ID, PCI_ANY_ID, \
80548 0x030000, 0xff0000, \
80549 (unsigned long) info }
80550
80551diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80552index 72dcbe8..8db58d7 100644
80553--- a/include/drm/ttm/ttm_memory.h
80554+++ b/include/drm/ttm/ttm_memory.h
80555@@ -48,7 +48,7 @@
80556
80557 struct ttm_mem_shrink {
80558 int (*do_shrink) (struct ttm_mem_shrink *);
80559-};
80560+} __no_const;
80561
80562 /**
80563 * struct ttm_mem_global - Global memory accounting structure.
80564diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80565index 49a8284..9643967 100644
80566--- a/include/drm/ttm/ttm_page_alloc.h
80567+++ b/include/drm/ttm/ttm_page_alloc.h
80568@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80569 */
80570 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80571
80572+struct device;
80573 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80574 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80575
80576diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80577index 4b840e8..155d235 100644
80578--- a/include/keys/asymmetric-subtype.h
80579+++ b/include/keys/asymmetric-subtype.h
80580@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80581 /* Verify the signature on a key of this subtype (optional) */
80582 int (*verify_signature)(const struct key *key,
80583 const struct public_key_signature *sig);
80584-};
80585+} __do_const;
80586
80587 /**
80588 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80589diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80590index c1da539..1dcec55 100644
80591--- a/include/linux/atmdev.h
80592+++ b/include/linux/atmdev.h
80593@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80594 #endif
80595
80596 struct k_atm_aal_stats {
80597-#define __HANDLE_ITEM(i) atomic_t i
80598+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80599 __AAL_STAT_ITEMS
80600 #undef __HANDLE_ITEM
80601 };
80602@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80603 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80604 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80605 struct module *owner;
80606-};
80607+} __do_const ;
80608
80609 struct atmphy_ops {
80610 int (*start)(struct atm_dev *dev);
80611diff --git a/include/linux/atomic.h b/include/linux/atomic.h
80612index 5b08a85..60922fb 100644
80613--- a/include/linux/atomic.h
80614+++ b/include/linux/atomic.h
80615@@ -12,7 +12,7 @@
80616 * Atomically adds @a to @v, so long as @v was not already @u.
80617 * Returns non-zero if @v was not @u, and zero otherwise.
80618 */
80619-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80620+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80621 {
80622 return __atomic_add_unless(v, a, u) != u;
80623 }
80624diff --git a/include/linux/audit.h b/include/linux/audit.h
80625index c2e7e3a..8bfc0e1 100644
80626--- a/include/linux/audit.h
80627+++ b/include/linux/audit.h
80628@@ -223,7 +223,7 @@ static inline void audit_ptrace(struct task_struct *t)
80629 extern unsigned int audit_serial(void);
80630 extern int auditsc_get_stamp(struct audit_context *ctx,
80631 struct timespec *t, unsigned int *serial);
80632-extern int audit_set_loginuid(kuid_t loginuid);
80633+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80634
80635 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80636 {
80637diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80638index 576e463..28fd926 100644
80639--- a/include/linux/binfmts.h
80640+++ b/include/linux/binfmts.h
80641@@ -44,7 +44,7 @@ struct linux_binprm {
80642 unsigned interp_flags;
80643 unsigned interp_data;
80644 unsigned long loader, exec;
80645-};
80646+} __randomize_layout;
80647
80648 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80649 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80650@@ -77,8 +77,10 @@ struct linux_binfmt {
80651 int (*load_binary)(struct linux_binprm *);
80652 int (*load_shlib)(struct file *);
80653 int (*core_dump)(struct coredump_params *cprm);
80654+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80655+ void (*handle_mmap)(struct file *);
80656 unsigned long min_coredump; /* minimal dump size */
80657-};
80658+} __do_const __randomize_layout;
80659
80660 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80661
80662diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80663index dbfbf49..10be372 100644
80664--- a/include/linux/bitmap.h
80665+++ b/include/linux/bitmap.h
80666@@ -299,7 +299,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80667 return __bitmap_full(src, nbits);
80668 }
80669
80670-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80671+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80672 {
80673 if (small_const_nbits(nbits))
80674 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80675diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80676index 5d858e0..336c1d9 100644
80677--- a/include/linux/bitops.h
80678+++ b/include/linux/bitops.h
80679@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80680 * @word: value to rotate
80681 * @shift: bits to roll
80682 */
80683-static inline __u32 rol32(__u32 word, unsigned int shift)
80684+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80685 {
80686 return (word << shift) | (word >> (32 - shift));
80687 }
80688@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80689 * @word: value to rotate
80690 * @shift: bits to roll
80691 */
80692-static inline __u32 ror32(__u32 word, unsigned int shift)
80693+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80694 {
80695 return (word >> shift) | (word << (32 - shift));
80696 }
80697@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80698 return (__s32)(value << shift) >> shift;
80699 }
80700
80701-static inline unsigned fls_long(unsigned long l)
80702+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80703 {
80704 if (sizeof(l) == 4)
80705 return fls(l);
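
The rotate helpers get the annotation because word << shift is meant to discard high bits. A runnable check that rol32() and ror32() invert each other, valid for 0 < shift < 32 (shift == 0 would hit the undefined 32-bit shift in the other half):

/* Runnable check of the rotate helpers annotated above. */
#include <stdio.h>

typedef unsigned int u32;

static u32 rol32(u32 word, unsigned int shift)
{
    return (word << shift) | (word >> (32 - shift));
}

static u32 ror32(u32 word, unsigned int shift)
{
    return (word >> shift) | (word << (32 - shift));
}

int main(void)
{
    u32 x = 0xdeadbeef;
    printf("%x\n", ror32(rol32(x, 13), 13)); /* prints deadbeef */
    return 0;
}
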
80706diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80707index 7f9a516..8889453 100644
80708--- a/include/linux/blkdev.h
80709+++ b/include/linux/blkdev.h
80710@@ -1616,7 +1616,7 @@ struct block_device_operations {
80711 /* this callback is with swap_lock and sometimes page table lock held */
80712 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80713 struct module *owner;
80714-};
80715+} __do_const;
80716
80717 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80718 unsigned long);
80719diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80720index afc1343..9735539 100644
80721--- a/include/linux/blktrace_api.h
80722+++ b/include/linux/blktrace_api.h
80723@@ -25,7 +25,7 @@ struct blk_trace {
80724 struct dentry *dropped_file;
80725 struct dentry *msg_file;
80726 struct list_head running_list;
80727- atomic_t dropped;
80728+ atomic_unchecked_t dropped;
80729 };
80730
80731 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80732diff --git a/include/linux/cache.h b/include/linux/cache.h
80733index 17e7e82..1d7da26 100644
80734--- a/include/linux/cache.h
80735+++ b/include/linux/cache.h
80736@@ -16,6 +16,14 @@
80737 #define __read_mostly
80738 #endif
80739
80740+#ifndef __read_only
80741+#ifdef CONFIG_PAX_KERNEXEC
80742+#error KERNEXEC requires __read_only
80743+#else
80744+#define __read_only __read_mostly
80745+#endif
80746+#endif
80747+
80748 #ifndef ____cacheline_aligned
80749 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80750 #endif
80751diff --git a/include/linux/capability.h b/include/linux/capability.h
80752index aa93e5e..985a1b0 100644
80753--- a/include/linux/capability.h
80754+++ b/include/linux/capability.h
80755@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80756 extern bool capable(int cap);
80757 extern bool ns_capable(struct user_namespace *ns, int cap);
80758 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80759+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80760 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80761+extern bool capable_nolog(int cap);
80762+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80763
80764 /* audit system wants to get cap info from files as well */
80765 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80766
80767+extern int is_privileged_binary(const struct dentry *dentry);
80768+
80769 #endif /* !_LINUX_CAPABILITY_H */
80770diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80771index 8609d57..86e4d79 100644
80772--- a/include/linux/cdrom.h
80773+++ b/include/linux/cdrom.h
80774@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80775
80776 /* driver specifications */
80777 const int capability; /* capability flags */
80778- int n_minors; /* number of active minor devices */
80779 /* handle uniform packets for scsi type devices (scsi,atapi) */
80780 int (*generic_packet) (struct cdrom_device_info *,
80781 struct packet_command *);
80782diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80783index 4ce9056..86caac6 100644
80784--- a/include/linux/cleancache.h
80785+++ b/include/linux/cleancache.h
80786@@ -31,7 +31,7 @@ struct cleancache_ops {
80787 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80788 void (*invalidate_inode)(int, struct cleancache_filekey);
80789 void (*invalidate_fs)(int);
80790-};
80791+} __no_const;
80792
80793 extern struct cleancache_ops *
80794 cleancache_register_ops(struct cleancache_ops *ops);
80795diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80796index 5591ea7..61b77ce 100644
80797--- a/include/linux/clk-provider.h
80798+++ b/include/linux/clk-provider.h
80799@@ -195,6 +195,7 @@ struct clk_ops {
80800 void (*init)(struct clk_hw *hw);
80801 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80802 };
80803+typedef struct clk_ops __no_const clk_ops_no_const;
80804
80805 /**
80806 * struct clk_init_data - holds init data that's common to all clocks and is
80807diff --git a/include/linux/compat.h b/include/linux/compat.h
80808index ab25814..9026bca 100644
80809--- a/include/linux/compat.h
80810+++ b/include/linux/compat.h
80811@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80812 compat_size_t __user *len_ptr);
80813
80814 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80815-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80816+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80817 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80818 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80819 compat_ssize_t msgsz, int msgflg);
80820@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80821 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80822 compat_ulong_t addr, compat_ulong_t data);
80823 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80824- compat_long_t addr, compat_long_t data);
80825+ compat_ulong_t addr, compat_ulong_t data);
80826
80827 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80828 /*
80829diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80830index 769e198..f670585 100644
80831--- a/include/linux/compiler-gcc4.h
80832+++ b/include/linux/compiler-gcc4.h
80833@@ -39,9 +39,34 @@
80834 # define __compiletime_warning(message) __attribute__((warning(message)))
80835 # define __compiletime_error(message) __attribute__((error(message)))
80836 #endif /* __CHECKER__ */
80837+
80838+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80839+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80840+#define __bos0(ptr) __bos((ptr), 0)
80841+#define __bos1(ptr) __bos((ptr), 1)
80842 #endif /* GCC_VERSION >= 40300 */
80843
80844 #if GCC_VERSION >= 40500
80845+
80846+#ifdef RANDSTRUCT_PLUGIN
80847+#define __randomize_layout __attribute__((randomize_layout))
80848+#define __no_randomize_layout __attribute__((no_randomize_layout))
80849+#endif
80850+
80851+#ifdef CONSTIFY_PLUGIN
80852+#define __no_const __attribute__((no_const))
80853+#define __do_const __attribute__((do_const))
80854+#endif
80855+
80856+#ifdef SIZE_OVERFLOW_PLUGIN
80857+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80858+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80859+#endif
80860+
80861+#ifdef LATENT_ENTROPY_PLUGIN
80862+#define __latent_entropy __attribute__((latent_entropy))
80863+#endif
80864+
80865 /*
80866 * Mark a position in code as unreachable. This can be used to
80867 * suppress control flow warnings after asm blocks that transfer
80868diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80869index efee493..c388661 100644
80870--- a/include/linux/compiler-gcc5.h
80871+++ b/include/linux/compiler-gcc5.h
80872@@ -28,6 +28,25 @@
80873 # define __compiletime_error(message) __attribute__((error(message)))
80874 #endif /* __CHECKER__ */
80875
80876+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80877+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80878+#define __bos0(ptr) __bos((ptr), 0)
80879+#define __bos1(ptr) __bos((ptr), 1)
80880+
80881+#ifdef CONSTIFY_PLUGIN
80882+#define __no_const __attribute__((no_const))
80883+#define __do_const __attribute__((do_const))
80884+#endif
80885+
80886+#ifdef SIZE_OVERFLOW_PLUGIN
80887+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80888+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80889+#endif
80890+
80891+#ifdef LATENT_ENTROPY_PLUGIN
80892+#define __latent_entropy __attribute__((latent_entropy))
80893+#endif
80894+
80895 /*
80896 * Mark a position in code as unreachable. This can be used to
80897 * suppress control flow warnings after asm blocks that transfer
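
__alloc_size tells gcc how large the object returned by an allocator is, and __bos/__bos0/__bos1 wrap __builtin_object_size; together they let fortified copy routines (and the usercopy checks elsewhere in this patch) reject out-of-bounds sizes at compile time. A standalone userspace sketch of the mechanism:

#include <stdlib.h>
#include <string.h>

#define __alloc_size(...)	__attribute__((alloc_size(__VA_ARGS__)))
#define __bos0(ptr)		__builtin_object_size((ptr), 0)

static void *xmalloc(size_t n) __alloc_size(1);
static void *xmalloc(size_t n) { return malloc(n); }

int main(void)
{
	char *p = xmalloc(16);

	/* With optimization, gcc folds __bos0(p) to 16 thanks to the
	 * alloc_size attribute, so a fortified memcpy wrapper can compare
	 * the requested length against it before the copy happens. */
	if (__bos0(p) != (size_t)-1 && 16 <= __bos0(p))
		memcpy(p, "0123456789abcdef", 16);
	free(p);
	return 0;
}
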
80898diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80899index 1b45e4a..33028cd 100644
80900--- a/include/linux/compiler.h
80901+++ b/include/linux/compiler.h
80902@@ -5,11 +5,14 @@
80903
80904 #ifdef __CHECKER__
80905 # define __user __attribute__((noderef, address_space(1)))
80906+# define __force_user __force __user
80907 # define __kernel __attribute__((address_space(0)))
80908+# define __force_kernel __force __kernel
80909 # define __safe __attribute__((safe))
80910 # define __force __attribute__((force))
80911 # define __nocast __attribute__((nocast))
80912 # define __iomem __attribute__((noderef, address_space(2)))
80913+# define __force_iomem __force __iomem
80914 # define __must_hold(x) __attribute__((context(x,1,1)))
80915 # define __acquires(x) __attribute__((context(x,0,1)))
80916 # define __releases(x) __attribute__((context(x,1,0)))
80917@@ -17,20 +20,37 @@
80918 # define __release(x) __context__(x,-1)
80919 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80920 # define __percpu __attribute__((noderef, address_space(3)))
80921+# define __force_percpu __force __percpu
80922 #ifdef CONFIG_SPARSE_RCU_POINTER
80923 # define __rcu __attribute__((noderef, address_space(4)))
80924+# define __force_rcu __force __rcu
80925 #else
80926 # define __rcu
80927+# define __force_rcu
80928 #endif
80929 extern void __chk_user_ptr(const volatile void __user *);
80930 extern void __chk_io_ptr(const volatile void __iomem *);
80931 #else
80932-# define __user
80933-# define __kernel
80934+# ifdef CHECKER_PLUGIN
80935+//# define __user
80936+//# define __force_user
80937+//# define __kernel
80938+//# define __force_kernel
80939+# else
80940+# ifdef STRUCTLEAK_PLUGIN
80941+# define __user __attribute__((user))
80942+# else
80943+# define __user
80944+# endif
80945+# define __force_user
80946+# define __kernel
80947+# define __force_kernel
80948+# endif
80949 # define __safe
80950 # define __force
80951 # define __nocast
80952 # define __iomem
80953+# define __force_iomem
80954 # define __chk_user_ptr(x) (void)0
80955 # define __chk_io_ptr(x) (void)0
80956 # define __builtin_warning(x, y...) (1)
80957@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80958 # define __release(x) (void)0
80959 # define __cond_lock(x,c) (c)
80960 # define __percpu
80961+# define __force_percpu
80962 # define __rcu
80963+# define __force_rcu
80964 #endif
80965
80966 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80967@@ -205,32 +227,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80968 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80969 {
80970 switch (size) {
80971- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80972- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80973- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80974+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80975+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80976+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80977 #ifdef CONFIG_64BIT
80978- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80979+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80980 #endif
80981 default:
80982 barrier();
80983- __builtin_memcpy((void *)res, (const void *)p, size);
80984+ __builtin_memcpy(res, (const void *)p, size);
80985 data_access_exceeds_word_size();
80986 barrier();
80987 }
80988 }
80989
80990-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80991+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80992 {
80993 switch (size) {
80994- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80995- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80996- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80997+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80998+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80999+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
81000 #ifdef CONFIG_64BIT
81001- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
81002+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
81003 #endif
81004 default:
81005 barrier();
81006- __builtin_memcpy((void *)p, (const void *)res, size);
81007+ __builtin_memcpy((void *)p, res, size);
81008 data_access_exceeds_word_size();
81009 barrier();
81010 }
81011@@ -364,6 +386,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81012 # define __attribute_const__ /* unimplemented */
81013 #endif
81014
81015+#ifndef __randomize_layout
81016+# define __randomize_layout
81017+#endif
81018+
81019+#ifndef __no_randomize_layout
81020+# define __no_randomize_layout
81021+#endif
81022+
81023+#ifndef __no_const
81024+# define __no_const
81025+#endif
81026+
81027+#ifndef __do_const
81028+# define __do_const
81029+#endif
81030+
81031+#ifndef __size_overflow
81032+# define __size_overflow(...)
81033+#endif
81034+
81035+#ifndef __intentional_overflow
81036+# define __intentional_overflow(...)
81037+#endif
81038+
81039+#ifndef __latent_entropy
81040+# define __latent_entropy
81041+#endif
81042+
81043 /*
81044 * Tell gcc if a function is cold. The compiler will assume any path
81045 * directly leading to the call is unlikely.
81046@@ -373,6 +423,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81047 #define __cold
81048 #endif
81049
81050+#ifndef __alloc_size
81051+#define __alloc_size(...)
81052+#endif
81053+
81054+#ifndef __bos
81055+#define __bos(ptr, arg)
81056+#endif
81057+
81058+#ifndef __bos0
81059+#define __bos0(ptr)
81060+#endif
81061+
81062+#ifndef __bos1
81063+#define __bos1(ptr)
81064+#endif
81065+
81066 /* Simple shorthand for a section definition */
81067 #ifndef __section
81068 # define __section(S) __attribute__ ((__section__(#S)))
81069@@ -387,6 +453,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81070 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
81071 #endif
81072
81073+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
81074+
81075 /* Is this type a native word size -- useful for atomic operations */
81076 #ifndef __native_word
81077 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
81078@@ -466,8 +534,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81079 */
81080 #define __ACCESS_ONCE(x) ({ \
81081 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
81082- (volatile typeof(x) *)&(x); })
81083+ (volatile const typeof(x) *)&(x); })
81084 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
81085+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
81086
81087 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
81088 #ifdef CONFIG_KPROBES
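
Adding const to __ACCESS_ONCE makes the classic ACCESS_ONCE() read-only: an accidental store through it now fails to compile, and intentional racy stores must be spelled ACCESS_ONCE_RW(). A sketch using the two macros exactly as defined in the hunk above:

#define ACCESS_ONCE(x)		(*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

static int shared_flag;

static void demo(void)
{
	int v = ACCESS_ONCE(shared_flag);	/* read: fine */
	/* ACCESS_ONCE(shared_flag) = 1; */	/* would not compile: const */
	ACCESS_ONCE_RW(shared_flag) = v + 1;	/* explicit writable access */
}
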
81089diff --git a/include/linux/completion.h b/include/linux/completion.h
81090index 5d5aaae..0ea9b84 100644
81091--- a/include/linux/completion.h
81092+++ b/include/linux/completion.h
81093@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
81094
81095 extern void wait_for_completion(struct completion *);
81096 extern void wait_for_completion_io(struct completion *);
81097-extern int wait_for_completion_interruptible(struct completion *x);
81098-extern int wait_for_completion_killable(struct completion *x);
81099+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
81100+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
81101 extern unsigned long wait_for_completion_timeout(struct completion *x,
81102- unsigned long timeout);
81103+ unsigned long timeout) __intentional_overflow(-1);
81104 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
81105- unsigned long timeout);
81106+ unsigned long timeout) __intentional_overflow(-1);
81107 extern long wait_for_completion_interruptible_timeout(
81108- struct completion *x, unsigned long timeout);
81109+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81110 extern long wait_for_completion_killable_timeout(
81111- struct completion *x, unsigned long timeout);
81112+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81113 extern bool try_wait_for_completion(struct completion *x);
81114 extern bool completion_done(struct completion *x);
81115
81116diff --git a/include/linux/configfs.h b/include/linux/configfs.h
81117index 34025df..d94bbbc 100644
81118--- a/include/linux/configfs.h
81119+++ b/include/linux/configfs.h
81120@@ -125,7 +125,7 @@ struct configfs_attribute {
81121 const char *ca_name;
81122 struct module *ca_owner;
81123 umode_t ca_mode;
81124-};
81125+} __do_const;
81126
81127 /*
81128 * Users often need to create attribute structures for their configurable
81129diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
81130index 2ee4888..0451f5e 100644
81131--- a/include/linux/cpufreq.h
81132+++ b/include/linux/cpufreq.h
81133@@ -207,6 +207,7 @@ struct global_attr {
81134 ssize_t (*store)(struct kobject *a, struct attribute *b,
81135 const char *c, size_t count);
81136 };
81137+typedef struct global_attr __no_const global_attr_no_const;
81138
81139 #define define_one_global_ro(_name) \
81140 static struct global_attr _name = \
81141@@ -278,7 +279,7 @@ struct cpufreq_driver {
81142 bool boost_supported;
81143 bool boost_enabled;
81144 int (*set_boost)(int state);
81145-};
81146+} __do_const;
81147
81148 /* flags */
81149 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
81150diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
81151index 9c5e892..feb34e0 100644
81152--- a/include/linux/cpuidle.h
81153+++ b/include/linux/cpuidle.h
81154@@ -59,7 +59,8 @@ struct cpuidle_state {
81155 void (*enter_freeze) (struct cpuidle_device *dev,
81156 struct cpuidle_driver *drv,
81157 int index);
81158-};
81159+} __do_const;
81160+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
81161
81162 /* Idle State Flags */
81163 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
81164@@ -227,7 +228,7 @@ struct cpuidle_governor {
81165 void (*reflect) (struct cpuidle_device *dev, int index);
81166
81167 struct module *owner;
81168-};
81169+} __do_const;
81170
81171 #ifdef CONFIG_CPU_IDLE
81172 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
81173diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
81174index 086549a..a572d94 100644
81175--- a/include/linux/cpumask.h
81176+++ b/include/linux/cpumask.h
81177@@ -126,17 +126,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81178 }
81179
81180 /* Valid inputs for n are -1 and 0. */
81181-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81182+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81183 {
81184 return n+1;
81185 }
81186
81187-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81188+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81189 {
81190 return n+1;
81191 }
81192
81193-static inline unsigned int cpumask_next_and(int n,
81194+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
81195 const struct cpumask *srcp,
81196 const struct cpumask *andp)
81197 {
81198@@ -182,7 +182,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81199 *
81200 * Returns >= nr_cpu_ids if no further cpus set.
81201 */
81202-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81203+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81204 {
81205 /* -1 is a legal arg here. */
81206 if (n != -1)
81207@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81208 *
81209 * Returns >= nr_cpu_ids if no further cpus unset.
81210 */
81211-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81212+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81213 {
81214 /* -1 is a legal arg here. */
81215 if (n != -1)
81216@@ -205,7 +205,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81217 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
81218 }
81219
81220-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
81221+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
81222 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
81223 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
81224
81225@@ -472,7 +472,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
81226 * cpumask_weight - Count of bits in *srcp
81227 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
81228 */
81229-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
81230+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
81231 {
81232 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
81233 }
81234diff --git a/include/linux/cred.h b/include/linux/cred.h
81235index 2fb2ca2..d6a3340 100644
81236--- a/include/linux/cred.h
81237+++ b/include/linux/cred.h
81238@@ -35,7 +35,7 @@ struct group_info {
81239 int nblocks;
81240 kgid_t small_block[NGROUPS_SMALL];
81241 kgid_t *blocks[0];
81242-};
81243+} __randomize_layout;
81244
81245 /**
81246 * get_group_info - Get a reference to a group info structure
81247@@ -137,7 +137,7 @@ struct cred {
81248 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
81249 struct group_info *group_info; /* supplementary groups for euid/fsgid */
81250 struct rcu_head rcu; /* RCU deletion hook */
81251-};
81252+} __randomize_layout;
81253
81254 extern void __put_cred(struct cred *);
81255 extern void exit_creds(struct task_struct *);
81256@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
81257 static inline void validate_process_creds(void)
81258 {
81259 }
81260+static inline void validate_task_creds(struct task_struct *task)
81261+{
81262+}
81263 #endif
81264
81265 /**
81266@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
81267
81268 #define task_uid(task) (task_cred_xxx((task), uid))
81269 #define task_euid(task) (task_cred_xxx((task), euid))
81270+#define task_securebits(task) (task_cred_xxx((task), securebits))
81271
81272 #define current_cred_xxx(xxx) \
81273 ({ \
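
__randomize_layout is consumed by the randstruct gcc plugin (hooked up in the compiler-gcc4.h hunk above): the plugin reshuffles member order using a per-build seed, so structures like cred and group_info no longer have offsets an exploit can hardcode. A sketch, with the usual no-op fallback assumed:

#ifndef __randomize_layout
# define __randomize_layout	/* no-op without the randstruct plugin */
#endif

struct sensitive_state {
	unsigned long counter;
	void *callback;
	int flags;
} __randomize_layout;

/* With the plugin enabled, offsetof(struct sensitive_state, callback)
 * differs from build to build; initializers must therefore be designated
 * (.callback = ...) rather than positional. */
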
81274diff --git a/include/linux/crypto.h b/include/linux/crypto.h
81275index fb5ef16..05d1e59 100644
81276--- a/include/linux/crypto.h
81277+++ b/include/linux/crypto.h
81278@@ -626,7 +626,7 @@ struct cipher_tfm {
81279 const u8 *key, unsigned int keylen);
81280 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81281 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81282-};
81283+} __no_const;
81284
81285 struct hash_tfm {
81286 int (*init)(struct hash_desc *desc);
81287@@ -647,13 +647,13 @@ struct compress_tfm {
81288 int (*cot_decompress)(struct crypto_tfm *tfm,
81289 const u8 *src, unsigned int slen,
81290 u8 *dst, unsigned int *dlen);
81291-};
81292+} __no_const;
81293
81294 struct rng_tfm {
81295 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
81296 unsigned int dlen);
81297 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
81298-};
81299+} __no_const;
81300
81301 #define crt_ablkcipher crt_u.ablkcipher
81302 #define crt_aead crt_u.aead
81303diff --git a/include/linux/ctype.h b/include/linux/ctype.h
81304index 653589e..4ef254a 100644
81305--- a/include/linux/ctype.h
81306+++ b/include/linux/ctype.h
81307@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
81308 * Fast implementation of tolower() for internal usage. Do not use in your
81309 * code.
81310 */
81311-static inline char _tolower(const char c)
81312+static inline unsigned char _tolower(const unsigned char c)
81313 {
81314 return c | 0x20;
81315 }
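
The signedness change matters for bytes above 0x7f: with plain char (signed on most architectures) such a byte is negative, and the result of c | 0x20 is a sign-extended value that surprises callers. Operating on unsigned char keeps the bit trick well defined for every byte value. A small demonstration:

#include <stdio.h>

static inline unsigned char _tolower(const unsigned char c)
{
	return c | 0x20;
}

int main(void)
{
	/* 'A' (0x41) maps to 'a' (0x61); a high byte like 0xC4 stays a
	 * well-defined small positive value instead of a negative char. */
	printf("%c %#x\n", _tolower('A'), _tolower(0xC4));
	return 0;
}
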
81316diff --git a/include/linux/dcache.h b/include/linux/dcache.h
81317index d835879..c8e5b92 100644
81318--- a/include/linux/dcache.h
81319+++ b/include/linux/dcache.h
81320@@ -123,6 +123,9 @@ struct dentry {
81321 unsigned long d_time; /* used by d_revalidate */
81322 void *d_fsdata; /* fs-specific data */
81323
81324+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
81325+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
81326+#endif
81327 struct list_head d_lru; /* LRU list */
81328 struct list_head d_child; /* child of parent list */
81329 struct list_head d_subdirs; /* our children */
81330@@ -133,7 +136,7 @@ struct dentry {
81331 struct hlist_node d_alias; /* inode alias list */
81332 struct rcu_head d_rcu;
81333 } d_u;
81334-};
81335+} __randomize_layout;
81336
81337 /*
81338 * dentry->d_lock spinlock nesting subclasses:
81339@@ -319,7 +322,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
81340
81341 static inline unsigned d_count(const struct dentry *dentry)
81342 {
81343- return dentry->d_lockref.count;
81344+ return __lockref_read(&dentry->d_lockref);
81345 }
81346
81347 /*
81348@@ -347,7 +350,7 @@ extern char *dentry_path(struct dentry *, char *, int);
81349 static inline struct dentry *dget_dlock(struct dentry *dentry)
81350 {
81351 if (dentry)
81352- dentry->d_lockref.count++;
81353+ __lockref_inc(&dentry->d_lockref);
81354 return dentry;
81355 }
81356
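
d_lockref.count is no longer touched directly; the __lockref_read()/__lockref_inc() accessors funnel the count through an atomic type so reference-count hardening applies. The sketch below is purely illustrative of one plausible shape (assuming the count is converted to an atomic_long_t); the real definitions live in the lockref.h hunk of this patch:

static inline long __lockref_read(struct lockref *lockref)
{
	return atomic_long_read(&lockref->count);	/* assumed atomic count */
}

static inline void __lockref_inc(struct lockref *lockref)
{
	atomic_long_inc(&lockref->count);
}
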
81357diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
81358index 7925bf0..d5143d2 100644
81359--- a/include/linux/decompress/mm.h
81360+++ b/include/linux/decompress/mm.h
81361@@ -77,7 +77,7 @@ static void free(void *where)
81362 * warnings when not needed (indeed large_malloc / large_free are not
81363 * needed by inflate */
81364
81365-#define malloc(a) kmalloc(a, GFP_KERNEL)
81366+#define malloc(a) kmalloc((a), GFP_KERNEL)
81367 #define free(a) kfree(a)
81368
81369 #define large_malloc(a) vmalloc(a)
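
Parenthesizing the macro argument is standard preprocessor hygiene: it guarantees the argument is grouped as a single expression no matter what operators it contains. The general hazard it guards against, shown with a hypothetical macro whose argument meets a lower-precedence operator:

#define BYTES_BAD(x)	kmalloc(x << 1, GFP_KERNEL)	/* hypothetical */
#define BYTES_GOOD(x)	kmalloc((x) << 1, GFP_KERNEL)

/* BYTES_BAD(a + b)  expands to kmalloc(a + b << 1, ...), which groups as
 *                   a + (b << 1) -- wrong;
 * BYTES_GOOD(a + b) expands to kmalloc((a + b) << 1, ...) -- intended. */
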
81370diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
81371index ce447f0..83c66bd 100644
81372--- a/include/linux/devfreq.h
81373+++ b/include/linux/devfreq.h
81374@@ -114,7 +114,7 @@ struct devfreq_governor {
81375 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
81376 int (*event_handler)(struct devfreq *devfreq,
81377 unsigned int event, void *data);
81378-};
81379+} __do_const;
81380
81381 /**
81382 * struct devfreq - Device devfreq structure
81383diff --git a/include/linux/device.h b/include/linux/device.h
81384index 0eb8ee2..c603b6a 100644
81385--- a/include/linux/device.h
81386+++ b/include/linux/device.h
81387@@ -311,7 +311,7 @@ struct subsys_interface {
81388 struct list_head node;
81389 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
81390 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
81391-};
81392+} __do_const;
81393
81394 int subsys_interface_register(struct subsys_interface *sif);
81395 void subsys_interface_unregister(struct subsys_interface *sif);
81396@@ -507,7 +507,7 @@ struct device_type {
81397 void (*release)(struct device *dev);
81398
81399 const struct dev_pm_ops *pm;
81400-};
81401+} __do_const;
81402
81403 /* interface for exporting device attributes */
81404 struct device_attribute {
81405@@ -517,11 +517,12 @@ struct device_attribute {
81406 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
81407 const char *buf, size_t count);
81408 };
81409+typedef struct device_attribute __no_const device_attribute_no_const;
81410
81411 struct dev_ext_attribute {
81412 struct device_attribute attr;
81413 void *var;
81414-};
81415+} __do_const;
81416
81417 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
81418 char *buf);
81419diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
81420index c3007cb..43efc8c 100644
81421--- a/include/linux/dma-mapping.h
81422+++ b/include/linux/dma-mapping.h
81423@@ -60,7 +60,7 @@ struct dma_map_ops {
81424 u64 (*get_required_mask)(struct device *dev);
81425 #endif
81426 int is_phys;
81427-};
81428+} __do_const;
81429
81430 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
81431
81432diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
81433index b6997a0..108be6c 100644
81434--- a/include/linux/dmaengine.h
81435+++ b/include/linux/dmaengine.h
81436@@ -1133,9 +1133,9 @@ struct dma_pinned_list {
81437 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
81438 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
81439
81440-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81441+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81442 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
81443-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81444+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81445 struct dma_pinned_list *pinned_list, struct page *page,
81446 unsigned int offset, size_t len);
81447
81448diff --git a/include/linux/efi.h b/include/linux/efi.h
81449index cf7e431..d239dce 100644
81450--- a/include/linux/efi.h
81451+++ b/include/linux/efi.h
81452@@ -1056,6 +1056,7 @@ struct efivar_operations {
81453 efi_set_variable_nonblocking_t *set_variable_nonblocking;
81454 efi_query_variable_store_t *query_variable_store;
81455 };
81456+typedef struct efivar_operations __no_const efivar_operations_no_const;
81457
81458 struct efivars {
81459 /*
81460diff --git a/include/linux/elf.h b/include/linux/elf.h
81461index 20fa8d8..3d0dd18 100644
81462--- a/include/linux/elf.h
81463+++ b/include/linux/elf.h
81464@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
81465 #define elf_note elf32_note
81466 #define elf_addr_t Elf32_Off
81467 #define Elf_Half Elf32_Half
81468+#define elf_dyn Elf32_Dyn
81469
81470 #else
81471
81472@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
81473 #define elf_note elf64_note
81474 #define elf_addr_t Elf64_Off
81475 #define Elf_Half Elf64_Half
81476+#define elf_dyn Elf64_Dyn
81477
81478 #endif
81479
81480diff --git a/include/linux/err.h b/include/linux/err.h
81481index a729120..6ede2c9 100644
81482--- a/include/linux/err.h
81483+++ b/include/linux/err.h
81484@@ -20,12 +20,12 @@
81485
81486 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
81487
81488-static inline void * __must_check ERR_PTR(long error)
81489+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
81490 {
81491 return (void *) error;
81492 }
81493
81494-static inline long __must_check PTR_ERR(__force const void *ptr)
81495+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
81496 {
81497 return (long) ptr;
81498 }
81499diff --git a/include/linux/extcon.h b/include/linux/extcon.h
81500index 36f49c4..a2a1f4c 100644
81501--- a/include/linux/extcon.h
81502+++ b/include/linux/extcon.h
81503@@ -135,7 +135,7 @@ struct extcon_dev {
81504 /* /sys/class/extcon/.../mutually_exclusive/... */
81505 struct attribute_group attr_g_muex;
81506 struct attribute **attrs_muex;
81507- struct device_attribute *d_attrs_muex;
81508+ device_attribute_no_const *d_attrs_muex;
81509 };
81510
81511 /**
81512diff --git a/include/linux/fb.h b/include/linux/fb.h
81513index 043f328..180ccbf 100644
81514--- a/include/linux/fb.h
81515+++ b/include/linux/fb.h
81516@@ -305,7 +305,8 @@ struct fb_ops {
81517 /* called at KDB enter and leave time to prepare the console */
81518 int (*fb_debug_enter)(struct fb_info *info);
81519 int (*fb_debug_leave)(struct fb_info *info);
81520-};
81521+} __do_const;
81522+typedef struct fb_ops __no_const fb_ops_no_const;
81523
81524 #ifdef CONFIG_FB_TILEBLITTING
81525 #define FB_TILE_CURSOR_NONE 0
81526diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81527index 230f87b..1fd0485 100644
81528--- a/include/linux/fdtable.h
81529+++ b/include/linux/fdtable.h
81530@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81531 void put_files_struct(struct files_struct *fs);
81532 void reset_files_struct(struct files_struct *);
81533 int unshare_files(struct files_struct **);
81534-struct files_struct *dup_fd(struct files_struct *, int *);
81535+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81536 void do_close_on_exec(struct files_struct *);
81537 int iterate_fd(struct files_struct *, unsigned,
81538 int (*)(const void *, struct file *, unsigned),
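
__latent_entropy (expanded by the latent_entropy plugin defined in the compiler-gcc headers above) instruments the marked function: the compiler injects updates to a hidden accumulator in its basic blocks and mixes the result into a global entropy pool on return, cheaply seeding the RNG from early-boot control flow. A sketch of how a function opts in, with a no-op fallback assumed:

#ifndef __latent_entropy
# define __latent_entropy	/* no-op without the latent_entropy plugin */
#endif

/* Hypothetical worker: the plugin, not this code, performs the entropy
 * accumulation; the annotation merely selects the function. */
static void __latent_entropy process_items(unsigned long *items, int n)
{
	while (n--)
		items[n] ^= n;
}
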
81539diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81540index 8293262..2b3b8bd 100644
81541--- a/include/linux/frontswap.h
81542+++ b/include/linux/frontswap.h
81543@@ -11,7 +11,7 @@ struct frontswap_ops {
81544 int (*load)(unsigned, pgoff_t, struct page *);
81545 void (*invalidate_page)(unsigned, pgoff_t);
81546 void (*invalidate_area)(unsigned);
81547-};
81548+} __no_const;
81549
81550 extern bool frontswap_enabled;
81551 extern struct frontswap_ops *
81552diff --git a/include/linux/fs.h b/include/linux/fs.h
81553index 52cc449..58b25c9 100644
81554--- a/include/linux/fs.h
81555+++ b/include/linux/fs.h
81556@@ -410,7 +410,7 @@ struct address_space {
81557 spinlock_t private_lock; /* for use by the address_space */
81558 struct list_head private_list; /* ditto */
81559 void *private_data; /* ditto */
81560-} __attribute__((aligned(sizeof(long))));
81561+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81562 /*
81563 * On most architectures that alignment is already the case; but
81564 * must be enforced here for CRIS, to let the least significant bit
81565@@ -453,7 +453,7 @@ struct block_device {
81566 int bd_fsfreeze_count;
81567 /* Mutex for freeze */
81568 struct mutex bd_fsfreeze_mutex;
81569-};
81570+} __randomize_layout;
81571
81572 /*
81573 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81574@@ -639,7 +639,7 @@ struct inode {
81575 #endif
81576
81577 void *i_private; /* fs or device private pointer */
81578-};
81579+} __randomize_layout;
81580
81581 static inline int inode_unhashed(struct inode *inode)
81582 {
81583@@ -834,7 +834,7 @@ struct file {
81584 struct list_head f_tfile_llink;
81585 #endif /* #ifdef CONFIG_EPOLL */
81586 struct address_space *f_mapping;
81587-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81588+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81589
81590 struct file_handle {
81591 __u32 handle_bytes;
81592@@ -962,7 +962,7 @@ struct file_lock {
81593 int state; /* state of grant or error if -ve */
81594 } afs;
81595 } fl_u;
81596-};
81597+} __randomize_layout;
81598
81599 struct file_lock_context {
81600 spinlock_t flc_lock;
81601@@ -1316,7 +1316,7 @@ struct super_block {
81602 * Indicates how deep in a filesystem stack this SB is
81603 */
81604 int s_stack_depth;
81605-};
81606+} __randomize_layout;
81607
81608 extern struct timespec current_fs_time(struct super_block *sb);
81609
81610@@ -1570,7 +1570,8 @@ struct file_operations {
81611 #ifndef CONFIG_MMU
81612 unsigned (*mmap_capabilities)(struct file *);
81613 #endif
81614-};
81615+} __do_const __randomize_layout;
81616+typedef struct file_operations __no_const file_operations_no_const;
81617
81618 struct inode_operations {
81619 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81620@@ -2918,4 +2919,14 @@ static inline bool dir_relax(struct inode *inode)
81621 return !IS_DEADDIR(inode);
81622 }
81623
81624+static inline bool is_sidechannel_device(const struct inode *inode)
81625+{
81626+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81627+ umode_t mode = inode->i_mode;
81628+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81629+#else
81630+ return false;
81631+#endif
81632+}
81633+
81634 #endif /* _LINUX_FS_H */
81635diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81636index 0efc3e6..fd23610 100644
81637--- a/include/linux/fs_struct.h
81638+++ b/include/linux/fs_struct.h
81639@@ -6,13 +6,13 @@
81640 #include <linux/seqlock.h>
81641
81642 struct fs_struct {
81643- int users;
81644+ atomic_t users;
81645 spinlock_t lock;
81646 seqcount_t seq;
81647 int umask;
81648 int in_exec;
81649 struct path root, pwd;
81650-};
81651+} __randomize_layout;
81652
81653 extern struct kmem_cache *fs_cachep;
81654
81655diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81656index 7714849..a4a5c7a 100644
81657--- a/include/linux/fscache-cache.h
81658+++ b/include/linux/fscache-cache.h
81659@@ -113,7 +113,7 @@ struct fscache_operation {
81660 fscache_operation_release_t release;
81661 };
81662
81663-extern atomic_t fscache_op_debug_id;
81664+extern atomic_unchecked_t fscache_op_debug_id;
81665 extern void fscache_op_work_func(struct work_struct *work);
81666
81667 extern void fscache_enqueue_operation(struct fscache_operation *);
81668@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81669 INIT_WORK(&op->work, fscache_op_work_func);
81670 atomic_set(&op->usage, 1);
81671 op->state = FSCACHE_OP_ST_INITIALISED;
81672- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81673+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81674 op->processor = processor;
81675 op->release = release;
81676 INIT_LIST_HEAD(&op->pend_link);
81677diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81678index 115bb81..e7b812b 100644
81679--- a/include/linux/fscache.h
81680+++ b/include/linux/fscache.h
81681@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81682 * - this is mandatory for any object that may have data
81683 */
81684 void (*now_uncached)(void *cookie_netfs_data);
81685-};
81686+} __do_const;
81687
81688 /*
81689 * fscache cached network filesystem type
81690diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81691index 7ee1774..72505b8 100644
81692--- a/include/linux/fsnotify.h
81693+++ b/include/linux/fsnotify.h
81694@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81695 struct inode *inode = file_inode(file);
81696 __u32 mask = FS_ACCESS;
81697
81698+ if (is_sidechannel_device(inode))
81699+ return;
81700+
81701 if (S_ISDIR(inode->i_mode))
81702 mask |= FS_ISDIR;
81703
81704@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81705 struct inode *inode = file_inode(file);
81706 __u32 mask = FS_MODIFY;
81707
81708+ if (is_sidechannel_device(inode))
81709+ return;
81710+
81711 if (S_ISDIR(inode->i_mode))
81712 mask |= FS_ISDIR;
81713
81714@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81715 */
81716 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81717 {
81718- return kstrdup(name, GFP_KERNEL);
81719+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81720 }
81721
81722 /*
81723diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81724index ec274e0..e678159 100644
81725--- a/include/linux/genhd.h
81726+++ b/include/linux/genhd.h
81727@@ -194,7 +194,7 @@ struct gendisk {
81728 struct kobject *slave_dir;
81729
81730 struct timer_rand_state *random;
81731- atomic_t sync_io; /* RAID */
81732+ atomic_unchecked_t sync_io; /* RAID */
81733 struct disk_events *ev;
81734 #ifdef CONFIG_BLK_DEV_INTEGRITY
81735 struct blk_integrity *integrity;
81736@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81737 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81738
81739 /* drivers/char/random.c */
81740-extern void add_disk_randomness(struct gendisk *disk);
81741+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81742 extern void rand_initialize_disk(struct gendisk *disk);
81743
81744 static inline sector_t get_start_sect(struct block_device *bdev)
81745diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81746index 667c311..abac2a7 100644
81747--- a/include/linux/genl_magic_func.h
81748+++ b/include/linux/genl_magic_func.h
81749@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81750 },
81751
81752 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81753-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81754+static struct genl_ops ZZZ_genl_ops[] = {
81755 #include GENL_MAGIC_INCLUDE_FILE
81756 };
81757
81758diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81759index 51bd1e7..0486343 100644
81760--- a/include/linux/gfp.h
81761+++ b/include/linux/gfp.h
81762@@ -34,6 +34,13 @@ struct vm_area_struct;
81763 #define ___GFP_NO_KSWAPD 0x400000u
81764 #define ___GFP_OTHER_NODE 0x800000u
81765 #define ___GFP_WRITE 0x1000000u
81766+
81767+#ifdef CONFIG_PAX_USERCOPY_SLABS
81768+#define ___GFP_USERCOPY 0x2000000u
81769+#else
81770+#define ___GFP_USERCOPY 0
81771+#endif
81772+
81773 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81774
81775 /*
81776@@ -90,6 +97,7 @@ struct vm_area_struct;
81777 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81778 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81779 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81780+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81781
81782 /*
81783 * This may seem redundant, but it's a way of annotating false positives vs.
81784@@ -97,7 +105,7 @@ struct vm_area_struct;
81785 */
81786 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81787
81788-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81789+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81790 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81791
81792 /* This equals 0, but use constants in case they ever change */
81793@@ -152,6 +160,8 @@ struct vm_area_struct;
81794 /* 4GB DMA on some platforms */
81795 #define GFP_DMA32 __GFP_DMA32
81796
81797+#define GFP_USERCOPY __GFP_USERCOPY
81798+
81799 /* Convert GFP flags to their corresponding migrate type */
81800 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81801 {
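
With PAX_USERCOPY_SLABS, __GFP_USERCOPY steers an allocation into a dedicated usercopy slab; the usercopy checks can then verify that a copy_to_user()/copy_from_user() span stays within an object that was actually meant to cross the kernel/user boundary. A hedged sketch of an allocation site (fill_buffer() is a hypothetical producer):

static long give_to_user(void __user *ubuf, size_t len)
{
	long ret = 0;
	char *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);

	if (!buf)
		return -ENOMEM;
	fill_buffer(buf, len);			/* hypothetical producer */
	if (copy_to_user(ubuf, buf, len))	/* bounds-checked copy */
		ret = -EFAULT;
	kfree(buf);
	return ret;
}
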
81802diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81803new file mode 100644
81804index 0000000..91858e4
81805--- /dev/null
81806+++ b/include/linux/gracl.h
81807@@ -0,0 +1,342 @@
81808+#ifndef GR_ACL_H
81809+#define GR_ACL_H
81810+
81811+#include <linux/grdefs.h>
81812+#include <linux/resource.h>
81813+#include <linux/capability.h>
81814+#include <linux/dcache.h>
81815+#include <asm/resource.h>
81816+
81817+/* Major status information */
81818+
81819+#define GR_VERSION "grsecurity 3.1"
81820+#define GRSECURITY_VERSION 0x3100
81821+
81822+enum {
81823+ GR_SHUTDOWN = 0,
81824+ GR_ENABLE = 1,
81825+ GR_SPROLE = 2,
81826+ GR_OLDRELOAD = 3,
81827+ GR_SEGVMOD = 4,
81828+ GR_STATUS = 5,
81829+ GR_UNSPROLE = 6,
81830+ GR_PASSSET = 7,
81831+ GR_SPROLEPAM = 8,
81832+ GR_RELOAD = 9,
81833+};
81834+
81835+/* Password setup definitions
81836+ * kernel/grhash.c */
81837+enum {
81838+ GR_PW_LEN = 128,
81839+ GR_SALT_LEN = 16,
81840+ GR_SHA_LEN = 32,
81841+};
81842+
81843+enum {
81844+ GR_SPROLE_LEN = 64,
81845+};
81846+
81847+enum {
81848+ GR_NO_GLOB = 0,
81849+ GR_REG_GLOB,
81850+ GR_CREATE_GLOB
81851+};
81852+
81853+#define GR_NLIMITS 32
81854+
81855+/* Begin Data Structures */
81856+
81857+struct sprole_pw {
81858+ unsigned char *rolename;
81859+ unsigned char salt[GR_SALT_LEN];
81860+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81861+};
81862+
81863+struct name_entry {
81864+ __u32 key;
81865+ u64 inode;
81866+ dev_t device;
81867+ char *name;
81868+ __u16 len;
81869+ __u8 deleted;
81870+ struct name_entry *prev;
81871+ struct name_entry *next;
81872+};
81873+
81874+struct inodev_entry {
81875+ struct name_entry *nentry;
81876+ struct inodev_entry *prev;
81877+ struct inodev_entry *next;
81878+};
81879+
81880+struct acl_role_db {
81881+ struct acl_role_label **r_hash;
81882+ __u32 r_size;
81883+};
81884+
81885+struct inodev_db {
81886+ struct inodev_entry **i_hash;
81887+ __u32 i_size;
81888+};
81889+
81890+struct name_db {
81891+ struct name_entry **n_hash;
81892+ __u32 n_size;
81893+};
81894+
81895+struct crash_uid {
81896+ uid_t uid;
81897+ unsigned long expires;
81898+};
81899+
81900+struct gr_hash_struct {
81901+ void **table;
81902+ void **nametable;
81903+ void *first;
81904+ __u32 table_size;
81905+ __u32 used_size;
81906+ int type;
81907+};
81908+
81909+/* Userspace Grsecurity ACL data structures */
81910+
81911+struct acl_subject_label {
81912+ char *filename;
81913+ u64 inode;
81914+ dev_t device;
81915+ __u32 mode;
81916+ kernel_cap_t cap_mask;
81917+ kernel_cap_t cap_lower;
81918+ kernel_cap_t cap_invert_audit;
81919+
81920+ struct rlimit res[GR_NLIMITS];
81921+ __u32 resmask;
81922+
81923+ __u8 user_trans_type;
81924+ __u8 group_trans_type;
81925+ uid_t *user_transitions;
81926+ gid_t *group_transitions;
81927+ __u16 user_trans_num;
81928+ __u16 group_trans_num;
81929+
81930+ __u32 sock_families[2];
81931+ __u32 ip_proto[8];
81932+ __u32 ip_type;
81933+ struct acl_ip_label **ips;
81934+ __u32 ip_num;
81935+ __u32 inaddr_any_override;
81936+
81937+ __u32 crashes;
81938+ unsigned long expires;
81939+
81940+ struct acl_subject_label *parent_subject;
81941+ struct gr_hash_struct *hash;
81942+ struct acl_subject_label *prev;
81943+ struct acl_subject_label *next;
81944+
81945+ struct acl_object_label **obj_hash;
81946+ __u32 obj_hash_size;
81947+ __u16 pax_flags;
81948+};
81949+
81950+struct role_allowed_ip {
81951+ __u32 addr;
81952+ __u32 netmask;
81953+
81954+ struct role_allowed_ip *prev;
81955+ struct role_allowed_ip *next;
81956+};
81957+
81958+struct role_transition {
81959+ char *rolename;
81960+
81961+ struct role_transition *prev;
81962+ struct role_transition *next;
81963+};
81964+
81965+struct acl_role_label {
81966+ char *rolename;
81967+ uid_t uidgid;
81968+ __u16 roletype;
81969+
81970+ __u16 auth_attempts;
81971+ unsigned long expires;
81972+
81973+ struct acl_subject_label *root_label;
81974+ struct gr_hash_struct *hash;
81975+
81976+ struct acl_role_label *prev;
81977+ struct acl_role_label *next;
81978+
81979+ struct role_transition *transitions;
81980+ struct role_allowed_ip *allowed_ips;
81981+ uid_t *domain_children;
81982+ __u16 domain_child_num;
81983+
81984+ umode_t umask;
81985+
81986+ struct acl_subject_label **subj_hash;
81987+ __u32 subj_hash_size;
81988+};
81989+
81990+struct user_acl_role_db {
81991+ struct acl_role_label **r_table;
81992+ __u32 num_pointers; /* Number of allocations to track */
81993+ __u32 num_roles; /* Number of roles */
81994+ __u32 num_domain_children; /* Number of domain children */
81995+ __u32 num_subjects; /* Number of subjects */
81996+ __u32 num_objects; /* Number of objects */
81997+};
81998+
81999+struct acl_object_label {
82000+ char *filename;
82001+ u64 inode;
82002+ dev_t device;
82003+ __u32 mode;
82004+
82005+ struct acl_subject_label *nested;
82006+ struct acl_object_label *globbed;
82007+
82008+	/* the next two pointers are unused */
82009+
82010+ struct acl_object_label *prev;
82011+ struct acl_object_label *next;
82012+};
82013+
82014+struct acl_ip_label {
82015+ char *iface;
82016+ __u32 addr;
82017+ __u32 netmask;
82018+ __u16 low, high;
82019+ __u8 mode;
82020+ __u32 type;
82021+ __u32 proto[8];
82022+
82023+	/* the next two pointers are unused */
82024+
82025+ struct acl_ip_label *prev;
82026+ struct acl_ip_label *next;
82027+};
82028+
82029+struct gr_arg {
82030+ struct user_acl_role_db role_db;
82031+ unsigned char pw[GR_PW_LEN];
82032+ unsigned char salt[GR_SALT_LEN];
82033+ unsigned char sum[GR_SHA_LEN];
82034+ unsigned char sp_role[GR_SPROLE_LEN];
82035+ struct sprole_pw *sprole_pws;
82036+ dev_t segv_device;
82037+ u64 segv_inode;
82038+ uid_t segv_uid;
82039+ __u16 num_sprole_pws;
82040+ __u16 mode;
82041+};
82042+
82043+struct gr_arg_wrapper {
82044+ struct gr_arg *arg;
82045+ __u32 version;
82046+ __u32 size;
82047+};
82048+
82049+struct subject_map {
82050+ struct acl_subject_label *user;
82051+ struct acl_subject_label *kernel;
82052+ struct subject_map *prev;
82053+ struct subject_map *next;
82054+};
82055+
82056+struct acl_subj_map_db {
82057+ struct subject_map **s_hash;
82058+ __u32 s_size;
82059+};
82060+
82061+struct gr_policy_state {
82062+ struct sprole_pw **acl_special_roles;
82063+ __u16 num_sprole_pws;
82064+ struct acl_role_label *kernel_role;
82065+ struct acl_role_label *role_list;
82066+ struct acl_role_label *default_role;
82067+ struct acl_role_db acl_role_set;
82068+ struct acl_subj_map_db subj_map_set;
82069+ struct name_db name_set;
82070+ struct inodev_db inodev_set;
82071+};
82072+
82073+struct gr_alloc_state {
82074+ unsigned long alloc_stack_next;
82075+ unsigned long alloc_stack_size;
82076+ void **alloc_stack;
82077+};
82078+
82079+struct gr_reload_state {
82080+ struct gr_policy_state oldpolicy;
82081+ struct gr_alloc_state oldalloc;
82082+ struct gr_policy_state newpolicy;
82083+ struct gr_alloc_state newalloc;
82084+ struct gr_policy_state *oldpolicy_ptr;
82085+ struct gr_alloc_state *oldalloc_ptr;
82086+ unsigned char oldmode;
82087+};
82088+
82089+/* End Data Structures Section */
82090+
82091+/* Hash functions generated by empirical testing by Brad Spengler.
82092+   Makes good use of the low bits of the inode: typically 0-1 loop
82093+   iterations for a successful match and 0-3 for an unsuccessful one.
82094+   Shift/add algorithm with modulus of table size and an XOR. */
82095+
82096+static __inline__ unsigned int
82097+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
82098+{
82099+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
82100+}
82101+
82102+static __inline__ unsigned int
82103+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
82104+{
82105+ return ((const unsigned long)userp % sz);
82106+}
82107+
82108+static __inline__ unsigned int
82109+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
82110+{
82111+ unsigned int rem;
82112+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
82113+ return rem;
82114+}
82115+
82116+static __inline__ unsigned int
82117+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
82118+{
82119+ return full_name_hash((const unsigned char *)name, len) % sz;
82120+}
82121+
82122+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
82123+ subj = NULL; \
82124+ iter = 0; \
82125+ while (iter < role->subj_hash_size) { \
82126+ if (subj == NULL) \
82127+ subj = role->subj_hash[iter]; \
82128+ if (subj == NULL) { \
82129+ iter++; \
82130+ continue; \
82131+ }
82132+
82133+#define FOR_EACH_SUBJECT_END(subj,iter) \
82134+ subj = subj->next; \
82135+ if (subj == NULL) \
82136+ iter++; \
82137+ }
82138+
82139+
82140+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
82141+ subj = role->hash->first; \
82142+ while (subj != NULL) {
82143+
82144+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
82145+ subj = subj->next; \
82146+ }
82147+
82148+#endif
82149+
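
The FOR_EACH_SUBJECT_* macros above expand to an open-coded walk over a role's subject hash, visiting every chained entry in every bucket. A hypothetical lookup built on them, to show the intended pairing of START and END:

static struct acl_subject_label *
find_subject(struct acl_role_label *role, u64 ino, dev_t dev)
{
	struct acl_subject_label *subj;
	unsigned int iter;

	FOR_EACH_SUBJECT_START(role, subj, iter)
		if (subj->inode == ino && subj->device == dev)
			return subj;
	FOR_EACH_SUBJECT_END(subj, iter)

	return NULL;
}
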
82150diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
82151new file mode 100644
82152index 0000000..af64092
82153--- /dev/null
82154+++ b/include/linux/gracl_compat.h
82155@@ -0,0 +1,156 @@
82156+#ifndef GR_ACL_COMPAT_H
82157+#define GR_ACL_COMPAT_H
82158+
82159+#include <linux/resource.h>
82160+#include <asm/resource.h>
82161+
82162+struct sprole_pw_compat {
82163+ compat_uptr_t rolename;
82164+ unsigned char salt[GR_SALT_LEN];
82165+ unsigned char sum[GR_SHA_LEN];
82166+};
82167+
82168+struct gr_hash_struct_compat {
82169+ compat_uptr_t table;
82170+ compat_uptr_t nametable;
82171+ compat_uptr_t first;
82172+ __u32 table_size;
82173+ __u32 used_size;
82174+ int type;
82175+};
82176+
82177+struct acl_subject_label_compat {
82178+ compat_uptr_t filename;
82179+ compat_u64 inode;
82180+ __u32 device;
82181+ __u32 mode;
82182+ kernel_cap_t cap_mask;
82183+ kernel_cap_t cap_lower;
82184+ kernel_cap_t cap_invert_audit;
82185+
82186+ struct compat_rlimit res[GR_NLIMITS];
82187+ __u32 resmask;
82188+
82189+ __u8 user_trans_type;
82190+ __u8 group_trans_type;
82191+ compat_uptr_t user_transitions;
82192+ compat_uptr_t group_transitions;
82193+ __u16 user_trans_num;
82194+ __u16 group_trans_num;
82195+
82196+ __u32 sock_families[2];
82197+ __u32 ip_proto[8];
82198+ __u32 ip_type;
82199+ compat_uptr_t ips;
82200+ __u32 ip_num;
82201+ __u32 inaddr_any_override;
82202+
82203+ __u32 crashes;
82204+ compat_ulong_t expires;
82205+
82206+ compat_uptr_t parent_subject;
82207+ compat_uptr_t hash;
82208+ compat_uptr_t prev;
82209+ compat_uptr_t next;
82210+
82211+ compat_uptr_t obj_hash;
82212+ __u32 obj_hash_size;
82213+ __u16 pax_flags;
82214+};
82215+
82216+struct role_allowed_ip_compat {
82217+ __u32 addr;
82218+ __u32 netmask;
82219+
82220+ compat_uptr_t prev;
82221+ compat_uptr_t next;
82222+};
82223+
82224+struct role_transition_compat {
82225+ compat_uptr_t rolename;
82226+
82227+ compat_uptr_t prev;
82228+ compat_uptr_t next;
82229+};
82230+
82231+struct acl_role_label_compat {
82232+ compat_uptr_t rolename;
82233+ uid_t uidgid;
82234+ __u16 roletype;
82235+
82236+ __u16 auth_attempts;
82237+ compat_ulong_t expires;
82238+
82239+ compat_uptr_t root_label;
82240+ compat_uptr_t hash;
82241+
82242+ compat_uptr_t prev;
82243+ compat_uptr_t next;
82244+
82245+ compat_uptr_t transitions;
82246+ compat_uptr_t allowed_ips;
82247+ compat_uptr_t domain_children;
82248+ __u16 domain_child_num;
82249+
82250+ umode_t umask;
82251+
82252+ compat_uptr_t subj_hash;
82253+ __u32 subj_hash_size;
82254+};
82255+
82256+struct user_acl_role_db_compat {
82257+ compat_uptr_t r_table;
82258+ __u32 num_pointers;
82259+ __u32 num_roles;
82260+ __u32 num_domain_children;
82261+ __u32 num_subjects;
82262+ __u32 num_objects;
82263+};
82264+
82265+struct acl_object_label_compat {
82266+ compat_uptr_t filename;
82267+ compat_u64 inode;
82268+ __u32 device;
82269+ __u32 mode;
82270+
82271+ compat_uptr_t nested;
82272+ compat_uptr_t globbed;
82273+
82274+ compat_uptr_t prev;
82275+ compat_uptr_t next;
82276+};
82277+
82278+struct acl_ip_label_compat {
82279+ compat_uptr_t iface;
82280+ __u32 addr;
82281+ __u32 netmask;
82282+ __u16 low, high;
82283+ __u8 mode;
82284+ __u32 type;
82285+ __u32 proto[8];
82286+
82287+ compat_uptr_t prev;
82288+ compat_uptr_t next;
82289+};
82290+
82291+struct gr_arg_compat {
82292+ struct user_acl_role_db_compat role_db;
82293+ unsigned char pw[GR_PW_LEN];
82294+ unsigned char salt[GR_SALT_LEN];
82295+ unsigned char sum[GR_SHA_LEN];
82296+ unsigned char sp_role[GR_SPROLE_LEN];
82297+ compat_uptr_t sprole_pws;
82298+ __u32 segv_device;
82299+ compat_u64 segv_inode;
82300+ uid_t segv_uid;
82301+ __u16 num_sprole_pws;
82302+ __u16 mode;
82303+};
82304+
82305+struct gr_arg_wrapper_compat {
82306+ compat_uptr_t arg;
82307+ __u32 version;
82308+ __u32 size;
82309+};
82310+
82311+#endif
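
Each *_compat mirror replaces pointers with compat_uptr_t and longs with compat_ulong_t so a 32-bit userspace layout can be decoded by a 64-bit kernel; the copy-in path then widens field by field. A sketch of that widening for the wrapper struct, assuming the usual compat_ptr() conversion:

static int copy_gr_arg_wrapper_compat(const char __user *buf,
				      struct gr_arg_wrapper *uwrap)
{
	struct gr_arg_wrapper_compat wrapcompat;

	if (copy_from_user(&wrapcompat, buf, sizeof(wrapcompat)))
		return -EFAULT;

	uwrap->arg = compat_ptr(wrapcompat.arg);	/* 32-bit uptr -> pointer */
	uwrap->version = wrapcompat.version;
	uwrap->size = wrapcompat.size;
	return 0;
}
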
82312diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
82313new file mode 100644
82314index 0000000..323ecf2
82315--- /dev/null
82316+++ b/include/linux/gralloc.h
82317@@ -0,0 +1,9 @@
82318+#ifndef __GRALLOC_H
82319+#define __GRALLOC_H
82320+
82321+void acl_free_all(void);
82322+int acl_alloc_stack_init(unsigned long size);
82323+void *acl_alloc(unsigned long len);
82324+void *acl_alloc_num(unsigned long num, unsigned long len);
82325+
82326+#endif
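
The four functions above form a one-shot arena: acl_alloc_stack_init() sizes a stack that tracks every pointer handed out by acl_alloc()/acl_alloc_num(), and acl_free_all() releases the lot in a single pass when the policy is torn down or reloaded. A hypothetical usage order, assuming a nonzero return from the init call means success:

static int build_policy(unsigned long num_pointers, unsigned long hash_size)
{
	struct acl_role_label *role;
	struct acl_subject_label **subj_hash;

	if (!acl_alloc_stack_init(num_pointers))
		return -ENOMEM;

	role = acl_alloc(sizeof(*role));
	subj_hash = acl_alloc_num(hash_size, sizeof(*subj_hash));
	if (!role || !subj_hash) {
		acl_free_all();		/* frees every tracked allocation */
		return -ENOMEM;
	}
	/* ... populate the role and its subject hash ... */
	return 0;
}
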
82327diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
82328new file mode 100644
82329index 0000000..be66033
82330--- /dev/null
82331+++ b/include/linux/grdefs.h
82332@@ -0,0 +1,140 @@
82333+#ifndef GRDEFS_H
82334+#define GRDEFS_H
82335+
82336+/* Begin grsecurity status declarations */
82337+
82338+enum {
82339+ GR_READY = 0x01,
82340+ GR_STATUS_INIT = 0x00 // disabled state
82341+};
82342+
82343+/* Begin ACL declarations */
82344+
82345+/* Role flags */
82346+
82347+enum {
82348+ GR_ROLE_USER = 0x0001,
82349+ GR_ROLE_GROUP = 0x0002,
82350+ GR_ROLE_DEFAULT = 0x0004,
82351+ GR_ROLE_SPECIAL = 0x0008,
82352+ GR_ROLE_AUTH = 0x0010,
82353+ GR_ROLE_NOPW = 0x0020,
82354+ GR_ROLE_GOD = 0x0040,
82355+ GR_ROLE_LEARN = 0x0080,
82356+ GR_ROLE_TPE = 0x0100,
82357+ GR_ROLE_DOMAIN = 0x0200,
82358+ GR_ROLE_PAM = 0x0400,
82359+ GR_ROLE_PERSIST = 0x0800
82360+};
82361+
82362+/* ACL Subject and Object mode flags */
82363+enum {
82364+ GR_DELETED = 0x80000000
82365+};
82366+
82367+/* ACL Object-only mode flags */
82368+enum {
82369+ GR_READ = 0x00000001,
82370+ GR_APPEND = 0x00000002,
82371+ GR_WRITE = 0x00000004,
82372+ GR_EXEC = 0x00000008,
82373+ GR_FIND = 0x00000010,
82374+ GR_INHERIT = 0x00000020,
82375+ GR_SETID = 0x00000040,
82376+ GR_CREATE = 0x00000080,
82377+ GR_DELETE = 0x00000100,
82378+ GR_LINK = 0x00000200,
82379+ GR_AUDIT_READ = 0x00000400,
82380+ GR_AUDIT_APPEND = 0x00000800,
82381+ GR_AUDIT_WRITE = 0x00001000,
82382+ GR_AUDIT_EXEC = 0x00002000,
82383+ GR_AUDIT_FIND = 0x00004000,
82384+ GR_AUDIT_INHERIT= 0x00008000,
82385+ GR_AUDIT_SETID = 0x00010000,
82386+ GR_AUDIT_CREATE = 0x00020000,
82387+ GR_AUDIT_DELETE = 0x00040000,
82388+ GR_AUDIT_LINK = 0x00080000,
82389+ GR_PTRACERD = 0x00100000,
82390+ GR_NOPTRACE = 0x00200000,
82391+ GR_SUPPRESS = 0x00400000,
82392+ GR_NOLEARN = 0x00800000,
82393+ GR_INIT_TRANSFER= 0x01000000
82394+};
82395+
82396+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
82397+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
82398+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
82399+
82400+/* ACL subject-only mode flags */
82401+enum {
82402+ GR_KILL = 0x00000001,
82403+ GR_VIEW = 0x00000002,
82404+ GR_PROTECTED = 0x00000004,
82405+ GR_LEARN = 0x00000008,
82406+ GR_OVERRIDE = 0x00000010,
82407+ /* just a placeholder, this mode is only used in userspace */
82408+ GR_DUMMY = 0x00000020,
82409+ GR_PROTSHM = 0x00000040,
82410+ GR_KILLPROC = 0x00000080,
82411+ GR_KILLIPPROC = 0x00000100,
82412+ /* just a placeholder, this mode is only used in userspace */
82413+ GR_NOTROJAN = 0x00000200,
82414+ GR_PROTPROCFD = 0x00000400,
82415+ GR_PROCACCT = 0x00000800,
82416+ GR_RELAXPTRACE = 0x00001000,
82417+ //GR_NESTED = 0x00002000,
82418+ GR_INHERITLEARN = 0x00004000,
82419+ GR_PROCFIND = 0x00008000,
82420+ GR_POVERRIDE = 0x00010000,
82421+ GR_KERNELAUTH = 0x00020000,
82422+ GR_ATSECURE = 0x00040000,
82423+ GR_SHMEXEC = 0x00080000
82424+};
82425+
82426+enum {
82427+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
82428+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
82429+ GR_PAX_ENABLE_MPROTECT = 0x0004,
82430+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
82431+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
82432+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
82433+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
82434+ GR_PAX_DISABLE_MPROTECT = 0x0400,
82435+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
82436+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
82437+};
82438+
82439+enum {
82440+ GR_ID_USER = 0x01,
82441+ GR_ID_GROUP = 0x02,
82442+};
82443+
82444+enum {
82445+ GR_ID_ALLOW = 0x01,
82446+ GR_ID_DENY = 0x02,
82447+};
82448+
82449+#define GR_CRASH_RES 31
82450+#define GR_UIDTABLE_MAX 500
82451+
82452+/* begin resource learning section */
82453+enum {
82454+ GR_RLIM_CPU_BUMP = 60,
82455+ GR_RLIM_FSIZE_BUMP = 50000,
82456+ GR_RLIM_DATA_BUMP = 10000,
82457+ GR_RLIM_STACK_BUMP = 1000,
82458+ GR_RLIM_CORE_BUMP = 10000,
82459+ GR_RLIM_RSS_BUMP = 500000,
82460+ GR_RLIM_NPROC_BUMP = 1,
82461+ GR_RLIM_NOFILE_BUMP = 5,
82462+ GR_RLIM_MEMLOCK_BUMP = 50000,
82463+ GR_RLIM_AS_BUMP = 500000,
82464+ GR_RLIM_LOCKS_BUMP = 2,
82465+ GR_RLIM_SIGPENDING_BUMP = 5,
82466+ GR_RLIM_MSGQUEUE_BUMP = 10000,
82467+ GR_RLIM_NICE_BUMP = 1,
82468+ GR_RLIM_RTPRIO_BUMP = 1,
82469+ GR_RLIM_RTTIME_BUMP = 1000000
82470+};
82471+
82472+#endif
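
The object-mode layout above is deliberately regular: each access right in the low ten bits (GR_READ 0x1 through GR_LINK 0x200) has its audit counterpart exactly ten bits higher (GR_AUDIT_READ 0x400 through GR_AUDIT_LINK 0x80000), which is why GR_AUDITS can be assembled mechanically and why a requested-mode-to-audit-mode conversion (to_gr_audit(), declared in grinternal.h below) can be a single shift. A minimal userspace sketch of that encoding, assuming nothing beyond the flag values shown above (the translation function is illustrative, not the kernel's):

        /* Each audit bit sits exactly 10 bits left of its access bit
         * (GR_READ 0x1 -> GR_AUDIT_READ 0x400), so converting a
         * requested mode into its audit mask is a single shift. */
        #include <stdio.h>

        enum {
                GR_READ  = 0x00000001,
                GR_WRITE = 0x00000004,
                GR_EXEC  = 0x00000008,
                /* audit twins live 10 bits higher */
                GR_AUDIT_READ  = 0x00000400,
                GR_AUDIT_WRITE = 0x00001000,
                GR_AUDIT_EXEC  = 0x00002000,
        };

        #define GR_AUDIT_SHIFT 10
        /* the low 10 bits hold the auditable access rights */
        #define GR_ACCESS_MASK 0x000003ff

        static unsigned int to_audit(unsigned int reqmode)
        {
                return (reqmode & GR_ACCESS_MASK) << GR_AUDIT_SHIFT;
        }

        int main(void)
        {
                unsigned int req = GR_READ | GR_EXEC;

                /* prints 0x2400 == GR_AUDIT_READ | GR_AUDIT_EXEC */
                printf("audit mask: %#x\n", to_audit(req));
                return 0;
        }
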
82473diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82474new file mode 100644
82475index 0000000..fb1de5d
82476--- /dev/null
82477+++ b/include/linux/grinternal.h
82478@@ -0,0 +1,230 @@
82479+#ifndef __GRINTERNAL_H
82480+#define __GRINTERNAL_H
82481+
82482+#ifdef CONFIG_GRKERNSEC
82483+
82484+#include <linux/fs.h>
82485+#include <linux/mnt_namespace.h>
82486+#include <linux/nsproxy.h>
82487+#include <linux/gracl.h>
82488+#include <linux/grdefs.h>
82489+#include <linux/grmsg.h>
82490+
82491+void gr_add_learn_entry(const char *fmt, ...)
82492+ __attribute__ ((format (printf, 1, 2)));
82493+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82494+ const struct vfsmount *mnt);
82495+__u32 gr_check_create(const struct dentry *new_dentry,
82496+ const struct dentry *parent,
82497+ const struct vfsmount *mnt, const __u32 mode);
82498+int gr_check_protected_task(const struct task_struct *task);
82499+__u32 to_gr_audit(const __u32 reqmode);
82500+int gr_set_acls(const int type);
82501+int gr_acl_is_enabled(void);
82502+char gr_roletype_to_char(void);
82503+
82504+void gr_handle_alertkill(struct task_struct *task);
82505+char *gr_to_filename(const struct dentry *dentry,
82506+ const struct vfsmount *mnt);
82507+char *gr_to_filename1(const struct dentry *dentry,
82508+ const struct vfsmount *mnt);
82509+char *gr_to_filename2(const struct dentry *dentry,
82510+ const struct vfsmount *mnt);
82511+char *gr_to_filename3(const struct dentry *dentry,
82512+ const struct vfsmount *mnt);
82513+
82514+extern int grsec_enable_ptrace_readexec;
82515+extern int grsec_enable_harden_ptrace;
82516+extern int grsec_enable_link;
82517+extern int grsec_enable_fifo;
82518+extern int grsec_enable_execve;
82519+extern int grsec_enable_shm;
82520+extern int grsec_enable_execlog;
82521+extern int grsec_enable_signal;
82522+extern int grsec_enable_audit_ptrace;
82523+extern int grsec_enable_forkfail;
82524+extern int grsec_enable_time;
82525+extern int grsec_enable_rofs;
82526+extern int grsec_deny_new_usb;
82527+extern int grsec_enable_chroot_shmat;
82528+extern int grsec_enable_chroot_mount;
82529+extern int grsec_enable_chroot_double;
82530+extern int grsec_enable_chroot_pivot;
82531+extern int grsec_enable_chroot_chdir;
82532+extern int grsec_enable_chroot_chmod;
82533+extern int grsec_enable_chroot_mknod;
82534+extern int grsec_enable_chroot_fchdir;
82535+extern int grsec_enable_chroot_nice;
82536+extern int grsec_enable_chroot_execlog;
82537+extern int grsec_enable_chroot_caps;
82538+extern int grsec_enable_chroot_rename;
82539+extern int grsec_enable_chroot_sysctl;
82540+extern int grsec_enable_chroot_unix;
82541+extern int grsec_enable_symlinkown;
82542+extern kgid_t grsec_symlinkown_gid;
82543+extern int grsec_enable_tpe;
82544+extern kgid_t grsec_tpe_gid;
82545+extern int grsec_enable_tpe_all;
82546+extern int grsec_enable_tpe_invert;
82547+extern int grsec_enable_socket_all;
82548+extern kgid_t grsec_socket_all_gid;
82549+extern int grsec_enable_socket_client;
82550+extern kgid_t grsec_socket_client_gid;
82551+extern int grsec_enable_socket_server;
82552+extern kgid_t grsec_socket_server_gid;
82553+extern kgid_t grsec_audit_gid;
82554+extern int grsec_enable_group;
82555+extern int grsec_enable_log_rwxmaps;
82556+extern int grsec_enable_mount;
82557+extern int grsec_enable_chdir;
82558+extern int grsec_resource_logging;
82559+extern int grsec_enable_blackhole;
82560+extern int grsec_lastack_retries;
82561+extern int grsec_enable_brute;
82562+extern int grsec_enable_harden_ipc;
82563+extern int grsec_lock;
82564+
82565+extern spinlock_t grsec_alert_lock;
82566+extern unsigned long grsec_alert_wtime;
82567+extern unsigned long grsec_alert_fyet;
82568+
82569+extern spinlock_t grsec_audit_lock;
82570+
82571+extern rwlock_t grsec_exec_file_lock;
82572+
82573+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82574+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82575+ (tsk)->exec_file->f_path.mnt) : "/")
82576+
82577+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82578+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82579+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82580+
82581+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82582+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82583+ (tsk)->exec_file->f_path.mnt) : "/")
82584+
82585+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82586+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82587+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82588+
82589+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82590+
82591+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82592+
82593+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82594+{
82595+ if (file1 && file2) {
82596+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82597+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82598+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82599+ return true;
82600+ }
82601+
82602+ return false;
82603+}
82604+
82605+#define GR_CHROOT_CAPS {{ \
82606+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82607+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82608+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82609+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82610+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82611+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82612+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82613+
82614+#define security_learn(normal_msg,args...) \
82615+({ \
82616+ read_lock(&grsec_exec_file_lock); \
82617+ gr_add_learn_entry(normal_msg "\n", ## args); \
82618+ read_unlock(&grsec_exec_file_lock); \
82619+})
82620+
82621+enum {
82622+ GR_DO_AUDIT,
82623+ GR_DONT_AUDIT,
82624+ /* used for non-audit messages that we shouldn't kill the task on */
82625+ GR_DONT_AUDIT_GOOD
82626+};
82627+
82628+enum {
82629+ GR_TTYSNIFF,
82630+ GR_RBAC,
82631+ GR_RBAC_STR,
82632+ GR_STR_RBAC,
82633+ GR_RBAC_MODE2,
82634+ GR_RBAC_MODE3,
82635+ GR_FILENAME,
82636+ GR_SYSCTL_HIDDEN,
82637+ GR_NOARGS,
82638+ GR_ONE_INT,
82639+ GR_ONE_INT_TWO_STR,
82640+ GR_ONE_STR,
82641+ GR_STR_INT,
82642+ GR_TWO_STR_INT,
82643+ GR_TWO_INT,
82644+ GR_TWO_U64,
82645+ GR_THREE_INT,
82646+ GR_FIVE_INT_TWO_STR,
82647+ GR_TWO_STR,
82648+ GR_THREE_STR,
82649+ GR_FOUR_STR,
82650+ GR_STR_FILENAME,
82651+ GR_FILENAME_STR,
82652+ GR_FILENAME_TWO_INT,
82653+ GR_FILENAME_TWO_INT_STR,
82654+ GR_TEXTREL,
82655+ GR_PTRACE,
82656+ GR_RESOURCE,
82657+ GR_CAP,
82658+ GR_SIG,
82659+ GR_SIG2,
82660+ GR_CRASH1,
82661+ GR_CRASH2,
82662+ GR_PSACCT,
82663+ GR_RWXMAP,
82664+ GR_RWXMAPVMA
82665+};
82666+
82667+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82668+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82669+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82670+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82671+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82672+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82673+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82674+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82675+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82676+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82677+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82678+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82679+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82680+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82681+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82682+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82683+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82684+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82685+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82686+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82687+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82688+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82689+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82690+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82691+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82692+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82693+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82694+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82695+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82696+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82697+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82698+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82699+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82700+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82701+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82702+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82703+
82704+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82705+
82706+#endif
82707+
82708+#endif
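
All of the gr_log_* wrappers above collapse into one call to gr_log_varargs(), with the GR_* enum constant describing how many and what kind of arguments follow, so a single routine can walk the va_list correctly for every message shape. A minimal userspace sketch of that dispatch scheme, with made-up tag names and formats (the kernel's implementation lives elsewhere in this patch):

        #include <stdarg.h>
        #include <stdio.h>

        enum { MY_NOARGS, MY_ONE_INT, MY_TWO_STR };

        static void my_log_varargs(const char *msg, int argtypes, ...)
        {
                va_list ap;

                va_start(ap, argtypes);
                switch (argtypes) {
                case MY_NOARGS:
                        fprintf(stderr, "%s\n", msg);
                        break;
                case MY_ONE_INT:
                        /* the tag tells us exactly one int follows */
                        fprintf(stderr, msg, va_arg(ap, int));
                        fputc('\n', stderr);
                        break;
                case MY_TWO_STR: {
                        const char *s1 = va_arg(ap, const char *);
                        const char *s2 = va_arg(ap, const char *);
                        fprintf(stderr, msg, s1, s2);
                        fputc('\n', stderr);
                        break;
                }
                }
                va_end(ap);
        }

        /* thin wrappers mirror the gr_log_* macro family above */
        #define my_log_noargs(msg)          my_log_varargs(msg, MY_NOARGS)
        #define my_log_int(msg, num)        my_log_varargs(msg, MY_ONE_INT, num)
        #define my_log_str_str(msg, s1, s2) my_log_varargs(msg, MY_TWO_STR, s1, s2)

        int main(void)
        {
                my_log_int("denied signal %d", 9);
                my_log_str_str("mount of %s to %s", "/dev/sda1", "/mnt");
                return 0;
        }
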
82709diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82710new file mode 100644
82711index 0000000..26ef560
82712--- /dev/null
82713+++ b/include/linux/grmsg.h
82714@@ -0,0 +1,118 @@
82715+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82716+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82717+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82718+#define GR_STOPMOD_MSG "denied modification of module state by "
82719+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82720+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82721+#define GR_IOPERM_MSG "denied use of ioperm() by "
82722+#define GR_IOPL_MSG "denied use of iopl() by "
82723+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82724+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82725+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82726+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82727+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82728+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82729+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82730+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82731+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82732+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82733+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82734+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82735+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82736+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82737+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82738+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82739+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82740+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82741+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82742+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82743+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82744+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82745+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82746+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82747+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82748+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82749+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82750+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82751+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82752+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82753+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82754+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82755+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82756+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82757+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82758+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82759+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82760+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82761+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82762+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82763+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82764+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82765+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82766+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82767+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82768+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82769+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82770+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82771+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82772+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82773+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82774+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82775+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82776+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82777+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82778+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82779+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82780+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82781+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82782+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82783+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82784+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82785+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82786+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82787+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82788+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82789+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82790+#define GR_NICE_CHROOT_MSG "denied priority change by "
82791+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82792+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82793+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82794+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82795+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82796+#define GR_TIME_MSG "time set by "
82797+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82798+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82799+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82800+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82801+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82802+#define GR_BIND_MSG "denied bind() by "
82803+#define GR_CONNECT_MSG "denied connect() by "
82804+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82805+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82806+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82807+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82808+#define GR_CAP_ACL_MSG "use of %s denied for "
82809+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82810+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82811+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82812+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82813+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82814+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82815+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82816+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82817+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82818+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82819+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82820+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82821+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82822+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82823+#define GR_VM86_MSG "denied use of vm86 by "
82824+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82825+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82826+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82827+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82828+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82829+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82830+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82831+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82832+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
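
These formats lean on C's adjacent-string-literal concatenation: DEFAULTSECMSG is spliced verbatim into GR_SEGVSTART_ACL_MSG, GR_DUALSIGLOG_MSG, and GR_SIG_ACL_MSG at compile time, and the many formats ending in "by " get the acting process's description appended by the logging code at run time. A small standalone illustration of the splice, using trimmed copies of the macros (not the kernel's logging path):

        #include <stdio.h>

        #define DEFAULTMSG  "%.256s[%.16s:%d]"
        #define DUALSIG_MSG "signal %d sent to " DEFAULTMSG " by "

        int main(void)
        {
                /* the two literals fuse into one format string:
                 * "signal %d sent to %.256s[%.16s:%d] by root\n" */
                printf(DUALSIG_MSG "root\n", 9, "/usr/bin/sshd", "sshd", 1234);
                return 0;
        }
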
82833diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82834new file mode 100644
82835index 0000000..63c1850
82836--- /dev/null
82837+++ b/include/linux/grsecurity.h
82838@@ -0,0 +1,250 @@
82839+#ifndef GR_SECURITY_H
82840+#define GR_SECURITY_H
82841+#include <linux/fs.h>
82842+#include <linux/fs_struct.h>
82843+#include <linux/binfmts.h>
82844+#include <linux/gracl.h>
82845+
82846+/* notify of brain-dead configs */
82847+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82848+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82849+#endif
82850+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82851+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82852+#endif
82853+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82854+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82855+#endif
82856+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82857+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82858+#endif
82859+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82860+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82861+#endif
82862+
82863+int gr_handle_new_usb(void);
82864+
82865+void gr_handle_brute_attach(int dumpable);
82866+void gr_handle_brute_check(void);
82867+void gr_handle_kernel_exploit(void);
82868+
82869+char gr_roletype_to_char(void);
82870+
82871+int gr_proc_is_restricted(void);
82872+
82873+int gr_acl_enable_at_secure(void);
82874+
82875+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82876+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82877+
82878+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82879+
82880+void gr_del_task_from_ip_table(struct task_struct *p);
82881+
82882+int gr_pid_is_chrooted(struct task_struct *p);
82883+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82884+int gr_handle_chroot_nice(void);
82885+int gr_handle_chroot_sysctl(const int op);
82886+int gr_handle_chroot_setpriority(struct task_struct *p,
82887+ const int niceval);
82888+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82889+int gr_chroot_fhandle(void);
82890+int gr_handle_chroot_chroot(const struct dentry *dentry,
82891+ const struct vfsmount *mnt);
82892+void gr_handle_chroot_chdir(const struct path *path);
82893+int gr_handle_chroot_chmod(const struct dentry *dentry,
82894+ const struct vfsmount *mnt, const int mode);
82895+int gr_handle_chroot_mknod(const struct dentry *dentry,
82896+ const struct vfsmount *mnt, const int mode);
82897+int gr_handle_chroot_mount(const struct dentry *dentry,
82898+ const struct vfsmount *mnt,
82899+ const char *dev_name);
82900+int gr_handle_chroot_pivot(void);
82901+int gr_handle_chroot_unix(const pid_t pid);
82902+
82903+int gr_handle_rawio(const struct inode *inode);
82904+
82905+void gr_handle_ioperm(void);
82906+void gr_handle_iopl(void);
82907+void gr_handle_msr_write(void);
82908+
82909+umode_t gr_acl_umask(void);
82910+
82911+int gr_tpe_allow(const struct file *file);
82912+
82913+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82914+void gr_clear_chroot_entries(struct task_struct *task);
82915+
82916+void gr_log_forkfail(const int retval);
82917+void gr_log_timechange(void);
82918+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82919+void gr_log_chdir(const struct dentry *dentry,
82920+ const struct vfsmount *mnt);
82921+void gr_log_chroot_exec(const struct dentry *dentry,
82922+ const struct vfsmount *mnt);
82923+void gr_log_remount(const char *devname, const int retval);
82924+void gr_log_unmount(const char *devname, const int retval);
82925+void gr_log_mount(const char *from, struct path *to, const int retval);
82926+void gr_log_textrel(struct vm_area_struct *vma);
82927+void gr_log_ptgnustack(struct file *file);
82928+void gr_log_rwxmmap(struct file *file);
82929+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82930+
82931+int gr_handle_follow_link(const struct inode *parent,
82932+ const struct inode *inode,
82933+ const struct dentry *dentry,
82934+ const struct vfsmount *mnt);
82935+int gr_handle_fifo(const struct dentry *dentry,
82936+ const struct vfsmount *mnt,
82937+ const struct dentry *dir, const int flag,
82938+ const int acc_mode);
82939+int gr_handle_hardlink(const struct dentry *dentry,
82940+ const struct vfsmount *mnt,
82941+ struct inode *inode,
82942+ const int mode, const struct filename *to);
82943+
82944+int gr_is_capable(const int cap);
82945+int gr_is_capable_nolog(const int cap);
82946+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82947+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82948+
82949+void gr_copy_label(struct task_struct *tsk);
82950+void gr_handle_crash(struct task_struct *task, const int sig);
82951+int gr_handle_signal(const struct task_struct *p, const int sig);
82952+int gr_check_crash_uid(const kuid_t uid);
82953+int gr_check_protected_task(const struct task_struct *task);
82954+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82955+int gr_acl_handle_mmap(const struct file *file,
82956+ const unsigned long prot);
82957+int gr_acl_handle_mprotect(const struct file *file,
82958+ const unsigned long prot);
82959+int gr_check_hidden_task(const struct task_struct *tsk);
82960+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82961+ const struct vfsmount *mnt);
82962+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82963+ const struct vfsmount *mnt);
82964+__u32 gr_acl_handle_access(const struct dentry *dentry,
82965+ const struct vfsmount *mnt, const int fmode);
82966+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82967+ const struct vfsmount *mnt, umode_t *mode);
82968+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82969+ const struct vfsmount *mnt);
82970+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82971+ const struct vfsmount *mnt);
82972+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82973+ const struct vfsmount *mnt);
82974+int gr_handle_ptrace(struct task_struct *task, const long request);
82975+int gr_handle_proc_ptrace(struct task_struct *task);
82976+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82977+ const struct vfsmount *mnt);
82978+int gr_check_crash_exec(const struct file *filp);
82979+int gr_acl_is_enabled(void);
82980+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82981+ const kgid_t gid);
82982+int gr_set_proc_label(const struct dentry *dentry,
82983+ const struct vfsmount *mnt,
82984+ const int unsafe_flags);
82985+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82986+ const struct vfsmount *mnt);
82987+__u32 gr_acl_handle_open(const struct dentry *dentry,
82988+ const struct vfsmount *mnt, int acc_mode);
82989+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82990+ const struct dentry *p_dentry,
82991+ const struct vfsmount *p_mnt,
82992+ int open_flags, int acc_mode, const int imode);
82993+void gr_handle_create(const struct dentry *dentry,
82994+ const struct vfsmount *mnt);
82995+void gr_handle_proc_create(const struct dentry *dentry,
82996+ const struct inode *inode);
82997+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82998+ const struct dentry *parent_dentry,
82999+ const struct vfsmount *parent_mnt,
83000+ const int mode);
83001+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
83002+ const struct dentry *parent_dentry,
83003+ const struct vfsmount *parent_mnt);
83004+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
83005+ const struct vfsmount *mnt);
83006+void gr_handle_delete(const u64 ino, const dev_t dev);
83007+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
83008+ const struct vfsmount *mnt);
83009+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
83010+ const struct dentry *parent_dentry,
83011+ const struct vfsmount *parent_mnt,
83012+ const struct filename *from);
83013+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
83014+ const struct dentry *parent_dentry,
83015+ const struct vfsmount *parent_mnt,
83016+ const struct dentry *old_dentry,
83017+ const struct vfsmount *old_mnt, const struct filename *to);
83018+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
83019+int gr_acl_handle_rename(struct dentry *new_dentry,
83020+ struct dentry *parent_dentry,
83021+ const struct vfsmount *parent_mnt,
83022+ struct dentry *old_dentry,
83023+ struct inode *old_parent_inode,
83024+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
83025+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
83026+ struct dentry *old_dentry,
83027+ struct dentry *new_dentry,
83028+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
83029+__u32 gr_check_link(const struct dentry *new_dentry,
83030+ const struct dentry *parent_dentry,
83031+ const struct vfsmount *parent_mnt,
83032+ const struct dentry *old_dentry,
83033+ const struct vfsmount *old_mnt);
83034+int gr_acl_handle_filldir(const struct file *file, const char *name,
83035+ const unsigned int namelen, const u64 ino);
83036+
83037+__u32 gr_acl_handle_unix(const struct dentry *dentry,
83038+ const struct vfsmount *mnt);
83039+void gr_acl_handle_exit(void);
83040+void gr_acl_handle_psacct(struct task_struct *task, const long code);
83041+int gr_acl_handle_procpidmem(const struct task_struct *task);
83042+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
83043+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
83044+void gr_audit_ptrace(struct task_struct *task);
83045+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
83046+u64 gr_get_ino_from_dentry(struct dentry *dentry);
83047+void gr_put_exec_file(struct task_struct *task);
83048+
83049+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
83050+
83051+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
83052+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
83053+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
83054+ struct dentry *newdentry, struct vfsmount *newmnt);
83055+
83056+#ifdef CONFIG_GRKERNSEC_RESLOG
83057+extern void gr_log_resource(const struct task_struct *task, const int res,
83058+ const unsigned long wanted, const int gt);
83059+#else
83060+static inline void gr_log_resource(const struct task_struct *task, const int res,
83061+ const unsigned long wanted, const int gt)
83062+{
83063+}
83064+#endif
83065+
83066+#ifdef CONFIG_GRKERNSEC
83067+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
83068+void gr_handle_vm86(void);
83069+void gr_handle_mem_readwrite(u64 from, u64 to);
83070+
83071+void gr_log_badprocpid(const char *entry);
83072+
83073+extern int grsec_enable_dmesg;
83074+extern int grsec_disable_privio;
83075+
83076+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83077+extern kgid_t grsec_proc_gid;
83078+#endif
83079+
83080+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83081+extern int grsec_enable_chroot_findtask;
83082+#endif
83083+#ifdef CONFIG_GRKERNSEC_SETXID
83084+extern int grsec_enable_setxid;
83085+#endif
83086+#endif
83087+
83088+#endif
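
Two idioms in this header are worth noting. The #error block at the top turns contradictory configurations (for example, both CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP) into build failures rather than silent misbehavior. And the gr_log_resource() declaration shows the standard compiled-out-feature pattern: when CONFIG_GRKERNSEC_RESLOG is off, the header supplies an empty static inline with the same signature, so call sites stay free of #ifdefs and the compiler discards the calls entirely. A minimal sketch of the second idiom, with a made-up config switch:

        #include <stdio.h>

        /* #define CONFIG_MY_FEATURE_LOG 1 */

        #ifdef CONFIG_MY_FEATURE_LOG
        void my_log_event(int id);  /* real definition lives in one .c file */
        #else
        static inline void my_log_event(int id) { (void)id; }  /* no-op stub */
        #endif

        int main(void)
        {
                my_log_event(42);  /* compiles either way; vanishes when disabled */
                printf("done\n");
                return 0;
        }
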
83089diff --git a/include/linux/grsock.h b/include/linux/grsock.h
83090new file mode 100644
83091index 0000000..e7ffaaf
83092--- /dev/null
83093+++ b/include/linux/grsock.h
83094@@ -0,0 +1,19 @@
83095+#ifndef __GRSOCK_H
83096+#define __GRSOCK_H
83097+
83098+extern void gr_attach_curr_ip(const struct sock *sk);
83099+extern int gr_handle_sock_all(const int family, const int type,
83100+ const int protocol);
83101+extern int gr_handle_sock_server(const struct sockaddr *sck);
83102+extern int gr_handle_sock_server_other(const struct sock *sck);
83103+extern int gr_handle_sock_client(const struct sockaddr *sck);
83104+extern int gr_search_connect(struct socket * sock,
83105+ struct sockaddr_in * addr);
83106+extern int gr_search_bind(struct socket * sock,
83107+ struct sockaddr_in * addr);
83108+extern int gr_search_listen(struct socket * sock);
83109+extern int gr_search_accept(struct socket * sock);
83110+extern int gr_search_socket(const int domain, const int type,
83111+ const int protocol);
83112+
83113+#endif
83114diff --git a/include/linux/highmem.h b/include/linux/highmem.h
83115index 9286a46..373f27f 100644
83116--- a/include/linux/highmem.h
83117+++ b/include/linux/highmem.h
83118@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
83119 kunmap_atomic(kaddr);
83120 }
83121
83122+static inline void sanitize_highpage(struct page *page)
83123+{
83124+ void *kaddr;
83125+ unsigned long flags;
83126+
83127+ local_irq_save(flags);
83128+ kaddr = kmap_atomic(page);
83129+ clear_page(kaddr);
83130+ kunmap_atomic(kaddr);
83131+ local_irq_restore(flags);
83132+}
83133+
83134 static inline void zero_user_segments(struct page *page,
83135 unsigned start1, unsigned end1,
83136 unsigned start2, unsigned end2)
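
sanitize_highpage() is the scrub primitive used by PaX's memory-sanitization feature: it clears a possibly-highmem page through an atomic kmap, with local interrupts disabled so it is safe from the page-free paths it is called on. A userspace analogue of the underlying scrub-on-free idea, assuming nothing about the kernel internals (sanitized_free() is an illustrative name; real hardening code would use explicit_bzero() or equivalent to defeat dead-store elimination):

        #include <stdlib.h>
        #include <string.h>

        static void sanitized_free(void *p, size_t len)
        {
                volatile unsigned char *q = p;

                if (!p)
                        return;
                /* scrub before releasing, so stale contents cannot leak
                 * through the allocator to the next user of the memory */
                while (len--)
                        *q++ = 0;
                free(p);
        }

        int main(void)
        {
                char *secret = malloc(32);

                if (!secret)
                        return 1;
                strcpy(secret, "hunter2");
                sanitized_free(secret, 32);
                return 0;
        }
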
83137diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
83138index 1c7b89a..7dda400 100644
83139--- a/include/linux/hwmon-sysfs.h
83140+++ b/include/linux/hwmon-sysfs.h
83141@@ -25,7 +25,8 @@
83142 struct sensor_device_attribute{
83143 struct device_attribute dev_attr;
83144 int index;
83145-};
83146+} __do_const;
83147+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
83148 #define to_sensor_dev_attr(_dev_attr) \
83149 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
83150
83151@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
83152 struct device_attribute dev_attr;
83153 u8 index;
83154 u8 nr;
83155-};
83156+} __do_const;
83157+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
83158 #define to_sensor_dev_attr_2(_dev_attr) \
83159 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
83160
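
This is the first of many hunks applying the PaX constify pattern: __do_const tells the constify GCC plugin to force every instance of an ops-style structure into read-only memory (so an attacker cannot overwrite its function pointers), while the __no_const typedef gives the rare driver that must fill such a structure in at run time a writable escape hatch. A compilable sketch of the pattern, with the two attributes reduced to no-op stand-ins since the plugin is not assumed here:

        #include <stdio.h>

        #define __do_const  /* plugin: force instances const */
        #define __no_const  /* plugin: exempt this type */

        struct ops {
                int (*show)(void);
        } __do_const;

        typedef struct ops __no_const ops_no_const;

        static int show_temp(void) { return 42; }

        /* the common case: set once, read-only for the kernel's lifetime */
        static const struct ops fixed_ops = { .show = show_temp };

        /* the exception: a table filled in at probe/boot time */
        static ops_no_const runtime_ops;

        int main(void)
        {
                runtime_ops.show = show_temp; /* legal only via the _no_const type */
                printf("%d %d\n", fixed_ops.show(), runtime_ops.show());
                return 0;
        }
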
83161diff --git a/include/linux/i2c.h b/include/linux/i2c.h
83162index f17da50..2f8b203 100644
83163--- a/include/linux/i2c.h
83164+++ b/include/linux/i2c.h
83165@@ -409,6 +409,7 @@ struct i2c_algorithm {
83166 int (*unreg_slave)(struct i2c_client *client);
83167 #endif
83168 };
83169+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
83170
83171 /**
83172 * struct i2c_bus_recovery_info - I2C bus recovery information
83173diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
83174index aff7ad8..3942bbd 100644
83175--- a/include/linux/if_pppox.h
83176+++ b/include/linux/if_pppox.h
83177@@ -76,7 +76,7 @@ struct pppox_proto {
83178 int (*ioctl)(struct socket *sock, unsigned int cmd,
83179 unsigned long arg);
83180 struct module *owner;
83181-};
83182+} __do_const;
83183
83184 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
83185 extern void unregister_pppox_proto(int proto_num);
83186diff --git a/include/linux/init.h b/include/linux/init.h
83187index 2df8e8d..3e1280d 100644
83188--- a/include/linux/init.h
83189+++ b/include/linux/init.h
83190@@ -37,9 +37,17 @@
83191 * section.
83192 */
83193
83194+#define add_init_latent_entropy __latent_entropy
83195+
83196+#ifdef CONFIG_MEMORY_HOTPLUG
83197+#define add_meminit_latent_entropy
83198+#else
83199+#define add_meminit_latent_entropy __latent_entropy
83200+#endif
83201+
83202 /* These are for everybody (although not all archs will actually
83203 discard it in modules) */
83204-#define __init __section(.init.text) __cold notrace
83205+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
83206 #define __initdata __section(.init.data)
83207 #define __initconst __constsection(.init.rodata)
83208 #define __exitdata __section(.exit.data)
83209@@ -100,7 +108,7 @@
83210 #define __cpuexitconst
83211
83212 /* Used for MEMORY_HOTPLUG */
83213-#define __meminit __section(.meminit.text) __cold notrace
83214+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
83215 #define __meminitdata __section(.meminit.data)
83216 #define __meminitconst __constsection(.meminit.rodata)
83217 #define __memexit __section(.memexit.text) __exitused __cold notrace
83218diff --git a/include/linux/init_task.h b/include/linux/init_task.h
83219index 696d223..6d6b39f 100644
83220--- a/include/linux/init_task.h
83221+++ b/include/linux/init_task.h
83222@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
83223
83224 #define INIT_TASK_COMM "swapper"
83225
83226+#ifdef CONFIG_X86
83227+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
83228+#else
83229+#define INIT_TASK_THREAD_INFO
83230+#endif
83231+
83232 #ifdef CONFIG_RT_MUTEXES
83233 # define INIT_RT_MUTEXES(tsk) \
83234 .pi_waiters = RB_ROOT, \
83235@@ -224,6 +230,7 @@ extern struct task_group root_task_group;
83236 RCU_POINTER_INITIALIZER(cred, &init_cred), \
83237 .comm = INIT_TASK_COMM, \
83238 .thread = INIT_THREAD, \
83239+ INIT_TASK_THREAD_INFO \
83240 .fs = &init_fs, \
83241 .files = &init_files, \
83242 .signal = &init_signals, \
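
INIT_TASK_THREAD_INFO uses the initializer-fragment idiom: on x86, where the patch embeds thread_info in the task structure, the macro expands to a designated initializer with its trailing comma, and to nothing elsewhere, so the single INIT_TASK body serves both layouts. A compilable reduction of the idiom, with a made-up ARCH_HAS_EXTRA switch:

        #include <stdio.h>

        #define ARCH_HAS_EXTRA 1

        struct task {
                const char *comm;
        #if ARCH_HAS_EXTRA
                int extra;
        #endif
                int prio;
        };

        /* expands to a field initializer (comma included) or to nothing */
        #if ARCH_HAS_EXTRA
        #define INIT_TASK_EXTRA .extra = 7,
        #else
        #define INIT_TASK_EXTRA
        #endif

        #define INIT_TASK { \
                .comm = "swapper", \
                INIT_TASK_EXTRA \
                .prio = 120, \
        }

        static struct task init_task = INIT_TASK;

        int main(void)
        {
                printf("%s %d\n", init_task.comm, init_task.prio);
                return 0;
        }
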
83243diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
83244index 2e88580..f6a99a0 100644
83245--- a/include/linux/interrupt.h
83246+++ b/include/linux/interrupt.h
83247@@ -420,8 +420,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
83248
83249 struct softirq_action
83250 {
83251- void (*action)(struct softirq_action *);
83252-};
83253+ void (*action)(void);
83254+} __no_const;
83255
83256 asmlinkage void do_softirq(void);
83257 asmlinkage void __do_softirq(void);
83258@@ -435,7 +435,7 @@ static inline void do_softirq_own_stack(void)
83259 }
83260 #endif
83261
83262-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
83263+extern void open_softirq(int nr, void (*action)(void));
83264 extern void softirq_init(void);
83265 extern void __raise_softirq_irqoff(unsigned int nr);
83266
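
The softirq_action callback never used the pointer it was handed, so the hardened tree narrows the signature to void (*)(void); together with __no_const this keeps the handler table coherent while every handler becomes a plain function pointer. A userspace sketch of the resulting registration and dispatch shape (names and the pending bitmask are illustrative):

        #include <stdio.h>

        #define NR_SOFTIRQS 4

        static void (*softirq_vec[NR_SOFTIRQS])(void);

        static void open_softirq(int nr, void (*action)(void))
        {
                softirq_vec[nr] = action;
        }

        static void run_pending(unsigned int pending)
        {
                for (int nr = 0; nr < NR_SOFTIRQS; nr++)
                        if ((pending & (1u << nr)) && softirq_vec[nr])
                                softirq_vec[nr]();  /* no unused argument */
        }

        static void timer_action(void)  { puts("timer"); }
        static void net_rx_action(void) { puts("net rx"); }

        int main(void)
        {
                open_softirq(0, timer_action);
                open_softirq(1, net_rx_action);
                run_pending(0x3);
                return 0;
        }
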
83267diff --git a/include/linux/iommu.h b/include/linux/iommu.h
83268index 38daa45..4de4317 100644
83269--- a/include/linux/iommu.h
83270+++ b/include/linux/iommu.h
83271@@ -147,7 +147,7 @@ struct iommu_ops {
83272
83273 unsigned long pgsize_bitmap;
83274 void *priv;
83275-};
83276+} __do_const;
83277
83278 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
83279 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
83280diff --git a/include/linux/ioport.h b/include/linux/ioport.h
83281index 2c525022..345b106 100644
83282--- a/include/linux/ioport.h
83283+++ b/include/linux/ioport.h
83284@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
83285 int adjust_resource(struct resource *res, resource_size_t start,
83286 resource_size_t size);
83287 resource_size_t resource_alignment(struct resource *res);
83288-static inline resource_size_t resource_size(const struct resource *res)
83289+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
83290 {
83291 return res->end - res->start + 1;
83292 }
83293diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
83294index 1eee6bc..9cf4912 100644
83295--- a/include/linux/ipc_namespace.h
83296+++ b/include/linux/ipc_namespace.h
83297@@ -60,7 +60,7 @@ struct ipc_namespace {
83298 struct user_namespace *user_ns;
83299
83300 struct ns_common ns;
83301-};
83302+} __randomize_layout;
83303
83304 extern struct ipc_namespace init_ipc_ns;
83305 extern atomic_t nr_ipc_ns;
83306diff --git a/include/linux/irq.h b/include/linux/irq.h
83307index d09ec7a..f373eb5 100644
83308--- a/include/linux/irq.h
83309+++ b/include/linux/irq.h
83310@@ -364,7 +364,8 @@ struct irq_chip {
83311 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
83312
83313 unsigned long flags;
83314-};
83315+} __do_const;
83316+typedef struct irq_chip __no_const irq_chip_no_const;
83317
83318 /*
83319 * irq_chip specific flags
83320diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
83321index 71d706d..817cdec 100644
83322--- a/include/linux/irqchip/arm-gic.h
83323+++ b/include/linux/irqchip/arm-gic.h
83324@@ -95,7 +95,7 @@
83325
83326 struct device_node;
83327
83328-extern struct irq_chip gic_arch_extn;
83329+extern irq_chip_no_const gic_arch_extn;
83330
83331 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
83332 u32 offset, struct device_node *);
83333diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
83334index dd1109f..4f4fdda 100644
83335--- a/include/linux/irqdesc.h
83336+++ b/include/linux/irqdesc.h
83337@@ -61,7 +61,7 @@ struct irq_desc {
83338 unsigned int irq_count; /* For detecting broken IRQs */
83339 unsigned long last_unhandled; /* Aging timer for unhandled count */
83340 unsigned int irqs_unhandled;
83341- atomic_t threads_handled;
83342+ atomic_unchecked_t threads_handled;
83343 int threads_handled_last;
83344 raw_spinlock_t lock;
83345 struct cpumask *percpu_enabled;
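
atomic_unchecked_t is the escape hatch from PaX's REFCOUNT protection: ordinary atomic_t operations are instrumented to trap on overflow (a reference count that wraps is a classic use-after-free primitive), while counters that are mere statistics, like threads_handled here, are switched to the unchecked type and allowed to wrap. A userspace sketch of the checked/unchecked split (stand-in types; the kernel's versions are per-arch assembly):

        #include <limits.h>
        #include <stdio.h>
        #include <stdlib.h>

        typedef struct { int v; } atomic_t;           /* overflow-checked */
        typedef struct { int v; } atomic_unchecked_t; /* wraps silently */

        static void atomic_inc(atomic_t *a)
        {
                if (a->v == INT_MAX) {
                        fprintf(stderr, "refcount overflow detected\n");
                        abort();  /* fail hard instead of wrapping */
                }
                a->v++;
        }

        static void atomic_inc_unchecked(atomic_unchecked_t *a)
        {
                /* explicit two's-complement wrap, no check */
                a->v = (a->v == INT_MAX) ? INT_MIN : a->v + 1;
        }

        int main(void)
        {
                atomic_t ref = { 1 };
                atomic_unchecked_t stats = { INT_MAX };

                atomic_inc(&ref);             /* would abort at INT_MAX */
                atomic_inc_unchecked(&stats); /* fine: just a statistic */
                printf("ref=%d stats wrapped to %d\n", ref.v, stats.v);
                return 0;
        }
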
83346diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
83347index c367cbd..c9b79e6 100644
83348--- a/include/linux/jiffies.h
83349+++ b/include/linux/jiffies.h
83350@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
83351 /*
83352 * Convert various time units to each other:
83353 */
83354-extern unsigned int jiffies_to_msecs(const unsigned long j);
83355-extern unsigned int jiffies_to_usecs(const unsigned long j);
83356+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
83357+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
83358
83359-static inline u64 jiffies_to_nsecs(const unsigned long j)
83360+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
83361 {
83362 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
83363 }
83364
83365-extern unsigned long msecs_to_jiffies(const unsigned int m);
83366-extern unsigned long usecs_to_jiffies(const unsigned int u);
83367+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
83368+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
83369 extern unsigned long timespec_to_jiffies(const struct timespec *value);
83370 extern void jiffies_to_timespec(const unsigned long jiffies,
83371- struct timespec *value);
83372-extern unsigned long timeval_to_jiffies(const struct timeval *value);
83373+ struct timespec *value) __intentional_overflow(-1);
83374+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
83375 extern void jiffies_to_timeval(const unsigned long jiffies,
83376 struct timeval *value);
83377
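
__intentional_overflow(-1) is an annotation consumed by the PaX size_overflow GCC plugin, which otherwise instruments integer arithmetic and traps on unexpected overflow; the time-conversion helpers are whitelisted because their math is expected to wrap for large inputs. A compilable sketch of the annotation style, with the attribute reduced to a no-op macro since the plugin is not assumed:

        #include <stdio.h>

        #define __intentional_overflow(...) /* consumed by the GCC plugin */

        #define NSEC_PER_MSEC 1000000UL

        /* may wrap for huge m on 32-bit targets; that is accepted
         * behavior, hence the whitelist marker */
        static unsigned long __intentional_overflow(-1)
        msecs_to_ns(unsigned long m)
        {
                return m * NSEC_PER_MSEC;
        }

        int main(void)
        {
                printf("%lu\n", msecs_to_ns(5));
                return 0;
        }
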
83378diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
83379index 6883e19..e854fcb 100644
83380--- a/include/linux/kallsyms.h
83381+++ b/include/linux/kallsyms.h
83382@@ -15,7 +15,8 @@
83383
83384 struct module;
83385
83386-#ifdef CONFIG_KALLSYMS
83387+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
83388+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
83389 /* Lookup the address for a symbol. Returns 0 if not found. */
83390 unsigned long kallsyms_lookup_name(const char *name);
83391
83392@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
83393 /* Stupid that this does nothing, but I didn't create this mess. */
83394 #define __print_symbol(fmt, addr)
83395 #endif /*CONFIG_KALLSYMS*/
83396+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
83397+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
83398+extern unsigned long kallsyms_lookup_name(const char *name);
83399+extern void __print_symbol(const char *fmt, unsigned long address);
83400+extern int sprint_backtrace(char *buffer, unsigned long address);
83401+extern int sprint_symbol(char *buffer, unsigned long address);
83402+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
83403+const char *kallsyms_lookup(unsigned long addr,
83404+ unsigned long *symbolsize,
83405+ unsigned long *offset,
83406+ char **modname, char *namebuf);
83407+extern int kallsyms_lookup_size_offset(unsigned long addr,
83408+ unsigned long *symbolsize,
83409+ unsigned long *offset);
83410+#endif
83411
83412 /* This macro allows us to keep printk typechecking */
83413 static __printf(1, 2)
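
This preprocessor sandwich implements CONFIG_GRKERNSEC_HIDESYM's symbol hiding: ordinary translation units see only the stub interface even when CONFIG_KALLSYMS is on, while the few trusted files named in the comment define __INCLUDED_BY_HIDESYM before including the header and receive the real prototypes. A rough compilable reduction of the gating (condition structure simplified; names are illustrative):

        #include <stdio.h>

        #define CONFIG_HIDESYM 1
        /* #define INCLUDED_BY_HIDESYM 1   <- only trusted files set this */

        #if !defined(INCLUDED_BY_HIDESYM) && defined(CONFIG_HIDESYM)
        /* untrusted consumers: symbol lookup is stubbed out */
        #define lookup_name(name) 0UL
        #else
        /* trusted consumers: the real lookup */
        static unsigned long lookup_name(const char *name)
        {
                (void)name;
                return 0xc0de;
        }
        #endif

        int main(void)
        {
                printf("%#lx\n", (unsigned long)lookup_name("printk"));
                return 0;
        }
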
83414diff --git a/include/linux/kernel.h b/include/linux/kernel.h
83415index d6d630d..feea1f5 100644
83416--- a/include/linux/kernel.h
83417+++ b/include/linux/kernel.h
83418@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
83419 /* Obsolete, do not use. Use kstrto<foo> instead */
83420
83421 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
83422-extern long simple_strtol(const char *,char **,unsigned int);
83423+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
83424 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
83425 extern long long simple_strtoll(const char *,char **,unsigned int);
83426
83427diff --git a/include/linux/key-type.h b/include/linux/key-type.h
83428index ff9f1d3..6712be5 100644
83429--- a/include/linux/key-type.h
83430+++ b/include/linux/key-type.h
83431@@ -152,7 +152,7 @@ struct key_type {
83432 /* internal fields */
83433 struct list_head link; /* link in types list */
83434 struct lock_class_key lock_class; /* key->sem lock class */
83435-};
83436+} __do_const;
83437
83438 extern struct key_type key_type_keyring;
83439
83440diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
83441index e465bb1..19f605fd 100644
83442--- a/include/linux/kgdb.h
83443+++ b/include/linux/kgdb.h
83444@@ -52,7 +52,7 @@ extern int kgdb_connected;
83445 extern int kgdb_io_module_registered;
83446
83447 extern atomic_t kgdb_setting_breakpoint;
83448-extern atomic_t kgdb_cpu_doing_single_step;
83449+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
83450
83451 extern struct task_struct *kgdb_usethread;
83452 extern struct task_struct *kgdb_contthread;
83453@@ -254,7 +254,7 @@ struct kgdb_arch {
83454 void (*correct_hw_break)(void);
83455
83456 void (*enable_nmi)(bool on);
83457-};
83458+} __do_const;
83459
83460 /**
83461 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83462@@ -279,7 +279,7 @@ struct kgdb_io {
83463 void (*pre_exception) (void);
83464 void (*post_exception) (void);
83465 int is_console;
83466-};
83467+} __do_const;
83468
83469 extern struct kgdb_arch arch_kgdb_ops;
83470
83471diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
83472index e705467..a92471d 100644
83473--- a/include/linux/kmemleak.h
83474+++ b/include/linux/kmemleak.h
83475@@ -27,7 +27,7 @@
83476
83477 extern void kmemleak_init(void) __ref;
83478 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83479- gfp_t gfp) __ref;
83480+ gfp_t gfp) __ref __size_overflow(2);
83481 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
83482 extern void kmemleak_free(const void *ptr) __ref;
83483 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
83484@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
83485 static inline void kmemleak_init(void)
83486 {
83487 }
83488-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83489+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
83490 gfp_t gfp)
83491 {
83492 }
83493diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83494index 0555cc6..40116ce 100644
83495--- a/include/linux/kmod.h
83496+++ b/include/linux/kmod.h
83497@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83498 * usually useless though. */
83499 extern __printf(2, 3)
83500 int __request_module(bool wait, const char *name, ...);
83501+extern __printf(3, 4)
83502+int ___request_module(bool wait, char *param_name, const char *name, ...);
83503 #define request_module(mod...) __request_module(true, mod)
83504 #define request_module_nowait(mod...) __request_module(false, mod)
83505 #define try_then_request_module(x, mod...) \
83506@@ -57,6 +59,9 @@ struct subprocess_info {
83507 struct work_struct work;
83508 struct completion *complete;
83509 char *path;
83510+#ifdef CONFIG_GRKERNSEC
83511+ char *origpath;
83512+#endif
83513 char **argv;
83514 char **envp;
83515 int wait;
83516diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83517index 2d61b90..a1d0a13 100644
83518--- a/include/linux/kobject.h
83519+++ b/include/linux/kobject.h
83520@@ -118,7 +118,7 @@ struct kobj_type {
83521 struct attribute **default_attrs;
83522 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83523 const void *(*namespace)(struct kobject *kobj);
83524-};
83525+} __do_const;
83526
83527 struct kobj_uevent_env {
83528 char *argv[3];
83529@@ -142,6 +142,7 @@ struct kobj_attribute {
83530 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83531 const char *buf, size_t count);
83532 };
83533+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83534
83535 extern const struct sysfs_ops kobj_sysfs_ops;
83536
83537@@ -169,7 +170,7 @@ struct kset {
83538 spinlock_t list_lock;
83539 struct kobject kobj;
83540 const struct kset_uevent_ops *uevent_ops;
83541-};
83542+} __randomize_layout;
83543
83544 extern void kset_init(struct kset *kset);
83545 extern int __must_check kset_register(struct kset *kset);
83546diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83547index df32d25..fb52e27 100644
83548--- a/include/linux/kobject_ns.h
83549+++ b/include/linux/kobject_ns.h
83550@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83551 const void *(*netlink_ns)(struct sock *sk);
83552 const void *(*initial_ns)(void);
83553 void (*drop_ns)(void *);
83554-};
83555+} __do_const;
83556
83557 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83558 int kobj_ns_type_registered(enum kobj_ns_type type);
83559diff --git a/include/linux/kref.h b/include/linux/kref.h
83560index 484604d..0f6c5b6 100644
83561--- a/include/linux/kref.h
83562+++ b/include/linux/kref.h
83563@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83564 static inline int kref_sub(struct kref *kref, unsigned int count,
83565 void (*release)(struct kref *kref))
83566 {
83567- WARN_ON(release == NULL);
83568+ BUG_ON(release == NULL);
83569
83570 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83571 release(kref);
83572diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83573index d12b210..d91fd76 100644
83574--- a/include/linux/kvm_host.h
83575+++ b/include/linux/kvm_host.h
83576@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
83577 {
83578 }
83579 #endif
83580-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83581+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83582 struct module *module);
83583 void kvm_exit(void);
83584
83585@@ -633,7 +633,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83586 struct kvm_guest_debug *dbg);
83587 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83588
83589-int kvm_arch_init(void *opaque);
83590+int kvm_arch_init(const void *opaque);
83591 void kvm_arch_exit(void);
83592
83593 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83594diff --git a/include/linux/libata.h b/include/linux/libata.h
83595index 6b08cc1..248c5e9 100644
83596--- a/include/linux/libata.h
83597+++ b/include/linux/libata.h
83598@@ -980,7 +980,7 @@ struct ata_port_operations {
83599 * fields must be pointers.
83600 */
83601 const struct ata_port_operations *inherits;
83602-};
83603+} __do_const;
83604
83605 struct ata_port_info {
83606 unsigned long flags;
83607diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83608index a6a42dd..6c5ebce 100644
83609--- a/include/linux/linkage.h
83610+++ b/include/linux/linkage.h
83611@@ -36,6 +36,7 @@
83612 #endif
83613
83614 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83615+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83616 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83617
83618 /*
83619diff --git a/include/linux/list.h b/include/linux/list.h
83620index feb773c..98f3075 100644
83621--- a/include/linux/list.h
83622+++ b/include/linux/list.h
83623@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
83624 extern void list_del(struct list_head *entry);
83625 #endif
83626
83627+extern void __pax_list_add(struct list_head *new,
83628+ struct list_head *prev,
83629+ struct list_head *next);
83630+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83631+{
83632+ __pax_list_add(new, head, head->next);
83633+}
83634+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83635+{
83636+ __pax_list_add(new, head->prev, head);
83637+}
83638+extern void pax_list_del(struct list_head *entry);
83639+
83640 /**
83641 * list_replace - replace old entry by new one
83642 * @old : the element to be replaced
83643@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83644 INIT_LIST_HEAD(entry);
83645 }
83646
83647+extern void pax_list_del_init(struct list_head *entry);
83648+
83649 /**
83650 * list_move - delete from one list and add as another's head
83651 * @list: the entry to move
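
The pax_list_* helpers are hardened variants of the ordinary list primitives: they validate the neighbours' linkage before splicing, so corruption is caught rather than propagated, and in the PaX tree they can additionally operate on list nodes placed in read-only memory, which plain list_add() cannot. A standalone sketch of the validation half only (checked_list_add() is an illustrative name; the read-only-memory handling is omitted):

        #include <assert.h>
        #include <stdio.h>

        struct list_head { struct list_head *next, *prev; };

        #define LIST_HEAD_INIT(name) { &(name), &(name) }

        static void checked_list_add(struct list_head *new,
                                     struct list_head *prev,
                                     struct list_head *next)
        {
                /* the linkage invariant a corrupted list violates */
                assert(next->prev == prev && prev->next == next);
                next->prev = new;
                new->next = next;
                new->prev = prev;
                prev->next = new;
        }

        int main(void)
        {
                struct list_head head = LIST_HEAD_INIT(head);
                struct list_head a, b;

                checked_list_add(&a, &head, head.next); /* add at head */
                checked_list_add(&b, head.prev, &head); /* add at tail */
                printf("ok: %d\n", head.next == &a && head.prev == &b);
                return 0;
        }
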
83652diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83653index b10b122..d37b3de 100644
83654--- a/include/linux/lockref.h
83655+++ b/include/linux/lockref.h
83656@@ -28,7 +28,7 @@ struct lockref {
83657 #endif
83658 struct {
83659 spinlock_t lock;
83660- int count;
83661+ atomic_t count;
83662 };
83663 };
83664 };
83665@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
83666 extern int lockref_get_not_dead(struct lockref *);
83667
83668 /* Must be called under spinlock for reliable results */
83669-static inline int __lockref_is_dead(const struct lockref *l)
83670+static inline int __lockref_is_dead(const struct lockref *lockref)
83671 {
83672- return ((int)l->count < 0);
83673+ return atomic_read(&lockref->count) < 0;
83674+}
83675+
83676+static inline int __lockref_read(const struct lockref *lockref)
83677+{
83678+ return atomic_read(&lockref->count);
83679+}
83680+
83681+static inline void __lockref_set(struct lockref *lockref, int count)
83682+{
83683+ atomic_set(&lockref->count, count);
83684+}
83685+
83686+static inline void __lockref_inc(struct lockref *lockref)
83687+{
83688+ atomic_inc(&lockref->count);
83689+}
83690+
83691+static inline void __lockref_dec(struct lockref *lockref)
83692+{
83693+ atomic_dec(&lockref->count);
83694 }
83695
83696 #endif /* __LINUX_LOCKREF_H */
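
The lockref hunk turns the plain int count into an atomic_t and funnels all access through __lockref_read/set/inc/dec, so former l->count manipulations become atomic operations (which the PaX reference-counter checking can then observe). A userspace model of the same shape, with C11 atomic_int standing in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    struct lockref_sketch {
        atomic_int count;                      /* was: int count */
    };

    static int lockref_is_dead(struct lockref_sketch *l)
    {
        return atomic_load(&l->count) < 0;     /* atomic_read() in the hunk */
    }

    int main(void)
    {
        struct lockref_sketch l = { .count = ATOMIC_VAR_INIT(1) };

        atomic_fetch_add(&l.count, 1);         /* __lockref_inc(&l) */
        atomic_fetch_sub(&l.count, 1);         /* __lockref_dec(&l) */
        printf("count=%d dead=%d\n",
               atomic_load(&l.count), lockref_is_dead(&l));
        return 0;
    }
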
83697diff --git a/include/linux/math64.h b/include/linux/math64.h
83698index c45c089..298841c 100644
83699--- a/include/linux/math64.h
83700+++ b/include/linux/math64.h
83701@@ -15,7 +15,7 @@
83702 * This is commonly provided by 32bit archs to provide an optimized 64bit
83703 * divide.
83704 */
83705-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83706+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83707 {
83708 *remainder = dividend % divisor;
83709 return dividend / divisor;
83710@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83711 /**
83712 * div64_u64 - unsigned 64bit divide with 64bit divisor
83713 */
83714-static inline u64 div64_u64(u64 dividend, u64 divisor)
83715+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83716 {
83717 return dividend / divisor;
83718 }
83719@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83720 #define div64_ul(x, y) div_u64((x), (y))
83721
83722 #ifndef div_u64_rem
83723-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83724+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83725 {
83726 *remainder = do_div(dividend, divisor);
83727 return dividend;
83728@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83729 #endif
83730
83731 #ifndef div64_u64
83732-extern u64 div64_u64(u64 dividend, u64 divisor);
83733+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83734 #endif
83735
83736 #ifndef div64_s64
83737@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83738 * divide.
83739 */
83740 #ifndef div_u64
83741-static inline u64 div_u64(u64 dividend, u32 divisor)
83742+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83743 {
83744 u32 remainder;
83745 return div_u64_rem(dividend, divisor, &remainder);
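
__intentional_overflow(-1) is the annotation consumed by the size_overflow gcc plugin: it exempts these division helpers from overflow instrumentation, since the plugin would otherwise flag the 64-bit arithmetic they exist to perform. A sketch of how such an annotation typically degrades to a no-op without the plugin (the macro body below is an assumption, not part of this hunk):

    #ifdef SIZE_OVERFLOW_PLUGIN                 /* assumed guard */
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    static inline unsigned long long __intentional_overflow(-1)
    div_u64_sketch(unsigned long long dividend, unsigned int divisor)
    {
        return dividend / divisor;              /* left uninstrumented */
    }
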
83746diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83747index 3d385c8..deacb6a 100644
83748--- a/include/linux/mempolicy.h
83749+++ b/include/linux/mempolicy.h
83750@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83751 }
83752
83753 #define vma_policy(vma) ((vma)->vm_policy)
83754+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83755+{
83756+ vma->vm_policy = pol;
83757+}
83758
83759 static inline void mpol_get(struct mempolicy *pol)
83760 {
83761@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83762 }
83763
83764 #define vma_policy(vma) NULL
83765+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83766+{
83767+}
83768
83769 static inline int
83770 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83771diff --git a/include/linux/mm.h b/include/linux/mm.h
83772index 47a9392..ef645bc 100644
83773--- a/include/linux/mm.h
83774+++ b/include/linux/mm.h
83775@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83776
83777 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83778 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83779+
83780+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83781+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83782+#endif
83783+
83784 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83785 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83786 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83787@@ -254,8 +259,8 @@ struct vm_operations_struct {
83788 /* called by access_process_vm when get_user_pages() fails, typically
83789 * for use by special VMAs that can switch between memory and hardware
83790 */
83791- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83792- void *buf, int len, int write);
83793+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83794+ void *buf, size_t len, int write);
83795
83796 /* Called by the /proc/PID/maps code to ask the vma whether it
83797 * has a special name. Returning non-NULL will also cause this
83798@@ -293,6 +298,7 @@ struct vm_operations_struct {
83799 struct page *(*find_special_page)(struct vm_area_struct *vma,
83800 unsigned long addr);
83801 };
83802+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83803
83804 struct mmu_gather;
83805 struct inode;
83806@@ -1213,8 +1219,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83807 unsigned long *pfn);
83808 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83809 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83810-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83811- void *buf, int len, int write);
83812+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83813+ void *buf, size_t len, int write);
83814
83815 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83816 loff_t const holebegin, loff_t const holelen)
83817@@ -1254,9 +1260,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83818 }
83819 #endif
83820
83821-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83822-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83823- void *buf, int len, int write);
83824+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83825+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83826+ void *buf, size_t len, int write);
83827
83828 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83829 unsigned long start, unsigned long nr_pages,
83830@@ -1299,34 +1305,6 @@ int set_page_dirty_lock(struct page *page);
83831 int clear_page_dirty_for_io(struct page *page);
83832 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83833
83834-/* Is the vma a continuation of the stack vma above it? */
83835-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83836-{
83837- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83838-}
83839-
83840-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83841- unsigned long addr)
83842-{
83843- return (vma->vm_flags & VM_GROWSDOWN) &&
83844- (vma->vm_start == addr) &&
83845- !vma_growsdown(vma->vm_prev, addr);
83846-}
83847-
83848-/* Is the vma a continuation of the stack vma below it? */
83849-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83850-{
83851- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83852-}
83853-
83854-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83855- unsigned long addr)
83856-{
83857- return (vma->vm_flags & VM_GROWSUP) &&
83858- (vma->vm_end == addr) &&
83859- !vma_growsup(vma->vm_next, addr);
83860-}
83861-
83862 extern struct task_struct *task_of_stack(struct task_struct *task,
83863 struct vm_area_struct *vma, bool in_group);
83864
83865@@ -1449,8 +1427,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83866 {
83867 return 0;
83868 }
83869+
83870+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83871+ unsigned long address)
83872+{
83873+ return 0;
83874+}
83875 #else
83876 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83877+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83878 #endif
83879
83880 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
83881@@ -1460,6 +1445,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83882 return 0;
83883 }
83884
83885+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83886+ unsigned long address)
83887+{
83888+ return 0;
83889+}
83890+
83891 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
83892
83893 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
83894@@ -1472,6 +1463,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
83895
83896 #else
83897 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83898+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83899
83900 static inline void mm_nr_pmds_init(struct mm_struct *mm)
83901 {
83902@@ -1509,11 +1501,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83903 NULL: pud_offset(pgd, address);
83904 }
83905
83906+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83907+{
83908+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83909+ NULL: pud_offset(pgd, address);
83910+}
83911+
83912 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83913 {
83914 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83915 NULL: pmd_offset(pud, address);
83916 }
83917+
83918+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83919+{
83920+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83921+ NULL: pmd_offset(pud, address);
83922+}
83923 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83924
83925 #if USE_SPLIT_PTE_PTLOCKS
83926@@ -1890,12 +1894,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83927 bool *need_rmap_locks);
83928 extern void exit_mmap(struct mm_struct *);
83929
83930+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83931+extern void gr_learn_resource(const struct task_struct *task, const int res,
83932+ const unsigned long wanted, const int gt);
83933+#else
83934+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83935+ const unsigned long wanted, const int gt)
83936+{
83937+}
83938+#endif
83939+
83940 static inline int check_data_rlimit(unsigned long rlim,
83941 unsigned long new,
83942 unsigned long start,
83943 unsigned long end_data,
83944 unsigned long start_data)
83945 {
83946+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83947 if (rlim < RLIM_INFINITY) {
83948 if (((new - start) + (end_data - start_data)) > rlim)
83949 return -ENOSPC;
83950@@ -1920,7 +1935,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83951 unsigned long addr, unsigned long len,
83952 unsigned long flags, struct page **pages);
83953
83954-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83955+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83956
83957 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83958 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83959@@ -1928,6 +1943,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83960 unsigned long len, unsigned long prot, unsigned long flags,
83961 unsigned long pgoff, unsigned long *populate);
83962 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83963+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83964
83965 #ifdef CONFIG_MMU
83966 extern int __mm_populate(unsigned long addr, unsigned long len,
83967@@ -1956,10 +1972,11 @@ struct vm_unmapped_area_info {
83968 unsigned long high_limit;
83969 unsigned long align_mask;
83970 unsigned long align_offset;
83971+ unsigned long threadstack_offset;
83972 };
83973
83974-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83975-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83976+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83977+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83978
83979 /*
83980 * Search for an unmapped address range.
83981@@ -1971,7 +1988,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83982 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83983 */
83984 static inline unsigned long
83985-vm_unmapped_area(struct vm_unmapped_area_info *info)
83986+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83987 {
83988 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83989 return unmapped_area(info);
83990@@ -2033,6 +2050,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83991 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83992 struct vm_area_struct **pprev);
83993
83994+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83995+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83996+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83997+
83998 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83999 NULL if none. Assume start_addr < end_addr. */
84000 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
84001@@ -2062,10 +2083,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
84002 }
84003
84004 #ifdef CONFIG_MMU
84005-pgprot_t vm_get_page_prot(unsigned long vm_flags);
84006+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
84007 void vma_set_page_prot(struct vm_area_struct *vma);
84008 #else
84009-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
84010+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
84011 {
84012 return __pgprot(0);
84013 }
84014@@ -2127,6 +2148,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
84015 static inline void vm_stat_account(struct mm_struct *mm,
84016 unsigned long flags, struct file *file, long pages)
84017 {
84018+
84019+#ifdef CONFIG_PAX_RANDMMAP
84020+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
84021+#endif
84022+
84023 mm->total_vm += pages;
84024 }
84025 #endif /* CONFIG_PROC_FS */
84026@@ -2229,7 +2255,7 @@ extern int unpoison_memory(unsigned long pfn);
84027 extern int sysctl_memory_failure_early_kill;
84028 extern int sysctl_memory_failure_recovery;
84029 extern void shake_page(struct page *p, int access);
84030-extern atomic_long_t num_poisoned_pages;
84031+extern atomic_long_unchecked_t num_poisoned_pages;
84032 extern int soft_offline_page(struct page *page, int flags);
84033
84034 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
84035@@ -2280,5 +2306,11 @@ void __init setup_nr_node_ids(void);
84036 static inline void setup_nr_node_ids(void) {}
84037 #endif
84038
84039+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84040+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
84041+#else
84042+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
84043+#endif
84044+
84045 #endif /* __KERNEL__ */
84046 #endif /* _LINUX_MM_H */
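
Two patterns in the mm.h hunks are worth noting: the int -> ssize_t/size_t widening of the access_*_vm signatures, and the vm_stat_account() change, where an #ifdef'd `if` guards the one statement that follows it, so a non-PAX_RANDMMAP build keeps the original unconditional increment. A minimal sketch of the latter idiom (flag names are illustrative stand-ins):

    static void stat_account_sketch(unsigned long *total_vm, long pages,
                                    int randmmap_active, int accountable_vma)
    {
    #ifdef CONFIG_PAX_RANDMMAP
        if (!randmmap_active || accountable_vma)
    #endif
        *total_vm += pages;
    }
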
84047diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
84048index 199a03a..7328440 100644
84049--- a/include/linux/mm_types.h
84050+++ b/include/linux/mm_types.h
84051@@ -313,7 +313,9 @@ struct vm_area_struct {
84052 #ifdef CONFIG_NUMA
84053 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
84054 #endif
84055-};
84056+
84057+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
84058+} __randomize_layout;
84059
84060 struct core_thread {
84061 struct task_struct *task;
84062@@ -464,7 +466,25 @@ struct mm_struct {
84063 /* address of the bounds directory */
84064 void __user *bd_addr;
84065 #endif
84066-};
84067+
84068+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84069+ unsigned long pax_flags;
84070+#endif
84071+
84072+#ifdef CONFIG_PAX_DLRESOLVE
84073+ unsigned long call_dl_resolve;
84074+#endif
84075+
84076+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
84077+ unsigned long call_syscall;
84078+#endif
84079+
84080+#ifdef CONFIG_PAX_ASLR
84081+ unsigned long delta_mmap; /* randomized offset */
84082+ unsigned long delta_stack; /* randomized offset */
84083+#endif
84084+
84085+} __randomize_layout;
84086
84087 static inline void mm_init_cpumask(struct mm_struct *mm)
84088 {
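
__randomize_layout marks vm_area_struct and mm_struct for the structure-layout randomization plugin; without the plugin the marker expands to nothing, and with it the member order is shuffled per build, so nothing outside the kernel build may rely on member offsets. A sketch of the usual definition pattern (the macro body is an assumption, not shown in this patch):

    #ifdef RANDSTRUCT_PLUGIN                    /* assumed guard */
    #define __randomize_layout __attribute__((randomize_layout))
    #else
    #define __randomize_layout
    #endif

    struct example_sketch {
        int   a;
        void *b;
    } __randomize_layout;  /* offsetof(a)/offsetof(b) now vary per build */
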
84089diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
84090index 160448f..7b332b7 100644
84091--- a/include/linux/mmc/core.h
84092+++ b/include/linux/mmc/core.h
84093@@ -79,7 +79,7 @@ struct mmc_command {
84094 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
84095
84096 unsigned int retries; /* max number of retries */
84097- unsigned int error; /* command error */
84098+ int error; /* command error */
84099
84100 /*
84101 * Standard errno values are used for errors, but some have specific
84102diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
84103index c5d5278..f0b68c8 100644
84104--- a/include/linux/mmiotrace.h
84105+++ b/include/linux/mmiotrace.h
84106@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
84107 /* Called from ioremap.c */
84108 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
84109 void __iomem *addr);
84110-extern void mmiotrace_iounmap(volatile void __iomem *addr);
84111+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
84112
84113 /* For anyone to insert markers. Remember trailing newline. */
84114 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
84115@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
84116 {
84117 }
84118
84119-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
84120+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
84121 {
84122 }
84123
84124diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
84125index 2782df4..abe756e 100644
84126--- a/include/linux/mmzone.h
84127+++ b/include/linux/mmzone.h
84128@@ -526,7 +526,7 @@ struct zone {
84129
84130 ZONE_PADDING(_pad3_)
84131 /* Zone statistics */
84132- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84133+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84134 } ____cacheline_internodealigned_in_smp;
84135
84136 enum zone_flags {
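
atomic_long_unchecked_t is the PaX REFCOUNT opt-out type: zone statistics may legitimately wrap, so they move to the variant whose operations skip overflow detection. A sketch of the distinction, assuming the usual counter shape (the typedef and helper below are not this patch's definitions):

    typedef struct { long counter; } atomic_long_unchecked_t;  /* assumed shape */

    static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
    {
        /* plain atomic add: no overflow check, wraparound is accepted */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }
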
84137diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
84138index e530533..c9620c7 100644
84139--- a/include/linux/mod_devicetable.h
84140+++ b/include/linux/mod_devicetable.h
84141@@ -139,7 +139,7 @@ struct usb_device_id {
84142 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
84143 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
84144
84145-#define HID_ANY_ID (~0)
84146+#define HID_ANY_ID (~0U)
84147 #define HID_BUS_ANY 0xffff
84148 #define HID_GROUP_ANY 0x0000
84149
84150@@ -470,7 +470,7 @@ struct dmi_system_id {
84151 const char *ident;
84152 struct dmi_strmatch matches[4];
84153 void *driver_data;
84154-};
84155+} __do_const;
84156 /*
84157 * struct dmi_device_id appears during expansion of
84158 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
84159diff --git a/include/linux/module.h b/include/linux/module.h
84160index b03485b..a26974f 100644
84161--- a/include/linux/module.h
84162+++ b/include/linux/module.h
84163@@ -17,9 +17,11 @@
84164 #include <linux/moduleparam.h>
84165 #include <linux/jump_label.h>
84166 #include <linux/export.h>
84167+#include <linux/fs.h>
84168
84169 #include <linux/percpu.h>
84170 #include <asm/module.h>
84171+#include <asm/pgtable.h>
84172
84173 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
84174 #define MODULE_SIG_STRING "~Module signature appended~\n"
84175@@ -42,7 +44,7 @@ struct module_kobject {
84176 struct kobject *drivers_dir;
84177 struct module_param_attrs *mp;
84178 struct completion *kobj_completion;
84179-};
84180+} __randomize_layout;
84181
84182 struct module_attribute {
84183 struct attribute attr;
84184@@ -54,12 +56,13 @@ struct module_attribute {
84185 int (*test)(struct module *);
84186 void (*free)(struct module *);
84187 };
84188+typedef struct module_attribute __no_const module_attribute_no_const;
84189
84190 struct module_version_attribute {
84191 struct module_attribute mattr;
84192 const char *module_name;
84193 const char *version;
84194-} __attribute__ ((__aligned__(sizeof(void *))));
84195+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
84196
84197 extern ssize_t __modver_version_show(struct module_attribute *,
84198 struct module_kobject *, char *);
84199@@ -221,7 +224,7 @@ struct module {
84200
84201 /* Sysfs stuff. */
84202 struct module_kobject mkobj;
84203- struct module_attribute *modinfo_attrs;
84204+ module_attribute_no_const *modinfo_attrs;
84205 const char *version;
84206 const char *srcversion;
84207 struct kobject *holders_dir;
84208@@ -270,19 +273,16 @@ struct module {
84209 int (*init)(void);
84210
84211 /* If this is non-NULL, vfree after init() returns */
84212- void *module_init;
84213+ void *module_init_rx, *module_init_rw;
84214
84215 /* Here is the actual code + data, vfree'd on unload. */
84216- void *module_core;
84217+ void *module_core_rx, *module_core_rw;
84218
84219 /* Here are the sizes of the init and core sections */
84220- unsigned int init_size, core_size;
84221+ unsigned int init_size_rw, core_size_rw;
84222
84223 /* The size of the executable code in each section. */
84224- unsigned int init_text_size, core_text_size;
84225-
84226- /* Size of RO sections of the module (text+rodata) */
84227- unsigned int init_ro_size, core_ro_size;
84228+ unsigned int init_size_rx, core_size_rx;
84229
84230 /* Arch-specific module values */
84231 struct mod_arch_specific arch;
84232@@ -338,6 +338,10 @@ struct module {
84233 #ifdef CONFIG_EVENT_TRACING
84234 struct ftrace_event_call **trace_events;
84235 unsigned int num_trace_events;
84236+ struct file_operations trace_id;
84237+ struct file_operations trace_enable;
84238+ struct file_operations trace_format;
84239+ struct file_operations trace_filter;
84240 #endif
84241 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
84242 unsigned int num_ftrace_callsites;
84243@@ -365,7 +369,7 @@ struct module {
84244 ctor_fn_t *ctors;
84245 unsigned int num_ctors;
84246 #endif
84247-};
84248+} __randomize_layout;
84249 #ifndef MODULE_ARCH_INIT
84250 #define MODULE_ARCH_INIT {}
84251 #endif
84252@@ -386,18 +390,48 @@ bool is_module_address(unsigned long addr);
84253 bool is_module_percpu_address(unsigned long addr);
84254 bool is_module_text_address(unsigned long addr);
84255
84256+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
84257+{
84258+
84259+#ifdef CONFIG_PAX_KERNEXEC
84260+ if (ktla_ktva(addr) >= (unsigned long)start &&
84261+ ktla_ktva(addr) < (unsigned long)start + size)
84262+ return 1;
84263+#endif
84264+
84265+ return ((void *)addr >= start && (void *)addr < start + size);
84266+}
84267+
84268+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
84269+{
84270+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
84271+}
84272+
84273+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
84274+{
84275+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
84276+}
84277+
84278+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
84279+{
84280+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
84281+}
84282+
84283+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
84284+{
84285+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
84286+}
84287+
84288 static inline bool within_module_core(unsigned long addr,
84289 const struct module *mod)
84290 {
84291- return (unsigned long)mod->module_core <= addr &&
84292- addr < (unsigned long)mod->module_core + mod->core_size;
84293+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
84294 }
84295
84296 static inline bool within_module_init(unsigned long addr,
84297 const struct module *mod)
84298 {
84299- return (unsigned long)mod->module_init <= addr &&
84300- addr < (unsigned long)mod->module_init + mod->init_size;
84301+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
84302 }
84303
84304 static inline bool within_module(unsigned long addr, const struct module *mod)
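
With the core/init regions split into rx and rw halves, within_module_core()/within_module_init() become the OR of two range checks, and within_module_range() additionally probes ktla_ktva(addr) so KERNEXEC's relocated text addresses still match. A sketch of how a caller would consume the combined check (the lookup loop is illustrative, kernel context assumed):

    static struct module *owner_of_sketch(unsigned long addr,
                                          struct module **mods, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            if (within_module_core(addr, mods[i]) ||
                within_module_init(addr, mods[i]))
                return mods[i];
        return NULL;  /* in neither the rx nor the rw region of any module */
    }
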
84305diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
84306index 4d0cb9b..3169ac7 100644
84307--- a/include/linux/moduleloader.h
84308+++ b/include/linux/moduleloader.h
84309@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
84310 sections. Returns NULL on failure. */
84311 void *module_alloc(unsigned long size);
84312
84313+#ifdef CONFIG_PAX_KERNEXEC
84314+void *module_alloc_exec(unsigned long size);
84315+#else
84316+#define module_alloc_exec(x) module_alloc(x)
84317+#endif
84318+
84319 /* Free memory returned from module_alloc. */
84320 void module_memfree(void *module_region);
84321
84322+#ifdef CONFIG_PAX_KERNEXEC
84323+void module_memfree_exec(void *module_region);
84324+#else
84325+#define module_memfree_exec(x) module_memfree((x))
84326+#endif
84327+
84328 /*
84329 * Apply the given relocation to the (simplified) ELF. Return -error
84330 * or 0.
84331@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
84332 unsigned int relsec,
84333 struct module *me)
84334 {
84335+#ifdef CONFIG_MODULES
84336 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84337 module_name(me));
84338+#endif
84339 return -ENOEXEC;
84340 }
84341 #endif
84342@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
84343 unsigned int relsec,
84344 struct module *me)
84345 {
84346+#ifdef CONFIG_MODULES
84347 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84348 module_name(me));
84349+#endif
84350 return -ENOEXEC;
84351 }
84352 #endif
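
module_alloc_exec()/module_memfree_exec() give KERNEXEC a separate allocator for the executable half of a module; without KERNEXEC they alias the plain calls. The CONFIG_MODULES guards keep the printk (and its module_name() use) out of !CONFIG_MODULES builds. A sketch of the allocate/free pairing this implies, with error handling abbreviated and sizes taken from the module.h rx/rw split above:

    static int alloc_module_regions_sketch(struct module *mod)
    {
        mod->module_core_rw = module_alloc(mod->core_size_rw);
        mod->module_core_rx = module_alloc_exec(mod->core_size_rx);
        if (!mod->module_core_rw || !mod->module_core_rx) {
            module_memfree(mod->module_core_rw);       /* vfree is NULL-safe */
            module_memfree_exec(mod->module_core_rx);
            return -ENOMEM;
        }
        return 0;
    }
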
84353diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
84354index 1c9effa..1160bdd 100644
84355--- a/include/linux/moduleparam.h
84356+++ b/include/linux/moduleparam.h
84357@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
84358 * @len is usually just sizeof(string).
84359 */
84360 #define module_param_string(name, string, len, perm) \
84361- static const struct kparam_string __param_string_##name \
84362+ static const struct kparam_string __param_string_##name __used \
84363 = { len, string }; \
84364 __module_param_call(MODULE_PARAM_PREFIX, name, \
84365 &param_ops_string, \
84366@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
84367 */
84368 #define module_param_array_named(name, array, type, nump, perm) \
84369 param_check_##type(name, &(array)[0]); \
84370- static const struct kparam_array __param_arr_##name \
84371+ static const struct kparam_array __param_arr_##name __used \
84372 = { .max = ARRAY_SIZE(array), .num = nump, \
84373 .ops = &param_ops_##type, \
84374 .elemsize = sizeof(array[0]), .elem = array }; \
84375diff --git a/include/linux/mount.h b/include/linux/mount.h
84376index c2c561d..a5f2a8c 100644
84377--- a/include/linux/mount.h
84378+++ b/include/linux/mount.h
84379@@ -66,7 +66,7 @@ struct vfsmount {
84380 struct dentry *mnt_root; /* root of the mounted tree */
84381 struct super_block *mnt_sb; /* pointer to superblock */
84382 int mnt_flags;
84383-};
84384+} __randomize_layout;
84385
84386 struct file; /* forward dec */
84387 struct path;
84388diff --git a/include/linux/namei.h b/include/linux/namei.h
84389index c899077..b9a2010 100644
84390--- a/include/linux/namei.h
84391+++ b/include/linux/namei.h
84392@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
84393 extern void unlock_rename(struct dentry *, struct dentry *);
84394
84395 extern void nd_jump_link(struct nameidata *nd, struct path *path);
84396-extern void nd_set_link(struct nameidata *nd, char *path);
84397-extern char *nd_get_link(struct nameidata *nd);
84398+extern void nd_set_link(struct nameidata *nd, const char *path);
84399+extern const char *nd_get_link(const struct nameidata *nd);
84400
84401 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
84402 {
84403diff --git a/include/linux/net.h b/include/linux/net.h
84404index 17d8339..81656c0 100644
84405--- a/include/linux/net.h
84406+++ b/include/linux/net.h
84407@@ -192,7 +192,7 @@ struct net_proto_family {
84408 int (*create)(struct net *net, struct socket *sock,
84409 int protocol, int kern);
84410 struct module *owner;
84411-};
84412+} __do_const;
84413
84414 struct iovec;
84415 struct kvec;
84416diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
84417index 2787388..1dd8e88 100644
84418--- a/include/linux/netdevice.h
84419+++ b/include/linux/netdevice.h
84420@@ -1198,6 +1198,7 @@ struct net_device_ops {
84421 u8 state);
84422 #endif
84423 };
84424+typedef struct net_device_ops __no_const net_device_ops_no_const;
84425
84426 /**
84427 * enum net_device_priv_flags - &struct net_device priv_flags
84428@@ -1546,10 +1547,10 @@ struct net_device {
84429
84430 struct net_device_stats stats;
84431
84432- atomic_long_t rx_dropped;
84433- atomic_long_t tx_dropped;
84434+ atomic_long_unchecked_t rx_dropped;
84435+ atomic_long_unchecked_t tx_dropped;
84436
84437- atomic_t carrier_changes;
84438+ atomic_unchecked_t carrier_changes;
84439
84440 #ifdef CONFIG_WIRELESS_EXT
84441 const struct iw_handler_def * wireless_handlers;
84442diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
84443index 2517ece..0bbfcfb 100644
84444--- a/include/linux/netfilter.h
84445+++ b/include/linux/netfilter.h
84446@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
84447 #endif
84448 /* Use the module struct to lock set/get code in place */
84449 struct module *owner;
84450-};
84451+} __do_const;
84452
84453 /* Function to register/unregister hook points. */
84454 int nf_register_hook(struct nf_hook_ops *reg);
84455diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
84456index e955d47..04a5338 100644
84457--- a/include/linux/netfilter/nfnetlink.h
84458+++ b/include/linux/netfilter/nfnetlink.h
84459@@ -19,7 +19,7 @@ struct nfnl_callback {
84460 const struct nlattr * const cda[]);
84461 const struct nla_policy *policy; /* netlink attribute policy */
84462 const u_int16_t attr_count; /* number of nlattr's */
84463-};
84464+} __do_const;
84465
84466 struct nfnetlink_subsystem {
84467 const char *name;
84468diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
84469new file mode 100644
84470index 0000000..33f4af8
84471--- /dev/null
84472+++ b/include/linux/netfilter/xt_gradm.h
84473@@ -0,0 +1,9 @@
84474+#ifndef _LINUX_NETFILTER_XT_GRADM_H
84475+#define _LINUX_NETFILTER_XT_GRADM_H 1
84476+
84477+struct xt_gradm_mtinfo {
84478+ __u16 flags;
84479+ __u16 invflags;
84480+};
84481+
84482+#endif
84483diff --git a/include/linux/nls.h b/include/linux/nls.h
84484index 520681b..2b7fabb 100644
84485--- a/include/linux/nls.h
84486+++ b/include/linux/nls.h
84487@@ -31,7 +31,7 @@ struct nls_table {
84488 const unsigned char *charset2upper;
84489 struct module *owner;
84490 struct nls_table *next;
84491-};
84492+} __do_const;
84493
84494 /* this value hold the maximum octet of charset */
84495 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84496@@ -46,7 +46,7 @@ enum utf16_endian {
84497 /* nls_base.c */
84498 extern int __register_nls(struct nls_table *, struct module *);
84499 extern int unregister_nls(struct nls_table *);
84500-extern struct nls_table *load_nls(char *);
84501+extern struct nls_table *load_nls(const char *);
84502 extern void unload_nls(struct nls_table *);
84503 extern struct nls_table *load_nls_default(void);
84504 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84505diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84506index d14a4c3..a078786 100644
84507--- a/include/linux/notifier.h
84508+++ b/include/linux/notifier.h
84509@@ -54,7 +54,8 @@ struct notifier_block {
84510 notifier_fn_t notifier_call;
84511 struct notifier_block __rcu *next;
84512 int priority;
84513-};
84514+} __do_const;
84515+typedef struct notifier_block __no_const notifier_block_no_const;
84516
84517 struct atomic_notifier_head {
84518 spinlock_t lock;
84519diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84520index b2a0f15..4d7da32 100644
84521--- a/include/linux/oprofile.h
84522+++ b/include/linux/oprofile.h
84523@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84524 int oprofilefs_create_ro_ulong(struct dentry * root,
84525 char const * name, ulong * val);
84526
84527-/** Create a file for read-only access to an atomic_t. */
84528+/** Create a file for read-only access to an atomic_unchecked_t. */
84529 int oprofilefs_create_ro_atomic(struct dentry * root,
84530- char const * name, atomic_t * val);
84531+ char const * name, atomic_unchecked_t * val);
84532
84533 /** create a directory */
84534 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84535diff --git a/include/linux/padata.h b/include/linux/padata.h
84536index 4386946..f50c615 100644
84537--- a/include/linux/padata.h
84538+++ b/include/linux/padata.h
84539@@ -129,7 +129,7 @@ struct parallel_data {
84540 struct padata_serial_queue __percpu *squeue;
84541 atomic_t reorder_objects;
84542 atomic_t refcnt;
84543- atomic_t seq_nr;
84544+ atomic_unchecked_t seq_nr;
84545 struct padata_cpumask cpumask;
84546 spinlock_t lock ____cacheline_aligned;
84547 unsigned int processed;
84548diff --git a/include/linux/path.h b/include/linux/path.h
84549index d137218..be0c176 100644
84550--- a/include/linux/path.h
84551+++ b/include/linux/path.h
84552@@ -1,13 +1,15 @@
84553 #ifndef _LINUX_PATH_H
84554 #define _LINUX_PATH_H
84555
84556+#include <linux/compiler.h>
84557+
84558 struct dentry;
84559 struct vfsmount;
84560
84561 struct path {
84562 struct vfsmount *mnt;
84563 struct dentry *dentry;
84564-};
84565+} __randomize_layout;
84566
84567 extern void path_get(const struct path *);
84568 extern void path_put(const struct path *);
84569diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84570index 8c78950..0d74ed9 100644
84571--- a/include/linux/pci_hotplug.h
84572+++ b/include/linux/pci_hotplug.h
84573@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84574 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84575 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84576 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84577-};
84578+} __do_const;
84579+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84580
84581 /**
84582 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84583diff --git a/include/linux/percpu.h b/include/linux/percpu.h
84584index caebf2a..4c3ae9d 100644
84585--- a/include/linux/percpu.h
84586+++ b/include/linux/percpu.h
84587@@ -34,7 +34,7 @@
84588 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
84589 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
84590 */
84591-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
84592+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
84593 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
84594
84595 /*
84596diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84597index 2b62198..2b74233 100644
84598--- a/include/linux/perf_event.h
84599+++ b/include/linux/perf_event.h
84600@@ -343,8 +343,8 @@ struct perf_event {
84601
84602 enum perf_event_active_state state;
84603 unsigned int attach_state;
84604- local64_t count;
84605- atomic64_t child_count;
84606+ local64_t count; /* PaX: fix it one day */
84607+ atomic64_unchecked_t child_count;
84608
84609 /*
84610 * These are the total time in nanoseconds that the event
84611@@ -395,8 +395,8 @@ struct perf_event {
84612 * These accumulate total time (in nanoseconds) that children
84613 * events have been enabled and running, respectively.
84614 */
84615- atomic64_t child_total_time_enabled;
84616- atomic64_t child_total_time_running;
84617+ atomic64_unchecked_t child_total_time_enabled;
84618+ atomic64_unchecked_t child_total_time_running;
84619
84620 /*
84621 * Protect attach/detach and child_list:
84622@@ -752,7 +752,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84623 entry->ip[entry->nr++] = ip;
84624 }
84625
84626-extern int sysctl_perf_event_paranoid;
84627+extern int sysctl_perf_event_legitimately_concerned;
84628 extern int sysctl_perf_event_mlock;
84629 extern int sysctl_perf_event_sample_rate;
84630 extern int sysctl_perf_cpu_time_max_percent;
84631@@ -767,19 +767,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84632 loff_t *ppos);
84633
84634
84635+static inline bool perf_paranoid_any(void)
84636+{
84637+ return sysctl_perf_event_legitimately_concerned > 2;
84638+}
84639+
84640 static inline bool perf_paranoid_tracepoint_raw(void)
84641 {
84642- return sysctl_perf_event_paranoid > -1;
84643+ return sysctl_perf_event_legitimately_concerned > -1;
84644 }
84645
84646 static inline bool perf_paranoid_cpu(void)
84647 {
84648- return sysctl_perf_event_paranoid > 0;
84649+ return sysctl_perf_event_legitimately_concerned > 0;
84650 }
84651
84652 static inline bool perf_paranoid_kernel(void)
84653 {
84654- return sysctl_perf_event_paranoid > 1;
84655+ return sysctl_perf_event_legitimately_concerned > 1;
84656 }
84657
84658 extern void perf_event_init(void);
84659@@ -912,7 +917,7 @@ struct perf_pmu_events_attr {
84660 struct device_attribute attr;
84661 u64 id;
84662 const char *event_str;
84663-};
84664+} __do_const;
84665
84666 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
84667 char *page);
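
Beyond renaming sysctl_perf_event_paranoid, the hunk introduces a third paranoia level: perf_paranoid_any() is true above 2, giving a setting at which perf can be refused outright to unprivileged callers. A sketch of the kind of gate this enables (the capability test is an assumed convention, not shown in this hunk):

    static int perf_open_allowed_sketch(void)
    {
        if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
            return -EACCES;   /* level 3: perf disabled for non-admin */
        return 0;
    }
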
84668diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84669index 918b117..7af374b7 100644
84670--- a/include/linux/pid_namespace.h
84671+++ b/include/linux/pid_namespace.h
84672@@ -45,7 +45,7 @@ struct pid_namespace {
84673 int hide_pid;
84674 int reboot; /* group exit code if this pidns was rebooted */
84675 struct ns_common ns;
84676-};
84677+} __randomize_layout;
84678
84679 extern struct pid_namespace init_pid_ns;
84680
84681diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84682index eb8b8ac..62649e1 100644
84683--- a/include/linux/pipe_fs_i.h
84684+++ b/include/linux/pipe_fs_i.h
84685@@ -47,10 +47,10 @@ struct pipe_inode_info {
84686 struct mutex mutex;
84687 wait_queue_head_t wait;
84688 unsigned int nrbufs, curbuf, buffers;
84689- unsigned int readers;
84690- unsigned int writers;
84691- unsigned int files;
84692- unsigned int waiting_writers;
84693+ atomic_t readers;
84694+ atomic_t writers;
84695+ atomic_t files;
84696+ atomic_t waiting_writers;
84697 unsigned int r_counter;
84698 unsigned int w_counter;
84699 struct page *tmp_page;
84700diff --git a/include/linux/pm.h b/include/linux/pm.h
84701index e2f1be6..78a0506 100644
84702--- a/include/linux/pm.h
84703+++ b/include/linux/pm.h
84704@@ -608,6 +608,7 @@ struct dev_pm_domain {
84705 struct dev_pm_ops ops;
84706 void (*detach)(struct device *dev, bool power_off);
84707 };
84708+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84709
84710 /*
84711 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84712diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84713index 080e778..cbdaef7 100644
84714--- a/include/linux/pm_domain.h
84715+++ b/include/linux/pm_domain.h
84716@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84717 int (*save_state)(struct device *dev);
84718 int (*restore_state)(struct device *dev);
84719 bool (*active_wakeup)(struct device *dev);
84720-};
84721+} __no_const;
84722
84723 struct gpd_cpuidle_data {
84724 unsigned int saved_exit_latency;
84725- struct cpuidle_state *idle_state;
84726+ cpuidle_state_no_const *idle_state;
84727 };
84728
84729 struct generic_pm_domain {
84730diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84731index 30e84d4..22278b4 100644
84732--- a/include/linux/pm_runtime.h
84733+++ b/include/linux/pm_runtime.h
84734@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84735
84736 static inline void pm_runtime_mark_last_busy(struct device *dev)
84737 {
84738- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84739+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84740 }
84741
84742 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84743diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84744index 6512e9c..ec27fa2 100644
84745--- a/include/linux/pnp.h
84746+++ b/include/linux/pnp.h
84747@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84748 struct pnp_fixup {
84749 char id[7];
84750 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84751-};
84752+} __do_const;
84753
84754 /* config parameters */
84755 #define PNP_CONFIG_NORMAL 0x0001
84756diff --git a/include/linux/poison.h b/include/linux/poison.h
84757index 2110a81..13a11bb 100644
84758--- a/include/linux/poison.h
84759+++ b/include/linux/poison.h
84760@@ -19,8 +19,8 @@
84761 * under normal circumstances, used to verify that nobody uses
84762 * non-initialized list entries.
84763 */
84764-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84765-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84766+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84767+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84768
84769 /********** include/linux/timer.h **********/
84770 /*
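
The new LIST_POISON values matter on 32-bit x86 under PaX: 0x00100100 is a userland-mappable address (an attacker could map it and turn a use-after-list_del into an exploit), while 0xFFFFFF01/0xFFFFFF02 sit in kernel space and are unaligned, so any dereference of a poisoned pointer faults. A compile-time sketch of those two properties, assuming the typical i386 split for TASK_SIZE:

    #define TASK_SIZE_SKETCH 0xC0000000UL  /* 3G/1G split (assumption) */

    _Static_assert(0xFFFFFF01UL >= TASK_SIZE_SKETCH,
                   "poison pointer must not be user-mappable");
    _Static_assert((0xFFFFFF01UL & 1) != 0,
                   "poison pointer must be unaligned");
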
84771diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84772index d8b187c3..9a9257a 100644
84773--- a/include/linux/power/smartreflex.h
84774+++ b/include/linux/power/smartreflex.h
84775@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84776 int (*notify)(struct omap_sr *sr, u32 status);
84777 u8 notify_flags;
84778 u8 class_type;
84779-};
84780+} __do_const;
84781
84782 /**
84783 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84784diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84785index 4ea1d37..80f4b33 100644
84786--- a/include/linux/ppp-comp.h
84787+++ b/include/linux/ppp-comp.h
84788@@ -84,7 +84,7 @@ struct compressor {
84789 struct module *owner;
84790 /* Extra skb space needed by the compressor algorithm */
84791 unsigned int comp_extra;
84792-};
84793+} __do_const;
84794
84795 /*
84796 * The return value from decompress routine is the length of the
84797diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84798index de83b4e..c4b997d 100644
84799--- a/include/linux/preempt.h
84800+++ b/include/linux/preempt.h
84801@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84802 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84803 #endif
84804
84805+#define raw_preempt_count_add(val) __preempt_count_add(val)
84806+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84807+
84808 #define __preempt_count_inc() __preempt_count_add(1)
84809 #define __preempt_count_dec() __preempt_count_sub(1)
84810
84811 #define preempt_count_inc() preempt_count_add(1)
84812+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84813 #define preempt_count_dec() preempt_count_sub(1)
84814+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84815
84816 #ifdef CONFIG_PREEMPT_COUNT
84817
84818@@ -41,6 +46,12 @@ do { \
84819 barrier(); \
84820 } while (0)
84821
84822+#define raw_preempt_disable() \
84823+do { \
84824+ raw_preempt_count_inc(); \
84825+ barrier(); \
84826+} while (0)
84827+
84828 #define sched_preempt_enable_no_resched() \
84829 do { \
84830 barrier(); \
84831@@ -49,6 +60,12 @@ do { \
84832
84833 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84834
84835+#define raw_preempt_enable_no_resched() \
84836+do { \
84837+ barrier(); \
84838+ raw_preempt_count_dec(); \
84839+} while (0)
84840+
84841 #ifdef CONFIG_PREEMPT
84842 #define preempt_enable() \
84843 do { \
84844@@ -113,8 +130,10 @@ do { \
84845 * region.
84846 */
84847 #define preempt_disable() barrier()
84848+#define raw_preempt_disable() barrier()
84849 #define sched_preempt_enable_no_resched() barrier()
84850 #define preempt_enable_no_resched() barrier()
84851+#define raw_preempt_enable_no_resched() barrier()
84852 #define preempt_enable() barrier()
84853 #define preempt_check_resched() do { } while (0)
84854
84855@@ -128,11 +147,13 @@ do { \
84856 /*
84857 * Modules have no business playing preemption tricks.
84858 */
84859+#ifndef CONFIG_PAX_KERNEXEC
84860 #undef sched_preempt_enable_no_resched
84861 #undef preempt_enable_no_resched
84862 #undef preempt_enable_no_resched_notrace
84863 #undef preempt_check_resched
84864 #endif
84865+#endif
84866
84867 #define preempt_set_need_resched() \
84868 do { \
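
The raw_preempt_* macros mirror preempt_disable()/preempt_enable_no_resched() but call the __preempt_count_* primitives directly, bypassing the traced preempt_count_add/sub, for PaX paths that must not recurse into instrumentation; the KERNEXEC ifdef then keeps the no_resched variants visible where the stock kernel would #undef them. A sketch of an illustrative caller:

    /* Briefly toggling kernel write protection must not trace or
     * reschedule in between (illustrative, not from the patch). */
    static void write_protected_poke_sketch(void)
    {
        raw_preempt_disable();            /* __preempt_count_add(1); barrier() */
        /* ... open a short write window on read-only data ... */
        raw_preempt_enable_no_resched();  /* barrier(); __preempt_count_sub(1) */
    }
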
84869diff --git a/include/linux/printk.h b/include/linux/printk.h
84870index baa3f97..168cff1 100644
84871--- a/include/linux/printk.h
84872+++ b/include/linux/printk.h
84873@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84874 #endif
84875
84876 typedef int(*printk_func_t)(const char *fmt, va_list args);
84877+extern int kptr_restrict;
84878
84879 #ifdef CONFIG_PRINTK
84880 asmlinkage __printf(5, 0)
84881@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84882
84883 extern int printk_delay_msec;
84884 extern int dmesg_restrict;
84885-extern int kptr_restrict;
84886
84887 extern void wake_up_klogd(void);
84888
84889diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84890index b97bf2e..f14c92d4 100644
84891--- a/include/linux/proc_fs.h
84892+++ b/include/linux/proc_fs.h
84893@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84894 extern struct proc_dir_entry *proc_symlink(const char *,
84895 struct proc_dir_entry *, const char *);
84896 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84897+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84898 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84899 struct proc_dir_entry *, void *);
84900+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84901+ struct proc_dir_entry *, void *);
84902 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84903 struct proc_dir_entry *);
84904
84905@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84906 return proc_create_data(name, mode, parent, proc_fops, NULL);
84907 }
84908
84909+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84910+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84911+{
84912+#ifdef CONFIG_GRKERNSEC_PROC_USER
84913+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84914+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84915+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84916+#else
84917+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84918+#endif
84919+}
84920+
84921+
84922 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84923 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84924 extern void *PDE_DATA(const struct inode *);
84925@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84926 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84927 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84928 struct proc_dir_entry *parent) {return NULL;}
84929+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84930+ struct proc_dir_entry *parent) { return NULL; }
84931 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84932 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84933+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84934+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84935 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84936 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84937 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84938@@ -79,7 +99,7 @@ struct net;
84939 static inline struct proc_dir_entry *proc_net_mkdir(
84940 struct net *net, const char *name, struct proc_dir_entry *parent)
84941 {
84942- return proc_mkdir_data(name, 0, parent, net);
84943+ return proc_mkdir_data_restrict(name, 0, parent, net);
84944 }
84945
84946 #endif /* _LINUX_PROC_FS_H */
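
proc_create_grsec() clamps whatever mode the caller asks for according to the GRKERNSEC_PROC_* options, and proc_net_mkdir() is rerouted to the new _restrict variant so per-netns proc directories inherit the same policy. A usage sketch (the entry name and fops are illustrative):

    static const struct file_operations example_fops;  /* illustrative */

    static int register_example_sketch(void)
    {
        /* caller asks for 0444; GRKERNSEC_PROC_USER clamps it to 0400,
         * GRKERNSEC_PROC_USERGROUP to 0440, otherwise 0444 is kept */
        struct proc_dir_entry *pde =
                proc_create_grsec("example_stats", 0444, NULL, &example_fops);
        return pde ? 0 : -ENOMEM;
    }
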
84947diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84948index 42dfc61..8113a99 100644
84949--- a/include/linux/proc_ns.h
84950+++ b/include/linux/proc_ns.h
84951@@ -16,7 +16,7 @@ struct proc_ns_operations {
84952 struct ns_common *(*get)(struct task_struct *task);
84953 void (*put)(struct ns_common *ns);
84954 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84955-};
84956+} __do_const __randomize_layout;
84957
84958 extern const struct proc_ns_operations netns_operations;
84959 extern const struct proc_ns_operations utsns_operations;
84960diff --git a/include/linux/quota.h b/include/linux/quota.h
84961index d534e8e..782e604 100644
84962--- a/include/linux/quota.h
84963+++ b/include/linux/quota.h
84964@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84965
84966 extern bool qid_eq(struct kqid left, struct kqid right);
84967 extern bool qid_lt(struct kqid left, struct kqid right);
84968-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84969+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84970 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84971 extern bool qid_valid(struct kqid qid);
84972
84973diff --git a/include/linux/random.h b/include/linux/random.h
84974index b05856e..0a9f14e 100644
84975--- a/include/linux/random.h
84976+++ b/include/linux/random.h
84977@@ -9,9 +9,19 @@
84978 #include <uapi/linux/random.h>
84979
84980 extern void add_device_randomness(const void *, unsigned int);
84981+
84982+static inline void add_latent_entropy(void)
84983+{
84984+
84985+#ifdef LATENT_ENTROPY_PLUGIN
84986+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84987+#endif
84988+
84989+}
84990+
84991 extern void add_input_randomness(unsigned int type, unsigned int code,
84992- unsigned int value);
84993-extern void add_interrupt_randomness(int irq, int irq_flags);
84994+ unsigned int value) __latent_entropy;
84995+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84996
84997 extern void get_random_bytes(void *buf, int nbytes);
84998 extern void get_random_bytes_arch(void *buf, int nbytes);
84999@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
85000 extern const struct file_operations random_fops, urandom_fops;
85001 #endif
85002
85003-unsigned int get_random_int(void);
85004+unsigned int __intentional_overflow(-1) get_random_int(void);
85005 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
85006
85007-u32 prandom_u32(void);
85008+u32 prandom_u32(void) __intentional_overflow(-1);
85009 void prandom_bytes(void *buf, size_t nbytes);
85010 void prandom_seed(u32 seed);
85011 void prandom_reseed_late(void);
85012@@ -37,6 +47,11 @@ struct rnd_state {
85013 u32 prandom_u32_state(struct rnd_state *state);
85014 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
85015
85016+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
85017+{
85018+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
85019+}
85020+
85021 /**
85022 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
85023 * @ep_ro: right open interval endpoint
85024@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
85025 *
85026 * Returns: pseudo-random number in interval [0, ep_ro)
85027 */
85028-static inline u32 prandom_u32_max(u32 ep_ro)
85029+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
85030 {
85031 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
85032 }
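
pax_get_random_long() composes a full long from prandom_u32(): one draw on 32-bit, two on 64-bit with the second shifted into the high word (the shift is only evaluated when sizeof(long) > 4). A standalone demo of the same composition, with libc random() standing in for prandom_u32():

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int rand32(void) { return (unsigned int)random(); }

    static unsigned long get_random_long_sketch(void)
    {
        return rand32() +
               (sizeof(long) > 4 ? (unsigned long)rand32() << 32 : 0);
    }

    int main(void)
    {
        srandom(42);
        printf("%lx\n", get_random_long_sketch());
        return 0;
    }
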
85033diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
85034index 378c5ee..aa84a47 100644
85035--- a/include/linux/rbtree_augmented.h
85036+++ b/include/linux/rbtree_augmented.h
85037@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
85038 old->rbaugmented = rbcompute(old); \
85039 } \
85040 rbstatic const struct rb_augment_callbacks rbname = { \
85041- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
85042+ .propagate = rbname ## _propagate, \
85043+ .copy = rbname ## _copy, \
85044+ .rotate = rbname ## _rotate \
85045 };
85046
85047
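
The rb_augment_callbacks initializer is switched from positional to designated form; with struct layouts subject to change (cf. __randomize_layout elsewhere in the patch), designated initializers are the only form that cannot silently bind a function to the wrong slot. In miniature:

    struct ops_sketch {
        void (*propagate)(void);
        void (*copy)(void);
        void (*rotate)(void);
    };

    static void op_stub(void) { }

    static const struct ops_sketch ops_sketch = {
        .propagate = op_stub,  /* the positional form { op_stub, ... } would */
        .copy      = op_stub,  /* misbind if the members were ever reordered */
        .rotate    = op_stub,
    };
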
85048diff --git a/include/linux/rculist.h b/include/linux/rculist.h
85049index a18b16f..2683096 100644
85050--- a/include/linux/rculist.h
85051+++ b/include/linux/rculist.h
85052@@ -29,8 +29,8 @@
85053 */
85054 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
85055 {
85056- ACCESS_ONCE(list->next) = list;
85057- ACCESS_ONCE(list->prev) = list;
85058+ ACCESS_ONCE_RW(list->next) = list;
85059+ ACCESS_ONCE_RW(list->prev) = list;
85060 }
85061
85062 /*
85063@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
85064 struct list_head *prev, struct list_head *next);
85065 #endif
85066
85067+void __pax_list_add_rcu(struct list_head *new,
85068+ struct list_head *prev, struct list_head *next);
85069+
85070 /**
85071 * list_add_rcu - add a new entry to rcu-protected list
85072 * @new: new entry to be added
85073@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
85074 __list_add_rcu(new, head, head->next);
85075 }
85076
85077+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
85078+{
85079+ __pax_list_add_rcu(new, head, head->next);
85080+}
85081+
85082 /**
85083 * list_add_tail_rcu - add a new entry to rcu-protected list
85084 * @new: new entry to be added
85085@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
85086 __list_add_rcu(new, head->prev, head);
85087 }
85088
85089+static inline void pax_list_add_tail_rcu(struct list_head *new,
85090+ struct list_head *head)
85091+{
85092+ __pax_list_add_rcu(new, head->prev, head);
85093+}
85094+
85095 /**
85096 * list_del_rcu - deletes entry from list without re-initialization
85097 * @entry: the element to delete from the list.
85098@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
85099 entry->prev = LIST_POISON2;
85100 }
85101
85102+extern void pax_list_del_rcu(struct list_head *entry);
85103+
85104 /**
85105 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
85106 * @n: the element to delete from the hash list.
85107diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
85108index 7809749..1cd9315 100644
85109--- a/include/linux/rcupdate.h
85110+++ b/include/linux/rcupdate.h
85111@@ -333,7 +333,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
85112 do { \
85113 rcu_all_qs(); \
85114 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
85115- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
85116+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
85117 } while (0)
85118 #else /* #ifdef CONFIG_TASKS_RCU */
85119 #define TASKS_RCU(x) do { } while (0)
85120diff --git a/include/linux/reboot.h b/include/linux/reboot.h
85121index 67fc8fc..a90f7d8 100644
85122--- a/include/linux/reboot.h
85123+++ b/include/linux/reboot.h
85124@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
85125 */
85126
85127 extern void migrate_to_reboot_cpu(void);
85128-extern void machine_restart(char *cmd);
85129-extern void machine_halt(void);
85130-extern void machine_power_off(void);
85131+extern void machine_restart(char *cmd) __noreturn;
85132+extern void machine_halt(void) __noreturn;
85133+extern void machine_power_off(void) __noreturn;
85134
85135 extern void machine_shutdown(void);
85136 struct pt_regs;
85137@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
85138 */
85139
85140 extern void kernel_restart_prepare(char *cmd);
85141-extern void kernel_restart(char *cmd);
85142-extern void kernel_halt(void);
85143-extern void kernel_power_off(void);
85144+extern void kernel_restart(char *cmd) __noreturn;
85145+extern void kernel_halt(void) __noreturn;
85146+extern void kernel_power_off(void) __noreturn;
85147
85148 extern int C_A_D; /* for sysctl */
85149 void ctrl_alt_del(void);
85150@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
85151 * Emergency restart, callable from an interrupt handler.
85152 */
85153
85154-extern void emergency_restart(void);
85155+extern void emergency_restart(void) __noreturn;
85156 #include <asm/emergency-restart.h>
85157
85158 #endif /* _LINUX_REBOOT_H */
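
[annotation] The reboot.h hunk only annotates functions that never return to their caller. A small sketch of what __noreturn buys, assuming the usual mapping onto GCC's noreturn attribute:

extern void machine_halt(void) __attribute__((noreturn));

int shutdown_path(int force)
{
	if (force)
		machine_halt();	/* provably never falls through */
	return 0;		/* only reachable when !force */
}

The compiler can then warn about genuinely missing returns and drop unreachable code behind such calls instead of assuming the call might come back.
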
85159diff --git a/include/linux/regset.h b/include/linux/regset.h
85160index 8e0c9fe..ac4d221 100644
85161--- a/include/linux/regset.h
85162+++ b/include/linux/regset.h
85163@@ -161,7 +161,8 @@ struct user_regset {
85164 unsigned int align;
85165 unsigned int bias;
85166 unsigned int core_note_type;
85167-};
85168+} __do_const;
85169+typedef struct user_regset __no_const user_regset_no_const;
85170
85171 /**
85172 * struct user_regset_view - available regsets
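
[annotation] The __do_const / __no_const pair recurs throughout this patch: __do_const asks the constify GCC plugin to force every instance of the struct into read-only memory, and the __no_const typedef opts a particular use back out when it must be filled in at runtime. A hedged sketch of the markers, assuming the CONSTIFY_PLUGIN definitions from grsecurity's compiler.h:

#ifdef CONSTIFY_PLUGIN
#define __do_const	__attribute__((do_const))	/* force instances const */
#define __no_const	__attribute__((no_const))	/* per-type opt-out */
#else
#define __do_const
#define __no_const
#endif

The same reasoning covers the relay.h and rio.h hunks just below: tables of function pointers get constified so they cannot be redirected at runtime, and types that legitimately need writes get a *_no_const alias.
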
85173diff --git a/include/linux/relay.h b/include/linux/relay.h
85174index d7c8359..818daf5 100644
85175--- a/include/linux/relay.h
85176+++ b/include/linux/relay.h
85177@@ -157,7 +157,7 @@ struct rchan_callbacks
85178 * The callback should return 0 if successful, negative if not.
85179 */
85180 int (*remove_buf_file)(struct dentry *dentry);
85181-};
85182+} __no_const;
85183
85184 /*
85185 * CONFIG_RELAY kernel API, kernel/relay.c
85186diff --git a/include/linux/rio.h b/include/linux/rio.h
85187index 6bda06f..bf39a9b 100644
85188--- a/include/linux/rio.h
85189+++ b/include/linux/rio.h
85190@@ -358,7 +358,7 @@ struct rio_ops {
85191 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
85192 u64 rstart, u32 size, u32 flags);
85193 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
85194-};
85195+} __no_const;
85196
85197 #define RIO_RESOURCE_MEM 0x00000100
85198 #define RIO_RESOURCE_DOORBELL 0x00000200
85199diff --git a/include/linux/rmap.h b/include/linux/rmap.h
85200index c4c559a..6ba9a26 100644
85201--- a/include/linux/rmap.h
85202+++ b/include/linux/rmap.h
85203@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
85204 void anon_vma_init(void); /* create anon_vma_cachep */
85205 int anon_vma_prepare(struct vm_area_struct *);
85206 void unlink_anon_vmas(struct vm_area_struct *);
85207-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
85208-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
85209+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
85210+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
85211
85212 static inline void anon_vma_merge(struct vm_area_struct *vma,
85213 struct vm_area_struct *next)
85214diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
85215index ed8f9e70..999bc96 100644
85216--- a/include/linux/scatterlist.h
85217+++ b/include/linux/scatterlist.h
85218@@ -1,6 +1,7 @@
85219 #ifndef _LINUX_SCATTERLIST_H
85220 #define _LINUX_SCATTERLIST_H
85221
85222+#include <linux/sched.h>
85223 #include <linux/string.h>
85224 #include <linux/bug.h>
85225 #include <linux/mm.h>
85226@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
85227 #ifdef CONFIG_DEBUG_SG
85228 BUG_ON(!virt_addr_valid(buf));
85229 #endif
85230+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85231+ if (object_starts_on_stack(buf)) {
85232+ void *adjbuf = buf - current->stack + current->lowmem_stack;
85233+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
85234+ } else
85235+#endif
85236 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
85237 }
85238
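
[annotation] Under GRKERNSEC_KSTACKOVERFLOW the kernel stack is vmalloc-backed, where virt_to_page() on a stack buffer is invalid, so the sg_set_buf() hunk rebases the pointer onto the lowmem alias kept in current->lowmem_stack. A standalone sketch of that translation, using the field names this patch introduces:

/* hedged sketch: rebase a stack address onto its lowmem alias */
static inline void *stack_to_lowmem(const void *buf)
{
	unsigned long off = (const char *)buf - (const char *)current->stack;

	return (char *)current->lowmem_stack + off;
}

virt_to_page() and offset_in_page() on the rebased address then behave exactly as they do for an ordinary linear-mapping pointer.
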
85239diff --git a/include/linux/sched.h b/include/linux/sched.h
85240index a419b65..6dd8f3f 100644
85241--- a/include/linux/sched.h
85242+++ b/include/linux/sched.h
85243@@ -133,6 +133,7 @@ struct fs_struct;
85244 struct perf_event_context;
85245 struct blk_plug;
85246 struct filename;
85247+struct linux_binprm;
85248
85249 #define VMACACHE_BITS 2
85250 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
85251@@ -412,7 +413,7 @@ extern char __sched_text_start[], __sched_text_end[];
85252 extern int in_sched_functions(unsigned long addr);
85253
85254 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
85255-extern signed long schedule_timeout(signed long timeout);
85256+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
85257 extern signed long schedule_timeout_interruptible(signed long timeout);
85258 extern signed long schedule_timeout_killable(signed long timeout);
85259 extern signed long schedule_timeout_uninterruptible(signed long timeout);
85260@@ -430,6 +431,19 @@ struct nsproxy;
85261 struct user_namespace;
85262
85263 #ifdef CONFIG_MMU
85264+
85265+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
85266+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
85267+#else
85268+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
85269+{
85270+ return 0;
85271+}
85272+#endif
85273+
85274+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
85275+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
85276+
85277 extern void arch_pick_mmap_layout(struct mm_struct *mm);
85278 extern unsigned long
85279 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
85280@@ -728,6 +742,17 @@ struct signal_struct {
85281 #ifdef CONFIG_TASKSTATS
85282 struct taskstats *stats;
85283 #endif
85284+
85285+#ifdef CONFIG_GRKERNSEC
85286+ u32 curr_ip;
85287+ u32 saved_ip;
85288+ u32 gr_saddr;
85289+ u32 gr_daddr;
85290+ u16 gr_sport;
85291+ u16 gr_dport;
85292+ u8 used_accept:1;
85293+#endif
85294+
85295 #ifdef CONFIG_AUDIT
85296 unsigned audit_tty;
85297 unsigned audit_tty_log_passwd;
85298@@ -754,7 +779,7 @@ struct signal_struct {
85299 struct mutex cred_guard_mutex; /* guard against foreign influences on
85300 * credential calculations
85301 * (notably ptrace) */
85302-};
85303+} __randomize_layout;
85304
85305 /*
85306 * Bits in flags field of signal_struct.
85307@@ -807,6 +832,14 @@ struct user_struct {
85308 struct key *session_keyring; /* UID's default session keyring */
85309 #endif
85310
85311+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
85312+ unsigned char kernel_banned;
85313+#endif
85314+#ifdef CONFIG_GRKERNSEC_BRUTE
85315+ unsigned char suid_banned;
85316+ unsigned long suid_ban_expires;
85317+#endif
85318+
85319 /* Hash table maintenance information */
85320 struct hlist_node uidhash_node;
85321 kuid_t uid;
85322@@ -814,7 +847,7 @@ struct user_struct {
85323 #ifdef CONFIG_PERF_EVENTS
85324 atomic_long_t locked_vm;
85325 #endif
85326-};
85327+} __randomize_layout;
85328
85329 extern int uids_sysfs_init(void);
85330
85331@@ -1278,6 +1311,9 @@ enum perf_event_task_context {
85332 struct task_struct {
85333 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
85334 void *stack;
85335+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85336+ void *lowmem_stack;
85337+#endif
85338 atomic_t usage;
85339 unsigned int flags; /* per process flags, defined below */
85340 unsigned int ptrace;
85341@@ -1411,8 +1447,8 @@ struct task_struct {
85342 struct list_head thread_node;
85343
85344 struct completion *vfork_done; /* for vfork() */
85345- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
85346- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85347+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
85348+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85349
85350 cputime_t utime, stime, utimescaled, stimescaled;
85351 cputime_t gtime;
85352@@ -1437,11 +1473,6 @@ struct task_struct {
85353 struct task_cputime cputime_expires;
85354 struct list_head cpu_timers[3];
85355
85356-/* process credentials */
85357- const struct cred __rcu *real_cred; /* objective and real subjective task
85358- * credentials (COW) */
85359- const struct cred __rcu *cred; /* effective (overridable) subjective task
85360- * credentials (COW) */
85361 char comm[TASK_COMM_LEN]; /* executable name excluding path
85362 - access with [gs]et_task_comm (which lock
85363 it with task_lock())
85364@@ -1459,6 +1490,10 @@ struct task_struct {
85365 #endif
85366 /* CPU-specific state of this task */
85367 struct thread_struct thread;
85368+/* thread_info moved to task_struct */
85369+#ifdef CONFIG_X86
85370+ struct thread_info tinfo;
85371+#endif
85372 /* filesystem information */
85373 struct fs_struct *fs;
85374 /* open file information */
85375@@ -1533,6 +1568,10 @@ struct task_struct {
85376 gfp_t lockdep_reclaim_gfp;
85377 #endif
85378
85379+/* process credentials */
85380+ const struct cred __rcu *real_cred; /* objective and real subjective task
85381+ * credentials (COW) */
85382+
85383 /* journalling filesystem info */
85384 void *journal_info;
85385
85386@@ -1571,6 +1610,10 @@ struct task_struct {
85387 /* cg_list protected by css_set_lock and tsk->alloc_lock */
85388 struct list_head cg_list;
85389 #endif
85390+
85391+ const struct cred __rcu *cred; /* effective (overridable) subjective task
85392+ * credentials (COW) */
85393+
85394 #ifdef CONFIG_FUTEX
85395 struct robust_list_head __user *robust_list;
85396 #ifdef CONFIG_COMPAT
85397@@ -1682,7 +1725,7 @@ struct task_struct {
85398 * Number of functions that haven't been traced
85399 * because of depth overrun.
85400 */
85401- atomic_t trace_overrun;
85402+ atomic_unchecked_t trace_overrun;
85403 /* Pause for the tracing */
85404 atomic_t tracing_graph_pause;
85405 #endif
85406@@ -1710,7 +1753,78 @@ struct task_struct {
85407 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
85408 unsigned long task_state_change;
85409 #endif
85410-};
85411+
85412+#ifdef CONFIG_GRKERNSEC
85413+ /* grsecurity */
85414+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85415+ u64 exec_id;
85416+#endif
85417+#ifdef CONFIG_GRKERNSEC_SETXID
85418+ const struct cred *delayed_cred;
85419+#endif
85420+ struct dentry *gr_chroot_dentry;
85421+ struct acl_subject_label *acl;
85422+ struct acl_subject_label *tmpacl;
85423+ struct acl_role_label *role;
85424+ struct file *exec_file;
85425+ unsigned long brute_expires;
85426+ u16 acl_role_id;
85427+ u8 inherited;
85428+ /* is this the task that authenticated to the special role */
85429+ u8 acl_sp_role;
85430+ u8 is_writable;
85431+ u8 brute;
85432+ u8 gr_is_chrooted;
85433+#endif
85434+
85435+} __randomize_layout;
85436+
85437+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
85438+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
85439+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
85440+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
85441+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
85442+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
85443+
85444+#ifdef CONFIG_PAX_SOFTMODE
85445+extern int pax_softmode;
85446+#endif
85447+
85448+extern int pax_check_flags(unsigned long *);
85449+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
85450+
85451+/* if tsk != current then task_lock must be held on it */
85452+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85453+static inline unsigned long pax_get_flags(struct task_struct *tsk)
85454+{
85455+ if (likely(tsk->mm))
85456+ return tsk->mm->pax_flags;
85457+ else
85458+ return 0UL;
85459+}
85460+
85461+/* if tsk != current then task_lock must be held on it */
85462+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
85463+{
85464+ if (likely(tsk->mm)) {
85465+ tsk->mm->pax_flags = flags;
85466+ return 0;
85467+ }
85468+ return -EINVAL;
85469+}
85470+#endif
85471+
85472+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
85473+extern void pax_set_initial_flags(struct linux_binprm *bprm);
85474+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
85475+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
85476+#endif
85477+
85478+struct path;
85479+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
85480+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
85481+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
85482+extern void pax_report_refcount_overflow(struct pt_regs *regs);
85483
85484 /* Future-safe accessor for struct task_struct's cpus_allowed. */
85485 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
85486@@ -1793,7 +1907,7 @@ struct pid_namespace;
85487 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
85488 struct pid_namespace *ns);
85489
85490-static inline pid_t task_pid_nr(struct task_struct *tsk)
85491+static inline pid_t task_pid_nr(const struct task_struct *tsk)
85492 {
85493 return tsk->pid;
85494 }
85495@@ -2161,6 +2275,25 @@ extern u64 sched_clock_cpu(int cpu);
85496
85497 extern void sched_clock_init(void);
85498
85499+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85500+static inline void populate_stack(void)
85501+{
85502+ struct task_struct *curtask = current;
85503+ int c;
85504+ int *ptr = curtask->stack;
85505+ int *end = curtask->stack + THREAD_SIZE;
85506+
85507+ while (ptr < end) {
85508+ c = *(volatile int *)ptr;
85509+ ptr += PAGE_SIZE/sizeof(int);
85510+ }
85511+}
85512+#else
85513+static inline void populate_stack(void)
85514+{
85515+}
85516+#endif
85517+
85518 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85519 static inline void sched_clock_tick(void)
85520 {
85521@@ -2294,7 +2427,9 @@ void yield(void);
85522 extern struct exec_domain default_exec_domain;
85523
85524 union thread_union {
85525+#ifndef CONFIG_X86
85526 struct thread_info thread_info;
85527+#endif
85528 unsigned long stack[THREAD_SIZE/sizeof(long)];
85529 };
85530
85531@@ -2327,6 +2462,7 @@ extern struct pid_namespace init_pid_ns;
85532 */
85533
85534 extern struct task_struct *find_task_by_vpid(pid_t nr);
85535+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85536 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85537 struct pid_namespace *ns);
85538
85539@@ -2491,7 +2627,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85540 extern void exit_itimers(struct signal_struct *);
85541 extern void flush_itimer_signals(void);
85542
85543-extern void do_group_exit(int);
85544+extern __noreturn void do_group_exit(int);
85545
85546 extern int do_execve(struct filename *,
85547 const char __user * const __user *,
85548@@ -2712,9 +2848,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85549 #define task_stack_end_corrupted(task) \
85550 (*(end_of_stack(task)) != STACK_END_MAGIC)
85551
85552-static inline int object_is_on_stack(void *obj)
85553+static inline int object_starts_on_stack(const void *obj)
85554 {
85555- void *stack = task_stack_page(current);
85556+ const void *stack = task_stack_page(current);
85557
85558 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85559 }
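
[annotation] The sched.h hunk is the core of the patch: task_struct gains the grsecurity ACL fields, real_cred and cred are deliberately separated so the two credential pointers no longer sit adjacent in memory, and the per-mm PaX flags plus their accessors are declared. A small usage sketch against the API declared above, applying only the documented locking rule:

/* hedged sketch: callers must hold task_lock(tsk) when tsk != current */
static inline bool pax_mprotect_active(struct task_struct *tsk)
{
	return (pax_get_flags(tsk) & MF_PAX_MPROTECT) != 0;
}

populate_stack() in the same hunk is simpler than it looks: it reads one word per page of the thread stack so a vmalloc-backed stack is fully faulted in before code that cannot tolerate a stack fault runs.
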
85560diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85561index 596a0e0..bea77ec 100644
85562--- a/include/linux/sched/sysctl.h
85563+++ b/include/linux/sched/sysctl.h
85564@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85565 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85566
85567 extern int sysctl_max_map_count;
85568+extern unsigned long sysctl_heap_stack_gap;
85569
85570 extern unsigned int sysctl_sched_latency;
85571 extern unsigned int sysctl_sched_min_granularity;
85572diff --git a/include/linux/security.h b/include/linux/security.h
85573index a1b7dbd..036f47f 100644
85574--- a/include/linux/security.h
85575+++ b/include/linux/security.h
85576@@ -27,6 +27,7 @@
85577 #include <linux/slab.h>
85578 #include <linux/err.h>
85579 #include <linux/string.h>
85580+#include <linux/grsecurity.h>
85581
85582 struct linux_binprm;
85583 struct cred;
85584@@ -116,8 +117,6 @@ struct seq_file;
85585
85586 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85587
85588-void reset_security_ops(void);
85589-
85590 #ifdef CONFIG_MMU
85591 extern unsigned long mmap_min_addr;
85592 extern unsigned long dac_mmap_min_addr;
85593@@ -1756,7 +1755,7 @@ struct security_operations {
85594 struct audit_context *actx);
85595 void (*audit_rule_free) (void *lsmrule);
85596 #endif /* CONFIG_AUDIT */
85597-};
85598+} __randomize_layout;
85599
85600 /* prototypes */
85601 extern int security_init(void);
85602diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85603index dc368b8..e895209 100644
85604--- a/include/linux/semaphore.h
85605+++ b/include/linux/semaphore.h
85606@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85607 }
85608
85609 extern void down(struct semaphore *sem);
85610-extern int __must_check down_interruptible(struct semaphore *sem);
85611+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85612 extern int __must_check down_killable(struct semaphore *sem);
85613 extern int __must_check down_trylock(struct semaphore *sem);
85614 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85615diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85616index afbb1fd..e1d205d 100644
85617--- a/include/linux/seq_file.h
85618+++ b/include/linux/seq_file.h
85619@@ -27,6 +27,9 @@ struct seq_file {
85620 struct mutex lock;
85621 const struct seq_operations *op;
85622 int poll_event;
85623+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85624+ u64 exec_id;
85625+#endif
85626 #ifdef CONFIG_USER_NS
85627 struct user_namespace *user_ns;
85628 #endif
85629@@ -39,6 +42,7 @@ struct seq_operations {
85630 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85631 int (*show) (struct seq_file *m, void *v);
85632 };
85633+typedef struct seq_operations __no_const seq_operations_no_const;
85634
85635 #define SEQ_SKIP 1
85636
85637@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
85638
85639 char *mangle_path(char *s, const char *p, const char *esc);
85640 int seq_open(struct file *, const struct seq_operations *);
85641+int seq_open_restrict(struct file *, const struct seq_operations *);
85642 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85643 loff_t seq_lseek(struct file *, loff_t, int);
85644 int seq_release(struct inode *, struct file *);
85645@@ -128,6 +133,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
85646 const struct path *root, const char *esc);
85647
85648 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85649+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85650 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85651 int single_release(struct inode *, struct file *);
85652 void *__seq_open_private(struct file *, const struct seq_operations *, int);
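
[annotation] seq_file gets the same treatment: a writable seq_operations_no_const typedef for ops that must be patched at runtime, plus the GRKERNSEC_PROC_MEMMAP exec_id used to tie reads back to the opening task. A hedged sketch of when the writable typedef is needed:

/* hedged sketch: an ops instance assembled at runtime */
static seq_operations_no_const my_seq_ops;

static void my_seq_setup(int (*show)(struct seq_file *, void *))
{
	my_seq_ops.show = show;	/* rejected on a constified struct */
}

The *_restrict variants, seq_open_restrict() and single_open_restrict(), appear to be grsecurity entry points that apply the /proc visibility checks before handing the file to the normal seq_file machinery.
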
85653diff --git a/include/linux/shm.h b/include/linux/shm.h
85654index 6fb8016..ab4465e 100644
85655--- a/include/linux/shm.h
85656+++ b/include/linux/shm.h
85657@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85658 /* The task created the shm object. NULL if the task is dead. */
85659 struct task_struct *shm_creator;
85660 struct list_head shm_clist; /* list by creator */
85661+#ifdef CONFIG_GRKERNSEC
85662+ u64 shm_createtime;
85663+ pid_t shm_lapid;
85664+#endif
85665 };
85666
85667 /* shm_mode upper byte flags */
85668diff --git a/include/linux/signal.h b/include/linux/signal.h
85669index ab1e039..ad4229e 100644
85670--- a/include/linux/signal.h
85671+++ b/include/linux/signal.h
85672@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85673 * know it'll be handled, so that they don't get converted to
85674 * SIGKILL or just silently dropped.
85675 */
85676- kernel_sigaction(sig, (__force __sighandler_t)2);
85677+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85678 }
85679
85680 static inline void disallow_signal(int sig)
85681diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85682index f54d665..e41848d 100644
85683--- a/include/linux/skbuff.h
85684+++ b/include/linux/skbuff.h
85685@@ -770,7 +770,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85686 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85687 int node);
85688 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85689-static inline struct sk_buff *alloc_skb(unsigned int size,
85690+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85691 gfp_t priority)
85692 {
85693 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85694@@ -1966,7 +1966,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85695 return skb->inner_transport_header - skb->inner_network_header;
85696 }
85697
85698-static inline int skb_network_offset(const struct sk_buff *skb)
85699+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85700 {
85701 return skb_network_header(skb) - skb->data;
85702 }
85703@@ -2026,7 +2026,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85704 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85705 */
85706 #ifndef NET_SKB_PAD
85707-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85708+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85709 #endif
85710
85711 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85712@@ -2668,9 +2668,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85713 int *err);
85714 unsigned int datagram_poll(struct file *file, struct socket *sock,
85715 struct poll_table_struct *wait);
85716-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85717+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85718 struct iov_iter *to, int size);
85719-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85720+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85721 struct msghdr *msg, int size)
85722 {
85723 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85724@@ -3180,6 +3180,9 @@ static inline void nf_reset(struct sk_buff *skb)
85725 nf_bridge_put(skb->nf_bridge);
85726 skb->nf_bridge = NULL;
85727 #endif
85728+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85729+ skb->nf_trace = 0;
85730+#endif
85731 }
85732
85733 static inline void nf_reset_trace(struct sk_buff *skb)
85734diff --git a/include/linux/slab.h b/include/linux/slab.h
85735index 76f1fee..d95e6d2 100644
85736--- a/include/linux/slab.h
85737+++ b/include/linux/slab.h
85738@@ -14,15 +14,29 @@
85739 #include <linux/gfp.h>
85740 #include <linux/types.h>
85741 #include <linux/workqueue.h>
85742-
85743+#include <linux/err.h>
85744
85745 /*
85746 * Flags to pass to kmem_cache_create().
85747 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85748 */
85749 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85750+
85751+#ifdef CONFIG_PAX_USERCOPY_SLABS
85752+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85753+#else
85754+#define SLAB_USERCOPY 0x00000000UL
85755+#endif
85756+
85757 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85758 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85759+
85760+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85761+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85762+#else
85763+#define SLAB_NO_SANITIZE 0x00000000UL
85764+#endif
85765+
85766 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85767 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85768 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85769@@ -98,10 +112,13 @@
85770 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85771 * Both make kfree a no-op.
85772 */
85773-#define ZERO_SIZE_PTR ((void *)16)
85774+#define ZERO_SIZE_PTR \
85775+({ \
85776+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85777+ (void *)(-MAX_ERRNO-1L); \
85778+})
85779
85780-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85781- (unsigned long)ZERO_SIZE_PTR)
85782+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85783
85784 #include <linux/kmemleak.h>
85785 #include <linux/kasan.h>
85786@@ -143,6 +160,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85787 void kfree(const void *);
85788 void kzfree(const void *);
85789 size_t ksize(const void *);
85790+const char *check_heap_object(const void *ptr, unsigned long n);
85791+bool is_usercopy_object(const void *ptr);
85792
85793 /*
85794 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85795@@ -235,6 +254,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85796 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85797 #endif
85798
85799+#ifdef CONFIG_PAX_USERCOPY_SLABS
85800+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85801+#endif
85802+
85803 /*
85804 * Figure out which kmalloc slab an allocation of a certain size
85805 * belongs to.
85806@@ -243,7 +266,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85807 * 2 = 120 .. 192 bytes
85808 * n = 2^(n-1) .. 2^n -1
85809 */
85810-static __always_inline int kmalloc_index(size_t size)
85811+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85812 {
85813 if (!size)
85814 return 0;
85815@@ -286,15 +309,15 @@ static __always_inline int kmalloc_index(size_t size)
85816 }
85817 #endif /* !CONFIG_SLOB */
85818
85819-void *__kmalloc(size_t size, gfp_t flags);
85820+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85821 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85822 void kmem_cache_free(struct kmem_cache *, void *);
85823
85824 #ifdef CONFIG_NUMA
85825-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85826+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85827 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85828 #else
85829-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85830+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85831 {
85832 return __kmalloc(size, flags);
85833 }
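
[annotation] Moving ZERO_SIZE_PTR from (void *)16 up to (void *)(-MAX_ERRNO-1) places it just below the ERR_PTR range, and the reworked ZERO_OR_NULL_PTR() then covers NULL, ZERO_SIZE_PTR and every error pointer with one unsigned comparison. A runnable userspace demo of the wraparound trick (a flat unsigned long address space is assumed):

#include <stdio.h>

#define MAX_ERRNO	4095L
#define ZSP		((unsigned long)(-MAX_ERRNO - 1L))	/* new ZERO_SIZE_PTR */
#define ZERO_OR_NULL(x)	((unsigned long)(x) - 1 >= ZSP - 1)

int main(void)
{
	printf("%d %d %d %d\n",
	       ZERO_OR_NULL(0),		/* NULL          -> 1 */
	       ZERO_OR_NULL(ZSP),	/* ZERO_SIZE_PTR -> 1 */
	       ZERO_OR_NULL(-1UL),	/* ERR_PTR(-1)   -> 1 */
	       ZERO_OR_NULL(0x1000));	/* real pointer  -> 0 */
	return 0;
}

Subtracting 1 makes x == 0 wrap to ULONG_MAX, so NULL lands in the true range without a second comparison.
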
85834diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85835index 33d0490..70a6313 100644
85836--- a/include/linux/slab_def.h
85837+++ b/include/linux/slab_def.h
85838@@ -40,7 +40,7 @@ struct kmem_cache {
85839 /* 4) cache creation/removal */
85840 const char *name;
85841 struct list_head list;
85842- int refcount;
85843+ atomic_t refcount;
85844 int object_size;
85845 int align;
85846
85847@@ -56,10 +56,14 @@ struct kmem_cache {
85848 unsigned long node_allocs;
85849 unsigned long node_frees;
85850 unsigned long node_overflow;
85851- atomic_t allochit;
85852- atomic_t allocmiss;
85853- atomic_t freehit;
85854- atomic_t freemiss;
85855+ atomic_unchecked_t allochit;
85856+ atomic_unchecked_t allocmiss;
85857+ atomic_unchecked_t freehit;
85858+ atomic_unchecked_t freemiss;
85859+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85860+ atomic_unchecked_t sanitized;
85861+ atomic_unchecked_t not_sanitized;
85862+#endif
85863
85864 /*
85865 * If debugging is enabled, then the allocator can add additional
85866diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85867index 3388511..6252f90 100644
85868--- a/include/linux/slub_def.h
85869+++ b/include/linux/slub_def.h
85870@@ -74,7 +74,7 @@ struct kmem_cache {
85871 struct kmem_cache_order_objects max;
85872 struct kmem_cache_order_objects min;
85873 gfp_t allocflags; /* gfp flags to use on each alloc */
85874- int refcount; /* Refcount for slab cache destroy */
85875+ atomic_t refcount; /* Refcount for slab cache destroy */
85876 void (*ctor)(void *);
85877 int inuse; /* Offset to metadata */
85878 int align; /* Alignment */
85879diff --git a/include/linux/smp.h b/include/linux/smp.h
85880index be91db2..3f23232 100644
85881--- a/include/linux/smp.h
85882+++ b/include/linux/smp.h
85883@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
85884 #endif
85885
85886 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85887+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85888 #define put_cpu() preempt_enable()
85889+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85890
85891 /*
85892 * Callback to arch code if there's nosmp or maxcpus=0 on the
85893diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85894index 46cca4c..3323536 100644
85895--- a/include/linux/sock_diag.h
85896+++ b/include/linux/sock_diag.h
85897@@ -11,7 +11,7 @@ struct sock;
85898 struct sock_diag_handler {
85899 __u8 family;
85900 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85901-};
85902+} __do_const;
85903
85904 int sock_diag_register(const struct sock_diag_handler *h);
85905 void sock_diag_unregister(const struct sock_diag_handler *h);
85906diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85907index 680f9a3..f13aeb0 100644
85908--- a/include/linux/sonet.h
85909+++ b/include/linux/sonet.h
85910@@ -7,7 +7,7 @@
85911 #include <uapi/linux/sonet.h>
85912
85913 struct k_sonet_stats {
85914-#define __HANDLE_ITEM(i) atomic_t i
85915+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85916 __SONET_ITEMS
85917 #undef __HANDLE_ITEM
85918 };
85919diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85920index 07d8e53..dc934c9 100644
85921--- a/include/linux/sunrpc/addr.h
85922+++ b/include/linux/sunrpc/addr.h
85923@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85924 {
85925 switch (sap->sa_family) {
85926 case AF_INET:
85927- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85928+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85929 case AF_INET6:
85930- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85931+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85932 }
85933 return 0;
85934 }
85935@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85936 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85937 const struct sockaddr *src)
85938 {
85939- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85940+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85941 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85942
85943 dsin->sin_family = ssin->sin_family;
85944@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85945 if (sa->sa_family != AF_INET6)
85946 return 0;
85947
85948- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85949+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85950 }
85951
85952 #endif /* _LINUX_SUNRPC_ADDR_H */
85953diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85954index 598ba80..d90cba6 100644
85955--- a/include/linux/sunrpc/clnt.h
85956+++ b/include/linux/sunrpc/clnt.h
85957@@ -100,7 +100,7 @@ struct rpc_procinfo {
85958 unsigned int p_timer; /* Which RTT timer to use */
85959 u32 p_statidx; /* Which procedure to account */
85960 const char * p_name; /* name of procedure */
85961-};
85962+} __do_const;
85963
85964 #ifdef __KERNEL__
85965
85966diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85967index fae6fb9..023fbcd 100644
85968--- a/include/linux/sunrpc/svc.h
85969+++ b/include/linux/sunrpc/svc.h
85970@@ -420,7 +420,7 @@ struct svc_procedure {
85971 unsigned int pc_count; /* call count */
85972 unsigned int pc_cachetype; /* cache info (NFS) */
85973 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85974-};
85975+} __do_const;
85976
85977 /*
85978 * Function prototypes.
85979diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85980index df8edf8..d140fec 100644
85981--- a/include/linux/sunrpc/svc_rdma.h
85982+++ b/include/linux/sunrpc/svc_rdma.h
85983@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85984 extern unsigned int svcrdma_max_requests;
85985 extern unsigned int svcrdma_max_req_size;
85986
85987-extern atomic_t rdma_stat_recv;
85988-extern atomic_t rdma_stat_read;
85989-extern atomic_t rdma_stat_write;
85990-extern atomic_t rdma_stat_sq_starve;
85991-extern atomic_t rdma_stat_rq_starve;
85992-extern atomic_t rdma_stat_rq_poll;
85993-extern atomic_t rdma_stat_rq_prod;
85994-extern atomic_t rdma_stat_sq_poll;
85995-extern atomic_t rdma_stat_sq_prod;
85996+extern atomic_unchecked_t rdma_stat_recv;
85997+extern atomic_unchecked_t rdma_stat_read;
85998+extern atomic_unchecked_t rdma_stat_write;
85999+extern atomic_unchecked_t rdma_stat_sq_starve;
86000+extern atomic_unchecked_t rdma_stat_rq_starve;
86001+extern atomic_unchecked_t rdma_stat_rq_poll;
86002+extern atomic_unchecked_t rdma_stat_rq_prod;
86003+extern atomic_unchecked_t rdma_stat_sq_poll;
86004+extern atomic_unchecked_t rdma_stat_sq_prod;
86005
86006 /*
86007 * Contexts are built when an RDMA request is created and are a
86008diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
86009index 8d71d65..f79586e 100644
86010--- a/include/linux/sunrpc/svcauth.h
86011+++ b/include/linux/sunrpc/svcauth.h
86012@@ -120,7 +120,7 @@ struct auth_ops {
86013 int (*release)(struct svc_rqst *rq);
86014 void (*domain_release)(struct auth_domain *);
86015 int (*set_client)(struct svc_rqst *rq);
86016-};
86017+} __do_const;
86018
86019 #define SVC_GARBAGE 1
86020 #define SVC_SYSERR 2
86021diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
86022index e7a018e..49f8b17 100644
86023--- a/include/linux/swiotlb.h
86024+++ b/include/linux/swiotlb.h
86025@@ -60,7 +60,8 @@ extern void
86026
86027 extern void
86028 swiotlb_free_coherent(struct device *hwdev, size_t size,
86029- void *vaddr, dma_addr_t dma_handle);
86030+ void *vaddr, dma_addr_t dma_handle,
86031+ struct dma_attrs *attrs);
86032
86033 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
86034 unsigned long offset, size_t size,
86035diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
86036index 76d1e38..d92ff38 100644
86037--- a/include/linux/syscalls.h
86038+++ b/include/linux/syscalls.h
86039@@ -102,7 +102,12 @@ union bpf_attr;
86040 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
86041 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
86042 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
86043-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
86044+#define __SC_LONG(t, a) __typeof__( \
86045+ __builtin_choose_expr( \
86046+ sizeof(t) > sizeof(int), \
86047+ (t) 0, \
86048+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
86049+ )) a
86050 #define __SC_CAST(t, a) (t) a
86051 #define __SC_ARGS(t, a) a
86052 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
86053@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
86054 asmlinkage long sys_fsync(unsigned int fd);
86055 asmlinkage long sys_fdatasync(unsigned int fd);
86056 asmlinkage long sys_bdflush(int func, long data);
86057-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
86058- char __user *type, unsigned long flags,
86059+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
86060+ const char __user *type, unsigned long flags,
86061 void __user *data);
86062-asmlinkage long sys_umount(char __user *name, int flags);
86063-asmlinkage long sys_oldumount(char __user *name);
86064+asmlinkage long sys_umount(const char __user *name, int flags);
86065+asmlinkage long sys_oldumount(const char __user *name);
86066 asmlinkage long sys_truncate(const char __user *path, long length);
86067 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
86068 asmlinkage long sys_stat(const char __user *filename,
86069@@ -604,7 +609,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
86070 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
86071 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
86072 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
86073- struct sockaddr __user *, int);
86074+ struct sockaddr __user *, int) __intentional_overflow(0);
86075 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
86076 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
86077 unsigned int vlen, unsigned flags);
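
[annotation] The __SC_LONG rewrite preserves the signedness of a syscall argument when it is widened to a register-sized slot; the old macro always produced a signed long, which misinformed the size_overflow plugin about unsigned parameters. A userspace demo of the selection logic, where __type_is_unsigned() is a stand-in for the plugin-provided helper and assumed to behave like a constant signedness test:

#include <stdio.h>

#define __type_is_unsigned(t)	((t)-1 > 0)	/* assumption: mimics the plugin helper */
#define SC_LONG_TYPE(t)	__typeof__(__builtin_choose_expr(	\
		sizeof(t) > sizeof(int), (t)0,			\
		__builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L)))

int main(void)
{
	printf("%d\n", (SC_LONG_TYPE(unsigned int))-1 > 0);	/* 1: slot stays unsigned */
	printf("%d\n", (SC_LONG_TYPE(int))-1 > 0);		/* 0: slot stays signed */
	return 0;
}

Types wider than int are taken as-is via the outer __builtin_choose_expr, matching the old __TYPE_IS_LL() behaviour.
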
86078diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
86079index 27b3b0b..e093dd9 100644
86080--- a/include/linux/syscore_ops.h
86081+++ b/include/linux/syscore_ops.h
86082@@ -16,7 +16,7 @@ struct syscore_ops {
86083 int (*suspend)(void);
86084 void (*resume)(void);
86085 void (*shutdown)(void);
86086-};
86087+} __do_const;
86088
86089 extern void register_syscore_ops(struct syscore_ops *ops);
86090 extern void unregister_syscore_ops(struct syscore_ops *ops);
86091diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
86092index b7361f8..341a15a 100644
86093--- a/include/linux/sysctl.h
86094+++ b/include/linux/sysctl.h
86095@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
86096
86097 extern int proc_dostring(struct ctl_table *, int,
86098 void __user *, size_t *, loff_t *);
86099+extern int proc_dostring_modpriv(struct ctl_table *, int,
86100+ void __user *, size_t *, loff_t *);
86101 extern int proc_dointvec(struct ctl_table *, int,
86102 void __user *, size_t *, loff_t *);
86103 extern int proc_dointvec_minmax(struct ctl_table *, int,
86104@@ -113,7 +115,8 @@ struct ctl_table
86105 struct ctl_table_poll *poll;
86106 void *extra1;
86107 void *extra2;
86108-};
86109+} __do_const __randomize_layout;
86110+typedef struct ctl_table __no_const ctl_table_no_const;
86111
86112 struct ctl_node {
86113 struct rb_node node;
86114diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
86115index ddad161..a3efd26 100644
86116--- a/include/linux/sysfs.h
86117+++ b/include/linux/sysfs.h
86118@@ -34,7 +34,8 @@ struct attribute {
86119 struct lock_class_key *key;
86120 struct lock_class_key skey;
86121 #endif
86122-};
86123+} __do_const;
86124+typedef struct attribute __no_const attribute_no_const;
86125
86126 /**
86127 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
86128@@ -63,7 +64,8 @@ struct attribute_group {
86129 struct attribute *, int);
86130 struct attribute **attrs;
86131 struct bin_attribute **bin_attrs;
86132-};
86133+} __do_const;
86134+typedef struct attribute_group __no_const attribute_group_no_const;
86135
86136 /**
86137 * Use these macros to make defining attributes easier. See include/linux/device.h
86138@@ -137,7 +139,8 @@ struct bin_attribute {
86139 char *, loff_t, size_t);
86140 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
86141 struct vm_area_struct *vma);
86142-};
86143+} __do_const;
86144+typedef struct bin_attribute __no_const bin_attribute_no_const;
86145
86146 /**
86147 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
86148diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
86149index 387fa7d..3fcde6b 100644
86150--- a/include/linux/sysrq.h
86151+++ b/include/linux/sysrq.h
86152@@ -16,6 +16,7 @@
86153
86154 #include <linux/errno.h>
86155 #include <linux/types.h>
86156+#include <linux/compiler.h>
86157
86158 /* Possible values of bitmask for enabling sysrq functions */
86159 /* 0x0001 is reserved for enable everything */
86160@@ -33,7 +34,7 @@ struct sysrq_key_op {
86161 char *help_msg;
86162 char *action_msg;
86163 int enable_mask;
86164-};
86165+} __do_const;
86166
86167 #ifdef CONFIG_MAGIC_SYSRQ
86168
86169diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
86170index ff307b5..f1a4468 100644
86171--- a/include/linux/thread_info.h
86172+++ b/include/linux/thread_info.h
86173@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
86174 #error "no set_restore_sigmask() provided and default one won't work"
86175 #endif
86176
86177+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
86178+
86179+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
86180+{
86181+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
86182+}
86183+
86184 #endif /* __KERNEL__ */
86185
86186 #endif /* _LINUX_THREAD_INFO_H */
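
[annotation] check_object_size() is the PAX_USERCOPY entry point: copies between kernel and user space are routed through it so the kernel object's slab cache or stack frame can be validated against the requested length. A hedged sketch of how a hardened copy wrapper would call it; the real wiring lives in the per-arch uaccess code elsewhere in this patch:

/* hedged sketch: validate the kernel object before a user copy */
static inline unsigned long
copy_to_user_checked(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);	/* to_user == true */
	return copy_to_user(to, from, n);
}

Forwarding __builtin_constant_p(n) as const_size presumably lets __check_object_size() treat compile-time-constant lengths differently from attacker-influenced runtime lengths.
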
86187diff --git a/include/linux/tty.h b/include/linux/tty.h
86188index 358a337..8829c1f 100644
86189--- a/include/linux/tty.h
86190+++ b/include/linux/tty.h
86191@@ -225,7 +225,7 @@ struct tty_port {
86192 const struct tty_port_operations *ops; /* Port operations */
86193 spinlock_t lock; /* Lock protecting tty field */
86194 int blocked_open; /* Waiting to open */
86195- int count; /* Usage count */
86196+ atomic_t count; /* Usage count */
86197 wait_queue_head_t open_wait; /* Open waiters */
86198 wait_queue_head_t close_wait; /* Close waiters */
86199 wait_queue_head_t delta_msr_wait; /* Modem status change */
86200@@ -313,7 +313,7 @@ struct tty_struct {
86201 /* If the tty has a pending do_SAK, queue it here - akpm */
86202 struct work_struct SAK_work;
86203 struct tty_port *port;
86204-};
86205+} __randomize_layout;
86206
86207 /* Each of a tty's open files has private_data pointing to tty_file_private */
86208 struct tty_file_private {
86209@@ -572,7 +572,7 @@ extern int tty_port_open(struct tty_port *port,
86210 struct tty_struct *tty, struct file *filp);
86211 static inline int tty_port_users(struct tty_port *port)
86212 {
86213- return port->count + port->blocked_open;
86214+ return atomic_read(&port->count) + port->blocked_open;
86215 }
86216
86217 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
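
[annotation] tty_port.count becomes an atomic_t, the standard conversion this patch applies to reference counts that used to be bumped under assorted locks. The call-site change is mechanical; a sketch:

/* hedged sketch of the call-site conversion for tty_port.count */
static void port_get(struct tty_port *port)
{
	atomic_inc(&port->count);			/* was: port->count++ */
}

static bool port_put_final(struct tty_port *port)
{
	return atomic_dec_return(&port->count) == 0;	/* was: --port->count == 0 */
}

With PAX_REFCOUNT enabled the checked atomic_t also traps on overflow, which is exactly why genuine refcounts stay atomic_t here while pure statistics move to atomic_unchecked_t.
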
86218diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
86219index 92e337c..f46757b 100644
86220--- a/include/linux/tty_driver.h
86221+++ b/include/linux/tty_driver.h
86222@@ -291,7 +291,7 @@ struct tty_operations {
86223 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
86224 #endif
86225 const struct file_operations *proc_fops;
86226-};
86227+} __do_const __randomize_layout;
86228
86229 struct tty_driver {
86230 int magic; /* magic number for this structure */
86231@@ -325,7 +325,7 @@ struct tty_driver {
86232
86233 const struct tty_operations *ops;
86234 struct list_head tty_drivers;
86235-};
86236+} __randomize_layout;
86237
86238 extern struct list_head tty_drivers;
86239
86240diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
86241index 00c9d68..bc0188b 100644
86242--- a/include/linux/tty_ldisc.h
86243+++ b/include/linux/tty_ldisc.h
86244@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
86245
86246 struct module *owner;
86247
86248- int refcount;
86249+ atomic_t refcount;
86250 };
86251
86252 struct tty_ldisc {
86253diff --git a/include/linux/types.h b/include/linux/types.h
86254index 6747247..fc7ec8b 100644
86255--- a/include/linux/types.h
86256+++ b/include/linux/types.h
86257@@ -174,10 +174,26 @@ typedef struct {
86258 int counter;
86259 } atomic_t;
86260
86261+#ifdef CONFIG_PAX_REFCOUNT
86262+typedef struct {
86263+ int counter;
86264+} atomic_unchecked_t;
86265+#else
86266+typedef atomic_t atomic_unchecked_t;
86267+#endif
86268+
86269 #ifdef CONFIG_64BIT
86270 typedef struct {
86271 long counter;
86272 } atomic64_t;
86273+
86274+#ifdef CONFIG_PAX_REFCOUNT
86275+typedef struct {
86276+ long counter;
86277+} atomic64_unchecked_t;
86278+#else
86279+typedef atomic64_t atomic64_unchecked_t;
86280+#endif
86281 #endif
86282
86283 struct list_head {
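
[annotation] atomic_unchecked_t is the escape hatch for PAX_REFCOUNT: counters that may legitimately wrap keep plain semantics, while every remaining atomic_t gains an overflow trap. A sketch of the x86 flavour as PaX is generally understood to implement it; treat the exact asm as an assumption:

/* hedged sketch: checked increment traps on signed overflow */
static inline void atomic_inc_checked(atomic_t *v)
{
	asm volatile("lock; incl %0\n"
		     "jno 0f\n"
		     "lock; decl %0\n"	/* undo the overflowing increment */
		     "int $4\n"		/* raise the overflow exception */
		     "0:\n"
		     : "+m" (v->counter));
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock; incl %0" : "+m" (v->counter));	/* plain, may wrap */
}

This split explains the many one-line s/atomic_t/atomic_unchecked_t/ hunks in this file: each is an audit decision that the counter is not a reference count.
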
86284diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
86285index ecd3319..8a36ded 100644
86286--- a/include/linux/uaccess.h
86287+++ b/include/linux/uaccess.h
86288@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
86289 long ret; \
86290 mm_segment_t old_fs = get_fs(); \
86291 \
86292- set_fs(KERNEL_DS); \
86293 pagefault_disable(); \
86294- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
86295- pagefault_enable(); \
86296+ set_fs(KERNEL_DS); \
86297+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
86298 set_fs(old_fs); \
86299+ pagefault_enable(); \
86300 ret; \
86301 })
86302
86303diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
86304index 2d1f9b6..d7a9fce 100644
86305--- a/include/linux/uidgid.h
86306+++ b/include/linux/uidgid.h
86307@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
86308
86309 #endif /* CONFIG_USER_NS */
86310
86311+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
86312+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
86313+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
86314+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
86315+
86316 #endif /* _LINUX_UIDGID_H */
86317diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
86318index 32c0e83..671eb35 100644
86319--- a/include/linux/uio_driver.h
86320+++ b/include/linux/uio_driver.h
86321@@ -67,7 +67,7 @@ struct uio_device {
86322 struct module *owner;
86323 struct device *dev;
86324 int minor;
86325- atomic_t event;
86326+ atomic_unchecked_t event;
86327 struct fasync_struct *async_queue;
86328 wait_queue_head_t wait;
86329 struct uio_info *info;
86330diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
86331index 99c1b4d..562e6f3 100644
86332--- a/include/linux/unaligned/access_ok.h
86333+++ b/include/linux/unaligned/access_ok.h
86334@@ -4,34 +4,34 @@
86335 #include <linux/kernel.h>
86336 #include <asm/byteorder.h>
86337
86338-static inline u16 get_unaligned_le16(const void *p)
86339+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
86340 {
86341- return le16_to_cpup((__le16 *)p);
86342+ return le16_to_cpup((const __le16 *)p);
86343 }
86344
86345-static inline u32 get_unaligned_le32(const void *p)
86346+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
86347 {
86348- return le32_to_cpup((__le32 *)p);
86349+ return le32_to_cpup((const __le32 *)p);
86350 }
86351
86352-static inline u64 get_unaligned_le64(const void *p)
86353+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
86354 {
86355- return le64_to_cpup((__le64 *)p);
86356+ return le64_to_cpup((const __le64 *)p);
86357 }
86358
86359-static inline u16 get_unaligned_be16(const void *p)
86360+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
86361 {
86362- return be16_to_cpup((__be16 *)p);
86363+ return be16_to_cpup((const __be16 *)p);
86364 }
86365
86366-static inline u32 get_unaligned_be32(const void *p)
86367+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
86368 {
86369- return be32_to_cpup((__be32 *)p);
86370+ return be32_to_cpup((const __be32 *)p);
86371 }
86372
86373-static inline u64 get_unaligned_be64(const void *p)
86374+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
86375 {
86376- return be64_to_cpup((__be64 *)p);
86377+ return be64_to_cpup((const __be64 *)p);
86378 }
86379
86380 static inline void put_unaligned_le16(u16 val, void *p)
86381diff --git a/include/linux/usb.h b/include/linux/usb.h
86382index 7ee1b5c..82e2c1a 100644
86383--- a/include/linux/usb.h
86384+++ b/include/linux/usb.h
86385@@ -566,7 +566,7 @@ struct usb_device {
86386 int maxchild;
86387
86388 u32 quirks;
86389- atomic_t urbnum;
86390+ atomic_unchecked_t urbnum;
86391
86392 unsigned long active_duration;
86393
86394@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
86395
86396 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
86397 __u8 request, __u8 requesttype, __u16 value, __u16 index,
86398- void *data, __u16 size, int timeout);
86399+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
86400 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
86401 void *data, int len, int *actual_length, int timeout);
86402 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
86403diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
86404index 9fd9e48..e2c5f35 100644
86405--- a/include/linux/usb/renesas_usbhs.h
86406+++ b/include/linux/usb/renesas_usbhs.h
86407@@ -39,7 +39,7 @@ enum {
86408 */
86409 struct renesas_usbhs_driver_callback {
86410 int (*notify_hotplug)(struct platform_device *pdev);
86411-};
86412+} __no_const;
86413
86414 /*
86415 * callback functions for platform
86416diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
86417index 8297e5b..0dfae27 100644
86418--- a/include/linux/user_namespace.h
86419+++ b/include/linux/user_namespace.h
86420@@ -39,7 +39,7 @@ struct user_namespace {
86421 struct key *persistent_keyring_register;
86422 struct rw_semaphore persistent_keyring_register_sem;
86423 #endif
86424-};
86425+} __randomize_layout;
86426
86427 extern struct user_namespace init_user_ns;
86428
86429diff --git a/include/linux/utsname.h b/include/linux/utsname.h
86430index 5093f58..c103e58 100644
86431--- a/include/linux/utsname.h
86432+++ b/include/linux/utsname.h
86433@@ -25,7 +25,7 @@ struct uts_namespace {
86434 struct new_utsname name;
86435 struct user_namespace *user_ns;
86436 struct ns_common ns;
86437-};
86438+} __randomize_layout;
86439 extern struct uts_namespace init_uts_ns;
86440
86441 #ifdef CONFIG_UTS_NS
86442diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
86443index 6f8fbcf..4efc177 100644
86444--- a/include/linux/vermagic.h
86445+++ b/include/linux/vermagic.h
86446@@ -25,9 +25,42 @@
86447 #define MODULE_ARCH_VERMAGIC ""
86448 #endif
86449
86450+#ifdef CONFIG_PAX_REFCOUNT
86451+#define MODULE_PAX_REFCOUNT "REFCOUNT "
86452+#else
86453+#define MODULE_PAX_REFCOUNT ""
86454+#endif
86455+
86456+#ifdef CONSTIFY_PLUGIN
86457+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
86458+#else
86459+#define MODULE_CONSTIFY_PLUGIN ""
86460+#endif
86461+
86462+#ifdef STACKLEAK_PLUGIN
86463+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
86464+#else
86465+#define MODULE_STACKLEAK_PLUGIN ""
86466+#endif
86467+
86468+#ifdef RANDSTRUCT_PLUGIN
86469+#include <generated/randomize_layout_hash.h>
86470+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
86471+#else
86472+#define MODULE_RANDSTRUCT_PLUGIN
86473+#endif
86474+
86475+#ifdef CONFIG_GRKERNSEC
86476+#define MODULE_GRSEC "GRSEC "
86477+#else
86478+#define MODULE_GRSEC ""
86479+#endif
86480+
86481 #define VERMAGIC_STRING \
86482 UTS_RELEASE " " \
86483 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86484 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86485- MODULE_ARCH_VERMAGIC
86486+ MODULE_ARCH_VERMAGIC \
86487+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86488+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86489
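
[annotation] The vermagic additions turn feature mismatches into load-time failures: a module built without PAX_REFCOUNT, for instance, disagrees with the kernel about atomic_t semantics and must be refused. With REFCOUNT, CONSTIFY and GRSEC enabled the expanded string would read roughly as follows (kernel version hypothetical):

/* "4.0.1 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC " */

and insmod rejects any module whose recorded vermagic differs from the running kernel's.
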
86490diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86491index b483abd..af305ad 100644
86492--- a/include/linux/vga_switcheroo.h
86493+++ b/include/linux/vga_switcheroo.h
86494@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86495
86496 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86497
86498-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86499+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86500 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86501-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86502+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86503 #else
86504
86505 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86506@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86507
86508 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86509
86510-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86511+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86512 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86513-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86514+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86515
86516 #endif
86517 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86518diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86519index 0ec5983..cc61051 100644
86520--- a/include/linux/vmalloc.h
86521+++ b/include/linux/vmalloc.h
86522@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86523 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86524 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
86525 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
86526+
86527+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86528+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
86529+#endif
86530+
86531 /* bits [20..32] reserved for arch specific ioremap internals */
86532
86533 /*
86534@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86535 unsigned long flags, pgprot_t prot);
86536 extern void vunmap(const void *addr);
86537
86538+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86539+extern void unmap_process_stacks(struct task_struct *task);
86540+#endif
86541+
86542 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86543 unsigned long uaddr, void *kaddr,
86544 unsigned long size);
86545@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
86546
86547 /* for /dev/kmem */
86548 extern long vread(char *buf, char *addr, unsigned long count);
86549-extern long vwrite(char *buf, char *addr, unsigned long count);
86550+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86551
86552 /*
86553 * Internals. Don't use..
86554diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86555index 82e7db7..f8ce3d0 100644
86556--- a/include/linux/vmstat.h
86557+++ b/include/linux/vmstat.h
86558@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86559 /*
86560 * Zone based page accounting with per cpu differentials.
86561 */
86562-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86563+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86564
86565 static inline void zone_page_state_add(long x, struct zone *zone,
86566 enum zone_stat_item item)
86567 {
86568- atomic_long_add(x, &zone->vm_stat[item]);
86569- atomic_long_add(x, &vm_stat[item]);
86570+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86571+ atomic_long_add_unchecked(x, &vm_stat[item]);
86572 }
86573
86574-static inline unsigned long global_page_state(enum zone_stat_item item)
86575+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86576 {
86577- long x = atomic_long_read(&vm_stat[item]);
86578+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86579 #ifdef CONFIG_SMP
86580 if (x < 0)
86581 x = 0;
86582@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86583 return x;
86584 }
86585
86586-static inline unsigned long zone_page_state(struct zone *zone,
86587+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86588 enum zone_stat_item item)
86589 {
86590- long x = atomic_long_read(&zone->vm_stat[item]);
86591+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86592 #ifdef CONFIG_SMP
86593 if (x < 0)
86594 x = 0;
86595@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86596 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86597 enum zone_stat_item item)
86598 {
86599- long x = atomic_long_read(&zone->vm_stat[item]);
86600+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86601
86602 #ifdef CONFIG_SMP
86603 int cpu;
86604@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86605
86606 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86607 {
86608- atomic_long_inc(&zone->vm_stat[item]);
86609- atomic_long_inc(&vm_stat[item]);
86610+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86611+ atomic_long_inc_unchecked(&vm_stat[item]);
86612 }
86613
86614 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86615 {
86616- atomic_long_dec(&zone->vm_stat[item]);
86617- atomic_long_dec(&vm_stat[item]);
86618+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86619+ atomic_long_dec_unchecked(&vm_stat[item]);
86620 }
86621
86622 static inline void __inc_zone_page_state(struct page *page,
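
Throughout this section the patch swaps atomic_long_t for atomic_long_unchecked_t on pure statistics counters. Under PaX REFCOUNT the checked atomics trap on overflow to stop reference-count wraps; counters that may legitimately wrap opt out via the _unchecked variants. A simplified sketch of the distinction, not the patch's actual per-arch assembly:

/* Simplified illustration of the unchecked variant (the real kernel
 * implementation is arch-specific assembly; the checked sibling adds
 * an overflow trap). */
typedef struct { long counter; } atomic_long_unchecked_t;

static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
{
	/* plain two's-complement wrap-around is acceptable here */
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}
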
86623diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86624index 91b0a68..0e9adf6 100644
86625--- a/include/linux/xattr.h
86626+++ b/include/linux/xattr.h
86627@@ -28,7 +28,7 @@ struct xattr_handler {
86628 size_t size, int handler_flags);
86629 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86630 size_t size, int flags, int handler_flags);
86631-};
86632+} __do_const;
86633
86634 struct xattr {
86635 const char *name;
86636@@ -37,6 +37,9 @@ struct xattr {
86637 };
86638
86639 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86640+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86641+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86642+#endif
86643 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86644 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86645 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
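
This is the first of many ops structures the patch tags __do_const: under the constify gcc plugin, every instance of such a structure is forced const and placed in .rodata, so its function pointers cannot be overwritten at runtime. A sketch of what that means for a handler definition; the handler body and prefix are illustrative:

#include <linux/errno.h>
#include <linux/xattr.h>

static int example_get(struct dentry *dentry, const char *name,
		       void *buffer, size_t size, int handler_flags)
{
	return -EOPNOTSUPP;
}

/* With xattr_handler tagged __do_const, an instance like this is
 * effectively const even without the keyword and lands in .rodata. */
static const struct xattr_handler example_xattr_handler = {
	.prefix	= "user.example.",
	.get	= example_get,
};
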
86646diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86647index 92dbbd3..13ab0b3 100644
86648--- a/include/linux/zlib.h
86649+++ b/include/linux/zlib.h
86650@@ -31,6 +31,7 @@
86651 #define _ZLIB_H
86652
86653 #include <linux/zconf.h>
86654+#include <linux/compiler.h>
86655
86656 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86657 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86658@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86659
86660 /* basic functions */
86661
86662-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86663+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86664 /*
86665 Returns the number of bytes that need to be allocated for a per-
86666 stream workspace with the specified parameters. A pointer to this
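
The __intentional_overflow() annotation feeds the size_overflow gcc plugin; the numeric argument selects which value the marking applies to, with the exact convention defined by the plugin rather than this header. On builds without the plugin the macro has to compile away; a plausible fallback shape, assuming the usual pattern from the patch's compiler.h changes (not part of this excerpt):

/* Assumed no-plugin fallback (the real definition lives in the patch's
 * include/linux/compiler*.h hunks): */
#ifndef __intentional_overflow
# define __intentional_overflow(...)
#endif
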
86667diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86668index 3e4fddf..5ec9104 100644
86669--- a/include/media/v4l2-dev.h
86670+++ b/include/media/v4l2-dev.h
86671@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86672 int (*mmap) (struct file *, struct vm_area_struct *);
86673 int (*open) (struct file *);
86674 int (*release) (struct file *);
86675-};
86676+} __do_const;
86677
86678 /*
86679 * Newer version of video_device, handled by videodev2.c
86680diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86681index ffb69da..040393e 100644
86682--- a/include/media/v4l2-device.h
86683+++ b/include/media/v4l2-device.h
86684@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86685 this function returns 0. If the name ends with a digit (e.g. cx18),
86686 then the name will be set to cx18-0 since cx180 looks really odd. */
86687 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86688- atomic_t *instance);
86689+ atomic_unchecked_t *instance);
86690
86691 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86692 Since the parent disappears this ensures that v4l2_dev doesn't have an
86693diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86694index 2a25dec..bf6dd8a 100644
86695--- a/include/net/9p/transport.h
86696+++ b/include/net/9p/transport.h
86697@@ -62,7 +62,7 @@ struct p9_trans_module {
86698 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86699 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86700 char *, char *, int , int, int, int);
86701-};
86702+} __do_const;
86703
86704 void v9fs_register_trans(struct p9_trans_module *m);
86705 void v9fs_unregister_trans(struct p9_trans_module *m);
86706diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86707index a175ba4..196eb8242 100644
86708--- a/include/net/af_unix.h
86709+++ b/include/net/af_unix.h
86710@@ -36,7 +36,7 @@ struct unix_skb_parms {
86711 u32 secid; /* Security ID */
86712 #endif
86713 u32 consumed;
86714-};
86715+} __randomize_layout;
86716
86717 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86718 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86719diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86720index 2239a37..a83461f 100644
86721--- a/include/net/bluetooth/l2cap.h
86722+++ b/include/net/bluetooth/l2cap.h
86723@@ -609,7 +609,7 @@ struct l2cap_ops {
86724 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86725 unsigned long hdr_len,
86726 unsigned long len, int nb);
86727-};
86728+} __do_const;
86729
86730 struct l2cap_conn {
86731 struct hci_conn *hcon;
86732diff --git a/include/net/bonding.h b/include/net/bonding.h
86733index fda6fee..dbdf83c 100644
86734--- a/include/net/bonding.h
86735+++ b/include/net/bonding.h
86736@@ -665,7 +665,7 @@ extern struct rtnl_link_ops bond_link_ops;
86737
86738 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86739 {
86740- atomic_long_inc(&dev->tx_dropped);
86741+ atomic_long_inc_unchecked(&dev->tx_dropped);
86742 dev_kfree_skb_any(skb);
86743 }
86744
86745diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86746index f2ae33d..c457cf0 100644
86747--- a/include/net/caif/cfctrl.h
86748+++ b/include/net/caif/cfctrl.h
86749@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86750 void (*radioset_rsp)(void);
86751 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86752 struct cflayer *client_layer);
86753-};
86754+} __no_const;
86755
86756 /* Link Setup Parameters for CAIF-Links. */
86757 struct cfctrl_link_param {
86758@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86759 struct cfctrl {
86760 struct cfsrvl serv;
86761 struct cfctrl_rsp res;
86762- atomic_t req_seq_no;
86763- atomic_t rsp_seq_no;
86764+ atomic_unchecked_t req_seq_no;
86765+ atomic_unchecked_t rsp_seq_no;
86766 struct list_head list;
86767 /* Protects from simultaneous access to first_req list */
86768 spinlock_t info_list_lock;
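
Note the contrast with __do_const above: cfctrl_rsp holds callbacks that CAIF rewrites at runtime, so it is tagged __no_const to tell the constify plugin to leave it writable even though it consists purely of function pointers. Schematically (struct names illustrative):

struct auto_constified {	/* all function pointers: the plugin makes */
	void (*fn)(void);	/* every instance const automatically      */
};

struct opted_out {		/* __no_const: instances must stay         */
	void (*fn)(void);	/* writable because they are patched live  */
} __no_const;
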
86769diff --git a/include/net/flow.h b/include/net/flow.h
86770index 8109a15..504466d 100644
86771--- a/include/net/flow.h
86772+++ b/include/net/flow.h
86773@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86774
86775 void flow_cache_flush(struct net *net);
86776 void flow_cache_flush_deferred(struct net *net);
86777-extern atomic_t flow_cache_genid;
86778+extern atomic_unchecked_t flow_cache_genid;
86779
86780 #endif
86781diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86782index 0574abd..0f16881 100644
86783--- a/include/net/genetlink.h
86784+++ b/include/net/genetlink.h
86785@@ -130,7 +130,7 @@ struct genl_ops {
86786 u8 cmd;
86787 u8 internal_flags;
86788 u8 flags;
86789-};
86790+} __do_const;
86791
86792 int __genl_register_family(struct genl_family *family);
86793
86794diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86795index 0f712c0..cd762c4 100644
86796--- a/include/net/gro_cells.h
86797+++ b/include/net/gro_cells.h
86798@@ -27,7 +27,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86799 cell = this_cpu_ptr(gcells->cells);
86800
86801 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86802- atomic_long_inc(&dev->rx_dropped);
86803+ atomic_long_inc_unchecked(&dev->rx_dropped);
86804 kfree_skb(skb);
86805 return;
86806 }
86807diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86808index 5976bde..3a81660 100644
86809--- a/include/net/inet_connection_sock.h
86810+++ b/include/net/inet_connection_sock.h
86811@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86812 int (*bind_conflict)(const struct sock *sk,
86813 const struct inet_bind_bucket *tb, bool relax);
86814 void (*mtu_reduced)(struct sock *sk);
86815-};
86816+} __do_const;
86817
86818 /** inet_connection_sock - INET connection oriented sock
86819 *
86820diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86821index 80479ab..0c3f647 100644
86822--- a/include/net/inetpeer.h
86823+++ b/include/net/inetpeer.h
86824@@ -47,7 +47,7 @@ struct inet_peer {
86825 */
86826 union {
86827 struct {
86828- atomic_t rid; /* Frag reception counter */
86829+ atomic_unchecked_t rid; /* Frag reception counter */
86830 };
86831 struct rcu_head rcu;
86832 struct inet_peer *gc_next;
86833diff --git a/include/net/ip.h b/include/net/ip.h
86834index 6cc1eaf..14059b0 100644
86835--- a/include/net/ip.h
86836+++ b/include/net/ip.h
86837@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86838 }
86839 }
86840
86841-u32 ip_idents_reserve(u32 hash, int segs);
86842+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86843 void __ip_select_ident(struct iphdr *iph, int segs);
86844
86845 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86846diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86847index 5bd120e4..03fb812 100644
86848--- a/include/net/ip_fib.h
86849+++ b/include/net/ip_fib.h
86850@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86851
86852 #define FIB_RES_SADDR(net, res) \
86853 ((FIB_RES_NH(res).nh_saddr_genid == \
86854- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86855+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86856 FIB_RES_NH(res).nh_saddr : \
86857 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86858 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86859diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86860index 615b20b..fd4cbd8 100644
86861--- a/include/net/ip_vs.h
86862+++ b/include/net/ip_vs.h
86863@@ -534,7 +534,7 @@ struct ip_vs_conn {
86864 struct ip_vs_conn *control; /* Master control connection */
86865 atomic_t n_control; /* Number of controlled ones */
86866 struct ip_vs_dest *dest; /* real server */
86867- atomic_t in_pkts; /* incoming packet counter */
86868+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86869
86870 /* Packet transmitter for different forwarding methods. If it
86871 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86872@@ -682,7 +682,7 @@ struct ip_vs_dest {
86873 __be16 port; /* port number of the server */
86874 union nf_inet_addr addr; /* IP address of the server */
86875 volatile unsigned int flags; /* dest status flags */
86876- atomic_t conn_flags; /* flags to copy to conn */
86877+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86878 atomic_t weight; /* server weight */
86879
86880 atomic_t refcnt; /* reference counter */
86881@@ -928,11 +928,11 @@ struct netns_ipvs {
86882 /* ip_vs_lblc */
86883 int sysctl_lblc_expiration;
86884 struct ctl_table_header *lblc_ctl_header;
86885- struct ctl_table *lblc_ctl_table;
86886+ ctl_table_no_const *lblc_ctl_table;
86887 /* ip_vs_lblcr */
86888 int sysctl_lblcr_expiration;
86889 struct ctl_table_header *lblcr_ctl_header;
86890- struct ctl_table *lblcr_ctl_table;
86891+ ctl_table_no_const *lblcr_ctl_table;
86892 /* ip_vs_est */
86893 struct list_head est_list; /* estimator list */
86894 spinlock_t est_lock;
86895diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86896index 8d4f588..2e37ad2 100644
86897--- a/include/net/irda/ircomm_tty.h
86898+++ b/include/net/irda/ircomm_tty.h
86899@@ -33,6 +33,7 @@
86900 #include <linux/termios.h>
86901 #include <linux/timer.h>
86902 #include <linux/tty.h> /* struct tty_struct */
86903+#include <asm/local.h>
86904
86905 #include <net/irda/irias_object.h>
86906 #include <net/irda/ircomm_core.h>
86907diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86908index 714cc9a..ea05f3e 100644
86909--- a/include/net/iucv/af_iucv.h
86910+++ b/include/net/iucv/af_iucv.h
86911@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86912 struct iucv_sock_list {
86913 struct hlist_head head;
86914 rwlock_t lock;
86915- atomic_t autobind_name;
86916+ atomic_unchecked_t autobind_name;
86917 };
86918
86919 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86920diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86921index f3be818..bf46196 100644
86922--- a/include/net/llc_c_ac.h
86923+++ b/include/net/llc_c_ac.h
86924@@ -87,7 +87,7 @@
86925 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86926 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86927
86928-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86929+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86930
86931 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86932 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86933diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86934index 3948cf1..83b28c4 100644
86935--- a/include/net/llc_c_ev.h
86936+++ b/include/net/llc_c_ev.h
86937@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86938 return (struct llc_conn_state_ev *)skb->cb;
86939 }
86940
86941-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86942-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86943+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86944+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86945
86946 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86947 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86948diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86949index 48f3f89..0e92c50 100644
86950--- a/include/net/llc_c_st.h
86951+++ b/include/net/llc_c_st.h
86952@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86953 u8 next_state;
86954 const llc_conn_ev_qfyr_t *ev_qualifiers;
86955 const llc_conn_action_t *ev_actions;
86956-};
86957+} __do_const;
86958
86959 struct llc_conn_state {
86960 u8 current_state;
86961diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86962index a61b98c..aade1eb 100644
86963--- a/include/net/llc_s_ac.h
86964+++ b/include/net/llc_s_ac.h
86965@@ -23,7 +23,7 @@
86966 #define SAP_ACT_TEST_IND 9
86967
86968 /* All action functions must look like this */
86969-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86970+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86971
86972 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86973 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86974diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86975index c4359e2..76dbc4a 100644
86976--- a/include/net/llc_s_st.h
86977+++ b/include/net/llc_s_st.h
86978@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86979 llc_sap_ev_t ev;
86980 u8 next_state;
86981 const llc_sap_action_t *ev_actions;
86982-};
86983+} __do_const;
86984
86985 struct llc_sap_state {
86986 u8 curr_state;
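
Folding const into the llc_* function-pointer typedefs lets the existing state-transition tables move into .rodata without touching every declaration. A sketch with an illustrative action table:

#include <net/llc_c_ac.h>	/* llc_conn_action_t */

static int example_action(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

/* With 'typedef int (* const llc_conn_action_t)(...)', the elements are
 * const pointers, so an initialized table is immutable after build. */
static llc_conn_action_t example_actions[] = {
	example_action,
	NULL,
};
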
86987diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86988index d52914b..2b13cec 100644
86989--- a/include/net/mac80211.h
86990+++ b/include/net/mac80211.h
86991@@ -4915,7 +4915,7 @@ struct rate_control_ops {
86992 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86993
86994 u32 (*get_expected_throughput)(void *priv_sta);
86995-};
86996+} __do_const;
86997
86998 static inline int rate_supported(struct ieee80211_sta *sta,
86999 enum ieee80211_band band,
87000diff --git a/include/net/neighbour.h b/include/net/neighbour.h
87001index 76f7084..8f36e39 100644
87002--- a/include/net/neighbour.h
87003+++ b/include/net/neighbour.h
87004@@ -163,7 +163,7 @@ struct neigh_ops {
87005 void (*error_report)(struct neighbour *, struct sk_buff *);
87006 int (*output)(struct neighbour *, struct sk_buff *);
87007 int (*connected_output)(struct neighbour *, struct sk_buff *);
87008-};
87009+} __do_const;
87010
87011 struct pneigh_entry {
87012 struct pneigh_entry *next;
87013@@ -217,7 +217,7 @@ struct neigh_table {
87014 struct neigh_statistics __percpu *stats;
87015 struct neigh_hash_table __rcu *nht;
87016 struct pneigh_entry **phash_buckets;
87017-};
87018+} __randomize_layout;
87019
87020 enum {
87021 NEIGH_ARP_TABLE = 0,
87022diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
87023index 36faf49..6927638 100644
87024--- a/include/net/net_namespace.h
87025+++ b/include/net/net_namespace.h
87026@@ -131,8 +131,8 @@ struct net {
87027 struct netns_ipvs *ipvs;
87028 #endif
87029 struct sock *diag_nlsk;
87030- atomic_t fnhe_genid;
87031-};
87032+ atomic_unchecked_t fnhe_genid;
87033+} __randomize_layout;
87034
87035 #include <linux/seq_file_net.h>
87036
87037@@ -288,7 +288,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
87038 #define __net_init __init
87039 #define __net_exit __exit_refok
87040 #define __net_initdata __initdata
87041+#ifdef CONSTIFY_PLUGIN
87042 #define __net_initconst __initconst
87043+#else
87044+#define __net_initconst __initdata
87045+#endif
87046 #endif
87047
87048 int peernet2id(struct net *net, struct net *peer);
87049@@ -301,7 +305,7 @@ struct pernet_operations {
87050 void (*exit_batch)(struct list_head *net_exit_list);
87051 int *id;
87052 size_t size;
87053-};
87054+} __do_const;
87055
87056 /*
87057 * Use these carefully. If you implement a network device and it
87058@@ -349,12 +353,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
87059
87060 static inline int rt_genid_ipv4(struct net *net)
87061 {
87062- return atomic_read(&net->ipv4.rt_genid);
87063+ return atomic_read_unchecked(&net->ipv4.rt_genid);
87064 }
87065
87066 static inline void rt_genid_bump_ipv4(struct net *net)
87067 {
87068- atomic_inc(&net->ipv4.rt_genid);
87069+ atomic_inc_unchecked(&net->ipv4.rt_genid);
87070 }
87071
87072 extern void (*__fib6_flush_trees)(struct net *net);
87073@@ -381,12 +385,12 @@ static inline void rt_genid_bump_all(struct net *net)
87074
87075 static inline int fnhe_genid(struct net *net)
87076 {
87077- return atomic_read(&net->fnhe_genid);
87078+ return atomic_read_unchecked(&net->fnhe_genid);
87079 }
87080
87081 static inline void fnhe_genid_bump(struct net *net)
87082 {
87083- atomic_inc(&net->fnhe_genid);
87084+ atomic_inc_unchecked(&net->fnhe_genid);
87085 }
87086
87087 #endif /* __NET_NET_NAMESPACE_H */
87088diff --git a/include/net/netlink.h b/include/net/netlink.h
87089index e010ee8..405b9f4 100644
87090--- a/include/net/netlink.h
87091+++ b/include/net/netlink.h
87092@@ -518,7 +518,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
87093 {
87094 if (mark) {
87095 WARN_ON((unsigned char *) mark < skb->data);
87096- skb_trim(skb, (unsigned char *) mark - skb->data);
87097+ skb_trim(skb, (const unsigned char *) mark - skb->data);
87098 }
87099 }
87100
87101diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
87102index 29d6a94..235d3d84 100644
87103--- a/include/net/netns/conntrack.h
87104+++ b/include/net/netns/conntrack.h
87105@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
87106 struct nf_proto_net {
87107 #ifdef CONFIG_SYSCTL
87108 struct ctl_table_header *ctl_table_header;
87109- struct ctl_table *ctl_table;
87110+ ctl_table_no_const *ctl_table;
87111 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
87112 struct ctl_table_header *ctl_compat_header;
87113- struct ctl_table *ctl_compat_table;
87114+ ctl_table_no_const *ctl_compat_table;
87115 #endif
87116 #endif
87117 unsigned int users;
87118@@ -60,7 +60,7 @@ struct nf_ip_net {
87119 struct nf_icmp_net icmpv6;
87120 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
87121 struct ctl_table_header *ctl_table_header;
87122- struct ctl_table *ctl_table;
87123+ ctl_table_no_const *ctl_table;
87124 #endif
87125 };
87126
87127diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
87128index dbe2254..ed0c151 100644
87129--- a/include/net/netns/ipv4.h
87130+++ b/include/net/netns/ipv4.h
87131@@ -87,7 +87,7 @@ struct netns_ipv4 {
87132
87133 struct ping_group_range ping_group_range;
87134
87135- atomic_t dev_addr_genid;
87136+ atomic_unchecked_t dev_addr_genid;
87137
87138 #ifdef CONFIG_SYSCTL
87139 unsigned long *sysctl_local_reserved_ports;
87140@@ -101,6 +101,6 @@ struct netns_ipv4 {
87141 struct fib_rules_ops *mr_rules_ops;
87142 #endif
87143 #endif
87144- atomic_t rt_genid;
87145+ atomic_unchecked_t rt_genid;
87146 };
87147 #endif
87148diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
87149index 69ae41f..4f94868 100644
87150--- a/include/net/netns/ipv6.h
87151+++ b/include/net/netns/ipv6.h
87152@@ -75,8 +75,8 @@ struct netns_ipv6 {
87153 struct fib_rules_ops *mr6_rules_ops;
87154 #endif
87155 #endif
87156- atomic_t dev_addr_genid;
87157- atomic_t fib6_sernum;
87158+ atomic_unchecked_t dev_addr_genid;
87159+ atomic_unchecked_t fib6_sernum;
87160 };
87161
87162 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
87163diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
87164index 730d82a..045f2c4 100644
87165--- a/include/net/netns/xfrm.h
87166+++ b/include/net/netns/xfrm.h
87167@@ -78,7 +78,7 @@ struct netns_xfrm {
87168
87169 /* flow cache part */
87170 struct flow_cache flow_cache_global;
87171- atomic_t flow_cache_genid;
87172+ atomic_unchecked_t flow_cache_genid;
87173 struct list_head flow_cache_gc_list;
87174 spinlock_t flow_cache_gc_lock;
87175 struct work_struct flow_cache_gc_work;
87176diff --git a/include/net/ping.h b/include/net/ping.h
87177index cc16d41..664f40b 100644
87178--- a/include/net/ping.h
87179+++ b/include/net/ping.h
87180@@ -54,7 +54,7 @@ struct ping_iter_state {
87181
87182 extern struct proto ping_prot;
87183 #if IS_ENABLED(CONFIG_IPV6)
87184-extern struct pingv6_ops pingv6_ops;
87185+extern struct pingv6_ops *pingv6_ops;
87186 #endif
87187
87188 struct pingfakehdr {
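
Turning the pingv6_ops global from a struct into a pointer leaves only one word writable; the ipv6 side can publish a const, .rodata-resident ops table at init time instead of copying callbacks into a writable global. The pattern in outline (names illustrative):

#include <linux/init.h>

struct example_ops {
	int (*handler)(void);
};

static const struct example_ops real_ops = { .handler = NULL };

/* before: 'struct example_ops active_ops;' (entire struct writable)
 * after:  one writable pointer to a const table                     */
static const struct example_ops *active_ops;

static int __init example_register(void)
{
	active_ops = &real_ops;
	return 0;
}
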
87189diff --git a/include/net/protocol.h b/include/net/protocol.h
87190index d6fcc1f..ca277058 100644
87191--- a/include/net/protocol.h
87192+++ b/include/net/protocol.h
87193@@ -49,7 +49,7 @@ struct net_protocol {
87194 * socket lookup?
87195 */
87196 icmp_strict_tag_validation:1;
87197-};
87198+} __do_const;
87199
87200 #if IS_ENABLED(CONFIG_IPV6)
87201 struct inet6_protocol {
87202@@ -62,7 +62,7 @@ struct inet6_protocol {
87203 u8 type, u8 code, int offset,
87204 __be32 info);
87205 unsigned int flags; /* INET6_PROTO_xxx */
87206-};
87207+} __do_const;
87208
87209 #define INET6_PROTO_NOPOLICY 0x1
87210 #define INET6_PROTO_FINAL 0x2
87211diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
87212index 6c6d539..af70817 100644
87213--- a/include/net/rtnetlink.h
87214+++ b/include/net/rtnetlink.h
87215@@ -95,7 +95,7 @@ struct rtnl_link_ops {
87216 const struct net_device *dev,
87217 const struct net_device *slave_dev);
87218 struct net *(*get_link_net)(const struct net_device *dev);
87219-};
87220+} __do_const;
87221
87222 int __rtnl_link_register(struct rtnl_link_ops *ops);
87223 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
87224diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
87225index 4a5b9a3..ca27d73 100644
87226--- a/include/net/sctp/checksum.h
87227+++ b/include/net/sctp/checksum.h
87228@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
87229 unsigned int offset)
87230 {
87231 struct sctphdr *sh = sctp_hdr(skb);
87232- __le32 ret, old = sh->checksum;
87233- const struct skb_checksum_ops ops = {
87234+ __le32 ret, old = sh->checksum;
87235+ static const struct skb_checksum_ops ops = {
87236 .update = sctp_csum_update,
87237 .combine = sctp_csum_combine,
87238 };
87239diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
87240index 487ef34..d457f98 100644
87241--- a/include/net/sctp/sm.h
87242+++ b/include/net/sctp/sm.h
87243@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
87244 typedef struct {
87245 sctp_state_fn_t *fn;
87246 const char *name;
87247-} sctp_sm_table_entry_t;
87248+} __do_const sctp_sm_table_entry_t;
87249
87250 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
87251 * currently in use.
87252@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
87253 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
87254
87255 /* Extern declarations for major data structures. */
87256-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87257+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87258
87259
87260 /* Get the size of a DATA chunk payload. */
87261diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
87262index 2bb2fcf..d17c291 100644
87263--- a/include/net/sctp/structs.h
87264+++ b/include/net/sctp/structs.h
87265@@ -509,7 +509,7 @@ struct sctp_pf {
87266 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
87267 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
87268 struct sctp_af *af;
87269-};
87270+} __do_const;
87271
87272
87273 /* Structure to track chunk fragments that have been acked, but peer
87274diff --git a/include/net/sock.h b/include/net/sock.h
87275index e4079c2..79c5d3a 100644
87276--- a/include/net/sock.h
87277+++ b/include/net/sock.h
87278@@ -362,7 +362,7 @@ struct sock {
87279 unsigned int sk_napi_id;
87280 unsigned int sk_ll_usec;
87281 #endif
87282- atomic_t sk_drops;
87283+ atomic_unchecked_t sk_drops;
87284 int sk_rcvbuf;
87285
87286 struct sk_filter __rcu *sk_filter;
87287@@ -1039,7 +1039,7 @@ struct proto {
87288 void (*destroy_cgroup)(struct mem_cgroup *memcg);
87289 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
87290 #endif
87291-};
87292+} __randomize_layout;
87293
87294 /*
87295 * Bits in struct cg_proto.flags
87296@@ -1212,7 +1212,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
87297 page_counter_uncharge(&prot->memory_allocated, amt);
87298 }
87299
87300-static inline long
87301+static inline long __intentional_overflow(-1)
87302 sk_memory_allocated(const struct sock *sk)
87303 {
87304 struct proto *prot = sk->sk_prot;
87305@@ -1778,7 +1778,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
87306 }
87307
87308 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
87309- struct iov_iter *from, char *to,
87310+ struct iov_iter *from, unsigned char *to,
87311 int copy, int offset)
87312 {
87313 if (skb->ip_summed == CHECKSUM_NONE) {
87314@@ -2025,7 +2025,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
87315 }
87316 }
87317
87318-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87319+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87320
87321 /**
87322 * sk_page_frag - return an appropriate page_frag
87323diff --git a/include/net/tcp.h b/include/net/tcp.h
87324index 8d6b983..5813205 100644
87325--- a/include/net/tcp.h
87326+++ b/include/net/tcp.h
87327@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
87328 void tcp_xmit_retransmit_queue(struct sock *);
87329 void tcp_simple_retransmit(struct sock *);
87330 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
87331-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87332+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87333
87334 void tcp_send_probe0(struct sock *);
87335 void tcp_send_partial(struct sock *);
87336@@ -694,8 +694,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
87337 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
87338 */
87339 struct tcp_skb_cb {
87340- __u32 seq; /* Starting sequence number */
87341- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
87342+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
87343+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
87344 union {
87345 /* Note : tcp_tw_isn is used in input path only
87346 * (isn chosen by tcp_timewait_state_process())
87347@@ -720,7 +720,7 @@ struct tcp_skb_cb {
87348
87349 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
87350 /* 1 byte hole */
87351- __u32 ack_seq; /* Sequence number ACK'd */
87352+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
87353 union {
87354 struct inet_skb_parm h4;
87355 #if IS_ENABLED(CONFIG_IPV6)
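
TCP sequence numbers are modulo-2^32 by design, so arithmetic on seq, end_seq and ack_seq must be allowed to wrap; the __intentional_overflow(0) tags exempt these fields from size_overflow instrumentation. The kernel's own ordering idiom shows why wrapping is load-bearing here:

#include <linux/types.h>

/* Same idea as the before()/after() helpers in net/tcp.h: ordering of
 * 32-bit sequence numbers relies on wrapping subtraction. */
static inline int seq_before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;	/* wraps by design */
}
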
87356diff --git a/include/net/xfrm.h b/include/net/xfrm.h
87357index dc4865e..152ee4c 100644
87358--- a/include/net/xfrm.h
87359+++ b/include/net/xfrm.h
87360@@ -285,7 +285,6 @@ struct xfrm_dst;
87361 struct xfrm_policy_afinfo {
87362 unsigned short family;
87363 struct dst_ops *dst_ops;
87364- void (*garbage_collect)(struct net *net);
87365 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
87366 const xfrm_address_t *saddr,
87367 const xfrm_address_t *daddr);
87368@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
87369 struct net_device *dev,
87370 const struct flowi *fl);
87371 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
87372-};
87373+} __do_const;
87374
87375 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
87376 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
87377@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
87378 int (*transport_finish)(struct sk_buff *skb,
87379 int async);
87380 void (*local_error)(struct sk_buff *skb, u32 mtu);
87381-};
87382+} __do_const;
87383
87384 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
87385 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
87386@@ -437,7 +436,7 @@ struct xfrm_mode {
87387 struct module *owner;
87388 unsigned int encap;
87389 int flags;
87390-};
87391+} __do_const;
87392
87393 /* Flags for xfrm_mode. */
87394 enum {
87395@@ -534,7 +533,7 @@ struct xfrm_policy {
87396 struct timer_list timer;
87397
87398 struct flow_cache_object flo;
87399- atomic_t genid;
87400+ atomic_unchecked_t genid;
87401 u32 priority;
87402 u32 index;
87403 struct xfrm_mark mark;
87404@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
87405 }
87406
87407 void xfrm_garbage_collect(struct net *net);
87408+void xfrm_garbage_collect_deferred(struct net *net);
87409
87410 #else
87411
87412@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
87413 static inline void xfrm_garbage_collect(struct net *net)
87414 {
87415 }
87416+static inline void xfrm_garbage_collect_deferred(struct net *net)
87417+{
87418+}
87419 #endif
87420
87421 static __inline__
87422diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
87423index 1017e0b..227aa4d 100644
87424--- a/include/rdma/iw_cm.h
87425+++ b/include/rdma/iw_cm.h
87426@@ -122,7 +122,7 @@ struct iw_cm_verbs {
87427 int backlog);
87428
87429 int (*destroy_listen)(struct iw_cm_id *cm_id);
87430-};
87431+} __no_const;
87432
87433 /**
87434 * iw_create_cm_id - Create an IW CM identifier.
87435diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
87436index 93d14da..734b3d8 100644
87437--- a/include/scsi/libfc.h
87438+++ b/include/scsi/libfc.h
87439@@ -771,6 +771,7 @@ struct libfc_function_template {
87440 */
87441 void (*disc_stop_final) (struct fc_lport *);
87442 };
87443+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
87444
87445 /**
87446 * struct fc_disc - Discovery context
87447@@ -875,7 +876,7 @@ struct fc_lport {
87448 struct fc_vport *vport;
87449
87450 /* Operational Information */
87451- struct libfc_function_template tt;
87452+ libfc_function_template_no_const tt;
87453 u8 link_up;
87454 u8 qfull;
87455 enum fc_lport_state state;
87456diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
87457index a4c9336..d6f8f34 100644
87458--- a/include/scsi/scsi_device.h
87459+++ b/include/scsi/scsi_device.h
87460@@ -185,9 +185,9 @@ struct scsi_device {
87461 unsigned int max_device_blocked; /* what device_blocked counts down from */
87462 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
87463
87464- atomic_t iorequest_cnt;
87465- atomic_t iodone_cnt;
87466- atomic_t ioerr_cnt;
87467+ atomic_unchecked_t iorequest_cnt;
87468+ atomic_unchecked_t iodone_cnt;
87469+ atomic_unchecked_t ioerr_cnt;
87470
87471 struct device sdev_gendev,
87472 sdev_dev;
87473diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87474index 007a0bc..7188db8 100644
87475--- a/include/scsi/scsi_transport_fc.h
87476+++ b/include/scsi/scsi_transport_fc.h
87477@@ -756,7 +756,8 @@ struct fc_function_template {
87478 unsigned long show_host_system_hostname:1;
87479
87480 unsigned long disable_target_scan:1;
87481-};
87482+} __do_const;
87483+typedef struct fc_function_template __no_const fc_function_template_no_const;
87484
87485
87486 /**
87487diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87488index f48089d..73abe48 100644
87489--- a/include/sound/compress_driver.h
87490+++ b/include/sound/compress_driver.h
87491@@ -130,7 +130,7 @@ struct snd_compr_ops {
87492 struct snd_compr_caps *caps);
87493 int (*get_codec_caps) (struct snd_compr_stream *stream,
87494 struct snd_compr_codec_caps *codec);
87495-};
87496+} __no_const;
87497
87498 /**
87499 * struct snd_compr: Compressed device
87500diff --git a/include/sound/soc.h b/include/sound/soc.h
87501index 0d1ade1..34e77d3 100644
87502--- a/include/sound/soc.h
87503+++ b/include/sound/soc.h
87504@@ -856,7 +856,7 @@ struct snd_soc_codec_driver {
87505 enum snd_soc_dapm_type, int);
87506
87507 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
87508-};
87509+} __do_const;
87510
87511 /* SoC platform interface */
87512 struct snd_soc_platform_driver {
87513@@ -883,7 +883,7 @@ struct snd_soc_platform_driver {
87514 const struct snd_compr_ops *compr_ops;
87515
87516 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87517-};
87518+} __do_const;
87519
87520 struct snd_soc_dai_link_component {
87521 const char *name;
87522diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87523index 672150b..9d4bec4 100644
87524--- a/include/target/target_core_base.h
87525+++ b/include/target/target_core_base.h
87526@@ -767,7 +767,7 @@ struct se_device {
87527 atomic_long_t write_bytes;
87528 /* Active commands on this virtual SE device */
87529 atomic_t simple_cmds;
87530- atomic_t dev_ordered_id;
87531+ atomic_unchecked_t dev_ordered_id;
87532 atomic_t dev_ordered_sync;
87533 atomic_t dev_qf_count;
87534 int export_count;
87535diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87536new file mode 100644
87537index 0000000..fb634b7
87538--- /dev/null
87539+++ b/include/trace/events/fs.h
87540@@ -0,0 +1,53 @@
87541+#undef TRACE_SYSTEM
87542+#define TRACE_SYSTEM fs
87543+
87544+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87545+#define _TRACE_FS_H
87546+
87547+#include <linux/fs.h>
87548+#include <linux/tracepoint.h>
87549+
87550+TRACE_EVENT(do_sys_open,
87551+
87552+ TP_PROTO(const char *filename, int flags, int mode),
87553+
87554+ TP_ARGS(filename, flags, mode),
87555+
87556+ TP_STRUCT__entry(
87557+ __string( filename, filename )
87558+ __field( int, flags )
87559+ __field( int, mode )
87560+ ),
87561+
87562+ TP_fast_assign(
87563+ __assign_str(filename, filename);
87564+ __entry->flags = flags;
87565+ __entry->mode = mode;
87566+ ),
87567+
87568+ TP_printk("\"%s\" %x %o",
87569+ __get_str(filename), __entry->flags, __entry->mode)
87570+);
87571+
87572+TRACE_EVENT(open_exec,
87573+
87574+ TP_PROTO(const char *filename),
87575+
87576+ TP_ARGS(filename),
87577+
87578+ TP_STRUCT__entry(
87579+ __string( filename, filename )
87580+ ),
87581+
87582+ TP_fast_assign(
87583+ __assign_str(filename, filename);
87584+ ),
87585+
87586+ TP_printk("\"%s\"",
87587+ __get_str(filename))
87588+);
87589+
87590+#endif /* _TRACE_FS_H */
87591+
87592+/* This part must be outside protection */
87593+#include <trace/define_trace.h>
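
The new trace/events/fs.h generates trace_do_sys_open() and trace_open_exec() hooks; the corresponding call sites (presumably in fs/open.c and fs/exec.c) are outside this excerpt. A minimal usage sketch of the generated API:

/* Exactly one .c file defines CREATE_TRACE_POINTS before the include so
 * the tracepoint bodies are emitted there. */
#define CREATE_TRACE_POINTS
#include <trace/events/fs.h>

static long example_open_path(const char *filename, int flags, int mode)
{
	trace_do_sys_open(filename, flags, mode);	/* generated hook */
	return 0;
}
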
87594diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87595index 3608beb..df39d8a 100644
87596--- a/include/trace/events/irq.h
87597+++ b/include/trace/events/irq.h
87598@@ -36,7 +36,7 @@ struct softirq_action;
87599 */
87600 TRACE_EVENT(irq_handler_entry,
87601
87602- TP_PROTO(int irq, struct irqaction *action),
87603+ TP_PROTO(int irq, const struct irqaction *action),
87604
87605 TP_ARGS(irq, action),
87606
87607@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87608 */
87609 TRACE_EVENT(irq_handler_exit,
87610
87611- TP_PROTO(int irq, struct irqaction *action, int ret),
87612+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87613
87614 TP_ARGS(irq, action, ret),
87615
87616diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87617index 7caf44c..23c6f27 100644
87618--- a/include/uapi/linux/a.out.h
87619+++ b/include/uapi/linux/a.out.h
87620@@ -39,6 +39,14 @@ enum machine_type {
87621 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87622 };
87623
87624+/* Constants for the N_FLAGS field */
87625+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87626+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87627+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87628+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87629+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87630+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87631+
87632 #if !defined (N_MAGIC)
87633 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87634 #endif
87635diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87636index 22b6ad3..aeba37e 100644
87637--- a/include/uapi/linux/bcache.h
87638+++ b/include/uapi/linux/bcache.h
87639@@ -5,6 +5,7 @@
87640 * Bcache on disk data structures
87641 */
87642
87643+#include <linux/compiler.h>
87644 #include <asm/types.h>
87645
87646 #define BITMASK(name, type, field, offset, size) \
87647@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87648 /* Btree keys - all units are in sectors */
87649
87650 struct bkey {
87651- __u64 high;
87652- __u64 low;
87653+ __u64 high __intentional_overflow(-1);
87654+ __u64 low __intentional_overflow(-1);
87655 __u64 ptr[];
87656 };
87657
87658diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87659index d876736..ccce5c0 100644
87660--- a/include/uapi/linux/byteorder/little_endian.h
87661+++ b/include/uapi/linux/byteorder/little_endian.h
87662@@ -42,51 +42,51 @@
87663
87664 static inline __le64 __cpu_to_le64p(const __u64 *p)
87665 {
87666- return (__force __le64)*p;
87667+ return (__force const __le64)*p;
87668 }
87669-static inline __u64 __le64_to_cpup(const __le64 *p)
87670+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87671 {
87672- return (__force __u64)*p;
87673+ return (__force const __u64)*p;
87674 }
87675 static inline __le32 __cpu_to_le32p(const __u32 *p)
87676 {
87677- return (__force __le32)*p;
87678+ return (__force const __le32)*p;
87679 }
87680 static inline __u32 __le32_to_cpup(const __le32 *p)
87681 {
87682- return (__force __u32)*p;
87683+ return (__force const __u32)*p;
87684 }
87685 static inline __le16 __cpu_to_le16p(const __u16 *p)
87686 {
87687- return (__force __le16)*p;
87688+ return (__force const __le16)*p;
87689 }
87690 static inline __u16 __le16_to_cpup(const __le16 *p)
87691 {
87692- return (__force __u16)*p;
87693+ return (__force const __u16)*p;
87694 }
87695 static inline __be64 __cpu_to_be64p(const __u64 *p)
87696 {
87697- return (__force __be64)__swab64p(p);
87698+ return (__force const __be64)__swab64p(p);
87699 }
87700 static inline __u64 __be64_to_cpup(const __be64 *p)
87701 {
87702- return __swab64p((__u64 *)p);
87703+ return __swab64p((const __u64 *)p);
87704 }
87705 static inline __be32 __cpu_to_be32p(const __u32 *p)
87706 {
87707- return (__force __be32)__swab32p(p);
87708+ return (__force const __be32)__swab32p(p);
87709 }
87710-static inline __u32 __be32_to_cpup(const __be32 *p)
87711+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87712 {
87713- return __swab32p((__u32 *)p);
87714+ return __swab32p((const __u32 *)p);
87715 }
87716 static inline __be16 __cpu_to_be16p(const __u16 *p)
87717 {
87718- return (__force __be16)__swab16p(p);
87719+ return (__force const __be16)__swab16p(p);
87720 }
87721 static inline __u16 __be16_to_cpup(const __be16 *p)
87722 {
87723- return __swab16p((__u16 *)p);
87724+ return __swab16p((const __u16 *)p);
87725 }
87726 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87727 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87728diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87729index 71e1d0e..6cc9caf 100644
87730--- a/include/uapi/linux/elf.h
87731+++ b/include/uapi/linux/elf.h
87732@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87733 #define PT_GNU_EH_FRAME 0x6474e550
87734
87735 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87736+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87737+
87738+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87739+
87740+/* Constants for the e_flags field */
87741+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87742+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87743+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87744+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87745+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87746+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87747
87748 /*
87749 * Extended Numbering
87750@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87751 #define DT_DEBUG 21
87752 #define DT_TEXTREL 22
87753 #define DT_JMPREL 23
87754+#define DT_FLAGS 30
87755+ #define DF_TEXTREL 0x00000004
87756 #define DT_ENCODING 32
87757 #define OLD_DT_LOOS 0x60000000
87758 #define DT_LOOS 0x6000000d
87759@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87760 #define PF_W 0x2
87761 #define PF_X 0x1
87762
87763+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87764+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87765+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87766+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87767+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87768+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87769+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87770+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87771+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87772+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87773+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87774+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87775+
87776 typedef struct elf32_phdr{
87777 Elf32_Word p_type;
87778 Elf32_Off p_offset;
87779@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87780 #define EI_OSABI 7
87781 #define EI_PAD 8
87782
87783+#define EI_PAX 14
87784+
87785 #define ELFMAG0 0x7f /* EI_MAG */
87786 #define ELFMAG1 'E'
87787 #define ELFMAG2 'L'
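
PT_PAX_FLAGS introduces a dedicated program header whose p_flags carry the PF_*/PF_NO* pairs defined above, and the ELF loader reads it to decide per-binary PaX policy. A simplified sketch of the parse step; the real logic would sit in the patch's fs/binfmt_elf.c changes, which are not shown here:

#include <linux/elf.h>
#include <linux/types.h>

/* Simplified: scan the program headers for the PaX flags entry. */
static u32 pax_flags_from_phdrs(const struct elf32_phdr *phdr, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (phdr[i].p_type == PT_PAX_FLAGS)
			return phdr[i].p_flags;	/* PF_PAGEEXEC, PF_MPROTECT, ... */
	return 0;
}
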
87788diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87789index aa169c4..6a2771d 100644
87790--- a/include/uapi/linux/personality.h
87791+++ b/include/uapi/linux/personality.h
87792@@ -30,6 +30,7 @@ enum {
87793 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87794 ADDR_NO_RANDOMIZE | \
87795 ADDR_COMPAT_LAYOUT | \
87796+ ADDR_LIMIT_3GB | \
87797 MMAP_PAGE_ZERO)
87798
87799 /*
87800diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87801index 7530e74..e714828 100644
87802--- a/include/uapi/linux/screen_info.h
87803+++ b/include/uapi/linux/screen_info.h
87804@@ -43,7 +43,8 @@ struct screen_info {
87805 __u16 pages; /* 0x32 */
87806 __u16 vesa_attributes; /* 0x34 */
87807 __u32 capabilities; /* 0x36 */
87808- __u8 _reserved[6]; /* 0x3a */
87809+ __u16 vesapm_size; /* 0x3a */
87810+ __u8 _reserved[4]; /* 0x3c */
87811 } __attribute__((packed));
87812
87813 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87814diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87815index 0e011eb..82681b1 100644
87816--- a/include/uapi/linux/swab.h
87817+++ b/include/uapi/linux/swab.h
87818@@ -43,7 +43,7 @@
87819 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87820 */
87821
87822-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87823+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87824 {
87825 #ifdef __HAVE_BUILTIN_BSWAP16__
87826 return __builtin_bswap16(val);
87827@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87828 #endif
87829 }
87830
87831-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87832+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87833 {
87834 #ifdef __HAVE_BUILTIN_BSWAP32__
87835 return __builtin_bswap32(val);
87836@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87837 #endif
87838 }
87839
87840-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87841+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87842 {
87843 #ifdef __HAVE_BUILTIN_BSWAP64__
87844 return __builtin_bswap64(val);
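
Byte swaps are pure bit permutations, so flagging them for "overflow" would be noise; hence the __intentional_overflow(-1) tags on the __fswab helpers. For example:

#include <linux/types.h>

/* A byte swap only permutes bytes, never widens or overflows: */
static inline __u32 swab32_demo(void)
{
	return __builtin_bswap32(0x11223344u);	/* == 0x44332211 */
}
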
87845diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87846index 1590c49..5eab462 100644
87847--- a/include/uapi/linux/xattr.h
87848+++ b/include/uapi/linux/xattr.h
87849@@ -73,5 +73,9 @@
87850 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87851 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87852
87853+/* User namespace */
87854+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87855+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87856+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87857
87858 #endif /* _UAPI_LINUX_XATTR_H */
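
XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", giving userspace a filesystem-portable way to mark binaries. A hedged userspace sketch; the flag-string format the kernel-side pax_getxattr() accepts is an assumption not established by this header:

/* Userspace sketch: tag a binary with PaX flags via the new xattr name. */
#include <string.h>
#include <sys/xattr.h>

int set_pax_flags(const char *path, const char *flags)
{
	/* value format is tool-defined, e.g. "em"; assumption here */
	return setxattr(path, "user.pax.flags", flags, strlen(flags), 0);
}
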
87859diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87860index f9466fa..f4e2b81 100644
87861--- a/include/video/udlfb.h
87862+++ b/include/video/udlfb.h
87863@@ -53,10 +53,10 @@ struct dlfb_data {
87864 u32 pseudo_palette[256];
87865 int blank_mode; /*one of FB_BLANK_ */
87866 /* blit-only rendering path metrics, exposed through sysfs */
87867- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87868- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87869- atomic_t bytes_sent; /* to usb, after compression including overhead */
87870- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87871+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87872+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87873+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87874+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87875 };
87876
87877 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87878diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87879index 30f5362..8ed8ac9 100644
87880--- a/include/video/uvesafb.h
87881+++ b/include/video/uvesafb.h
87882@@ -122,6 +122,7 @@ struct uvesafb_par {
87883 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87884 u8 pmi_setpal; /* PMI for palette changes */
87885 u16 *pmi_base; /* protected mode interface location */
87886+ u8 *pmi_code; /* protected mode code location */
87887 void *pmi_start;
87888 void *pmi_pal;
87889 u8 *vbe_state_orig; /*
87890diff --git a/init/Kconfig b/init/Kconfig
87891index f5dbc6d..8259396 100644
87892--- a/init/Kconfig
87893+++ b/init/Kconfig
87894@@ -1136,6 +1136,7 @@ endif # CGROUPS
87895
87896 config CHECKPOINT_RESTORE
87897 bool "Checkpoint/restore support" if EXPERT
87898+ depends on !GRKERNSEC
87899 default n
87900 help
87901 Enables additional kernel features for the sake of checkpoint/restore.
87902@@ -1646,7 +1647,7 @@ config SLUB_DEBUG
87903
87904 config COMPAT_BRK
87905 bool "Disable heap randomization"
87906- default y
87907+ default n
87908 help
87909 Randomizing heap placement makes heap exploits harder, but it
87910 also breaks ancient binaries (including anything libc5 based).
87911@@ -1977,7 +1978,7 @@ config INIT_ALL_POSSIBLE
87912 config STOP_MACHINE
87913 bool
87914 default y
87915- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87916+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87917 help
87918 Need stop_machine() primitive.
87919
87920diff --git a/init/Makefile b/init/Makefile
87921index 7bc47ee..6da2dc7 100644
87922--- a/init/Makefile
87923+++ b/init/Makefile
87924@@ -2,6 +2,9 @@
87925 # Makefile for the linux kernel.
87926 #
87927
87928+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87929+asflags-y := $(GCC_PLUGINS_AFLAGS)
87930+
87931 obj-y := main.o version.o mounts.o
87932 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87933 obj-y += noinitramfs.o
87934diff --git a/init/do_mounts.c b/init/do_mounts.c
87935index eb41008..f5dbbf9 100644
87936--- a/init/do_mounts.c
87937+++ b/init/do_mounts.c
87938@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87939 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87940 {
87941 struct super_block *s;
87942- int err = sys_mount(name, "/root", fs, flags, data);
87943+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87944 if (err)
87945 return err;
87946
87947- sys_chdir("/root");
87948+ sys_chdir((const char __force_user *)"/root");
87949 s = current->fs->pwd.dentry->d_sb;
87950 ROOT_DEV = s->s_dev;
87951 printk(KERN_INFO
87952@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87953 va_start(args, fmt);
87954 vsprintf(buf, fmt, args);
87955 va_end(args);
87956- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87957+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87958 if (fd >= 0) {
87959 sys_ioctl(fd, FDEJECT, 0);
87960 sys_close(fd);
87961 }
87962 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87963- fd = sys_open("/dev/console", O_RDWR, 0);
87964+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87965 if (fd >= 0) {
87966 sys_ioctl(fd, TCGETS, (long)&termios);
87967 termios.c_lflag &= ~ICANON;
87968 sys_ioctl(fd, TCSETSF, (long)&termios);
87969- sys_read(fd, &c, 1);
87970+ sys_read(fd, (char __user *)&c, 1);
87971 termios.c_lflag |= ICANON;
87972 sys_ioctl(fd, TCSETSF, (long)&termios);
87973 sys_close(fd);
87974@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87975 mount_root();
87976 out:
87977 devtmpfs_mount("dev");
87978- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87979- sys_chroot(".");
87980+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87981+ sys_chroot((const char __force_user *)".");
87982 }
87983
87984 static bool is_tmpfs;
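
The __force_user casts recur through all of the init/ hunks below: the patch hardens the kernel/user address-space split, so even in-kernel callers of syscall bodies must explicitly cast kernel strings to user pointers. A plausible fallback shape for the annotation, assuming the sparse-style address-space pattern (the patch's compiler.h hunk is not in this excerpt):

/* Assumed shape of the annotation (illustrative, not the patch's text): */
#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif
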
87985diff --git a/init/do_mounts.h b/init/do_mounts.h
87986index f5b978a..69dbfe8 100644
87987--- a/init/do_mounts.h
87988+++ b/init/do_mounts.h
87989@@ -15,15 +15,15 @@ extern int root_mountflags;
87990
87991 static inline int create_dev(char *name, dev_t dev)
87992 {
87993- sys_unlink(name);
87994- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87995+ sys_unlink((char __force_user *)name);
87996+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87997 }
87998
87999 #if BITS_PER_LONG == 32
88000 static inline u32 bstat(char *name)
88001 {
88002 struct stat64 stat;
88003- if (sys_stat64(name, &stat) != 0)
88004+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
88005 return 0;
88006 if (!S_ISBLK(stat.st_mode))
88007 return 0;
88008@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
88009 static inline u32 bstat(char *name)
88010 {
88011 struct stat stat;
88012- if (sys_newstat(name, &stat) != 0)
88013+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
88014 return 0;
88015 if (!S_ISBLK(stat.st_mode))
88016 return 0;
88017diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
88018index 3e0878e..8a9d7a0 100644
88019--- a/init/do_mounts_initrd.c
88020+++ b/init/do_mounts_initrd.c
88021@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
88022 {
88023 sys_unshare(CLONE_FS | CLONE_FILES);
88024 /* stdin/stdout/stderr for /linuxrc */
88025- sys_open("/dev/console", O_RDWR, 0);
88026+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
88027 sys_dup(0);
88028 sys_dup(0);
88029 /* move initrd over / and chdir/chroot in initrd root */
88030- sys_chdir("/root");
88031- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88032- sys_chroot(".");
88033+ sys_chdir((const char __force_user *)"/root");
88034+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88035+ sys_chroot((const char __force_user *)".");
88036 sys_setsid();
88037 return 0;
88038 }
88039@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
88040 create_dev("/dev/root.old", Root_RAM0);
88041 /* mount initrd on rootfs' /root */
88042 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
88043- sys_mkdir("/old", 0700);
88044- sys_chdir("/old");
88045+ sys_mkdir((const char __force_user *)"/old", 0700);
88046+ sys_chdir((const char __force_user *)"/old");
88047
88048 /* try loading default modules from initrd */
88049 load_default_modules();
88050@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
88051 current->flags &= ~PF_FREEZER_SKIP;
88052
88053 /* move initrd to rootfs' /old */
88054- sys_mount("..", ".", NULL, MS_MOVE, NULL);
88055+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
88056 /* switch root and cwd back to / of rootfs */
88057- sys_chroot("..");
88058+ sys_chroot((const char __force_user *)"..");
88059
88060 if (new_decode_dev(real_root_dev) == Root_RAM0) {
88061- sys_chdir("/old");
88062+ sys_chdir((const char __force_user *)"/old");
88063 return;
88064 }
88065
88066- sys_chdir("/");
88067+ sys_chdir((const char __force_user *)"/");
88068 ROOT_DEV = new_decode_dev(real_root_dev);
88069 mount_root();
88070
88071 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
88072- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
88073+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
88074 if (!error)
88075 printk("okay\n");
88076 else {
88077- int fd = sys_open("/dev/root.old", O_RDWR, 0);
88078+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
88079 if (error == -ENOENT)
88080 printk("/initrd does not exist. Ignored.\n");
88081 else
88082 printk("failed\n");
88083 printk(KERN_NOTICE "Unmounting old root\n");
88084- sys_umount("/old", MNT_DETACH);
88085+ sys_umount((char __force_user *)"/old", MNT_DETACH);
88086 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
88087 if (fd < 0) {
88088 error = fd;
88089@@ -127,11 +127,11 @@ int __init initrd_load(void)
88090 * mounted in the normal path.
88091 */
88092 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
88093- sys_unlink("/initrd.image");
88094+ sys_unlink((const char __force_user *)"/initrd.image");
88095 handle_initrd();
88096 return 1;
88097 }
88098 }
88099- sys_unlink("/initrd.image");
88100+ sys_unlink((const char __force_user *)"/initrd.image");
88101 return 0;
88102 }
88103diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
88104index 8cb6db5..d729f50 100644
88105--- a/init/do_mounts_md.c
88106+++ b/init/do_mounts_md.c
88107@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
88108 partitioned ? "_d" : "", minor,
88109 md_setup_args[ent].device_names);
88110
88111- fd = sys_open(name, 0, 0);
88112+ fd = sys_open((char __force_user *)name, 0, 0);
88113 if (fd < 0) {
88114 printk(KERN_ERR "md: open failed - cannot start "
88115 "array %s\n", name);
88116@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
88117 * array without it
88118 */
88119 sys_close(fd);
88120- fd = sys_open(name, 0, 0);
88121+ fd = sys_open((char __force_user *)name, 0, 0);
88122 sys_ioctl(fd, BLKRRPART, 0);
88123 }
88124 sys_close(fd);
88125@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
88126
88127 wait_for_device_probe();
88128
88129- fd = sys_open("/dev/md0", 0, 0);
88130+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
88131 if (fd >= 0) {
88132 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
88133 sys_close(fd);
88134diff --git a/init/init_task.c b/init/init_task.c
88135index ba0a7f36..2bcf1d5 100644
88136--- a/init/init_task.c
88137+++ b/init/init_task.c
88138@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
88139 * Initial thread structure. Alignment of this is handled by a special
88140 * linker map entry.
88141 */
88142+#ifdef CONFIG_X86
88143+union thread_union init_thread_union __init_task_data;
88144+#else
88145 union thread_union init_thread_union __init_task_data =
88146 { INIT_THREAD_INFO(init_task) };
88147+#endif
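The init_task.c hunk drops the INIT_THREAD_INFO initializer on x86 because this patch no longer keeps thread_info at the bottom of the kernel stack there (an arrangement mainline later adopted as THREAD_INFO_IN_TASK); other architectures still depend on the classic union layout. A standalone sketch of that classic layout and the pointer-masking trick it enables, with local stand-in types and an assumed THREAD_SIZE:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192                 /* assumed example value */

struct thread_info { int cpu; unsigned long flags; };

union thread_union {
	struct thread_info thread_info;  /* sits at the stack's low end */
	unsigned long stack[THREAD_SIZE / sizeof(unsigned long)];
};

static union thread_union init_thread_union
	__attribute__((aligned(THREAD_SIZE))) =
	{ .thread_info = { .cpu = 0, .flags = 0 } };

/* classic derivation: mask any in-stack pointer down to the union base */
static struct thread_info *thread_info_of(void *sp)
{
	return (struct thread_info *)
		((uintptr_t)sp & ~(uintptr_t)(THREAD_SIZE - 1));
}

int main(void)
{
	void *mid = &init_thread_union.stack[100];

	printf("union %p -> thread_info %p (cpu=%d)\n",
	       (void *)&init_thread_union, (void *)thread_info_of(mid),
	       thread_info_of(mid)->cpu);
	return 0;
}

Once thread_info moves off the stack, there is nothing meaningful left to initialize in the union, hence the bare x86 definition.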
88148diff --git a/init/initramfs.c b/init/initramfs.c
88149index ad1bd77..dca2c1b 100644
88150--- a/init/initramfs.c
88151+++ b/init/initramfs.c
88152@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
88153
88154 /* sys_write can write at most MAX_RW_COUNT aka 2G-4K bytes per call */
88155 while (count) {
88156- ssize_t rv = sys_write(fd, p, count);
88157+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
88158
88159 if (rv < 0) {
88160 if (rv == -EINTR || rv == -EAGAIN)
88161@@ -107,7 +107,7 @@ static void __init free_hash(void)
88162 }
88163 }
88164
88165-static long __init do_utime(char *filename, time_t mtime)
88166+static long __init do_utime(char __force_user *filename, time_t mtime)
88167 {
88168 struct timespec t[2];
88169
88170@@ -142,7 +142,7 @@ static void __init dir_utime(void)
88171 struct dir_entry *de, *tmp;
88172 list_for_each_entry_safe(de, tmp, &dir_list, list) {
88173 list_del(&de->list);
88174- do_utime(de->name, de->mtime);
88175+ do_utime((char __force_user *)de->name, de->mtime);
88176 kfree(de->name);
88177 kfree(de);
88178 }
88179@@ -304,7 +304,7 @@ static int __init maybe_link(void)
88180 if (nlink >= 2) {
88181 char *old = find_link(major, minor, ino, mode, collected);
88182 if (old)
88183- return (sys_link(old, collected) < 0) ? -1 : 1;
88184+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
88185 }
88186 return 0;
88187 }
88188@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
88189 {
88190 struct stat st;
88191
88192- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
88193+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
88194 if (S_ISDIR(st.st_mode))
88195- sys_rmdir(path);
88196+ sys_rmdir((char __force_user *)path);
88197 else
88198- sys_unlink(path);
88199+ sys_unlink((char __force_user *)path);
88200 }
88201 }
88202
88203@@ -338,7 +338,7 @@ static int __init do_name(void)
88204 int openflags = O_WRONLY|O_CREAT;
88205 if (ml != 1)
88206 openflags |= O_TRUNC;
88207- wfd = sys_open(collected, openflags, mode);
88208+ wfd = sys_open((char __force_user *)collected, openflags, mode);
88209
88210 if (wfd >= 0) {
88211 sys_fchown(wfd, uid, gid);
88212@@ -350,17 +350,17 @@ static int __init do_name(void)
88213 }
88214 }
88215 } else if (S_ISDIR(mode)) {
88216- sys_mkdir(collected, mode);
88217- sys_chown(collected, uid, gid);
88218- sys_chmod(collected, mode);
88219+ sys_mkdir((char __force_user *)collected, mode);
88220+ sys_chown((char __force_user *)collected, uid, gid);
88221+ sys_chmod((char __force_user *)collected, mode);
88222 dir_add(collected, mtime);
88223 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
88224 S_ISFIFO(mode) || S_ISSOCK(mode)) {
88225 if (maybe_link() == 0) {
88226- sys_mknod(collected, mode, rdev);
88227- sys_chown(collected, uid, gid);
88228- sys_chmod(collected, mode);
88229- do_utime(collected, mtime);
88230+ sys_mknod((char __force_user *)collected, mode, rdev);
88231+ sys_chown((char __force_user *)collected, uid, gid);
88232+ sys_chmod((char __force_user *)collected, mode);
88233+ do_utime((char __force_user *)collected, mtime);
88234 }
88235 }
88236 return 0;
88237@@ -372,7 +372,7 @@ static int __init do_copy(void)
88238 if (xwrite(wfd, victim, body_len) != body_len)
88239 error("write error");
88240 sys_close(wfd);
88241- do_utime(vcollected, mtime);
88242+ do_utime((char __force_user *)vcollected, mtime);
88243 kfree(vcollected);
88244 eat(body_len);
88245 state = SkipIt;
88246@@ -390,9 +390,9 @@ static int __init do_symlink(void)
88247 {
88248 collected[N_ALIGN(name_len) + body_len] = '\0';
88249 clean_path(collected, 0);
88250- sys_symlink(collected + N_ALIGN(name_len), collected);
88251- sys_lchown(collected, uid, gid);
88252- do_utime(collected, mtime);
88253+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
88254+ sys_lchown((char __force_user *)collected, uid, gid);
88255+ do_utime((char __force_user *)collected, mtime);
88256 state = SkipIt;
88257 next_state = Reset;
88258 return 0;
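The xwrite() hunk only adds a cast, but the loop it lands in is a pattern worth keeping in view: write-style calls may complete partially or fail transiently, so the caller must retry until everything is out. A userspace rendition of the same shape (write(2) in place of sys_write(); errno in place of the kernel's negative return values):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static ssize_t xwrite(int fd, const char *p, size_t count)
{
	ssize_t total = 0;

	while (count) {
		ssize_t rv = write(fd, p, count);

		if (rv < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;        /* transient: retry */
			return total ? total : -1;
		} else if (rv == 0) {
			break;                   /* no forward progress */
		}
		p += rv;
		total += rv;
		count -= rv;
	}
	return total;
}

int main(void)
{
	const char msg[] = "initramfs-style write loop\n";

	return xwrite(STDOUT_FILENO, msg, strlen(msg)) < 0;
}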
88259diff --git a/init/main.c b/init/main.c
88260index 6f0f1c5f..a542824 100644
88261--- a/init/main.c
88262+++ b/init/main.c
88263@@ -96,6 +96,8 @@ extern void radix_tree_init(void);
88264 static inline void mark_rodata_ro(void) { }
88265 #endif
88266
88267+extern void grsecurity_init(void);
88268+
88269 /*
88270 * Debug helper: via this flag we know that we are in 'early bootup code'
88271 * where only the boot processor is running with IRQ disabled. This means
88272@@ -157,6 +159,85 @@ static int __init set_reset_devices(char *str)
88273
88274 __setup("reset_devices", set_reset_devices);
88275
88276+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
88277+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
88278+static int __init setup_grsec_proc_gid(char *str)
88279+{
88280+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
88281+ return 1;
88282+}
88283+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
88284+#endif
88285+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
88286+int grsec_enable_sysfs_restrict = 1;
88287+static int __init setup_grsec_sysfs_restrict(char *str)
88288+{
88289+ if (!simple_strtol(str, NULL, 0))
88290+ grsec_enable_sysfs_restrict = 0;
88291+ return 1;
88292+}
88293+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
88294+#endif
88295+
88296+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
88297+unsigned long pax_user_shadow_base __read_only;
88298+EXPORT_SYMBOL(pax_user_shadow_base);
88299+extern char pax_enter_kernel_user[];
88300+extern char pax_exit_kernel_user[];
88301+#endif
88302+
88303+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
88304+static int __init setup_pax_nouderef(char *str)
88305+{
88306+#ifdef CONFIG_X86_32
88307+ unsigned int cpu;
88308+ struct desc_struct *gdt;
88309+
88310+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
88311+ gdt = get_cpu_gdt_table(cpu);
88312+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
88313+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
88314+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
88315+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
88316+ }
88317+ loadsegment(ds, __KERNEL_DS);
88318+ loadsegment(es, __KERNEL_DS);
88319+ loadsegment(ss, __KERNEL_DS);
88320+#else
88321+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
88322+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
88323+ clone_pgd_mask = ~(pgdval_t)0UL;
88324+ pax_user_shadow_base = 0UL;
88325+ setup_clear_cpu_cap(X86_FEATURE_PCID);
88326+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
88327+#endif
88328+
88329+ return 0;
88330+}
88331+early_param("pax_nouderef", setup_pax_nouderef);
88332+
88333+#ifdef CONFIG_X86_64
88334+static int __init setup_pax_weakuderef(char *str)
88335+{
88336+ if (clone_pgd_mask != ~(pgdval_t)0UL)
88337+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
88338+ return 1;
88339+}
88340+__setup("pax_weakuderef", setup_pax_weakuderef);
88341+#endif
88342+#endif
88343+
88344+#ifdef CONFIG_PAX_SOFTMODE
88345+int pax_softmode;
88346+
88347+static int __init setup_pax_softmode(char *str)
88348+{
88349+ get_option(&str, &pax_softmode);
88350+ return 1;
88351+}
88352+__setup("pax_softmode=", setup_pax_softmode);
88353+#endif
88354+
88355 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
88356 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
88357 static const char *panic_later, *panic_param;
88358@@ -722,7 +803,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
88359 struct blacklist_entry *entry;
88360 char *fn_name;
88361
88362- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
88363+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
88364 if (!fn_name)
88365 return false;
88366
88367@@ -774,7 +855,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
88368 {
88369 int count = preempt_count();
88370 int ret;
88371- char msgbuf[64];
88372+ const char *msg1 = "", *msg2 = "";
88373
88374 if (initcall_blacklisted(fn))
88375 return -EPERM;
88376@@ -784,18 +865,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
88377 else
88378 ret = fn();
88379
88380- msgbuf[0] = 0;
88381-
88382 if (preempt_count() != count) {
88383- sprintf(msgbuf, "preemption imbalance ");
88384+ msg1 = " preemption imbalance";
88385 preempt_count_set(count);
88386 }
88387 if (irqs_disabled()) {
88388- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
88389+ msg2 = " disabled interrupts";
88390 local_irq_enable();
88391 }
88392- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
88393+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
88394
88395+ add_latent_entropy();
88396 return ret;
88397 }
88398
88399@@ -901,8 +981,8 @@ static int run_init_process(const char *init_filename)
88400 {
88401 argv_init[0] = init_filename;
88402 return do_execve(getname_kernel(init_filename),
88403- (const char __user *const __user *)argv_init,
88404- (const char __user *const __user *)envp_init);
88405+ (const char __user *const __force_user *)argv_init,
88406+ (const char __user *const __force_user *)envp_init);
88407 }
88408
88409 static int try_to_run_init_process(const char *init_filename)
88410@@ -919,6 +999,10 @@ static int try_to_run_init_process(const char *init_filename)
88411 return ret;
88412 }
88413
88414+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88415+extern int gr_init_ran;
88416+#endif
88417+
88418 static noinline void __init kernel_init_freeable(void);
88419
88420 static int __ref kernel_init(void *unused)
88421@@ -943,6 +1027,11 @@ static int __ref kernel_init(void *unused)
88422 ramdisk_execute_command, ret);
88423 }
88424
88425+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88426+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
88427+ gr_init_ran = 1;
88428+#endif
88429+
88430 /*
88431 * We try each of these until one succeeds.
88432 *
88433@@ -998,7 +1087,7 @@ static noinline void __init kernel_init_freeable(void)
88434 do_basic_setup();
88435
88436 /* Open the /dev/console on the rootfs, this should never fail */
88437- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
88438+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
88439 pr_err("Warning: unable to open an initial console.\n");
88440
88441 (void) sys_dup(0);
88442@@ -1011,11 +1100,13 @@ static noinline void __init kernel_init_freeable(void)
88443 if (!ramdisk_execute_command)
88444 ramdisk_execute_command = "/init";
88445
88446- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
88447+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
88448 ramdisk_execute_command = NULL;
88449 prepare_namespace();
88450 }
88451
88452+ grsecurity_init();
88453+
88454 /*
88455 * Ok, we have completed the initial bootup, and
88456 * we're essentially up and running. Get rid of the
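All five handlers added to init/main.c above follow the same __setup()/early_param() shape: parse the boot-line token with simple_strtol() or get_option(), flip a global, and return 1 so the parameter is marked consumed. A standalone model of the grsec_proc_gid= handler (strtol() stands in for simple_strtol(); the default value is only an assumed example, not the real CONFIG_GRKERNSEC_PROC_GID):

#include <stdio.h>
#include <stdlib.h>

static long grsec_proc_gid = 1001;      /* assumed default for the demo */

static int setup_grsec_proc_gid(const char *str)
{
	grsec_proc_gid = strtol(str, NULL, 0);  /* base 0: decimal or hex */
	return 1;                               /* consumed, as in __setup() */
}

int main(void)
{
	setup_grsec_proc_gid("0x2a");
	printf("grsec_proc_gid=%ld\n", grsec_proc_gid);
	return 0;
}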
88457diff --git a/ipc/compat.c b/ipc/compat.c
88458index 9b3c85f..1c4d897 100644
88459--- a/ipc/compat.c
88460+++ b/ipc/compat.c
88461@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
88462 COMPAT_SHMLBA);
88463 if (err < 0)
88464 return err;
88465- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
88466+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
88467 }
88468 case SHMDT:
88469 return sys_shmdt(compat_ptr(ptr));
88470diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88471index 8ad93c2..efd80f8 100644
88472--- a/ipc/ipc_sysctl.c
88473+++ b/ipc/ipc_sysctl.c
88474@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88475 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88476 void __user *buffer, size_t *lenp, loff_t *ppos)
88477 {
88478- struct ctl_table ipc_table;
88479+ ctl_table_no_const ipc_table;
88480
88481 memcpy(&ipc_table, table, sizeof(ipc_table));
88482 ipc_table.data = get_ipc(table);
88483@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88484 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88485 void __user *buffer, size_t *lenp, loff_t *ppos)
88486 {
88487- struct ctl_table ipc_table;
88488+ ctl_table_no_const ipc_table;
88489
88490 memcpy(&ipc_table, table, sizeof(ipc_table));
88491 ipc_table.data = get_ipc(table);
88492@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88493 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88494 void __user *buffer, size_t *lenp, loff_t *ppos)
88495 {
88496- struct ctl_table ipc_table;
88497+ ctl_table_no_const ipc_table;
88498 memcpy(&ipc_table, table, sizeof(ipc_table));
88499 ipc_table.data = get_ipc(table);
88500
88501@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88502 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
88503 void __user *buffer, size_t *lenp, loff_t *ppos)
88504 {
88505- struct ctl_table ipc_table;
88506+ ctl_table_no_const ipc_table;
88507 int dummy = 0;
88508
88509 memcpy(&ipc_table, table, sizeof(ipc_table));
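ctl_table_no_const exists because PaX's constification makes struct ctl_table instances read-only, while these proc handlers need a temporary, writable copy on the stack to point .data at the namespace-local value. In the real patch the twin type is produced with a compiler-plugin attribute; the sketch below only models the idea with a structurally identical plain struct (all names are local stand-ins):

#include <stdio.h>
#include <string.h>

struct ctl_table {
	const char *procname;
	void *const data;               /* constified field: set once */
};

/* the writable twin the patch names ctl_table_no_const */
typedef struct {
	const char *procname;
	void *data;
} ctl_table_no_const;

static int value = 42;

int main(void)
{
	static const struct ctl_table table = { "example", NULL };
	ctl_table_no_const tmp;

	memcpy(&tmp, &table, sizeof(tmp));  /* layout-identical by design */
	tmp.data = &value;                  /* legal: tmp's field is writable */
	printf("%s -> %d\n", tmp.procname, *(int *)tmp.data);
	return 0;
}

The memcpy() is safe precisely because the twin differs only in qualifiers, never in members or layout.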
88510diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88511index 68d4e95..1477ded 100644
88512--- a/ipc/mq_sysctl.c
88513+++ b/ipc/mq_sysctl.c
88514@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88515 static int proc_mq_dointvec(struct ctl_table *table, int write,
88516 void __user *buffer, size_t *lenp, loff_t *ppos)
88517 {
88518- struct ctl_table mq_table;
88519+ ctl_table_no_const mq_table;
88520 memcpy(&mq_table, table, sizeof(mq_table));
88521 mq_table.data = get_mq(table);
88522
88523@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88524 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88525 void __user *buffer, size_t *lenp, loff_t *ppos)
88526 {
88527- struct ctl_table mq_table;
88528+ ctl_table_no_const mq_table;
88529 memcpy(&mq_table, table, sizeof(mq_table));
88530 mq_table.data = get_mq(table);
88531
88532diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88533index 7635a1c..7432cb6 100644
88534--- a/ipc/mqueue.c
88535+++ b/ipc/mqueue.c
88536@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88537 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88538 info->attr.mq_msgsize);
88539
88540+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88541 spin_lock(&mq_lock);
88542 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88543 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88544diff --git a/ipc/shm.c b/ipc/shm.c
88545index 19633b4..d454904 100644
88546--- a/ipc/shm.c
88547+++ b/ipc/shm.c
88548@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88549 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88550 #endif
88551
88552+#ifdef CONFIG_GRKERNSEC
88553+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88554+ const u64 shm_createtime, const kuid_t cuid,
88555+ const int shmid);
88556+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88557+ const u64 shm_createtime);
88558+#endif
88559+
88560 void shm_init_ns(struct ipc_namespace *ns)
88561 {
88562 ns->shm_ctlmax = SHMMAX;
88563@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88564 shp->shm_lprid = 0;
88565 shp->shm_atim = shp->shm_dtim = 0;
88566 shp->shm_ctim = get_seconds();
88567+#ifdef CONFIG_GRKERNSEC
88568+ shp->shm_createtime = ktime_get_ns();
88569+#endif
88570 shp->shm_segsz = size;
88571 shp->shm_nattch = 0;
88572 shp->shm_file = file;
88573@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88574 f_mode = FMODE_READ | FMODE_WRITE;
88575 }
88576 if (shmflg & SHM_EXEC) {
88577+
88578+#ifdef CONFIG_PAX_MPROTECT
88579+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88580+ goto out;
88581+#endif
88582+
88583 prot |= PROT_EXEC;
88584 acc_mode |= S_IXUGO;
88585 }
88586@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88587 if (err)
88588 goto out_unlock;
88589
88590+#ifdef CONFIG_GRKERNSEC
88591+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88592+ shp->shm_perm.cuid, shmid) ||
88593+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88594+ err = -EACCES;
88595+ goto out_unlock;
88596+ }
88597+#endif
88598+
88599 ipc_lock_object(&shp->shm_perm);
88600
88601 /* check if shm_destroy() is tearing down shp */
88602@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88603 path = shp->shm_file->f_path;
88604 path_get(&path);
88605 shp->shm_nattch++;
88606+#ifdef CONFIG_GRKERNSEC
88607+ shp->shm_lapid = current->pid;
88608+#endif
88609 size = i_size_read(path.dentry->d_inode);
88610 ipc_unlock_object(&shp->shm_perm);
88611 rcu_read_unlock();
88612diff --git a/ipc/util.c b/ipc/util.c
88613index 106bed0..f851429 100644
88614--- a/ipc/util.c
88615+++ b/ipc/util.c
88616@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88617 int (*show)(struct seq_file *, void *);
88618 };
88619
88620+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88621+
88622 /**
88623 * ipc_init - initialise ipc subsystem
88624 *
88625@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88626 granted_mode >>= 6;
88627 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88628 granted_mode >>= 3;
88629+
88630+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88631+ return -1;
88632+
88633 /* is there some bit set in requested_mode but not in granted_mode? */
88634 if ((requested_mode & ~granted_mode & 0007) &&
88635 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
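The gr_ipc_permitted() hook slots into ipcperms() right after the classic mode-triplet selection, which is compact enough to misread. A standalone model of the shifting logic the hook sits behind:

#include <stdio.h>

static int ipcperms_model(unsigned mode, int is_owner, int is_group,
			  unsigned requested_mode)
{
	unsigned granted_mode = mode;

	if (is_owner)
		granted_mode >>= 6;     /* rwx------ */
	else if (is_group)
		granted_mode >>= 3;     /* ---rwx--- */
	/* else keep the low bits:       ------rwx */

	/* some bit set in requested_mode but not in granted_mode? */
	if (requested_mode & ~granted_mode & 0007)
		return -1;
	return 0;
}

int main(void)
{
	/* 0660: owner and group get rw, others get nothing */
	printf("owner rw: %d\n", ipcperms_model(0660, 1, 0, 06));
	printf("other rw: %d\n", ipcperms_model(0660, 0, 0, 06));
	return 0;
}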
88636diff --git a/kernel/audit.c b/kernel/audit.c
88637index 72ab759..757deba 100644
88638--- a/kernel/audit.c
88639+++ b/kernel/audit.c
88640@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88641 3) suppressed due to audit_rate_limit
88642 4) suppressed due to audit_backlog_limit
88643 */
88644-static atomic_t audit_lost = ATOMIC_INIT(0);
88645+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88646
88647 /* The netlink socket. */
88648 static struct sock *audit_sock;
88649@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88650 unsigned long now;
88651 int print;
88652
88653- atomic_inc(&audit_lost);
88654+ atomic_inc_unchecked(&audit_lost);
88655
88656 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88657
88658@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88659 if (print) {
88660 if (printk_ratelimit())
88661 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88662- atomic_read(&audit_lost),
88663+ atomic_read_unchecked(&audit_lost),
88664 audit_rate_limit,
88665 audit_backlog_limit);
88666 audit_panic(message);
88667@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88668 s.pid = audit_pid;
88669 s.rate_limit = audit_rate_limit;
88670 s.backlog_limit = audit_backlog_limit;
88671- s.lost = atomic_read(&audit_lost);
88672+ s.lost = atomic_read_unchecked(&audit_lost);
88673 s.backlog = skb_queue_len(&audit_skb_queue);
88674 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88675 s.backlog_wait_time = audit_backlog_wait_time;
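atomic_unchecked_t is the opt-out half of PaX's reference-counter overflow protection: ordinary atomic_t increments trap on overflow, so counters that are mere statistics, like audit_lost here (a count of dropped records), are switched to the unchecked variant. A userspace model of the split; the real implementation is per-architecture assembly, so this captures only the behavioral shape:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

/* PaX-style checked increment: trap at the brink instead of wrapping */
static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount saturation detected\n");
		abort();        /* the kernel kills the offending task */
	}
	v->counter++;
}

/* the opt-out variant used for statistics such as audit_lost */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;   /* kernel builds disable strict-overflow, so
			   wraparound is well-defined there */
}

int main(void)
{
	atomic_t ref = { INT_MAX };
	atomic_unchecked_t lost = { 41 };

	atomic_inc_unchecked(&lost);
	printf("audit_lost=%d\n", lost.counter);
	atomic_inc(&ref);       /* aborts: increment would overflow */
	return 0;
}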
88676diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88677index dc4ae70..2a2bddc 100644
88678--- a/kernel/auditsc.c
88679+++ b/kernel/auditsc.c
88680@@ -1955,7 +1955,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88681 }
88682
88683 /* global counter which is incremented every time something logs in */
88684-static atomic_t session_id = ATOMIC_INIT(0);
88685+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88686
88687 static int audit_set_loginuid_perm(kuid_t loginuid)
88688 {
88689@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
88690
88691 /* are we setting or clearing? */
88692 if (uid_valid(loginuid))
88693- sessionid = (unsigned int)atomic_inc_return(&session_id);
88694+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88695
88696 task->sessionid = sessionid;
88697 task->loginuid = loginuid;
88698diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88699index a64e7a2..2e69448 100644
88700--- a/kernel/bpf/core.c
88701+++ b/kernel/bpf/core.c
88702@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88703 * random section of illegal instructions.
88704 */
88705 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88706- hdr = module_alloc(size);
88707+ hdr = module_alloc_exec(size);
88708 if (hdr == NULL)
88709 return NULL;
88710
88711 /* Fill space with illegal/arch-dep instructions. */
88712 bpf_fill_ill_insns(hdr, size);
88713
88714+ pax_open_kernel();
88715 hdr->pages = size / PAGE_SIZE;
88716+ pax_close_kernel();
88717+
88718 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88719 PAGE_SIZE - sizeof(*hdr));
88720 start = (prandom_u32() % hole) & ~(alignment - 1);
88721@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88722
88723 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88724 {
88725- module_memfree(hdr);
88726+ module_memfree_exec(hdr);
88727 }
88728 #endif /* CONFIG_BPF_JIT */
88729
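Beyond moving the JIT image into module_alloc_exec() memory, bpf_jit_binary_alloc() already randomizes where the program lands inside its page-rounded buffer. A standalone model of that placement arithmetic (rand() stands in for prandom_u32(); the lengths are arbitrary example values):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096u

static unsigned round_up(unsigned x, unsigned a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned proglen = 1300, hdrlen = 16, alignment = 16;
	unsigned size = round_up(proglen + hdrlen + 128, PAGE_SIZE);
	unsigned hole = size - (proglen + hdrlen);
	unsigned start;

	if (hole > PAGE_SIZE - hdrlen)          /* min_t() in the original */
		hole = PAGE_SIZE - hdrlen;

	srand((unsigned)time(NULL));            /* prandom_u32() stand-in */
	start = ((unsigned)rand() % hole) & ~(alignment - 1);
	printf("image size %u, program at header offset %u\n",
	       size, hdrlen + start);
	return 0;
}

The slack around the program is pre-filled with illegal instructions, so guessing the offset wrong lands an attacker on a trap rather than a usable gadget.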
88730diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88731index 536edc2..d28c85d 100644
88732--- a/kernel/bpf/syscall.c
88733+++ b/kernel/bpf/syscall.c
88734@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88735 int err;
88736
88737 /* the syscall is limited to root temporarily. This restriction will be
88738- * lifted when security audit is clean. Note that eBPF+tracing must have
88739- * this restriction, since it may pass kernel data to user space
88740+ * lifted by upstream when a half-assed security audit is clean. Note
88741+ * that eBPF+tracing must have this restriction, since it may pass
88742+ * kernel data to user space
88743 */
88744 if (!capable(CAP_SYS_ADMIN))
88745 return -EPERM;
88746+#ifdef CONFIG_GRKERNSEC
88747+ return -EPERM;
88748+#endif
88749
88750 if (!access_ok(VERIFY_READ, uattr, 1))
88751 return -EFAULT;
88752diff --git a/kernel/capability.c b/kernel/capability.c
88753index 989f5bf..d317ca0 100644
88754--- a/kernel/capability.c
88755+++ b/kernel/capability.c
88756@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88757 * before modification is attempted and the application
88758 * fails.
88759 */
88760+ if (tocopy > ARRAY_SIZE(kdata))
88761+ return -EFAULT;
88762+
88763 if (copy_to_user(dataptr, kdata, tocopy
88764 * sizeof(struct __user_cap_data_struct))) {
88765 return -EFAULT;
88766@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88767 int ret;
88768
88769 rcu_read_lock();
88770- ret = security_capable(__task_cred(t), ns, cap);
88771+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88772+ gr_task_is_capable(t, __task_cred(t), cap);
88773 rcu_read_unlock();
88774
88775- return (ret == 0);
88776+ return ret;
88777 }
88778
88779 /**
88780@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88781 int ret;
88782
88783 rcu_read_lock();
88784- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88785+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88786 rcu_read_unlock();
88787
88788- return (ret == 0);
88789+ return ret;
88790 }
88791
88792 /**
88793@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88794 BUG();
88795 }
88796
88797- if (security_capable(current_cred(), ns, cap) == 0) {
88798+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88799 current->flags |= PF_SUPERPRIV;
88800 return true;
88801 }
88802@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88803 }
88804 EXPORT_SYMBOL(ns_capable);
88805
88806+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88807+{
88808+ if (unlikely(!cap_valid(cap))) {
88809+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88810+ BUG();
88811+ }
88812+
88813+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88814+ current->flags |= PF_SUPERPRIV;
88815+ return true;
88816+ }
88817+ return false;
88818+}
88819+EXPORT_SYMBOL(ns_capable_nolog);
88820+
88821 /**
88822 * file_ns_capable - Determine if the file's opener had a capability in effect
88823 * @file: The file we want to check
88824@@ -427,6 +446,12 @@ bool capable(int cap)
88825 }
88826 EXPORT_SYMBOL(capable);
88827
88828+bool capable_nolog(int cap)
88829+{
88830+ return ns_capable_nolog(&init_user_ns, cap);
88831+}
88832+EXPORT_SYMBOL(capable_nolog);
88833+
88834 /**
88835 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88836 * @inode: The inode in question
88837@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88838 kgid_has_mapping(ns, inode->i_gid);
88839 }
88840 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88841+
88842+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88843+{
88844+ struct user_namespace *ns = current_user_ns();
88845+
88846+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88847+ kgid_has_mapping(ns, inode->i_gid);
88848+}
88849+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
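The two added lines in capget() are a plain bounds check: tocopy is derived from the header version the caller supplied, so it gets clamped against the kernel-side array before anything is copied out. A standalone model of the same guard (struct and error value are simplified stand-ins):

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

static int copy_caps_out(struct cap_data *dst, unsigned tocopy)
{
	struct cap_data kdata[2] = { { 1, 2, 3 }, { 4, 5, 6 } };

	if (tocopy > ARRAY_SIZE(kdata))
		return -14;             /* -EFAULT: refuse the over-read */
	memcpy(dst, kdata, tocopy * sizeof(kdata[0]));
	return 0;
}

int main(void)
{
	struct cap_data out[2];

	printf("tocopy=2 -> %d\n", copy_caps_out(out, 2));
	printf("tocopy=9 -> %d (rejected)\n", copy_caps_out(out, 9));
	return 0;
}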
88850diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88851index 29a7b2c..a64e30a 100644
88852--- a/kernel/cgroup.c
88853+++ b/kernel/cgroup.c
88854@@ -5347,6 +5347,9 @@ static void cgroup_release_agent(struct work_struct *work)
88855 if (!pathbuf || !agentbuf)
88856 goto out;
88857
88858+ if (agentbuf[0] == '\0')
88859+ goto out;
88860+
88861 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88862 if (!path)
88863 goto out;
88864@@ -5532,7 +5535,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88865 struct task_struct *task;
88866 int count = 0;
88867
88868- seq_printf(seq, "css_set %p\n", cset);
88869+ seq_printf(seq, "css_set %pK\n", cset);
88870
88871 list_for_each_entry(task, &cset->tasks, cg_list) {
88872 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88873diff --git a/kernel/compat.c b/kernel/compat.c
88874index 24f0061..ea80802 100644
88875--- a/kernel/compat.c
88876+++ b/kernel/compat.c
88877@@ -13,6 +13,7 @@
88878
88879 #include <linux/linkage.h>
88880 #include <linux/compat.h>
88881+#include <linux/module.h>
88882 #include <linux/errno.h>
88883 #include <linux/time.h>
88884 #include <linux/signal.h>
88885@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88886 mm_segment_t oldfs;
88887 long ret;
88888
88889- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88890+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88891 oldfs = get_fs();
88892 set_fs(KERNEL_DS);
88893 ret = hrtimer_nanosleep_restart(restart);
88894@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88895 oldfs = get_fs();
88896 set_fs(KERNEL_DS);
88897 ret = hrtimer_nanosleep(&tu,
88898- rmtp ? (struct timespec __user *)&rmt : NULL,
88899+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88900 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88901 set_fs(oldfs);
88902
88903@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88904 mm_segment_t old_fs = get_fs();
88905
88906 set_fs(KERNEL_DS);
88907- ret = sys_sigpending((old_sigset_t __user *) &s);
88908+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88909 set_fs(old_fs);
88910 if (ret == 0)
88911 ret = put_user(s, set);
88912@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88913 mm_segment_t old_fs = get_fs();
88914
88915 set_fs(KERNEL_DS);
88916- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88917+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88918 set_fs(old_fs);
88919
88920 if (!ret) {
88921@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88922 set_fs (KERNEL_DS);
88923 ret = sys_wait4(pid,
88924 (stat_addr ?
88925- (unsigned int __user *) &status : NULL),
88926- options, (struct rusage __user *) &r);
88927+ (unsigned int __force_user *) &status : NULL),
88928+ options, (struct rusage __force_user *) &r);
88929 set_fs (old_fs);
88930
88931 if (ret > 0) {
88932@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88933 memset(&info, 0, sizeof(info));
88934
88935 set_fs(KERNEL_DS);
88936- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88937- uru ? (struct rusage __user *)&ru : NULL);
88938+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88939+ uru ? (struct rusage __force_user *)&ru : NULL);
88940 set_fs(old_fs);
88941
88942 if ((ret < 0) || (info.si_signo == 0))
88943@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88944 oldfs = get_fs();
88945 set_fs(KERNEL_DS);
88946 err = sys_timer_settime(timer_id, flags,
88947- (struct itimerspec __user *) &newts,
88948- (struct itimerspec __user *) &oldts);
88949+ (struct itimerspec __force_user *) &newts,
88950+ (struct itimerspec __force_user *) &oldts);
88951 set_fs(oldfs);
88952 if (!err && old && put_compat_itimerspec(old, &oldts))
88953 return -EFAULT;
88954@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88955 oldfs = get_fs();
88956 set_fs(KERNEL_DS);
88957 err = sys_timer_gettime(timer_id,
88958- (struct itimerspec __user *) &ts);
88959+ (struct itimerspec __force_user *) &ts);
88960 set_fs(oldfs);
88961 if (!err && put_compat_itimerspec(setting, &ts))
88962 return -EFAULT;
88963@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88964 oldfs = get_fs();
88965 set_fs(KERNEL_DS);
88966 err = sys_clock_settime(which_clock,
88967- (struct timespec __user *) &ts);
88968+ (struct timespec __force_user *) &ts);
88969 set_fs(oldfs);
88970 return err;
88971 }
88972@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88973 oldfs = get_fs();
88974 set_fs(KERNEL_DS);
88975 err = sys_clock_gettime(which_clock,
88976- (struct timespec __user *) &ts);
88977+ (struct timespec __force_user *) &ts);
88978 set_fs(oldfs);
88979 if (!err && compat_put_timespec(&ts, tp))
88980 return -EFAULT;
88981@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88982
88983 oldfs = get_fs();
88984 set_fs(KERNEL_DS);
88985- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88986+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88987 set_fs(oldfs);
88988
88989 err = compat_put_timex(utp, &txc);
88990@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88991 oldfs = get_fs();
88992 set_fs(KERNEL_DS);
88993 err = sys_clock_getres(which_clock,
88994- (struct timespec __user *) &ts);
88995+ (struct timespec __force_user *) &ts);
88996 set_fs(oldfs);
88997 if (!err && tp && compat_put_timespec(&ts, tp))
88998 return -EFAULT;
88999@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
89000 struct timespec tu;
89001 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
89002
89003- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
89004+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
89005 oldfs = get_fs();
89006 set_fs(KERNEL_DS);
89007 err = clock_nanosleep_restart(restart);
89008@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
89009 oldfs = get_fs();
89010 set_fs(KERNEL_DS);
89011 err = sys_clock_nanosleep(which_clock, flags,
89012- (struct timespec __user *) &in,
89013- (struct timespec __user *) &out);
89014+ (struct timespec __force_user *) &in,
89015+ (struct timespec __force_user *) &out);
89016 set_fs(oldfs);
89017
89018 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
89019@@ -1145,7 +1146,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
89020 mm_segment_t old_fs = get_fs();
89021
89022 set_fs(KERNEL_DS);
89023- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
89024+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
89025 set_fs(old_fs);
89026 if (compat_put_timespec(&t, interval))
89027 return -EFAULT;
89028diff --git a/kernel/configs.c b/kernel/configs.c
89029index c18b1f1..b9a0132 100644
89030--- a/kernel/configs.c
89031+++ b/kernel/configs.c
89032@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
89033 struct proc_dir_entry *entry;
89034
89035 /* create the current config file */
89036+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
89037+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
89038+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
89039+ &ikconfig_file_ops);
89040+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
89041+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
89042+ &ikconfig_file_ops);
89043+#endif
89044+#else
89045 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
89046 &ikconfig_file_ops);
89047+#endif
89048+
89049 if (!entry)
89050 return -ENOMEM;
89051
89052diff --git a/kernel/cred.c b/kernel/cred.c
89053index e0573a4..26c0fd3 100644
89054--- a/kernel/cred.c
89055+++ b/kernel/cred.c
89056@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
89057 validate_creds(cred);
89058 alter_cred_subscribers(cred, -1);
89059 put_cred(cred);
89060+
89061+#ifdef CONFIG_GRKERNSEC_SETXID
89062+ cred = (struct cred *) tsk->delayed_cred;
89063+ if (cred != NULL) {
89064+ tsk->delayed_cred = NULL;
89065+ validate_creds(cred);
89066+ alter_cred_subscribers(cred, -1);
89067+ put_cred(cred);
89068+ }
89069+#endif
89070 }
89071
89072 /**
89073@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
89074 * Always returns 0 thus allowing this function to be tail-called at the end
89075 * of, say, sys_setgid().
89076 */
89077-int commit_creds(struct cred *new)
89078+static int __commit_creds(struct cred *new)
89079 {
89080 struct task_struct *task = current;
89081 const struct cred *old = task->real_cred;
89082@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
89083
89084 get_cred(new); /* we will require a ref for the subj creds too */
89085
89086+ gr_set_role_label(task, new->uid, new->gid);
89087+
89088 /* dumpability changes */
89089 if (!uid_eq(old->euid, new->euid) ||
89090 !gid_eq(old->egid, new->egid) ||
89091@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
89092 put_cred(old);
89093 return 0;
89094 }
89095+#ifdef CONFIG_GRKERNSEC_SETXID
89096+extern int set_user(struct cred *new);
89097+
89098+void gr_delayed_cred_worker(void)
89099+{
89100+ const struct cred *new = current->delayed_cred;
89101+ struct cred *ncred;
89102+
89103+ current->delayed_cred = NULL;
89104+
89105+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
89106+ // drop the reference taken by get_cred() when this cred was queued
89107+ put_cred(new);
89108+ return;
89109+ } else if (new == NULL)
89110+ return;
89111+
89112+ ncred = prepare_creds();
89113+ if (!ncred)
89114+ goto die;
89115+ // uids
89116+ ncred->uid = new->uid;
89117+ ncred->euid = new->euid;
89118+ ncred->suid = new->suid;
89119+ ncred->fsuid = new->fsuid;
89120+ // gids
89121+ ncred->gid = new->gid;
89122+ ncred->egid = new->egid;
89123+ ncred->sgid = new->sgid;
89124+ ncred->fsgid = new->fsgid;
89125+ // groups
89126+ set_groups(ncred, new->group_info);
89127+ // caps
89128+ ncred->securebits = new->securebits;
89129+ ncred->cap_inheritable = new->cap_inheritable;
89130+ ncred->cap_permitted = new->cap_permitted;
89131+ ncred->cap_effective = new->cap_effective;
89132+ ncred->cap_bset = new->cap_bset;
89133+
89134+ if (set_user(ncred)) {
89135+ abort_creds(ncred);
89136+ goto die;
89137+ }
89138+
89139+ // drop the reference taken by get_cred() when this cred was queued
89140+ put_cred(new);
89141+
89142+ __commit_creds(ncred);
89143+ return;
89144+die:
89145+ // drop the reference taken by get_cred() when this cred was queued
89146+ put_cred(new);
89147+ do_group_exit(SIGKILL);
89148+}
89149+#endif
89150+
89151+int commit_creds(struct cred *new)
89152+{
89153+#ifdef CONFIG_GRKERNSEC_SETXID
89154+ int ret;
89155+ int schedule_it = 0;
89156+ struct task_struct *t;
89157+ unsigned oldsecurebits = current_cred()->securebits;
89158+
89159+ /* we won't be called with tasklist_lock held for writing
89160+ and with interrupts disabled, since in that case the
89161+ cred struct is init_cred
89162+ */
89163+ if (grsec_enable_setxid && !current_is_single_threaded() &&
89164+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
89165+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
89166+ schedule_it = 1;
89167+ }
89168+ ret = __commit_creds(new);
89169+ if (schedule_it) {
89170+ rcu_read_lock();
89171+ read_lock(&tasklist_lock);
89172+ for (t = next_thread(current); t != current;
89173+ t = next_thread(t)) {
89174+ /* we'll check if the thread has uid 0 in
89175+ * the delayed worker routine
89176+ */
89177+ if (task_securebits(t) == oldsecurebits &&
89178+ t->delayed_cred == NULL) {
89179+ t->delayed_cred = get_cred(new);
89180+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
89181+ set_tsk_need_resched(t);
89182+ }
89183+ }
89184+ read_unlock(&tasklist_lock);
89185+ rcu_read_unlock();
89186+ }
89187+
89188+ return ret;
89189+#else
89190+ return __commit_creds(new);
89191+#endif
89192+}
89193+
89194 EXPORT_SYMBOL(commit_creds);
89195
89196 /**
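The delayed-cred machinery above targets a real kernel-level asymmetry: the raw setuid(2) syscall changes only the calling thread's credentials (glibc papers over this by signalling every thread from userspace), so a compromised thread could skip the broadcast and quietly keep root. The demo below makes the per-thread nature visible; run it as root to watch the sibling thread retain euid 0 after the main thread drops privileges (unprivileged, the raw syscall fails and both threads print the same uid):

/* build: gcc -pthread per_thread_uid.c */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *show_uid(void *arg)
{
	(void)arg;
	sleep(1);               /* let the main thread drop privileges first */
	printf("sibling thread euid: %d\n", (int)geteuid());
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, show_uid, NULL);
	syscall(SYS_setuid, 65534);     /* raw syscall: this thread only */
	printf("main thread euid:    %d\n", (int)geteuid());
	pthread_join(t, NULL);
	return 0;
}

GRKERNSEC_SETXID closes the gap inside the kernel itself: commit_creds() queues the new credentials on every sibling thread, the delayed worker applies them on the next reschedule, and a thread that cannot be brought down to the new uid takes the whole group with it via do_group_exit(SIGKILL).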
89197diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
89198index 0874e2e..5b32cc9 100644
89199--- a/kernel/debug/debug_core.c
89200+++ b/kernel/debug/debug_core.c
89201@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
89202 */
89203 static atomic_t masters_in_kgdb;
89204 static atomic_t slaves_in_kgdb;
89205-static atomic_t kgdb_break_tasklet_var;
89206+static atomic_unchecked_t kgdb_break_tasklet_var;
89207 atomic_t kgdb_setting_breakpoint;
89208
89209 struct task_struct *kgdb_usethread;
89210@@ -137,7 +137,7 @@ int kgdb_single_step;
89211 static pid_t kgdb_sstep_pid;
89212
89213 /* to keep track of the CPU which is doing the single stepping*/
89214-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89215+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89216
89217 /*
89218 * If you are debugging a problem where roundup (the collection of
89219@@ -552,7 +552,7 @@ return_normal:
89220 * kernel will only try for the value of sstep_tries before
89221 * giving up and continuing on.
89222 */
89223- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
89224+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
89225 (kgdb_info[cpu].task &&
89226 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
89227 atomic_set(&kgdb_active, -1);
89228@@ -654,8 +654,8 @@ cpu_master_loop:
89229 }
89230
89231 kgdb_restore:
89232- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
89233- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
89234+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
89235+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
89236 if (kgdb_info[sstep_cpu].task)
89237 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
89238 else
89239@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
89240 static void kgdb_tasklet_bpt(unsigned long ing)
89241 {
89242 kgdb_breakpoint();
89243- atomic_set(&kgdb_break_tasklet_var, 0);
89244+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
89245 }
89246
89247 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
89248
89249 void kgdb_schedule_breakpoint(void)
89250 {
89251- if (atomic_read(&kgdb_break_tasklet_var) ||
89252+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
89253 atomic_read(&kgdb_active) != -1 ||
89254 atomic_read(&kgdb_setting_breakpoint))
89255 return;
89256- atomic_inc(&kgdb_break_tasklet_var);
89257+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
89258 tasklet_schedule(&kgdb_tasklet_breakpoint);
89259 }
89260 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
89261diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
89262index 4121345..861e178 100644
89263--- a/kernel/debug/kdb/kdb_main.c
89264+++ b/kernel/debug/kdb/kdb_main.c
89265@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
89266 continue;
89267
89268 kdb_printf("%-20s%8u 0x%p ", mod->name,
89269- mod->core_size, (void *)mod);
89270+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
89271 #ifdef CONFIG_MODULE_UNLOAD
89272 kdb_printf("%4d ", module_refcount(mod));
89273 #endif
89274@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
89275 kdb_printf(" (Loading)");
89276 else
89277 kdb_printf(" (Live)");
89278- kdb_printf(" 0x%p", mod->module_core);
89279+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
89280
89281 #ifdef CONFIG_MODULE_UNLOAD
89282 {
89283diff --git a/kernel/events/core.c b/kernel/events/core.c
89284index 2fabc06..79cceec 100644
89285--- a/kernel/events/core.c
89286+++ b/kernel/events/core.c
89287@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
89288 * 0 - disallow raw tracepoint access for unpriv
89289 * 1 - disallow cpu events for unpriv
89290 * 2 - disallow kernel profiling for unpriv
89291+ * 3 - disallow all unpriv perf event use
89292 */
89293-int sysctl_perf_event_paranoid __read_mostly = 1;
89294+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89295+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
89296+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
89297+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
89298+#else
89299+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
89300+#endif
89301
89302 /* Minimum for 512 kiB + 1 user control page */
89303 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
89304@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
89305
89306 tmp *= sysctl_perf_cpu_time_max_percent;
89307 do_div(tmp, 100);
89308- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
89309+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
89310 }
89311
89312 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
89313@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
89314 }
89315 }
89316
89317-static atomic64_t perf_event_id;
89318+static atomic64_unchecked_t perf_event_id;
89319
89320 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
89321 enum event_type_t event_type);
89322@@ -3220,7 +3227,7 @@ static void __perf_event_read(void *info)
89323
89324 static inline u64 perf_event_count(struct perf_event *event)
89325 {
89326- return local64_read(&event->count) + atomic64_read(&event->child_count);
89327+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
89328 }
89329
89330 static u64 perf_event_read(struct perf_event *event)
89331@@ -3656,9 +3663,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
89332 mutex_lock(&event->child_mutex);
89333 total += perf_event_read(event);
89334 *enabled += event->total_time_enabled +
89335- atomic64_read(&event->child_total_time_enabled);
89336+ atomic64_read_unchecked(&event->child_total_time_enabled);
89337 *running += event->total_time_running +
89338- atomic64_read(&event->child_total_time_running);
89339+ atomic64_read_unchecked(&event->child_total_time_running);
89340
89341 list_for_each_entry(child, &event->child_list, child_list) {
89342 total += perf_event_read(child);
89343@@ -4147,10 +4154,10 @@ void perf_event_update_userpage(struct perf_event *event)
89344 userpg->offset -= local64_read(&event->hw.prev_count);
89345
89346 userpg->time_enabled = enabled +
89347- atomic64_read(&event->child_total_time_enabled);
89348+ atomic64_read_unchecked(&event->child_total_time_enabled);
89349
89350 userpg->time_running = running +
89351- atomic64_read(&event->child_total_time_running);
89352+ atomic64_read_unchecked(&event->child_total_time_running);
89353
89354 arch_perf_update_userpage(event, userpg, now);
89355
89356@@ -4740,7 +4747,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
89357
89358 /* Data. */
89359 sp = perf_user_stack_pointer(regs);
89360- rem = __output_copy_user(handle, (void *) sp, dump_size);
89361+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
89362 dyn_size = dump_size - rem;
89363
89364 perf_output_skip(handle, rem);
89365@@ -4831,11 +4838,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
89366 values[n++] = perf_event_count(event);
89367 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
89368 values[n++] = enabled +
89369- atomic64_read(&event->child_total_time_enabled);
89370+ atomic64_read_unchecked(&event->child_total_time_enabled);
89371 }
89372 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
89373 values[n++] = running +
89374- atomic64_read(&event->child_total_time_running);
89375+ atomic64_read_unchecked(&event->child_total_time_running);
89376 }
89377 if (read_format & PERF_FORMAT_ID)
89378 values[n++] = primary_event_id(event);
89379@@ -7180,7 +7187,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
89380 event->parent = parent_event;
89381
89382 event->ns = get_pid_ns(task_active_pid_ns(current));
89383- event->id = atomic64_inc_return(&perf_event_id);
89384+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
89385
89386 event->state = PERF_EVENT_STATE_INACTIVE;
89387
89388@@ -7470,6 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
89389 if (flags & ~PERF_FLAG_ALL)
89390 return -EINVAL;
89391
89392+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89393+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
89394+ return -EACCES;
89395+#endif
89396+
89397 err = perf_copy_attr(attr_uptr, &attr);
89398 if (err)
89399 return err;
89400@@ -7892,10 +7904,10 @@ static void sync_child_event(struct perf_event *child_event,
89401 /*
89402 * Add back the child's count to the parent's count:
89403 */
89404- atomic64_add(child_val, &parent_event->child_count);
89405- atomic64_add(child_event->total_time_enabled,
89406+ atomic64_add_unchecked(child_val, &parent_event->child_count);
89407+ atomic64_add_unchecked(child_event->total_time_enabled,
89408 &parent_event->child_total_time_enabled);
89409- atomic64_add(child_event->total_time_running,
89410+ atomic64_add_unchecked(child_event->total_time_running,
89411 &parent_event->child_total_time_running);
89412
89413 /*
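The paranoia rework adds a level 3 above upstream's 0-2, refusing unprivileged perf_event_open() outright; the perf_paranoid_any() check in the syscall hunk above tests for it. A trivial model of the gate (the -EACCES value follows the hunk; everything else is a stand-in):

#include <stdio.h>

static int sysctl_perf_event_paranoid = 3;   /* PERF_HARDEN default above */

static int perf_open_allowed(int is_admin)
{
	if (sysctl_perf_event_paranoid >= 3 && !is_admin)
		return -13;     /* -EACCES, as in the hunk */
	return 0;
}

int main(void)
{
	printf("unpriv: %d\n", perf_open_allowed(0));
	printf("admin:  %d\n", perf_open_allowed(1));
	return 0;
}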
89414diff --git a/kernel/events/internal.h b/kernel/events/internal.h
89415index 569b2187..19940d9 100644
89416--- a/kernel/events/internal.h
89417+++ b/kernel/events/internal.h
89418@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
89419 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
89420 }
89421
89422-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
89423+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
89424 static inline unsigned long \
89425 func_name(struct perf_output_handle *handle, \
89426- const void *buf, unsigned long len) \
89427+ const void user *buf, unsigned long len) \
89428 { \
89429 unsigned long size, written; \
89430 \
89431@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
89432 return 0;
89433 }
89434
89435-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
89436+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
89437
89438 static inline unsigned long
89439 memcpy_skip(void *dst, const void *src, unsigned long n)
89440@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
89441 return 0;
89442 }
89443
89444-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
89445+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
89446
89447 #ifndef arch_perf_out_copy_user
89448 #define arch_perf_out_copy_user arch_perf_out_copy_user
89449@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
89450 }
89451 #endif
89452
89453-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
89454+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
89455
89456 /* Callchain handling */
89457 extern struct perf_callchain_entry *
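The internal.h change threads an address-space token through the copy-template macro so that only the user-copy instantiation gets __user pasted into the generated signature. A standalone model with the annotation reduced to a no-op so it compiles outside sparse (memcpy_common here is a local stand-in that always succeeds):

#include <stdio.h>
#include <string.h>

#define __user          /* sparse-only in the kernel; a no-op here */

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user)		\
static unsigned long							\
func_name(void *dst, const void user *buf, unsigned long len)		\
{									\
	memcpy_func(dst, (const void *)buf, len);			\
	return 0;	/* 0 bytes left uncopied */			\
}

static void *memcpy_common(void *dst, const void *src, unsigned long n)
{
	return memcpy(dst, src, n);
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )            /* kernel buf */
DEFINE_OUTPUT_COPY(__output_copy_user, memcpy_common, __user) /* user buf */

int main(void)
{
	char dst[8];

	__output_copy(dst, "kern", 5);
	printf("%s\n", dst);
	__output_copy_user(dst, "user", 5);
	printf("%s\n", dst);
	return 0;
}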
89458diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
89459index cb346f2..e4dc317 100644
89460--- a/kernel/events/uprobes.c
89461+++ b/kernel/events/uprobes.c
89462@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
89463 {
89464 struct page *page;
89465 uprobe_opcode_t opcode;
89466- int result;
89467+ long result;
89468
89469 pagefault_disable();
89470 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89471diff --git a/kernel/exit.c b/kernel/exit.c
89472index feff10b..f623dd5 100644
89473--- a/kernel/exit.c
89474+++ b/kernel/exit.c
89475@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
89476 struct task_struct *leader;
89477 int zap_leader;
89478 repeat:
89479+#ifdef CONFIG_NET
89480+ gr_del_task_from_ip_table(p);
89481+#endif
89482+
89483 /* don't need to get the RCU readlock here - the process is dead and
89484 * can't be modifying its own credentials. But shut RCU-lockdep up */
89485 rcu_read_lock();
89486@@ -656,6 +660,8 @@ void do_exit(long code)
89487 int group_dead;
89488 TASKS_RCU(int tasks_rcu_i);
89489
89490+ set_fs(USER_DS);
89491+
89492 profile_task_exit(tsk);
89493
89494 WARN_ON(blk_needs_flush_plug(tsk));
89495@@ -672,7 +678,6 @@ void do_exit(long code)
89496 * mm_release()->clear_child_tid() from writing to a user-controlled
89497 * kernel address.
89498 */
89499- set_fs(USER_DS);
89500
89501 ptrace_event(PTRACE_EVENT_EXIT, code);
89502
89503@@ -730,6 +735,9 @@ void do_exit(long code)
89504 tsk->exit_code = code;
89505 taskstats_exit(tsk, group_dead);
89506
89507+ gr_acl_handle_psacct(tsk, code);
89508+ gr_acl_handle_exit();
89509+
89510 exit_mm(tsk);
89511
89512 if (group_dead)
89513@@ -849,7 +857,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89514 * Take down every thread in the group. This is called by fatal signals
89515 * as well as by sys_exit_group (below).
89516 */
89517-void
89518+__noreturn void
89519 do_group_exit(int exit_code)
89520 {
89521 struct signal_struct *sig = current->signal;
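Marking do_group_exit() __noreturn is more than documentation: the compiler can prune impossible fall-through paths and warn about code placed after a call. The same idea in C11 spelling (the kernel's __noreturn macro expands to the equivalent GCC attribute):

#include <stdio.h>
#include <stdlib.h>

static _Noreturn void die(int code)
{
	fprintf(stderr, "exiting with %d\n", code);
	exit(code);
}

static int checked_div(int a, int b)
{
	if (b == 0)
		die(1);
	return a / b;   /* no missing-return warning: die() cannot return */
}

int main(void)
{
	printf("%d\n", checked_div(6, 3));
	return 0;
}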
89522diff --git a/kernel/fork.c b/kernel/fork.c
89523index cf65139..704476e 100644
89524--- a/kernel/fork.c
89525+++ b/kernel/fork.c
89526@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
89527 void thread_info_cache_init(void)
89528 {
89529 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
89530- THREAD_SIZE, 0, NULL);
89531+ THREAD_SIZE, SLAB_USERCOPY, NULL);
89532 BUG_ON(thread_info_cache == NULL);
89533 }
89534 # endif
89535 #endif
89536
89537+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89538+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89539+ int node, void **lowmem_stack)
89540+{
89541+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89542+ void *ret = NULL;
89543+ unsigned int i;
89544+
89545+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89546+ if (*lowmem_stack == NULL)
89547+ goto out;
89548+
89549+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89550+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89551+
89552+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89553+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89554+ if (ret == NULL) {
89555+ free_thread_info(*lowmem_stack);
89556+ *lowmem_stack = NULL;
89557+ }
89558+
89559+out:
89560+ return ret;
89561+}
89562+
89563+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89564+{
89565+ unmap_process_stacks(tsk);
89566+}
89567+#else
89568+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89569+ int node, void **lowmem_stack)
89570+{
89571+ return alloc_thread_info_node(tsk, node);
89572+}
89573+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89574+{
89575+ free_thread_info(ti);
89576+}
89577+#endif
89578+
89579 /* SLAB cache for signal_struct structures (tsk->signal) */
89580 static struct kmem_cache *signal_cachep;
89581
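GRKERNSEC_KSTACKOVERFLOW, wired up in the hunk above, reaches the kernel stack through a vmap()ed alias so an overrun faults on unmapped guard pages instead of silently corrupting whatever the page allocator placed next door (VM_IOREMAP is borrowed purely for its THREAD_SIZE alignment, as the inline comment notes). A userspace sketch of the guard-page idea, since vmap() has no exact userspace analog (the sizes are arbitrary):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t stack_size = 4 * (size_t)page;   /* THREAD_SIZE stand-in */
	/* one guard page below, the stack itself, one guard page above */
	char *region = mmap(NULL, stack_size + 2 * page,
			    PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (region == MAP_FAILED)
		return 1;
	mprotect(region, page, PROT_NONE);                     /* low guard */
	mprotect(region + page + stack_size, page, PROT_NONE); /* high guard */

	char *stack = region + page;
	memset(stack, 0, stack_size);           /* in-bounds use is fine */
	printf("stack at %p, guarded on both sides\n", (void *)stack);
	/* touching stack[-1] or stack[stack_size] would now SIGSEGV */
	return 0;
}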
89582@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89583 /* SLAB cache for mm_struct structures (tsk->mm) */
89584 static struct kmem_cache *mm_cachep;
89585
89586-static void account_kernel_stack(struct thread_info *ti, int account)
89587+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89588 {
89589+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89590+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89591+#else
89592 struct zone *zone = page_zone(virt_to_page(ti));
89593+#endif
89594
89595 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89596 }
89597
89598 void free_task(struct task_struct *tsk)
89599 {
89600- account_kernel_stack(tsk->stack, -1);
89601+ account_kernel_stack(tsk, tsk->stack, -1);
89602 arch_release_thread_info(tsk->stack);
89603- free_thread_info(tsk->stack);
89604+ gr_free_thread_info(tsk, tsk->stack);
89605 rt_mutex_debug_task_free(tsk);
89606 ftrace_graph_exit_task(tsk);
89607 put_seccomp_filter(tsk);
89608@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89609 {
89610 struct task_struct *tsk;
89611 struct thread_info *ti;
89612+ void *lowmem_stack;
89613 int node = tsk_fork_get_node(orig);
89614 int err;
89615
89616@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89617 if (!tsk)
89618 return NULL;
89619
89620- ti = alloc_thread_info_node(tsk, node);
89621+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89622 if (!ti)
89623 goto free_tsk;
89624
89625@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89626 goto free_ti;
89627
89628 tsk->stack = ti;
89629+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89630+ tsk->lowmem_stack = lowmem_stack;
89631+#endif
89632 #ifdef CONFIG_SECCOMP
89633 /*
89634 * We must handle setting up seccomp filters once we're under
89635@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89636 set_task_stack_end_magic(tsk);
89637
89638 #ifdef CONFIG_CC_STACKPROTECTOR
89639- tsk->stack_canary = get_random_int();
89640+ tsk->stack_canary = pax_get_random_long();
89641 #endif
89642
89643 /*
89644@@ -352,24 +402,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89645 tsk->splice_pipe = NULL;
89646 tsk->task_frag.page = NULL;
89647
89648- account_kernel_stack(ti, 1);
89649+ account_kernel_stack(tsk, ti, 1);
89650
89651 return tsk;
89652
89653 free_ti:
89654- free_thread_info(ti);
89655+ gr_free_thread_info(tsk, ti);
89656 free_tsk:
89657 free_task_struct(tsk);
89658 return NULL;
89659 }
89660
89661 #ifdef CONFIG_MMU
89662-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89663+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89664+{
89665+ struct vm_area_struct *tmp;
89666+ unsigned long charge;
89667+ struct file *file;
89668+ int retval;
89669+
89670+ charge = 0;
89671+ if (mpnt->vm_flags & VM_ACCOUNT) {
89672+ unsigned long len = vma_pages(mpnt);
89673+
89674+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89675+ goto fail_nomem;
89676+ charge = len;
89677+ }
89678+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89679+ if (!tmp)
89680+ goto fail_nomem;
89681+ *tmp = *mpnt;
89682+ tmp->vm_mm = mm;
89683+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89684+ retval = vma_dup_policy(mpnt, tmp);
89685+ if (retval)
89686+ goto fail_nomem_policy;
89687+ if (anon_vma_fork(tmp, mpnt))
89688+ goto fail_nomem_anon_vma_fork;
89689+ tmp->vm_flags &= ~VM_LOCKED;
89690+ tmp->vm_next = tmp->vm_prev = NULL;
89691+ tmp->vm_mirror = NULL;
89692+ file = tmp->vm_file;
89693+ if (file) {
89694+ struct inode *inode = file_inode(file);
89695+ struct address_space *mapping = file->f_mapping;
89696+
89697+ get_file(file);
89698+ if (tmp->vm_flags & VM_DENYWRITE)
89699+ atomic_dec(&inode->i_writecount);
89700+ i_mmap_lock_write(mapping);
89701+ if (tmp->vm_flags & VM_SHARED)
89702+ atomic_inc(&mapping->i_mmap_writable);
89703+ flush_dcache_mmap_lock(mapping);
89704+ /* insert tmp into the share list, just after mpnt */
89705+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89706+ flush_dcache_mmap_unlock(mapping);
89707+ i_mmap_unlock_write(mapping);
89708+ }
89709+
89710+ /*
89711+ * Clear hugetlb-related page reserves for children. This only
89712+ * affects MAP_PRIVATE mappings. Faults generated by the child
89713+ * are not guaranteed to succeed, even if read-only
89714+ */
89715+ if (is_vm_hugetlb_page(tmp))
89716+ reset_vma_resv_huge_pages(tmp);
89717+
89718+ return tmp;
89719+
89720+fail_nomem_anon_vma_fork:
89721+ mpol_put(vma_policy(tmp));
89722+fail_nomem_policy:
89723+ kmem_cache_free(vm_area_cachep, tmp);
89724+fail_nomem:
89725+ vm_unacct_memory(charge);
89726+ return NULL;
89727+}
89728+
89729+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89730 {
89731 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89732 struct rb_node **rb_link, *rb_parent;
89733 int retval;
89734- unsigned long charge;
89735
89736 uprobe_start_dup_mmap();
89737 down_write(&oldmm->mmap_sem);
89738@@ -397,51 +512,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89739
89740 prev = NULL;
89741 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89742- struct file *file;
89743-
89744 if (mpnt->vm_flags & VM_DONTCOPY) {
89745 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89746 -vma_pages(mpnt));
89747 continue;
89748 }
89749- charge = 0;
89750- if (mpnt->vm_flags & VM_ACCOUNT) {
89751- unsigned long len = vma_pages(mpnt);
89752-
89753- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89754- goto fail_nomem;
89755- charge = len;
89756- }
89757- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89758- if (!tmp)
89759- goto fail_nomem;
89760- *tmp = *mpnt;
89761- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89762- retval = vma_dup_policy(mpnt, tmp);
89763- if (retval)
89764- goto fail_nomem_policy;
89765- tmp->vm_mm = mm;
89766- if (anon_vma_fork(tmp, mpnt))
89767- goto fail_nomem_anon_vma_fork;
89768- tmp->vm_flags &= ~VM_LOCKED;
89769- tmp->vm_next = tmp->vm_prev = NULL;
89770- file = tmp->vm_file;
89771- if (file) {
89772- struct inode *inode = file_inode(file);
89773- struct address_space *mapping = file->f_mapping;
89774-
89775- get_file(file);
89776- if (tmp->vm_flags & VM_DENYWRITE)
89777- atomic_dec(&inode->i_writecount);
89778- i_mmap_lock_write(mapping);
89779- if (tmp->vm_flags & VM_SHARED)
89780- atomic_inc(&mapping->i_mmap_writable);
89781- flush_dcache_mmap_lock(mapping);
89782- /* insert tmp into the share list, just after mpnt */
89783- vma_interval_tree_insert_after(tmp, mpnt,
89784- &mapping->i_mmap);
89785- flush_dcache_mmap_unlock(mapping);
89786- i_mmap_unlock_write(mapping);
89787+ tmp = dup_vma(mm, oldmm, mpnt);
89788+ if (!tmp) {
89789+ retval = -ENOMEM;
89790+ goto out;
89791 }
89792
89793 /*
89794@@ -473,6 +552,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89795 if (retval)
89796 goto out;
89797 }
89798+
89799+#ifdef CONFIG_PAX_SEGMEXEC
89800+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89801+ struct vm_area_struct *mpnt_m;
89802+
89803+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89804+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89805+
89806+ if (!mpnt->vm_mirror)
89807+ continue;
89808+
89809+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89810+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89811+ mpnt->vm_mirror = mpnt_m;
89812+ } else {
89813+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89814+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89815+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89816+ mpnt->vm_mirror->vm_mirror = mpnt;
89817+ }
89818+ }
89819+ BUG_ON(mpnt_m);
89820+ }
89821+#endif
89822+
89823 /* a new mm has just been created */
89824 arch_dup_mmap(oldmm, mm);
89825 retval = 0;
89826@@ -482,14 +586,6 @@ out:
89827 up_write(&oldmm->mmap_sem);
89828 uprobe_end_dup_mmap();
89829 return retval;
89830-fail_nomem_anon_vma_fork:
89831- mpol_put(vma_policy(tmp));
89832-fail_nomem_policy:
89833- kmem_cache_free(vm_area_cachep, tmp);
89834-fail_nomem:
89835- retval = -ENOMEM;
89836- vm_unacct_memory(charge);
89837- goto out;
89838 }
89839
89840 static inline int mm_alloc_pgd(struct mm_struct *mm)
89841@@ -739,8 +835,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89842 return ERR_PTR(err);
89843
89844 mm = get_task_mm(task);
89845- if (mm && mm != current->mm &&
89846- !ptrace_may_access(task, mode)) {
89847+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89848+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89849 mmput(mm);
89850 mm = ERR_PTR(-EACCES);
89851 }
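
The mm_access() hunk above widens the refusal condition: a foreign mm still requires ptrace_may_access(), and an attach-mode request must additionally pass grsecurity's proc/ptrace policy hooks. Restated as a standalone predicate, a minimal sketch with the gr_* hooks stubbed out (illustrative only, not the in-kernel code):

#include <stdbool.h>
#include <stdio.h>

#define PTRACE_MODE_READ   1
#define PTRACE_MODE_ATTACH 2

/* stubs for the policy hooks the patch consults */
static bool ptrace_may_access(void)        { return true;  }
static bool gr_handle_proc_ptrace(void)    { return false; }
static bool gr_acl_handle_procpidmem(void) { return false; }

/* models the patched condition in mm_access() */
static bool deny_access(bool foreign_mm, int mode)
{
    return (foreign_mm && !ptrace_may_access()) ||
           (mode == PTRACE_MODE_ATTACH &&
            (gr_handle_proc_ptrace() || gr_acl_handle_procpidmem()));
}

int main(void)
{
    printf("deny=%d\n", deny_access(true, PTRACE_MODE_ATTACH));
    return 0;
}
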
89852@@ -943,13 +1039,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89853 spin_unlock(&fs->lock);
89854 return -EAGAIN;
89855 }
89856- fs->users++;
89857+ atomic_inc(&fs->users);
89858 spin_unlock(&fs->lock);
89859 return 0;
89860 }
89861 tsk->fs = copy_fs_struct(fs);
89862 if (!tsk->fs)
89863 return -ENOMEM;
89864+ /* Carry through gr_chroot_dentry and is_chrooted instead
89865+ of recomputing them here; they were already copied when the
89866+ task struct was duplicated. This allows pivot_root not to be
89867+ treated as a chroot.
89868+ */
89869+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89870+
89871 return 0;
89872 }
89873
89874@@ -1187,7 +1290,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89875 * parts of the process environment (as per the clone
89876 * flags). The actual kick-off is left to the caller.
89877 */
89878-static struct task_struct *copy_process(unsigned long clone_flags,
89879+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89880 unsigned long stack_start,
89881 unsigned long stack_size,
89882 int __user *child_tidptr,
89883@@ -1258,6 +1361,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89884 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89885 #endif
89886 retval = -EAGAIN;
89887+
89888+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89889+
89890 if (atomic_read(&p->real_cred->user->processes) >=
89891 task_rlimit(p, RLIMIT_NPROC)) {
89892 if (p->real_cred->user != INIT_USER &&
89893@@ -1507,6 +1613,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89894 goto bad_fork_free_pid;
89895 }
89896
89897+ /* synchronizes with gr_set_acls();
89898+ we need to call this past the point of no return for fork()
89899+ */
89900+ gr_copy_label(p);
89901+
89902 if (likely(p->pid)) {
89903 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89904
89905@@ -1597,6 +1708,8 @@ bad_fork_cleanup_count:
89906 bad_fork_free:
89907 free_task(p);
89908 fork_out:
89909+ gr_log_forkfail(retval);
89910+
89911 return ERR_PTR(retval);
89912 }
89913
89914@@ -1658,6 +1771,7 @@ long do_fork(unsigned long clone_flags,
89915
89916 p = copy_process(clone_flags, stack_start, stack_size,
89917 child_tidptr, NULL, trace);
89918+ add_latent_entropy();
89919 /*
89920 * Do this prior waking up the new thread - the thread pointer
89921 * might get invalid after that point, if the thread exits quickly.
89922@@ -1674,6 +1788,8 @@ long do_fork(unsigned long clone_flags,
89923 if (clone_flags & CLONE_PARENT_SETTID)
89924 put_user(nr, parent_tidptr);
89925
89926+ gr_handle_brute_check();
89927+
89928 if (clone_flags & CLONE_VFORK) {
89929 p->vfork_done = &vfork;
89930 init_completion(&vfork);
89931@@ -1792,7 +1908,7 @@ void __init proc_caches_init(void)
89932 mm_cachep = kmem_cache_create("mm_struct",
89933 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89934 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89935- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89936+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89937 mmap_init();
89938 nsproxy_cache_init();
89939 }
89940@@ -1832,7 +1948,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89941 return 0;
89942
89943 /* don't need lock here; in the worst case we'll do useless copy */
89944- if (fs->users == 1)
89945+ if (atomic_read(&fs->users) == 1)
89946 return 0;
89947
89948 *new_fsp = copy_fs_struct(fs);
89949@@ -1944,7 +2060,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89950 fs = current->fs;
89951 spin_lock(&fs->lock);
89952 current->fs = new_fs;
89953- if (--fs->users)
89954+ gr_set_chroot_entries(current, &current->fs->root);
89955+ if (atomic_dec_return(&fs->users))
89956 new_fs = NULL;
89957 else
89958 new_fs = fs;
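
The copy_fs()/unshare hunks above convert fs_struct's users field from a plain int guarded by fs->lock into an atomic_t, so the reference count can be bumped and dropped without widening the spinlock's coverage while grsecurity updates its chroot bookkeeping. A minimal userspace model of that refcount pattern, using C11 atomics (names here are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_model {
    atomic_int users;   /* models the patched atomic_t fs->users */
};

/* models copy_fs(): share the struct by bumping the refcount */
static void share(struct fs_struct_model *fs)
{
    atomic_fetch_add(&fs->users, 1);
}

/* models the unshare path: drop a reference, free on last put */
static void put(struct fs_struct_model *fs)
{
    /* atomic_dec_return() in the patch; fetch_sub returns the old value */
    if (atomic_fetch_sub(&fs->users, 1) == 1)
        free(fs);
}

int main(void)
{
    struct fs_struct_model *fs = malloc(sizeof(*fs));
    atomic_init(&fs->users, 1);
    share(fs);          /* a second task now shares fs */
    put(fs);            /* the first task unshares: refcount back to 1 */
    printf("users = %d\n", atomic_load(&fs->users));
    put(fs);            /* last reference: freed */
    return 0;
}
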
89959diff --git a/kernel/futex.c b/kernel/futex.c
89960index 2a5e383..878bac6 100644
89961--- a/kernel/futex.c
89962+++ b/kernel/futex.c
89963@@ -201,7 +201,7 @@ struct futex_pi_state {
89964 atomic_t refcount;
89965
89966 union futex_key key;
89967-};
89968+} __randomize_layout;
89969
89970 /**
89971 * struct futex_q - The hashed futex queue entry, one per waiting task
89972@@ -235,7 +235,7 @@ struct futex_q {
89973 struct rt_mutex_waiter *rt_waiter;
89974 union futex_key *requeue_pi_key;
89975 u32 bitset;
89976-};
89977+} __randomize_layout;
89978
89979 static const struct futex_q futex_q_init = {
89980 /* list gets initialized in queue_me()*/
89981@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89982 struct page *page, *page_head;
89983 int err, ro = 0;
89984
89985+#ifdef CONFIG_PAX_SEGMEXEC
89986+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89987+ return -EFAULT;
89988+#endif
89989+
89990 /*
89991 * The futex address must be "naturally" aligned.
89992 */
89993@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89994
89995 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89996 {
89997- int ret;
89998+ unsigned long ret;
89999
90000 pagefault_disable();
90001 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
90002@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
90003 {
90004 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
90005 u32 curval;
90006+ mm_segment_t oldfs;
90007
90008 /*
90009 * This will fail and we want it. Some arch implementations do
90010@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
90011 * implementation, the non-functional ones will return
90012 * -ENOSYS.
90013 */
90014+ oldfs = get_fs();
90015+ set_fs(USER_DS);
90016 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
90017 futex_cmpxchg_enabled = 1;
90018+ set_fs(oldfs);
90019 #endif
90020 }
90021
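
The futex_detect_cmpxchg() change brackets the deliberately faulting probe with get_fs()/set_fs(USER_DS), so that under PaX-style user/kernel address separation the NULL-pointer access is classified as a userland access and returns -EFAULT cleanly. A sketch of the general save/override/restore pattern, modelled in plain C (the "segment" here is a stand-in variable, not the kernel's mm_segment_t):

#include <errno.h>
#include <stdio.h>

typedef int seg_t;
enum { KERNEL_DS, USER_DS };

static seg_t current_seg = KERNEL_DS;

static seg_t get_fs_model(void)    { return current_seg; }
static void  set_fs_model(seg_t s) { current_seg = s;    }

/* models the probe: it must run with the user segment selected,
   and is expected to fault (-EFAULT) on the NULL user address */
static int probe_cmpxchg(void)
{
    return current_seg == USER_DS ? -EFAULT : 0;
}

int main(void)
{
    seg_t oldfs = get_fs_model();   /* save, like oldfs = get_fs() */
    set_fs_model(USER_DS);          /* override for the probe */
    int ret = probe_cmpxchg();
    set_fs_model(oldfs);            /* always restore */
    printf("probe returned %d (-EFAULT expected)\n", ret);
    return 0;
}
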
90022diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
90023index 55c8c93..9ba7ad6 100644
90024--- a/kernel/futex_compat.c
90025+++ b/kernel/futex_compat.c
90026@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
90027 return 0;
90028 }
90029
90030-static void __user *futex_uaddr(struct robust_list __user *entry,
90031+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
90032 compat_long_t futex_offset)
90033 {
90034 compat_uptr_t base = ptr_to_compat(entry);
90035diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
90036index b358a80..fc25240 100644
90037--- a/kernel/gcov/base.c
90038+++ b/kernel/gcov/base.c
90039@@ -114,11 +114,6 @@ void gcov_enable_events(void)
90040 }
90041
90042 #ifdef CONFIG_MODULES
90043-static inline int within(void *addr, void *start, unsigned long size)
90044-{
90045- return ((addr >= start) && (addr < start + size));
90046-}
90047-
90048 /* Update list and generate events when modules are unloaded. */
90049 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90050 void *data)
90051@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90052
90053 /* Remove entries located in module from linked list. */
90054 while ((info = gcov_info_next(info))) {
90055- if (within(info, mod->module_core, mod->core_size)) {
90056+ if (within_module_core_rw((unsigned long)info, mod)) {
90057 gcov_info_unlink(prev, info);
90058 if (gcov_events_enabled)
90059 gcov_event(GCOV_REMOVE, info);
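
The gcov hunk drops the file-local within() helper in favour of within_module_core_rw(), which understands the split RW/RX module layout introduced later in this patch. The underlying test is a simple half-open range check, sketched standalone below (illustrative values):

#include <stdio.h>

/* is addr inside [start, start + size)? the same test within() performed */
static int within(unsigned long addr, unsigned long start, unsigned long size)
{
    return addr >= start && addr < start + size;
}

int main(void)
{
    unsigned long core_rw_base = 0x1000, core_rw_size = 0x800;
    printf("%d\n", within(0x1400, core_rw_base, core_rw_size)); /* 1 */
    printf("%d\n", within(0x1800, core_rw_base, core_rw_size)); /* 0: end exclusive */
    return 0;
}
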
90060diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
90061index 886d09e..c7ff4e5 100644
90062--- a/kernel/irq/manage.c
90063+++ b/kernel/irq/manage.c
90064@@ -874,7 +874,7 @@ static int irq_thread(void *data)
90065
90066 action_ret = handler_fn(desc, action);
90067 if (action_ret == IRQ_HANDLED)
90068- atomic_inc(&desc->threads_handled);
90069+ atomic_inc_unchecked(&desc->threads_handled);
90070
90071 wake_threads_waitq(desc);
90072 }
90073diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
90074index e2514b0..de3dfe0 100644
90075--- a/kernel/irq/spurious.c
90076+++ b/kernel/irq/spurious.c
90077@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
90078 * count. We just care about the count being
90079 * different than the one we saw before.
90080 */
90081- handled = atomic_read(&desc->threads_handled);
90082+ handled = atomic_read_unchecked(&desc->threads_handled);
90083 handled |= SPURIOUS_DEFERRED;
90084 if (handled != desc->threads_handled_last) {
90085 action_ret = IRQ_HANDLED;
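
desc->threads_handled is a pure statistics counter: note_interrupt() only compares snapshots of it, so wrapping is harmless. Under PAX_REFCOUNT, ordinary atomic_t increments trap on overflow to catch reference-count bugs, which is why the two irq hunks above switch this counter to the _unchecked flavour. A userspace model of the two flavours (the _unchecked name is this patch's convention; the checked variant is modelled with a GCC overflow builtin, not real atomics):

#include <stdio.h>
#include <stdlib.h>

/* checked flavour: trap on overflow, as PAX_REFCOUNT does for atomic_t */
static void atomic_inc_checked(int *v)
{
    int out;
    if (__builtin_add_overflow(*v, 1, &out))
        abort();                /* an overflowing refcount is a bug */
    *v = out;
}

/* unchecked flavour: wrapping is acceptable for pure statistics */
static void atomic_inc_unchecked(unsigned int *v)
{
    (*v)++;                     /* only deltas between snapshots matter */
}

int main(void)
{
    int refs = 1;
    unsigned int handled = 0xffffffffu;

    atomic_inc_checked(&refs);          /* fine: 1 -> 2 */
    atomic_inc_unchecked(&handled);     /* wraps to 0, harmless here */
    printf("refs=%d handled=%u\n", refs, handled);
    return 0;
}
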
90086diff --git a/kernel/jump_label.c b/kernel/jump_label.c
90087index 9019f15..9a3c42e 100644
90088--- a/kernel/jump_label.c
90089+++ b/kernel/jump_label.c
90090@@ -14,6 +14,7 @@
90091 #include <linux/err.h>
90092 #include <linux/static_key.h>
90093 #include <linux/jump_label_ratelimit.h>
90094+#include <linux/mm.h>
90095
90096 #ifdef HAVE_JUMP_LABEL
90097
90098@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
90099
90100 size = (((unsigned long)stop - (unsigned long)start)
90101 / sizeof(struct jump_entry));
90102+ pax_open_kernel();
90103 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
90104+ pax_close_kernel();
90105 }
90106
90107 static void jump_label_update(struct static_key *key, int enable);
90108@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
90109 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
90110 struct jump_entry *iter;
90111
90112+ pax_open_kernel();
90113 for (iter = iter_start; iter < iter_stop; iter++) {
90114 if (within_module_init(iter->code, mod))
90115 iter->code = 0;
90116 }
90117+ pax_close_kernel();
90118 }
90119
90120 static int
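
jump_label's entry tables live in memory that KERNEXEC keeps read-only, so the sort and the init-entry invalidation above are bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection around the update. A userspace analogue of that open-write-close window, using mprotect() on an anonymous page (a model of the idea, not the pax_* implementation):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    /* a page that is normally kept read-only, like kernel text/tables */
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "jump_entry table");
    mprotect(p, pagesz, PROT_READ);              /* steady state: RO */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
    p[0] = 'J';                                  /* the protected write */
    mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

    printf("%s\n", p);
    munmap(p, pagesz);
    return 0;
}
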
90121diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
90122index 5c5987f..bc502b0 100644
90123--- a/kernel/kallsyms.c
90124+++ b/kernel/kallsyms.c
90125@@ -11,6 +11,9 @@
90126 * Changed the compression method from stem compression to "table lookup"
90127 * compression (see scripts/kallsyms.c for a more complete description)
90128 */
90129+#ifdef CONFIG_GRKERNSEC_HIDESYM
90130+#define __INCLUDED_BY_HIDESYM 1
90131+#endif
90132 #include <linux/kallsyms.h>
90133 #include <linux/module.h>
90134 #include <linux/init.h>
90135@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
90136
90137 static inline int is_kernel_inittext(unsigned long addr)
90138 {
90139+ if (system_state != SYSTEM_BOOTING)
90140+ return 0;
90141+
90142 if (addr >= (unsigned long)_sinittext
90143 && addr <= (unsigned long)_einittext)
90144 return 1;
90145 return 0;
90146 }
90147
90148+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90149+#ifdef CONFIG_MODULES
90150+static inline int is_module_text(unsigned long addr)
90151+{
90152+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
90153+ return 1;
90154+
90155+ addr = ktla_ktva(addr);
90156+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
90157+}
90158+#else
90159+static inline int is_module_text(unsigned long addr)
90160+{
90161+ return 0;
90162+}
90163+#endif
90164+#endif
90165+
90166 static inline int is_kernel_text(unsigned long addr)
90167 {
90168 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
90169@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
90170
90171 static inline int is_kernel(unsigned long addr)
90172 {
90173+
90174+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90175+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
90176+ return 1;
90177+
90178+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
90179+#else
90180 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
90181+#endif
90182+
90183 return 1;
90184 return in_gate_area_no_mm(addr);
90185 }
90186
90187 static int is_ksym_addr(unsigned long addr)
90188 {
90189+
90190+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90191+ if (is_module_text(addr))
90192+ return 0;
90193+#endif
90194+
90195 if (all_var)
90196 return is_kernel(addr);
90197
90198@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
90199
90200 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
90201 {
90202- iter->name[0] = '\0';
90203 iter->nameoff = get_symbol_offset(new_pos);
90204 iter->pos = new_pos;
90205 }
90206@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
90207 {
90208 struct kallsym_iter *iter = m->private;
90209
90210+#ifdef CONFIG_GRKERNSEC_HIDESYM
90211+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
90212+ return 0;
90213+#endif
90214+
90215 /* Some debugging symbols have no name. Ignore them. */
90216 if (!iter->name[0])
90217 return 0;
90218@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
90219 */
90220 type = iter->exported ? toupper(iter->type) :
90221 tolower(iter->type);
90222+
90223 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
90224 type, iter->name, iter->module_name);
90225 } else
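
With GRKERNSEC_HIDESYM, s_show() returns early for any reader whose uid is not root, so /proc/kallsyms appears empty to unprivileged users instead of leaking kernel text addresses. The gate is a one-line credential check; sketched standalone below, with geteuid() standing in for the kernel's current_uid()/GLOBAL_ROOT_UID comparison:

#include <stdio.h>
#include <unistd.h>

/* models s_show(): emit a symbol line only for root */
static int show_symbol(unsigned long value, char type, const char *name)
{
    if (geteuid() != 0)
        return 0;               /* silently print nothing, like the patch */
    printf("%016lx %c %s\n", value, type, name);
    return 0;
}

int main(void)
{
    show_symbol(0xffffffff81000000UL, 'T', "_text");
    return 0;
}
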
90226diff --git a/kernel/kcmp.c b/kernel/kcmp.c
90227index 0aa69ea..a7fcafb 100644
90228--- a/kernel/kcmp.c
90229+++ b/kernel/kcmp.c
90230@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
90231 struct task_struct *task1, *task2;
90232 int ret;
90233
90234+#ifdef CONFIG_GRKERNSEC
90235+ return -ENOSYS;
90236+#endif
90237+
90238 rcu_read_lock();
90239
90240 /*
90241diff --git a/kernel/kexec.c b/kernel/kexec.c
90242index 38c25b1..12b3f69 100644
90243--- a/kernel/kexec.c
90244+++ b/kernel/kexec.c
90245@@ -1348,7 +1348,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
90246 compat_ulong_t, flags)
90247 {
90248 struct compat_kexec_segment in;
90249- struct kexec_segment out, __user *ksegments;
90250+ struct kexec_segment out;
90251+ struct kexec_segment __user *ksegments;
90252 unsigned long i, result;
90253
90254 /* Don't allow clients that don't understand the native
90255diff --git a/kernel/kmod.c b/kernel/kmod.c
90256index 2777f40..a689506 100644
90257--- a/kernel/kmod.c
90258+++ b/kernel/kmod.c
90259@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
90260 kfree(info->argv);
90261 }
90262
90263-static int call_modprobe(char *module_name, int wait)
90264+static int call_modprobe(char *module_name, char *module_param, int wait)
90265 {
90266 struct subprocess_info *info;
90267 static char *envp[] = {
90268@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
90269 NULL
90270 };
90271
90272- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
90273+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
90274 if (!argv)
90275 goto out;
90276
90277@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
90278 argv[1] = "-q";
90279 argv[2] = "--";
90280 argv[3] = module_name; /* check free_modprobe_argv() */
90281- argv[4] = NULL;
90282+ argv[4] = module_param;
90283+ argv[5] = NULL;
90284
90285 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
90286 NULL, free_modprobe_argv, NULL);
90287@@ -122,9 +123,8 @@ out:
90288 * If module auto-loading support is disabled then this function
90289 * becomes a no-operation.
90290 */
90291-int __request_module(bool wait, const char *fmt, ...)
90292+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
90293 {
90294- va_list args;
90295 char module_name[MODULE_NAME_LEN];
90296 unsigned int max_modprobes;
90297 int ret;
90298@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
90299 if (!modprobe_path[0])
90300 return 0;
90301
90302- va_start(args, fmt);
90303- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
90304- va_end(args);
90305+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
90306 if (ret >= MODULE_NAME_LEN)
90307 return -ENAMETOOLONG;
90308
90309@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
90310 if (ret)
90311 return ret;
90312
90313+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90314+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90315+ /* hack to work around consolekit/udisks stupidity */
90316+ read_lock(&tasklist_lock);
90317+ if (!strcmp(current->comm, "mount") &&
90318+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
90319+ read_unlock(&tasklist_lock);
90320+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
90321+ return -EPERM;
90322+ }
90323+ read_unlock(&tasklist_lock);
90324+ }
90325+#endif
90326+
90327 /* If modprobe needs a service that is in a module, we get a recursive
90328 * loop. Limit the number of running kmod threads to max_threads/2 or
90329 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
90330@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
90331
90332 trace_module_request(module_name, wait, _RET_IP_);
90333
90334- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90335+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90336
90337 atomic_dec(&kmod_concurrent);
90338 return ret;
90339 }
90340+
90341+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
90342+{
90343+ va_list args;
90344+ int ret;
90345+
90346+ va_start(args, fmt);
90347+ ret = ____request_module(wait, module_param, fmt, args);
90348+ va_end(args);
90349+
90350+ return ret;
90351+}
90352+
90353+int __request_module(bool wait, const char *fmt, ...)
90354+{
90355+ va_list args;
90356+ int ret;
90357+
90358+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90359+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90360+ char module_param[MODULE_NAME_LEN];
90361+
90362+ memset(module_param, 0, sizeof(module_param));
90363+
90364+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
90365+
90366+ va_start(args, fmt);
90367+ ret = ____request_module(wait, module_param, fmt, args);
90368+ va_end(args);
90369+
90370+ return ret;
90371+ }
90372+#endif
90373+
90374+ va_start(args, fmt);
90375+ ret = ____request_module(wait, NULL, fmt, args);
90376+ va_end(args);
90377+
90378+ return ret;
90379+}
90380+
90381 EXPORT_SYMBOL(__request_module);
90382 #endif /* CONFIG_MODULES */
90383
90384 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
90385 {
90386+#ifdef CONFIG_GRKERNSEC
90387+ kfree(info->path);
90388+ info->path = info->origpath;
90389+#endif
90390 if (info->cleanup)
90391 (*info->cleanup)(info);
90392 kfree(info);
90393@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
90394 */
90395 set_user_nice(current, 0);
90396
90397+#ifdef CONFIG_GRKERNSEC
90398+ /* this is race-free as far as userland is concerned, since we
90399+ copied out the path to be used prior to this point and are now
90400+ operating on that copy
90401+ */
90402+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
90403+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
90404+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
90405+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
90406+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
90407+ retval = -EPERM;
90408+ goto out;
90409+ }
90410+#endif
90411+
90412 retval = -ENOMEM;
90413 new = prepare_kernel_cred(current);
90414 if (!new)
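
____call_usermodehelper() above rejects helper binaries that live outside a fixed set of system directories, or whose path contains "..", operating on the private copy of the path taken at setup time. The check is a prefix allowlist plus one exact-match exception; a standalone sketch (directory list copied from the hunk, everything else illustrative):

#include <stdio.h>
#include <string.h>

/* prefix allowlist from the patch, plus one exact-match exception */
static int path_permitted(const char *path)
{
    static const char *prefixes[] = {
        "/sbin/", "/usr/lib/", "/lib/", "/lib64/",
        "/usr/libexec/", "/usr/bin/",
    };
    if (strstr(path, ".."))
        return 0;                        /* no traversal tricks */
    if (!strcmp(path, "/usr/share/apport/apport"))
        return 1;                        /* the single whitelisted binary */
    for (size_t i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++)
        if (!strncmp(path, prefixes[i], strlen(prefixes[i])))
            return 1;
    return 0;
}

int main(void)
{
    printf("%d\n", path_permitted("/sbin/modprobe"));     /* 1 */
    printf("%d\n", path_permitted("/tmp/evil"));          /* 0 */
    printf("%d\n", path_permitted("/sbin/../tmp/evil"));  /* 0 */
    return 0;
}
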
90415@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
90416 commit_creds(new);
90417
90418 retval = do_execve(getname_kernel(sub_info->path),
90419- (const char __user *const __user *)sub_info->argv,
90420- (const char __user *const __user *)sub_info->envp);
90421+ (const char __user *const __force_user *)sub_info->argv,
90422+ (const char __user *const __force_user *)sub_info->envp);
90423 out:
90424 sub_info->retval = retval;
90425 /* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
90426@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
90427 *
90428 * Thus the __user pointer cast is valid here.
90429 */
90430- sys_wait4(pid, (int __user *)&ret, 0, NULL);
90431+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
90432
90433 /*
90434 * If ret is 0, either ____call_usermodehelper failed and the
90435@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
90436 goto out;
90437
90438 INIT_WORK(&sub_info->work, __call_usermodehelper);
90439+#ifdef CONFIG_GRKERNSEC
90440+ sub_info->origpath = path;
90441+ sub_info->path = kstrdup(path, gfp_mask);
90442+#else
90443 sub_info->path = path;
90444+#endif
90445 sub_info->argv = argv;
90446 sub_info->envp = envp;
90447
90448@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
90449 static int proc_cap_handler(struct ctl_table *table, int write,
90450 void __user *buffer, size_t *lenp, loff_t *ppos)
90451 {
90452- struct ctl_table t;
90453+ ctl_table_no_const t;
90454 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
90455 kernel_cap_t new_cap;
90456 int err, i;
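
The MODHARDEN rework of __request_module() above tags module auto-load requests from unprivileged users with an extra modprobe argument of the form "grsec_modharden_normal<uid>_", so later policy (e.g. the filesystem-module check in kernel/module.c) can tell user-triggered loads from root-initiated ones. A sketch of how such an argv is assembled (marker format follows the patch; the rest is illustrative, not the in-kernel helper API):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char marker[64];
    /* tag the request with the requesting uid, as the patch does */
    snprintf(marker, sizeof(marker),
             "grsec_modharden_normal%u_", (unsigned int)getuid());

    /* argv layout mirrors the patched call_modprobe(): six slots */
    char *argv[] = {
        "/sbin/modprobe", "-q", "--",
        "some_module",   /* the requested module name (illustrative) */
        marker,          /* the module_param slot added by the patch */
        NULL
    };
    for (int i = 0; argv[i]; i++)
        printf("argv[%d] = %s\n", i, argv[i]);
    return 0;
}
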
90457diff --git a/kernel/kprobes.c b/kernel/kprobes.c
90458index c90e417..e6c515d 100644
90459--- a/kernel/kprobes.c
90460+++ b/kernel/kprobes.c
90461@@ -31,6 +31,9 @@
90462 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90463 * <prasanna@in.ibm.com> added function-return probes.
90464 */
90465+#ifdef CONFIG_GRKERNSEC_HIDESYM
90466+#define __INCLUDED_BY_HIDESYM 1
90467+#endif
90468 #include <linux/kprobes.h>
90469 #include <linux/hash.h>
90470 #include <linux/init.h>
90471@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90472
90473 static void *alloc_insn_page(void)
90474 {
90475- return module_alloc(PAGE_SIZE);
90476+ return module_alloc_exec(PAGE_SIZE);
90477 }
90478
90479 static void free_insn_page(void *page)
90480 {
90481- module_memfree(page);
90482+ module_memfree_exec(page);
90483 }
90484
90485 struct kprobe_insn_cache kprobe_insn_slots = {
90486@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90487 kprobe_type = "k";
90488
90489 if (sym)
90490- seq_printf(pi, "%p %s %s+0x%x %s ",
90491+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90492 p->addr, kprobe_type, sym, offset,
90493 (modname ? modname : " "));
90494 else
90495- seq_printf(pi, "%p %s %p ",
90496+ seq_printf(pi, "%pK %s %pK ",
90497 p->addr, kprobe_type, p->addr);
90498
90499 if (!pp)
90500diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90501index 6683cce..daf8999 100644
90502--- a/kernel/ksysfs.c
90503+++ b/kernel/ksysfs.c
90504@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90505 {
90506 if (count+1 > UEVENT_HELPER_PATH_LEN)
90507 return -ENOENT;
90508+ if (!capable(CAP_SYS_ADMIN))
90509+ return -EPERM;
90510 memcpy(uevent_helper, buf, count);
90511 uevent_helper[count] = '\0';
90512 if (count && uevent_helper[count-1] == '\n')
90513@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90514 return count;
90515 }
90516
90517-static struct bin_attribute notes_attr = {
90518+static bin_attribute_no_const notes_attr __read_only = {
90519 .attr = {
90520 .name = "notes",
90521 .mode = S_IRUGO,
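
Writing /sys/kernel/uevent_helper changes which binary the kernel will execute for every uevent, so the hunk above adds an explicit capable(CAP_SYS_ADMIN) check before the copy. A userspace model of the patched store routine, with a euid test standing in for capable() (same bounds handling and errnos as the kernel code, otherwise illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define HELPER_PATH_LEN 256
static char uevent_helper[HELPER_PATH_LEN];

/* models uevent_helper_store(): bounds check, then privilege check */
static ssize_t helper_store(const char *buf, size_t count)
{
    if (count + 1 > HELPER_PATH_LEN)
        return -ENOENT;         /* the same (odd) errno the kernel uses */
    if (geteuid() != 0)         /* stand-in for capable(CAP_SYS_ADMIN) */
        return -EPERM;
    memcpy(uevent_helper, buf, count);
    uevent_helper[count] = '\0';
    if (count && uevent_helper[count - 1] == '\n')
        uevent_helper[count - 1] = '\0';
    return (ssize_t)count;
}

int main(void)
{
    ssize_t ret = helper_store("/sbin/hotplug\n", 14);
    printf("ret=%zd helper='%s'\n", ret, uevent_helper);
    return 0;
}
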
90522diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90523index ba77ab5..d6a3e20 100644
90524--- a/kernel/locking/lockdep.c
90525+++ b/kernel/locking/lockdep.c
90526@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90527 end = (unsigned long) &_end,
90528 addr = (unsigned long) obj;
90529
90530+#ifdef CONFIG_PAX_KERNEXEC
90531+ start = ktla_ktva(start);
90532+#endif
90533+
90534 /*
90535 * static variable?
90536 */
90537@@ -743,6 +747,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90538 if (!static_obj(lock->key)) {
90539 debug_locks_off();
90540 printk("INFO: trying to register non-static key.\n");
90541+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90542 printk("the code is fine but needs lockdep annotation.\n");
90543 printk("turning off the locking correctness validator.\n");
90544 dump_stack();
90545@@ -3088,7 +3093,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90546 if (!class)
90547 return 0;
90548 }
90549- atomic_inc((atomic_t *)&class->ops);
90550+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90551 if (very_verbose(class)) {
90552 printk("\nacquire class [%p] %s", class->key, class->name);
90553 if (class->name_version > 1)
90554diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90555index ef43ac4..2720dfa 100644
90556--- a/kernel/locking/lockdep_proc.c
90557+++ b/kernel/locking/lockdep_proc.c
90558@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90559 return 0;
90560 }
90561
90562- seq_printf(m, "%p", class->key);
90563+ seq_printf(m, "%pK", class->key);
90564 #ifdef CONFIG_DEBUG_LOCKDEP
90565 seq_printf(m, " OPS:%8ld", class->ops);
90566 #endif
90567@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90568
90569 list_for_each_entry(entry, &class->locks_after, entry) {
90570 if (entry->distance == 1) {
90571- seq_printf(m, " -> [%p] ", entry->class->key);
90572+ seq_printf(m, " -> [%pK] ", entry->class->key);
90573 print_name(m, entry->class);
90574 seq_puts(m, "\n");
90575 }
90576@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90577 if (!class->key)
90578 continue;
90579
90580- seq_printf(m, "[%p] ", class->key);
90581+ seq_printf(m, "[%pK] ", class->key);
90582 print_name(m, class);
90583 seq_puts(m, "\n");
90584 }
90585@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90586 if (!i)
90587 seq_line(m, '-', 40-namelen, namelen);
90588
90589- snprintf(ip, sizeof(ip), "[<%p>]",
90590+ snprintf(ip, sizeof(ip), "[<%pK>]",
90591 (void *)class->contention_point[i]);
90592 seq_printf(m, "%40s %14lu %29s %pS\n",
90593 name, stats->contention_point[i],
90594@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90595 if (!i)
90596 seq_line(m, '-', 40-namelen, namelen);
90597
90598- snprintf(ip, sizeof(ip), "[<%p>]",
90599+ snprintf(ip, sizeof(ip), "[<%pK>]",
90600 (void *)class->contending_point[i]);
90601 seq_printf(m, "%40s %14lu %29s %pS\n",
90602 name, stats->contending_point[i],
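
The lockdep /proc files switch their key and address output from %p to %pK: with kptr_restrict enabled, %pK shows zeroed pointers to unprivileged readers instead of raw kernel addresses, closing an information leak that would help defeat KASLR-style randomization. A userspace illustration of the same policy, as a printing wrapper (not the kernel's vsprintf %pK handler):

#include <stdio.h>
#include <unistd.h>

/* models %pK: show the real pointer only to privileged readers */
static void print_key(const void *key)
{
    if (geteuid() == 0)
        printf("[%p] ", key);
    else
        printf("[0000000000000000] ");  /* what %pK yields when restricted */
}

int main(void)
{
    int key;                    /* stands in for a lock_class_key */
    print_key(&key);
    printf("my_lock_class\n");
    return 0;
}
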
90603diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90604index d1fe2ba..180cd65e 100644
90605--- a/kernel/locking/mcs_spinlock.h
90606+++ b/kernel/locking/mcs_spinlock.h
90607@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90608 */
90609 return;
90610 }
90611- ACCESS_ONCE(prev->next) = node;
90612+ ACCESS_ONCE_RW(prev->next) = node;
90613
90614 /* Wait until the lock holder passes the lock down. */
90615 arch_mcs_spin_lock_contended(&node->locked);
90616diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90617index 3ef3736..9c951fa 100644
90618--- a/kernel/locking/mutex-debug.c
90619+++ b/kernel/locking/mutex-debug.c
90620@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90621 }
90622
90623 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90624- struct thread_info *ti)
90625+ struct task_struct *task)
90626 {
90627 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90628
90629 /* Mark the current thread as blocked on the lock: */
90630- ti->task->blocked_on = waiter;
90631+ task->blocked_on = waiter;
90632 }
90633
90634 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90635- struct thread_info *ti)
90636+ struct task_struct *task)
90637 {
90638 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90639- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90640- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90641- ti->task->blocked_on = NULL;
90642+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90643+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90644+ task->blocked_on = NULL;
90645
90646 list_del_init(&waiter->list);
90647 waiter->task = NULL;
90648diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90649index 0799fd3..d06ae3b 100644
90650--- a/kernel/locking/mutex-debug.h
90651+++ b/kernel/locking/mutex-debug.h
90652@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90653 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90654 extern void debug_mutex_add_waiter(struct mutex *lock,
90655 struct mutex_waiter *waiter,
90656- struct thread_info *ti);
90657+ struct task_struct *task);
90658 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90659- struct thread_info *ti);
90660+ struct task_struct *task);
90661 extern void debug_mutex_unlock(struct mutex *lock);
90662 extern void debug_mutex_init(struct mutex *lock, const char *name,
90663 struct lock_class_key *key);
90664diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90665index 94674e5..de4966f 100644
90666--- a/kernel/locking/mutex.c
90667+++ b/kernel/locking/mutex.c
90668@@ -542,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90669 goto skip_wait;
90670
90671 debug_mutex_lock_common(lock, &waiter);
90672- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90673+ debug_mutex_add_waiter(lock, &waiter, task);
90674
90675 /* add waiting tasks to the end of the waitqueue (FIFO): */
90676 list_add_tail(&waiter.list, &lock->wait_list);
90677@@ -589,7 +589,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90678 }
90679 __set_task_state(task, TASK_RUNNING);
90680
90681- mutex_remove_waiter(lock, &waiter, current_thread_info());
90682+ mutex_remove_waiter(lock, &waiter, task);
90683 /* set it to 0 if there are no waiters left: */
90684 if (likely(list_empty(&lock->wait_list)))
90685 atomic_set(&lock->count, 0);
90686@@ -610,7 +610,7 @@ skip_wait:
90687 return 0;
90688
90689 err:
90690- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90691+ mutex_remove_waiter(lock, &waiter, task);
90692 spin_unlock_mutex(&lock->wait_lock, flags);
90693 debug_mutex_free_waiter(&waiter);
90694 mutex_release(&lock->dep_map, 1, ip);
90695diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
90696index c112d00..1946ad9 100644
90697--- a/kernel/locking/osq_lock.c
90698+++ b/kernel/locking/osq_lock.c
90699@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90700
90701 prev = decode_cpu(old);
90702 node->prev = prev;
90703- ACCESS_ONCE(prev->next) = node;
90704+ ACCESS_ONCE_RW(prev->next) = node;
90705
90706 /*
90707 * Normally @prev is untouchable after the above store; because at that
90708@@ -170,8 +170,8 @@ unqueue:
90709 * it will wait in Step-A.
90710 */
90711
90712- ACCESS_ONCE(next->prev) = prev;
90713- ACCESS_ONCE(prev->next) = next;
90714+ ACCESS_ONCE_RW(next->prev) = prev;
90715+ ACCESS_ONCE_RW(prev->next) = next;
90716
90717 return false;
90718 }
90719@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90720 node = this_cpu_ptr(&osq_node);
90721 next = xchg(&node->next, NULL);
90722 if (next) {
90723- ACCESS_ONCE(next->locked) = 1;
90724+ ACCESS_ONCE_RW(next->locked) = 1;
90725 return;
90726 }
90727
90728 next = osq_wait_next(lock, node, NULL);
90729 if (next)
90730- ACCESS_ONCE(next->locked) = 1;
90731+ ACCESS_ONCE_RW(next->locked) = 1;
90732 }
90733diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90734index 1d96dd0..994ff19 100644
90735--- a/kernel/locking/rtmutex-tester.c
90736+++ b/kernel/locking/rtmutex-tester.c
90737@@ -22,7 +22,7 @@
90738 #define MAX_RT_TEST_MUTEXES 8
90739
90740 static spinlock_t rttest_lock;
90741-static atomic_t rttest_event;
90742+static atomic_unchecked_t rttest_event;
90743
90744 struct test_thread_data {
90745 int opcode;
90746@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90747
90748 case RTTEST_LOCKCONT:
90749 td->mutexes[td->opdata] = 1;
90750- td->event = atomic_add_return(1, &rttest_event);
90751+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90752 return 0;
90753
90754 case RTTEST_RESET:
90755@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90756 return 0;
90757
90758 case RTTEST_RESETEVENT:
90759- atomic_set(&rttest_event, 0);
90760+ atomic_set_unchecked(&rttest_event, 0);
90761 return 0;
90762
90763 default:
90764@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90765 return ret;
90766
90767 td->mutexes[id] = 1;
90768- td->event = atomic_add_return(1, &rttest_event);
90769+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90770 rt_mutex_lock(&mutexes[id]);
90771- td->event = atomic_add_return(1, &rttest_event);
90772+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90773 td->mutexes[id] = 4;
90774 return 0;
90775
90776@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90777 return ret;
90778
90779 td->mutexes[id] = 1;
90780- td->event = atomic_add_return(1, &rttest_event);
90781+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90782 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90783- td->event = atomic_add_return(1, &rttest_event);
90784+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90785 td->mutexes[id] = ret ? 0 : 4;
90786 return ret ? -EINTR : 0;
90787
90788@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90789 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90790 return ret;
90791
90792- td->event = atomic_add_return(1, &rttest_event);
90793+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90794 rt_mutex_unlock(&mutexes[id]);
90795- td->event = atomic_add_return(1, &rttest_event);
90796+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90797 td->mutexes[id] = 0;
90798 return 0;
90799
90800@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90801 break;
90802
90803 td->mutexes[dat] = 2;
90804- td->event = atomic_add_return(1, &rttest_event);
90805+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90806 break;
90807
90808 default:
90809@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90810 return;
90811
90812 td->mutexes[dat] = 3;
90813- td->event = atomic_add_return(1, &rttest_event);
90814+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90815 break;
90816
90817 case RTTEST_LOCKNOWAIT:
90818@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90819 return;
90820
90821 td->mutexes[dat] = 1;
90822- td->event = atomic_add_return(1, &rttest_event);
90823+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90824 return;
90825
90826 default:
90827diff --git a/kernel/module.c b/kernel/module.c
90828index ec53f59..67d9655 100644
90829--- a/kernel/module.c
90830+++ b/kernel/module.c
90831@@ -59,6 +59,7 @@
90832 #include <linux/jump_label.h>
90833 #include <linux/pfn.h>
90834 #include <linux/bsearch.h>
90835+#include <linux/grsecurity.h>
90836 #include <uapi/linux/module.h>
90837 #include "module-internal.h"
90838
90839@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90840
90841 /* Bounds of module allocation, for speeding __module_address.
90842 * Protected by module_mutex. */
90843-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90844+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90845+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90846
90847 int register_module_notifier(struct notifier_block *nb)
90848 {
90849@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90850 return true;
90851
90852 list_for_each_entry_rcu(mod, &modules, list) {
90853- struct symsearch arr[] = {
90854+ struct symsearch modarr[] = {
90855 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90856 NOT_GPL_ONLY, false },
90857 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90858@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90859 if (mod->state == MODULE_STATE_UNFORMED)
90860 continue;
90861
90862- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90863+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90864 return true;
90865 }
90866 return false;
90867@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90868 if (!pcpusec->sh_size)
90869 return 0;
90870
90871- if (align > PAGE_SIZE) {
90872+ if (align-1 >= PAGE_SIZE) {
90873 pr_warn("%s: per-cpu alignment %li > %li\n",
90874 mod->name, align, PAGE_SIZE);
90875 align = PAGE_SIZE;
90876@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90877 static ssize_t show_coresize(struct module_attribute *mattr,
90878 struct module_kobject *mk, char *buffer)
90879 {
90880- return sprintf(buffer, "%u\n", mk->mod->core_size);
90881+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90882 }
90883
90884 static struct module_attribute modinfo_coresize =
90885@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90886 static ssize_t show_initsize(struct module_attribute *mattr,
90887 struct module_kobject *mk, char *buffer)
90888 {
90889- return sprintf(buffer, "%u\n", mk->mod->init_size);
90890+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90891 }
90892
90893 static struct module_attribute modinfo_initsize =
90894@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90895 goto bad_version;
90896 }
90897
90898+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90899+ /*
90900+ * avoid potentially printing gibberish on attempted load
90901+ * of a module randomized with a different seed
90902+ */
90903+ pr_warn("no symbol version for %s\n", symname);
90904+#else
90905 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90906+#endif
90907 return 0;
90908
90909 bad_version:
90910+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90911+ /*
90912+ * avoid potentially printing gibberish on attempted load
90913+ * of a module randomized with a different seed
90914+ */
90915+ pr_warn("attempted module disagrees about version of symbol %s\n",
90916+ symname);
90917+#else
90918 pr_warn("%s: disagrees about version of symbol %s\n",
90919 mod->name, symname);
90920+#endif
90921 return 0;
90922 }
90923
90924@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
90925 */
90926 #ifdef CONFIG_SYSFS
90927
90928-#ifdef CONFIG_KALLSYMS
90929+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90930 static inline bool sect_empty(const Elf_Shdr *sect)
90931 {
90932 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90933@@ -1419,7 +1438,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90934 {
90935 unsigned int notes, loaded, i;
90936 struct module_notes_attrs *notes_attrs;
90937- struct bin_attribute *nattr;
90938+ bin_attribute_no_const *nattr;
90939
90940 /* failed to create section attributes, so can't create notes */
90941 if (!mod->sect_attrs)
90942@@ -1531,7 +1550,7 @@ static void del_usage_links(struct module *mod)
90943 static int module_add_modinfo_attrs(struct module *mod)
90944 {
90945 struct module_attribute *attr;
90946- struct module_attribute *temp_attr;
90947+ module_attribute_no_const *temp_attr;
90948 int error = 0;
90949 int i;
90950
90951@@ -1741,21 +1760,21 @@ static void set_section_ro_nx(void *base,
90952
90953 static void unset_module_core_ro_nx(struct module *mod)
90954 {
90955- set_page_attributes(mod->module_core + mod->core_text_size,
90956- mod->module_core + mod->core_size,
90957+ set_page_attributes(mod->module_core_rw,
90958+ mod->module_core_rw + mod->core_size_rw,
90959 set_memory_x);
90960- set_page_attributes(mod->module_core,
90961- mod->module_core + mod->core_ro_size,
90962+ set_page_attributes(mod->module_core_rx,
90963+ mod->module_core_rx + mod->core_size_rx,
90964 set_memory_rw);
90965 }
90966
90967 static void unset_module_init_ro_nx(struct module *mod)
90968 {
90969- set_page_attributes(mod->module_init + mod->init_text_size,
90970- mod->module_init + mod->init_size,
90971+ set_page_attributes(mod->module_init_rw,
90972+ mod->module_init_rw + mod->init_size_rw,
90973 set_memory_x);
90974- set_page_attributes(mod->module_init,
90975- mod->module_init + mod->init_ro_size,
90976+ set_page_attributes(mod->module_init_rx,
90977+ mod->module_init_rx + mod->init_size_rx,
90978 set_memory_rw);
90979 }
90980
90981@@ -1768,14 +1787,14 @@ void set_all_modules_text_rw(void)
90982 list_for_each_entry_rcu(mod, &modules, list) {
90983 if (mod->state == MODULE_STATE_UNFORMED)
90984 continue;
90985- if ((mod->module_core) && (mod->core_text_size)) {
90986- set_page_attributes(mod->module_core,
90987- mod->module_core + mod->core_text_size,
90988+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90989+ set_page_attributes(mod->module_core_rx,
90990+ mod->module_core_rx + mod->core_size_rx,
90991 set_memory_rw);
90992 }
90993- if ((mod->module_init) && (mod->init_text_size)) {
90994- set_page_attributes(mod->module_init,
90995- mod->module_init + mod->init_text_size,
90996+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90997+ set_page_attributes(mod->module_init_rx,
90998+ mod->module_init_rx + mod->init_size_rx,
90999 set_memory_rw);
91000 }
91001 }
91002@@ -1791,14 +1810,14 @@ void set_all_modules_text_ro(void)
91003 list_for_each_entry_rcu(mod, &modules, list) {
91004 if (mod->state == MODULE_STATE_UNFORMED)
91005 continue;
91006- if ((mod->module_core) && (mod->core_text_size)) {
91007- set_page_attributes(mod->module_core,
91008- mod->module_core + mod->core_text_size,
91009+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91010+ set_page_attributes(mod->module_core_rx,
91011+ mod->module_core_rx + mod->core_size_rx,
91012 set_memory_ro);
91013 }
91014- if ((mod->module_init) && (mod->init_text_size)) {
91015- set_page_attributes(mod->module_init,
91016- mod->module_init + mod->init_text_size,
91017+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91018+ set_page_attributes(mod->module_init_rx,
91019+ mod->module_init_rx + mod->init_size_rx,
91020 set_memory_ro);
91021 }
91022 }
91023@@ -1807,7 +1826,15 @@ void set_all_modules_text_ro(void)
91024 #else
91025 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
91026 static void unset_module_core_ro_nx(struct module *mod) { }
91027-static void unset_module_init_ro_nx(struct module *mod) { }
91028+static void unset_module_init_ro_nx(struct module *mod)
91029+{
91030+
91031+#ifdef CONFIG_PAX_KERNEXEC
91032+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
91033+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
91034+#endif
91035+
91036+}
91037 #endif
91038
91039 void __weak module_memfree(void *module_region)
91040@@ -1861,16 +1888,19 @@ static void free_module(struct module *mod)
91041 /* This may be NULL, but that's OK */
91042 unset_module_init_ro_nx(mod);
91043 module_arch_freeing_init(mod);
91044- module_memfree(mod->module_init);
91045+ module_memfree(mod->module_init_rw);
91046+ module_memfree_exec(mod->module_init_rx);
91047 kfree(mod->args);
91048 percpu_modfree(mod);
91049
91050 /* Free lock-classes; relies on the preceding sync_rcu(). */
91051- lockdep_free_key_range(mod->module_core, mod->core_size);
91052+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91053+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91054
91055 /* Finally, free the core (containing the module structure) */
91056 unset_module_core_ro_nx(mod);
91057- module_memfree(mod->module_core);
91058+ module_memfree_exec(mod->module_core_rx);
91059+ module_memfree(mod->module_core_rw);
91060
91061 #ifdef CONFIG_MPU
91062 update_protections(current->mm);
91063@@ -1939,9 +1969,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91064 int ret = 0;
91065 const struct kernel_symbol *ksym;
91066
91067+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91068+ int is_fs_load = 0;
91069+ int register_filesystem_found = 0;
91070+ char *p;
91071+
91072+ p = strstr(mod->args, "grsec_modharden_fs");
91073+ if (p) {
91074+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
91075+ /* copy \0 as well */
91076+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
91077+ is_fs_load = 1;
91078+ }
91079+#endif
91080+
91081 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
91082 const char *name = info->strtab + sym[i].st_name;
91083
91084+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91085+ /* it's a real shame this will never get ripped and copied
91086+ upstream! ;(
91087+ */
91088+ if (is_fs_load && !strcmp(name, "register_filesystem"))
91089+ register_filesystem_found = 1;
91090+#endif
91091+
91092 switch (sym[i].st_shndx) {
91093 case SHN_COMMON:
91094 /* Ignore common symbols */
91095@@ -1966,7 +2018,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91096 ksym = resolve_symbol_wait(mod, info, name);
91097 /* Ok if resolved. */
91098 if (ksym && !IS_ERR(ksym)) {
91099+ pax_open_kernel();
91100 sym[i].st_value = ksym->value;
91101+ pax_close_kernel();
91102 break;
91103 }
91104
91105@@ -1985,11 +2039,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91106 secbase = (unsigned long)mod_percpu(mod);
91107 else
91108 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
91109+ pax_open_kernel();
91110 sym[i].st_value += secbase;
91111+ pax_close_kernel();
91112 break;
91113 }
91114 }
91115
91116+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91117+ if (is_fs_load && !register_filesystem_found) {
91118+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
91119+ ret = -EPERM;
91120+ }
91121+#endif
91122+
91123 return ret;
91124 }
91125
91126@@ -2073,22 +2136,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
91127 || s->sh_entsize != ~0UL
91128 || strstarts(sname, ".init"))
91129 continue;
91130- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
91131+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91132+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
91133+ else
91134+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
91135 pr_debug("\t%s\n", sname);
91136 }
91137- switch (m) {
91138- case 0: /* executable */
91139- mod->core_size = debug_align(mod->core_size);
91140- mod->core_text_size = mod->core_size;
91141- break;
91142- case 1: /* RO: text and ro-data */
91143- mod->core_size = debug_align(mod->core_size);
91144- mod->core_ro_size = mod->core_size;
91145- break;
91146- case 3: /* whole core */
91147- mod->core_size = debug_align(mod->core_size);
91148- break;
91149- }
91150 }
91151
91152 pr_debug("Init section allocation order:\n");
91153@@ -2102,23 +2155,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
91154 || s->sh_entsize != ~0UL
91155 || !strstarts(sname, ".init"))
91156 continue;
91157- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
91158- | INIT_OFFSET_MASK);
91159+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91160+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
91161+ else
91162+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
91163+ s->sh_entsize |= INIT_OFFSET_MASK;
91164 pr_debug("\t%s\n", sname);
91165 }
91166- switch (m) {
91167- case 0: /* executable */
91168- mod->init_size = debug_align(mod->init_size);
91169- mod->init_text_size = mod->init_size;
91170- break;
91171- case 1: /* RO: text and ro-data */
91172- mod->init_size = debug_align(mod->init_size);
91173- mod->init_ro_size = mod->init_size;
91174- break;
91175- case 3: /* whole init */
91176- mod->init_size = debug_align(mod->init_size);
91177- break;
91178- }
91179 }
91180 }
91181
91182@@ -2291,7 +2334,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91183
91184 /* Put symbol section at end of init part of module. */
91185 symsect->sh_flags |= SHF_ALLOC;
91186- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
91187+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
91188 info->index.sym) | INIT_OFFSET_MASK;
91189 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
91190
91191@@ -2308,16 +2351,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91192 }
91193
91194 /* Append room for core symbols at end of core part. */
91195- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
91196- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
91197- mod->core_size += strtab_size;
91198- mod->core_size = debug_align(mod->core_size);
91199+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
91200+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
91201+ mod->core_size_rx += strtab_size;
91202+ mod->core_size_rx = debug_align(mod->core_size_rx);
91203
91204 /* Put string table section at end of init part of module. */
91205 strsect->sh_flags |= SHF_ALLOC;
91206- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
91207+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
91208 info->index.str) | INIT_OFFSET_MASK;
91209- mod->init_size = debug_align(mod->init_size);
91210+ mod->init_size_rx = debug_align(mod->init_size_rx);
91211 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
91212 }
91213
91214@@ -2334,12 +2377,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91215 /* Make sure we get permanent strtab: don't use info->strtab. */
91216 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
91217
91218+ pax_open_kernel();
91219+
91220 /* Set types up while we still have access to sections. */
91221 for (i = 0; i < mod->num_symtab; i++)
91222 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
91223
91224- mod->core_symtab = dst = mod->module_core + info->symoffs;
91225- mod->core_strtab = s = mod->module_core + info->stroffs;
91226+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
91227+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
91228 src = mod->symtab;
91229 for (ndst = i = 0; i < mod->num_symtab; i++) {
91230 if (i == 0 ||
91231@@ -2351,6 +2396,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91232 }
91233 }
91234 mod->core_num_syms = ndst;
91235+
91236+ pax_close_kernel();
91237 }
91238 #else
91239 static inline void layout_symtab(struct module *mod, struct load_info *info)
91240@@ -2384,17 +2431,33 @@ void * __weak module_alloc(unsigned long size)
91241 return vmalloc_exec(size);
91242 }
91243
91244-static void *module_alloc_update_bounds(unsigned long size)
91245+static void *module_alloc_update_bounds_rw(unsigned long size)
91246 {
91247 void *ret = module_alloc(size);
91248
91249 if (ret) {
91250 mutex_lock(&module_mutex);
91251 /* Update module bounds. */
91252- if ((unsigned long)ret < module_addr_min)
91253- module_addr_min = (unsigned long)ret;
91254- if ((unsigned long)ret + size > module_addr_max)
91255- module_addr_max = (unsigned long)ret + size;
91256+ if ((unsigned long)ret < module_addr_min_rw)
91257+ module_addr_min_rw = (unsigned long)ret;
91258+ if ((unsigned long)ret + size > module_addr_max_rw)
91259+ module_addr_max_rw = (unsigned long)ret + size;
91260+ mutex_unlock(&module_mutex);
91261+ }
91262+ return ret;
91263+}
91264+
91265+static void *module_alloc_update_bounds_rx(unsigned long size)
91266+{
91267+ void *ret = module_alloc_exec(size);
91268+
91269+ if (ret) {
91270+ mutex_lock(&module_mutex);
91271+ /* Update module bounds. */
91272+ if ((unsigned long)ret < module_addr_min_rx)
91273+ module_addr_min_rx = (unsigned long)ret;
91274+ if ((unsigned long)ret + size > module_addr_max_rx)
91275+ module_addr_max_rx = (unsigned long)ret + size;
91276 mutex_unlock(&module_mutex);
91277 }
91278 return ret;
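
module_alloc_update_bounds() is duplicated into _rw and _rx variants above because the patch splits every module into a writable data region and an executable read-only region, each tracked with its own [min, max) bounds so __module_address() stays fast. A sketch of just the bounds-update step (illustrative addresses, no allocator or module_mutex):

#include <stdio.h>

struct bounds { unsigned long min, max; };

/* models the post-allocation bounds update done under module_mutex */
static void update_bounds(struct bounds *b, unsigned long addr,
                          unsigned long size)
{
    if (addr < b->min)
        b->min = addr;
    if (addr + size > b->max)
        b->max = addr + size;
}

int main(void)
{
    struct bounds rw = { (unsigned long)-1, 0 };
    struct bounds rx = { (unsigned long)-1, 0 };
    update_bounds(&rw, 0xa000, 0x2000);   /* a module data allocation */
    update_bounds(&rx, 0x4000, 0x1000);   /* a module text allocation */
    printf("rw: [%#lx, %#lx)  rx: [%#lx, %#lx)\n",
           rw.min, rw.max, rx.min, rx.max);
    return 0;
}
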
91279@@ -2665,7 +2728,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91280 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
91281
91282 if (info->index.sym == 0) {
91283+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91284+ /*
91285+ * avoid potentially printing gibberish on attempted load
91286+ * of a module randomized with a different seed
91287+ */
91288+ pr_warn("module has no symbols (stripped?)\n");
91289+#else
91290 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
91291+#endif
91292 return ERR_PTR(-ENOEXEC);
91293 }
91294
91295@@ -2681,8 +2752,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91296 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91297 {
91298 const char *modmagic = get_modinfo(info, "vermagic");
91299+ const char *license = get_modinfo(info, "license");
91300 int err;
91301
91302+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
91303+ if (!license || !license_is_gpl_compatible(license))
91304+ return -ENOEXEC;
91305+#endif
91306+
91307 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
91308 modmagic = NULL;
91309
91310@@ -2707,7 +2784,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91311 }
91312
91313 /* Set up license info based on the info section */
91314- set_license(mod, get_modinfo(info, "license"));
91315+ set_license(mod, license);
91316
91317 return 0;
91318 }
91319@@ -2801,7 +2878,7 @@ static int move_module(struct module *mod, struct load_info *info)
91320 void *ptr;
91321
91322 /* Do the allocs. */
91323- ptr = module_alloc_update_bounds(mod->core_size);
91324+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
91325 /*
91326 * The pointer to this block is stored in the module structure
91327 * which is inside the block. Just mark it as not being a
91328@@ -2811,11 +2888,11 @@ static int move_module(struct module *mod, struct load_info *info)
91329 if (!ptr)
91330 return -ENOMEM;
91331
91332- memset(ptr, 0, mod->core_size);
91333- mod->module_core = ptr;
91334+ memset(ptr, 0, mod->core_size_rw);
91335+ mod->module_core_rw = ptr;
91336
91337- if (mod->init_size) {
91338- ptr = module_alloc_update_bounds(mod->init_size);
91339+ if (mod->init_size_rw) {
91340+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
91341 /*
91342 * The pointer to this block is stored in the module structure
91343 * which is inside the block. This block doesn't need to be
91344@@ -2824,13 +2901,45 @@ static int move_module(struct module *mod, struct load_info *info)
91345 */
91346 kmemleak_ignore(ptr);
91347 if (!ptr) {
91348- module_memfree(mod->module_core);
91349+ module_memfree(mod->module_core_rw);
91350 return -ENOMEM;
91351 }
91352- memset(ptr, 0, mod->init_size);
91353- mod->module_init = ptr;
91354+ memset(ptr, 0, mod->init_size_rw);
91355+ mod->module_init_rw = ptr;
91356 } else
91357- mod->module_init = NULL;
91358+ mod->module_init_rw = NULL;
91359+
91360+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
91361+ kmemleak_not_leak(ptr);
91362+ if (!ptr) {
91363+ if (mod->module_init_rw)
91364+ module_memfree(mod->module_init_rw);
91365+ module_memfree(mod->module_core_rw);
91366+ return -ENOMEM;
91367+ }
91368+
91369+ pax_open_kernel();
91370+ memset(ptr, 0, mod->core_size_rx);
91371+ pax_close_kernel();
91372+ mod->module_core_rx = ptr;
91373+
91374+ if (mod->init_size_rx) {
91375+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
91376+ kmemleak_ignore(ptr);
91377+ if (!ptr && mod->init_size_rx) {
91378+ module_memfree_exec(mod->module_core_rx);
91379+ if (mod->module_init_rw)
91380+ module_memfree(mod->module_init_rw);
91381+ module_memfree(mod->module_core_rw);
91382+ return -ENOMEM;
91383+ }
91384+
91385+ pax_open_kernel();
91386+ memset(ptr, 0, mod->init_size_rx);
91387+ pax_close_kernel();
91388+ mod->module_init_rx = ptr;
91389+ } else
91390+ mod->module_init_rx = NULL;
91391
91392 /* Transfer each section which specifies SHF_ALLOC */
91393 pr_debug("final section addresses:\n");
91394@@ -2841,16 +2950,45 @@ static int move_module(struct module *mod, struct load_info *info)
91395 if (!(shdr->sh_flags & SHF_ALLOC))
91396 continue;
91397
91398- if (shdr->sh_entsize & INIT_OFFSET_MASK)
91399- dest = mod->module_init
91400- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91401- else
91402- dest = mod->module_core + shdr->sh_entsize;
91403+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
91404+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91405+ dest = mod->module_init_rw
91406+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91407+ else
91408+ dest = mod->module_init_rx
91409+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91410+ } else {
91411+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91412+ dest = mod->module_core_rw + shdr->sh_entsize;
91413+ else
91414+ dest = mod->module_core_rx + shdr->sh_entsize;
91415+ }
91416+
91417+ if (shdr->sh_type != SHT_NOBITS) {
91418+
91419+#ifdef CONFIG_PAX_KERNEXEC
91420+#ifdef CONFIG_X86_64
91421+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
91422+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91423+#endif
91424+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
91425+ pax_open_kernel();
91426+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91427+ pax_close_kernel();
91428+ } else
91429+#endif
91430
91431- if (shdr->sh_type != SHT_NOBITS)
91432 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91433+ }
91434 /* Update sh_addr to point to copy in image. */
91435- shdr->sh_addr = (unsigned long)dest;
91436+
91437+#ifdef CONFIG_PAX_KERNEXEC
91438+ if (shdr->sh_flags & SHF_EXECINSTR)
91439+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
91440+ else
91441+#endif
91442+
91443+ shdr->sh_addr = (unsigned long)dest;
91444 pr_debug("\t0x%lx %s\n",
91445 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
91446 }
91447@@ -2907,12 +3045,12 @@ static void flush_module_icache(const struct module *mod)
91448 * Do it before processing of module parameters, so the module
91449 * can provide parameter accessor functions of its own.
91450 */
91451- if (mod->module_init)
91452- flush_icache_range((unsigned long)mod->module_init,
91453- (unsigned long)mod->module_init
91454- + mod->init_size);
91455- flush_icache_range((unsigned long)mod->module_core,
91456- (unsigned long)mod->module_core + mod->core_size);
91457+ if (mod->module_init_rx)
91458+ flush_icache_range((unsigned long)mod->module_init_rx,
91459+ (unsigned long)mod->module_init_rx
91460+ + mod->init_size_rx);
91461+ flush_icache_range((unsigned long)mod->module_core_rx,
91462+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91463
91464 set_fs(old_fs);
91465 }
91466@@ -2970,8 +3108,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
91467 {
91468 percpu_modfree(mod);
91469 module_arch_freeing_init(mod);
91470- module_memfree(mod->module_init);
91471- module_memfree(mod->module_core);
91472+ module_memfree_exec(mod->module_init_rx);
91473+ module_memfree_exec(mod->module_core_rx);
91474+ module_memfree(mod->module_init_rw);
91475+ module_memfree(mod->module_core_rw);
91476 }
91477
91478 int __weak module_finalize(const Elf_Ehdr *hdr,
91479@@ -2984,7 +3124,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91480 static int post_relocation(struct module *mod, const struct load_info *info)
91481 {
91482 /* Sort exception table now relocations are done. */
91483+ pax_open_kernel();
91484 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91485+ pax_close_kernel();
91486
91487 /* Copy relocated percpu area over. */
91488 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91489@@ -3032,13 +3174,15 @@ static void do_mod_ctors(struct module *mod)
91490 /* For freeing module_init on success, in case kallsyms traversing */
91491 struct mod_initfree {
91492 struct rcu_head rcu;
91493- void *module_init;
91494+ void *module_init_rw;
91495+ void *module_init_rx;
91496 };
91497
91498 static void do_free_init(struct rcu_head *head)
91499 {
91500 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
91501- module_memfree(m->module_init);
91502+ module_memfree(m->module_init_rw);
91503+ module_memfree_exec(m->module_init_rx);
91504 kfree(m);
91505 }
91506
91507@@ -3058,7 +3202,8 @@ static noinline int do_init_module(struct module *mod)
91508 ret = -ENOMEM;
91509 goto fail;
91510 }
91511- freeinit->module_init = mod->module_init;
91512+ freeinit->module_init_rw = mod->module_init_rw;
91513+ freeinit->module_init_rx = mod->module_init_rx;
91514
91515 /*
91516 * We want to find out whether @mod uses async during init. Clear
91517@@ -3117,10 +3262,10 @@ static noinline int do_init_module(struct module *mod)
91518 #endif
91519 unset_module_init_ro_nx(mod);
91520 module_arch_freeing_init(mod);
91521- mod->module_init = NULL;
91522- mod->init_size = 0;
91523- mod->init_ro_size = 0;
91524- mod->init_text_size = 0;
91525+ mod->module_init_rw = NULL;
91526+ mod->module_init_rx = NULL;
91527+ mod->init_size_rw = 0;
91528+ mod->init_size_rx = 0;
91529 /*
91530 * We want to free module_init, but be aware that kallsyms may be
91531 * walking this with preempt disabled. In all the failure paths,
91532@@ -3208,16 +3353,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91533 module_bug_finalize(info->hdr, info->sechdrs, mod);
91534
91535 /* Set RO and NX regions for core */
91536- set_section_ro_nx(mod->module_core,
91537- mod->core_text_size,
91538- mod->core_ro_size,
91539- mod->core_size);
91540+ set_section_ro_nx(mod->module_core_rx,
91541+ mod->core_size_rx,
91542+ mod->core_size_rx,
91543+ mod->core_size_rx);
91544
91545 /* Set RO and NX regions for init */
91546- set_section_ro_nx(mod->module_init,
91547- mod->init_text_size,
91548- mod->init_ro_size,
91549- mod->init_size);
91550+ set_section_ro_nx(mod->module_init_rx,
91551+ mod->init_size_rx,
91552+ mod->init_size_rx,
91553+ mod->init_size_rx);
91554
91555 /* Mark state as coming so strong_try_module_get() ignores us,
91556 * but kallsyms etc. can see us. */
91557@@ -3301,9 +3446,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91558 if (err)
91559 goto free_unload;
91560
91561+ /* Now copy in args */
91562+ mod->args = strndup_user(uargs, ~0UL >> 1);
91563+ if (IS_ERR(mod->args)) {
91564+ err = PTR_ERR(mod->args);
91565+ goto free_unload;
91566+ }
91567+
91568 /* Set up MODINFO_ATTR fields */
91569 setup_modinfo(mod, info);
91570
91571+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91572+ {
91573+ char *p, *p2;
91574+
91575+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91576+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
91577+ err = -EPERM;
91578+ goto free_modinfo;
91579+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91580+ p += sizeof("grsec_modharden_normal") - 1;
91581+ p2 = strstr(p, "_");
91582+ if (p2) {
91583+ *p2 = '\0';
91584+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91585+ *p2 = '_';
91586+ }
91587+ err = -EPERM;
91588+ goto free_modinfo;
91589+ }
91590+ }
91591+#endif
91592+
91593 /* Fix up syms, so that st_value is a pointer to location. */
91594 err = simplify_symbols(mod, info);
91595 if (err < 0)
91596@@ -3319,13 +3493,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91597
91598 flush_module_icache(mod);
91599
91600- /* Now copy in args */
91601- mod->args = strndup_user(uargs, ~0UL >> 1);
91602- if (IS_ERR(mod->args)) {
91603- err = PTR_ERR(mod->args);
91604- goto free_arch_cleanup;
91605- }
91606-
91607 dynamic_debug_setup(info->debug, info->num_debug);
91608
91609 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91610@@ -3373,11 +3540,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91611 ddebug_cleanup:
91612 dynamic_debug_remove(info->debug);
91613 synchronize_sched();
91614- kfree(mod->args);
91615- free_arch_cleanup:
91616 module_arch_cleanup(mod);
91617 free_modinfo:
91618 free_modinfo(mod);
91619+ kfree(mod->args);
91620 free_unload:
91621 module_unload_free(mod);
91622 unlink_mod:
91623@@ -3390,7 +3556,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
91624 mutex_unlock(&module_mutex);
91625 free_module:
91626 /* Free lock-classes; relies on the preceding sync_rcu() */
91627- lockdep_free_key_range(mod->module_core, mod->core_size);
91628+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91629+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91630
91631 module_deallocate(mod, info);
91632 free_copy:
91633@@ -3467,10 +3634,16 @@ static const char *get_ksymbol(struct module *mod,
91634 unsigned long nextval;
91635
91636 /* At worst, next value is at end of module */
91637- if (within_module_init(addr, mod))
91638- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91639+ if (within_module_init_rx(addr, mod))
91640+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91641+ else if (within_module_init_rw(addr, mod))
91642+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91643+ else if (within_module_core_rx(addr, mod))
91644+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91645+ else if (within_module_core_rw(addr, mod))
91646+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91647 else
91648- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91649+ return NULL;
91650
91651 /* Scan for closest preceding symbol, and next symbol. (ELF
91652 starts real symbols at 1). */
91653@@ -3718,7 +3891,7 @@ static int m_show(struct seq_file *m, void *p)
91654 return 0;
91655
91656 seq_printf(m, "%s %u",
91657- mod->name, mod->init_size + mod->core_size);
91658+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91659 print_unload_info(m, mod);
91660
91661 /* Informative for users. */
91662@@ -3727,7 +3900,7 @@ static int m_show(struct seq_file *m, void *p)
91663 mod->state == MODULE_STATE_COMING ? "Loading" :
91664 "Live");
91665 /* Used by oprofile and other similar tools. */
91666- seq_printf(m, " 0x%pK", mod->module_core);
91667+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91668
91669 /* Taints info */
91670 if (mod->taints)
91671@@ -3763,7 +3936,17 @@ static const struct file_operations proc_modules_operations = {
91672
91673 static int __init proc_modules_init(void)
91674 {
91675+#ifndef CONFIG_GRKERNSEC_HIDESYM
91676+#ifdef CONFIG_GRKERNSEC_PROC_USER
91677+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91678+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91679+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91680+#else
91681 proc_create("modules", 0, NULL, &proc_modules_operations);
91682+#endif
91683+#else
91684+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91685+#endif
91686 return 0;
91687 }
91688 module_init(proc_modules_init);
91689@@ -3824,7 +4007,8 @@ struct module *__module_address(unsigned long addr)
91690 {
91691 struct module *mod;
91692
91693- if (addr < module_addr_min || addr > module_addr_max)
91694+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91695+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91696 return NULL;
91697
91698 list_for_each_entry_rcu(mod, &modules, list) {
91699@@ -3865,11 +4049,20 @@ bool is_module_text_address(unsigned long addr)
91700 */
91701 struct module *__module_text_address(unsigned long addr)
91702 {
91703- struct module *mod = __module_address(addr);
91704+ struct module *mod;
91705+
91706+#ifdef CONFIG_X86_32
91707+ addr = ktla_ktva(addr);
91708+#endif
91709+
91710+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91711+ return NULL;
91712+
91713+ mod = __module_address(addr);
91714+
91715 if (mod) {
91716 /* Make sure it's within the text section. */
91717- if (!within(addr, mod->module_init, mod->init_text_size)
91718- && !within(addr, mod->module_core, mod->core_text_size))
91719+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91720 mod = NULL;
91721 }
91722 return mod;
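
The module.c changes above all follow from one design decision: each module is split into a writable region (module_core_rw / core_size_rw) and a read-only executable region (module_core_rx / core_size_rx), each flavor with its own global [min, max) address bounds so __module_address() can range-check cheaply. A minimal userspace sketch of that bounds-tracking idiom, with malloc() standing in for module_alloc()/module_alloc_exec() and the module_mutex locking omitted:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uintptr_t addr_min_rw = UINTPTR_MAX, addr_max_rw;
static uintptr_t addr_min_rx = UINTPTR_MAX, addr_max_rx;

/* Mirrors module_alloc_update_bounds_rw()/_rx(): allocate, then widen
 * the [min, max) bounds for that region type. */
static void *alloc_update_bounds(size_t size, uintptr_t *min, uintptr_t *max)
{
        void *ret = malloc(size);

        if (ret) {
                if ((uintptr_t)ret < *min)
                        *min = (uintptr_t)ret;
                if ((uintptr_t)ret + size > *max)
                        *max = (uintptr_t)ret + size;
        }
        return ret;
}

int main(void)
{
        void *core_rw = alloc_update_bounds(4096, &addr_min_rw, &addr_max_rw);
        void *core_rx = alloc_update_bounds(4096, &addr_min_rx, &addr_max_rx);

        printf("rw: [%#lx, %#lx)\n", (unsigned long)addr_min_rw,
               (unsigned long)addr_max_rw);
        printf("rx: [%#lx, %#lx)\n", (unsigned long)addr_min_rx,
               (unsigned long)addr_max_rx);
        free(core_rw);
        free(core_rx);
        return 0;
}

Keeping the two bound pairs separate is what lets the patched __module_text_address() above reject any address outside the RX range before it ever walks the module list.
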
91723diff --git a/kernel/notifier.c b/kernel/notifier.c
91724index ae9fc7c..5085fbf 100644
91725--- a/kernel/notifier.c
91726+++ b/kernel/notifier.c
91727@@ -5,6 +5,7 @@
91728 #include <linux/rcupdate.h>
91729 #include <linux/vmalloc.h>
91730 #include <linux/reboot.h>
91731+#include <linux/mm.h>
91732
91733 /*
91734 * Notifier list for kernel code which wants to be called
91735@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91736 while ((*nl) != NULL) {
91737 if (n->priority > (*nl)->priority)
91738 break;
91739- nl = &((*nl)->next);
91740+ nl = (struct notifier_block **)&((*nl)->next);
91741 }
91742- n->next = *nl;
91743+ pax_open_kernel();
91744+ *(const void **)&n->next = *nl;
91745 rcu_assign_pointer(*nl, n);
91746+ pax_close_kernel();
91747 return 0;
91748 }
91749
91750@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91751 return 0;
91752 if (n->priority > (*nl)->priority)
91753 break;
91754- nl = &((*nl)->next);
91755+ nl = (struct notifier_block **)&((*nl)->next);
91756 }
91757- n->next = *nl;
91758+ pax_open_kernel();
91759+ *(const void **)&n->next = *nl;
91760 rcu_assign_pointer(*nl, n);
91761+ pax_close_kernel();
91762 return 0;
91763 }
91764
91765@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91766 {
91767 while ((*nl) != NULL) {
91768 if ((*nl) == n) {
91769+ pax_open_kernel();
91770 rcu_assign_pointer(*nl, n->next);
91771+ pax_close_kernel();
91772 return 0;
91773 }
91774- nl = &((*nl)->next);
91775+ nl = (struct notifier_block **)&((*nl)->next);
91776 }
91777 return -ENOENT;
91778 }
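
The notifier hunks leave the algorithm alone and only wrap each store into the now read-only chain in a pax_open_kernel()/pax_close_kernel() write window. The algorithm itself is a plain priority-ordered singly-linked-list insert; a standalone sketch, without the RCU publication or the write window:

#include <stdio.h>

struct nb {
        int priority;
        struct nb *next;
};

/* Insert n before the first node of lower priority, the same walk
 * notifier_chain_register() performs. */
static void chain_register(struct nb **nl, struct nb *n)
{
        while (*nl != NULL) {
                if (n->priority > (*nl)->priority)
                        break;
                nl = &(*nl)->next;
        }
        n->next = *nl;
        *nl = n;        /* the kernel publishes this with rcu_assign_pointer() */
}

int main(void)
{
        struct nb a = { .priority = 1 }, b = { .priority = 3 }, c = { .priority = 2 };
        struct nb *head = NULL;
        struct nb *p;

        chain_register(&head, &a);
        chain_register(&head, &b);
        chain_register(&head, &c);
        for (p = head; p; p = p->next)
                printf("%d\n", p->priority);    /* prints 3 2 1 */
        return 0;
}
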
91779diff --git a/kernel/padata.c b/kernel/padata.c
91780index b38bea9..91acfbe 100644
91781--- a/kernel/padata.c
91782+++ b/kernel/padata.c
91783@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91784 * seq_nr mod. number of cpus in use.
91785 */
91786
91787- seq_nr = atomic_inc_return(&pd->seq_nr);
91788+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91789 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91790
91791 return padata_index_to_cpu(pd, cpu_index);
91792@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91793 padata_init_pqueues(pd);
91794 padata_init_squeues(pd);
91795 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91796- atomic_set(&pd->seq_nr, -1);
91797+ atomic_set_unchecked(&pd->seq_nr, -1);
91798 atomic_set(&pd->reorder_objects, 0);
91799 atomic_set(&pd->refcnt, 0);
91800 pd->pinst = pinst;
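
The padata change is representative of the many atomic_t to atomic_unchecked_t conversions in this patch: seq_nr is a free-running counter whose wraparound is harmless, because only its value modulo the number of CPUs is ever consumed, so it can opt out of PaX's REFCOUNT overflow trap. A toy reduction of the hashing step (this padata_cpu_hash() is a simplified stand-in, not the kernel function):

#include <stdio.h>

/* seq_nr wraps freely; only seq_nr % nr_cpus is consumed, which is
 * why the patch can use the _unchecked atomic flavor for it. */
static unsigned int padata_cpu_hash(unsigned int seq_nr, unsigned int nr_cpus)
{
        return seq_nr % nr_cpus;
}

int main(void)
{
        unsigned int seq;

        for (seq = 0; seq < 8; seq++)
                printf("seq %u -> cpu %u\n", seq, padata_cpu_hash(seq, 3));
        return 0;
}
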
91801diff --git a/kernel/panic.c b/kernel/panic.c
91802index 8136ad7..15c857b 100644
91803--- a/kernel/panic.c
91804+++ b/kernel/panic.c
91805@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91806 /*
91807 * Stop ourself in panic -- architecture code may override this
91808 */
91809-void __weak panic_smp_self_stop(void)
91810+void __weak __noreturn panic_smp_self_stop(void)
91811 {
91812 while (1)
91813 cpu_relax();
91814@@ -425,7 +425,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91815 disable_trace_on_warning();
91816
91817 pr_warn("------------[ cut here ]------------\n");
91818- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91819+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91820 raw_smp_processor_id(), current->pid, file, line, caller);
91821
91822 if (args)
91823@@ -490,7 +490,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91824 */
91825 __visible void __stack_chk_fail(void)
91826 {
91827- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91828+ dump_stack();
91829+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91830 __builtin_return_address(0));
91831 }
91832 EXPORT_SYMBOL(__stack_chk_fail);
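
The __stack_chk_fail() change only adds a dump_stack() before the panic, so the corrupted call chain is visible in the log. For context, this function is the hook the compiler's stack-protector instrumentation invokes when the canary check fails; a rough userspace analogue (the canary load and compare are normally compiler-emitted, and __canary is a made-up stand-in for the real guard value):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long __canary = 0xdeadbeefUL;

static void protected_fn(const char *input)
{
        unsigned long canary = __canary;        /* prologue */
        char buf[16];

        strncpy(buf, input, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        if (canary != __canary) {               /* epilogue check */
                /* kernel path after the patch: dump_stack(); panic(...); */
                fprintf(stderr, "stack smashing detected\n");
                abort();
        }
}

int main(void)
{
        protected_fn("hello");
        puts("ok");
        return 0;
}
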
91833diff --git a/kernel/pid.c b/kernel/pid.c
91834index cd36a5e..11f185d 100644
91835--- a/kernel/pid.c
91836+++ b/kernel/pid.c
91837@@ -33,6 +33,7 @@
91838 #include <linux/rculist.h>
91839 #include <linux/bootmem.h>
91840 #include <linux/hash.h>
91841+#include <linux/security.h>
91842 #include <linux/pid_namespace.h>
91843 #include <linux/init_task.h>
91844 #include <linux/syscalls.h>
91845@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91846
91847 int pid_max = PID_MAX_DEFAULT;
91848
91849-#define RESERVED_PIDS 300
91850+#define RESERVED_PIDS 500
91851
91852 int pid_max_min = RESERVED_PIDS + 1;
91853 int pid_max_max = PID_MAX_LIMIT;
91854@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91855 */
91856 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91857 {
91858+ struct task_struct *task;
91859+
91860 rcu_lockdep_assert(rcu_read_lock_held(),
91861 "find_task_by_pid_ns() needs rcu_read_lock()"
91862 " protection");
91863- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91864+
91865+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91866+
91867+ if (gr_pid_is_chrooted(task))
91868+ return NULL;
91869+
91870+ return task;
91871 }
91872
91873 struct task_struct *find_task_by_vpid(pid_t vnr)
91874@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91875 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91876 }
91877
91878+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91879+{
91880+ rcu_lockdep_assert(rcu_read_lock_held(),
91881+ "find_task_by_pid_ns() needs rcu_read_lock()"
91882+ " protection");
91883+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91884+}
91885+
91886 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91887 {
91888 struct pid *pid;
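
The pid.c change layers a policy filter over PID lookup: for a chrooted caller, tasks outside the chroot are reported as nonexistent, while the new find_task_by_vpid_unrestricted() keeps the unfiltered behavior for internal callers. A schematic of the filtered-lookup pattern, with gr_pid_is_chrooted() stubbed by a boolean field:

#include <stdbool.h>
#include <stdio.h>

struct task { int pid; bool outside_chroot; };

static struct task tasks[] = { { 1, false }, { 42, true } };

/* Stub for gr_pid_is_chrooted(): in the patch this asks grsecurity
 * whether the task lies outside the calling process's chroot. */
static bool pid_is_chrooted(const struct task *t)
{
        return t && t->outside_chroot;
}

static struct task *find_task(int pid)
{
        unsigned int i;

        for (i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                if (tasks[i].pid == pid)
                        return &tasks[i];
        return NULL;
}

/* Filtered lookup, shaped like the patched find_task_by_pid_ns(). */
static struct task *find_task_filtered(int pid)
{
        struct task *task = find_task(pid);

        if (pid_is_chrooted(task))
                return NULL;
        return task;
}

int main(void)
{
        printf("%p %p\n", (void *)find_task_filtered(1),
               (void *)find_task_filtered(42)); /* second prints (nil) */
        return 0;
}
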
91889diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91890index a65ba13..f600dbb 100644
91891--- a/kernel/pid_namespace.c
91892+++ b/kernel/pid_namespace.c
91893@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91894 void __user *buffer, size_t *lenp, loff_t *ppos)
91895 {
91896 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91897- struct ctl_table tmp = *table;
91898+ ctl_table_no_const tmp = *table;
91899
91900 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91901 return -EPERM;
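
pid_namespace.c shows the companion idiom to constification: where code genuinely needs a mutable scratch copy of a structure type that the constify plugin has made read-only, the patch substitutes a _no_const typedef for the local. A rough model (the real mechanism is a plugin attribute, and the layout here is invented for illustration):

#include <stdio.h>

/* Invented two-field layout; the real ctl_table is larger and the
 * no_const typedef is generated by the constify plugin. */
struct ctl_table { const char *procname; int maxlen; };
typedef struct ctl_table ctl_table_no_const;

static const struct ctl_table pid_max_table = { "pid_max", 4 };

int main(void)
{
        ctl_table_no_const tmp = pid_max_table; /* mutable stack copy */

        tmp.maxlen = 8;         /* a store the constified type would forbid */
        printf("%s %d\n", tmp.procname, tmp.maxlen);
        return 0;
}
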
91902diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91903index 7e01f78..f5da19d 100644
91904--- a/kernel/power/Kconfig
91905+++ b/kernel/power/Kconfig
91906@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91907 config HIBERNATION
91908 bool "Hibernation (aka 'suspend to disk')"
91909 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91910+ depends on !GRKERNSEC_KMEM
91911+ depends on !PAX_MEMORY_SANITIZE
91912 select HIBERNATE_CALLBACKS
91913 select LZO_COMPRESS
91914 select LZO_DECOMPRESS
91915diff --git a/kernel/power/process.c b/kernel/power/process.c
91916index 564f786..361a18e 100644
91917--- a/kernel/power/process.c
91918+++ b/kernel/power/process.c
91919@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91920 unsigned int elapsed_msecs;
91921 bool wakeup = false;
91922 int sleep_usecs = USEC_PER_MSEC;
91923+ bool timedout = false;
91924
91925 do_gettimeofday(&start);
91926
91927@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91928
91929 while (true) {
91930 todo = 0;
91931+ if (time_after(jiffies, end_time))
91932+ timedout = true;
91933 read_lock(&tasklist_lock);
91934 for_each_process_thread(g, p) {
91935 if (p == current || !freeze_task(p))
91936 continue;
91937
91938- if (!freezer_should_skip(p))
91939+ if (!freezer_should_skip(p)) {
91940 todo++;
91941+ if (timedout) {
91942+ printk(KERN_ERR "Task refusing to freeze:\n");
91943+ sched_show_task(p);
91944+ }
91945+ }
91946 }
91947 read_unlock(&tasklist_lock);
91948
91949@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91950 todo += wq_busy;
91951 }
91952
91953- if (!todo || time_after(jiffies, end_time))
91954+ if (!todo || timedout)
91955 break;
91956
91957 if (pm_wakeup_pending()) {
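
The process.c hunk reorders the timeout test: the deadline is now sampled before the task scan rather than after it, so the final pass still walks every task with timedout set and can name each one that refused to freeze. The control flow, reduced to a self-contained sketch with jiffies and the task list stubbed out:

#include <stdbool.h>
#include <stdio.h>

static int scan_tasks(bool timedout)
{
        /* Stub task list: pretend one task never freezes.  With timedout
         * set, the patched kernel loop reports it via sched_show_task(). */
        if (timedout)
                fprintf(stderr, "Task refusing to freeze\n");
        return 1;       /* todo: number of still-unfrozen tasks */
}

/* Reduced control flow of the patched try_to_freeze_tasks(): the
 * deadline is checked *before* the scan, so the final pass still
 * visits every task and can report the stragglers. */
static int freeze_loop(int deadline)
{
        int now = 0;            /* stands in for jiffies */
        bool timedout = false;

        for (;;) {
                int todo;

                if (now > deadline)
                        timedout = true;
                todo = scan_tasks(timedout);
                if (!todo || timedout)
                        break;
                now++;
        }
        return timedout ? -1 : 0;
}

int main(void)
{
        return freeze_loop(3) == 0 ? 0 : 1;
}
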
91958diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91959index bb0635b..9aff9f3 100644
91960--- a/kernel/printk/printk.c
91961+++ b/kernel/printk/printk.c
91962@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91963 if (from_file && type != SYSLOG_ACTION_OPEN)
91964 return 0;
91965
91966+#ifdef CONFIG_GRKERNSEC_DMESG
91967+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91968+ return -EPERM;
91969+#endif
91970+
91971 if (syslog_action_restricted(type)) {
91972 if (capable(CAP_SYSLOG))
91973 return 0;
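
The printk.c addition is an early capability gate: with GRKERNSEC_DMESG enabled and grsec_enable_dmesg set, a caller lacking both CAP_SYSLOG and CAP_SYS_ADMIN gets -EPERM before any of the stock syslog_action_restricted() logic runs. A sketch with the two capability checks stubbed to model an unprivileged caller:

#include <stdbool.h>
#include <stdio.h>

#define EPERM 1

static bool grsec_enable_dmesg = true;

/* Stubs for capable(CAP_SYSLOG) and capable_nolog(CAP_SYS_ADMIN). */
static bool capable_syslog(void)    { return false; }
static bool capable_sys_admin(void) { return false; }

static int check_syslog_permissions(void)
{
        /* The gate added by the patch runs first. */
        if (grsec_enable_dmesg && !capable_syslog() && !capable_sys_admin())
                return -EPERM;
        /* ... stock syslog_action_restricted() checks would follow ... */
        return 0;
}

int main(void)
{
        printf("%d\n", check_syslog_permissions());     /* prints -1 */
        return 0;
}
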
91974diff --git a/kernel/profile.c b/kernel/profile.c
91975index a7bcd28..5b368fa 100644
91976--- a/kernel/profile.c
91977+++ b/kernel/profile.c
91978@@ -37,7 +37,7 @@ struct profile_hit {
91979 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91980 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91981
91982-static atomic_t *prof_buffer;
91983+static atomic_unchecked_t *prof_buffer;
91984 static unsigned long prof_len, prof_shift;
91985
91986 int prof_on __read_mostly;
91987@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91988 hits[i].pc = 0;
91989 continue;
91990 }
91991- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91992+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91993 hits[i].hits = hits[i].pc = 0;
91994 }
91995 }
91996@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91997 * Add the current hit(s) and flush the write-queue out
91998 * to the global buffer:
91999 */
92000- atomic_add(nr_hits, &prof_buffer[pc]);
92001+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
92002 for (i = 0; i < NR_PROFILE_HIT; ++i) {
92003- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92004+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92005 hits[i].pc = hits[i].hits = 0;
92006 }
92007 out:
92008@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
92009 {
92010 unsigned long pc;
92011 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
92012- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92013+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92014 }
92015 #endif /* !CONFIG_SMP */
92016
92017@@ -489,7 +489,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
92018 return -EFAULT;
92019 buf++; p++; count--; read++;
92020 }
92021- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
92022+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
92023 if (copy_to_user(buf, (void *)pnt, count))
92024 return -EFAULT;
92025 read += count;
92026@@ -520,7 +520,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
92027 }
92028 #endif
92029 profile_discard_flip_buffers();
92030- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
92031+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
92032 return count;
92033 }
92034
92035diff --git a/kernel/ptrace.c b/kernel/ptrace.c
92036index 227fec3..3aea55b 100644
92037--- a/kernel/ptrace.c
92038+++ b/kernel/ptrace.c
92039@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
92040 if (seize)
92041 flags |= PT_SEIZED;
92042 rcu_read_lock();
92043- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92044+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92045 flags |= PT_PTRACE_CAP;
92046 rcu_read_unlock();
92047 task->ptrace = flags;
92048@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
92049 break;
92050 return -EIO;
92051 }
92052- if (copy_to_user(dst, buf, retval))
92053+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
92054 return -EFAULT;
92055 copied += retval;
92056 src += retval;
92057@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
92058 bool seized = child->ptrace & PT_SEIZED;
92059 int ret = -EIO;
92060 siginfo_t siginfo, *si;
92061- void __user *datavp = (void __user *) data;
92062+ void __user *datavp = (__force void __user *) data;
92063 unsigned long __user *datalp = datavp;
92064 unsigned long flags;
92065
92066@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
92067 goto out;
92068 }
92069
92070+ if (gr_handle_ptrace(child, request)) {
92071+ ret = -EPERM;
92072+ goto out_put_task_struct;
92073+ }
92074+
92075 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92076 ret = ptrace_attach(child, request, addr, data);
92077 /*
92078 * Some architectures need to do book-keeping after
92079 * a ptrace attach.
92080 */
92081- if (!ret)
92082+ if (!ret) {
92083 arch_ptrace_attach(child);
92084+ gr_audit_ptrace(child);
92085+ }
92086 goto out_put_task_struct;
92087 }
92088
92089@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
92090 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
92091 if (copied != sizeof(tmp))
92092 return -EIO;
92093- return put_user(tmp, (unsigned long __user *)data);
92094+ return put_user(tmp, (__force unsigned long __user *)data);
92095 }
92096
92097 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
92098@@ -1157,7 +1164,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
92099 }
92100
92101 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92102- compat_long_t, addr, compat_long_t, data)
92103+ compat_ulong_t, addr, compat_ulong_t, data)
92104 {
92105 struct task_struct *child;
92106 long ret;
92107@@ -1173,14 +1180,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92108 goto out;
92109 }
92110
92111+ if (gr_handle_ptrace(child, request)) {
92112+ ret = -EPERM;
92113+ goto out_put_task_struct;
92114+ }
92115+
92116 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92117 ret = ptrace_attach(child, request, addr, data);
92118 /*
92119 * Some architectures need to do book-keeping after
92120 * a ptrace attach.
92121 */
92122- if (!ret)
92123+ if (!ret) {
92124 arch_ptrace_attach(child);
92125+ gr_audit_ptrace(child);
92126+ }
92127 goto out_put_task_struct;
92128 }
92129
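
Besides the gr_handle_ptrace() gating, the ptrace_readdata() hunk adds a defensive clamp: a retval larger than the on-stack bounce buffer is rejected before copy_to_user() can read past it. A hedged sketch of that hardened copy step, with memcpy() standing in for copy_to_user() and the buffer size chosen arbitrarily rather than taken from the kernel:

#include <stdio.h>
#include <string.h>

/* One hardened iteration of ptrace_readdata(): retval is validated
 * against the bounce buffer before any copy, as the patch does. */
static int copy_chunk(char *dst, size_t dstsz, const char *src, size_t retval)
{
        char buf[128];

        if (retval > sizeof(buf) || retval > dstsz)
                return -1;      /* kernel: return -EFAULT */
        memcpy(buf, src, retval);
        memcpy(dst, buf, retval);
        return 0;
}

int main(void)
{
        char out[16];

        printf("%d\n", copy_chunk(out, sizeof(out), "abc", 3));   /* 0 */
        printf("%d\n", copy_chunk(out, sizeof(out), "abc", 999)); /* -1 */
        return 0;
}
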
92130diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
92131index 30d42aa..cac5d66 100644
92132--- a/kernel/rcu/rcutorture.c
92133+++ b/kernel/rcu/rcutorture.c
92134@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92135 rcu_torture_count) = { 0 };
92136 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92137 rcu_torture_batch) = { 0 };
92138-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92139-static atomic_t n_rcu_torture_alloc;
92140-static atomic_t n_rcu_torture_alloc_fail;
92141-static atomic_t n_rcu_torture_free;
92142-static atomic_t n_rcu_torture_mberror;
92143-static atomic_t n_rcu_torture_error;
92144+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92145+static atomic_unchecked_t n_rcu_torture_alloc;
92146+static atomic_unchecked_t n_rcu_torture_alloc_fail;
92147+static atomic_unchecked_t n_rcu_torture_free;
92148+static atomic_unchecked_t n_rcu_torture_mberror;
92149+static atomic_unchecked_t n_rcu_torture_error;
92150 static long n_rcu_torture_barrier_error;
92151 static long n_rcu_torture_boost_ktrerror;
92152 static long n_rcu_torture_boost_rterror;
92153@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
92154 static long n_rcu_torture_timers;
92155 static long n_barrier_attempts;
92156 static long n_barrier_successes;
92157-static atomic_long_t n_cbfloods;
92158+static atomic_long_unchecked_t n_cbfloods;
92159 static struct list_head rcu_torture_removed;
92160
92161 static int rcu_torture_writer_state;
92162@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
92163
92164 spin_lock_bh(&rcu_torture_lock);
92165 if (list_empty(&rcu_torture_freelist)) {
92166- atomic_inc(&n_rcu_torture_alloc_fail);
92167+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
92168 spin_unlock_bh(&rcu_torture_lock);
92169 return NULL;
92170 }
92171- atomic_inc(&n_rcu_torture_alloc);
92172+ atomic_inc_unchecked(&n_rcu_torture_alloc);
92173 p = rcu_torture_freelist.next;
92174 list_del_init(p);
92175 spin_unlock_bh(&rcu_torture_lock);
92176@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
92177 static void
92178 rcu_torture_free(struct rcu_torture *p)
92179 {
92180- atomic_inc(&n_rcu_torture_free);
92181+ atomic_inc_unchecked(&n_rcu_torture_free);
92182 spin_lock_bh(&rcu_torture_lock);
92183 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
92184 spin_unlock_bh(&rcu_torture_lock);
92185@@ -308,7 +308,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
92186 i = rp->rtort_pipe_count;
92187 if (i > RCU_TORTURE_PIPE_LEN)
92188 i = RCU_TORTURE_PIPE_LEN;
92189- atomic_inc(&rcu_torture_wcount[i]);
92190+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92191 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
92192 rp->rtort_mbtest = 0;
92193 return true;
92194@@ -796,7 +796,7 @@ rcu_torture_cbflood(void *arg)
92195 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
92196 do {
92197 schedule_timeout_interruptible(cbflood_inter_holdoff);
92198- atomic_long_inc(&n_cbfloods);
92199+ atomic_long_inc_unchecked(&n_cbfloods);
92200 WARN_ON(signal_pending(current));
92201 for (i = 0; i < cbflood_n_burst; i++) {
92202 for (j = 0; j < cbflood_n_per_burst; j++) {
92203@@ -915,7 +915,7 @@ rcu_torture_writer(void *arg)
92204 i = old_rp->rtort_pipe_count;
92205 if (i > RCU_TORTURE_PIPE_LEN)
92206 i = RCU_TORTURE_PIPE_LEN;
92207- atomic_inc(&rcu_torture_wcount[i]);
92208+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92209 old_rp->rtort_pipe_count++;
92210 switch (synctype[torture_random(&rand) % nsynctypes]) {
92211 case RTWS_DEF_FREE:
92212@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
92213 return;
92214 }
92215 if (p->rtort_mbtest == 0)
92216- atomic_inc(&n_rcu_torture_mberror);
92217+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92218 spin_lock(&rand_lock);
92219 cur_ops->read_delay(&rand);
92220 n_rcu_torture_timers++;
92221@@ -1111,7 +1111,7 @@ rcu_torture_reader(void *arg)
92222 continue;
92223 }
92224 if (p->rtort_mbtest == 0)
92225- atomic_inc(&n_rcu_torture_mberror);
92226+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92227 cur_ops->read_delay(&rand);
92228 preempt_disable();
92229 pipe_count = p->rtort_pipe_count;
92230@@ -1180,11 +1180,11 @@ rcu_torture_stats_print(void)
92231 rcu_torture_current,
92232 rcu_torture_current_version,
92233 list_empty(&rcu_torture_freelist),
92234- atomic_read(&n_rcu_torture_alloc),
92235- atomic_read(&n_rcu_torture_alloc_fail),
92236- atomic_read(&n_rcu_torture_free));
92237+ atomic_read_unchecked(&n_rcu_torture_alloc),
92238+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
92239+ atomic_read_unchecked(&n_rcu_torture_free));
92240 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
92241- atomic_read(&n_rcu_torture_mberror),
92242+ atomic_read_unchecked(&n_rcu_torture_mberror),
92243 n_rcu_torture_boost_ktrerror,
92244 n_rcu_torture_boost_rterror);
92245 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
92246@@ -1196,17 +1196,17 @@ rcu_torture_stats_print(void)
92247 n_barrier_successes,
92248 n_barrier_attempts,
92249 n_rcu_torture_barrier_error);
92250- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
92251+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
92252
92253 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
92254- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
92255+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
92256 n_rcu_torture_barrier_error != 0 ||
92257 n_rcu_torture_boost_ktrerror != 0 ||
92258 n_rcu_torture_boost_rterror != 0 ||
92259 n_rcu_torture_boost_failure != 0 ||
92260 i > 1) {
92261 pr_cont("%s", "!!! ");
92262- atomic_inc(&n_rcu_torture_error);
92263+ atomic_inc_unchecked(&n_rcu_torture_error);
92264 WARN_ON_ONCE(1);
92265 }
92266 pr_cont("Reader Pipe: ");
92267@@ -1223,7 +1223,7 @@ rcu_torture_stats_print(void)
92268 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
92269 pr_cont("Free-Block Circulation: ");
92270 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92271- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
92272+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
92273 }
92274 pr_cont("\n");
92275
92276@@ -1570,7 +1570,7 @@ rcu_torture_cleanup(void)
92277
92278 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
92279
92280- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92281+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92282 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
92283 else if (torture_onoff_failures())
92284 rcu_torture_print_module_parms(cur_ops,
92285@@ -1695,18 +1695,18 @@ rcu_torture_init(void)
92286
92287 rcu_torture_current = NULL;
92288 rcu_torture_current_version = 0;
92289- atomic_set(&n_rcu_torture_alloc, 0);
92290- atomic_set(&n_rcu_torture_alloc_fail, 0);
92291- atomic_set(&n_rcu_torture_free, 0);
92292- atomic_set(&n_rcu_torture_mberror, 0);
92293- atomic_set(&n_rcu_torture_error, 0);
92294+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
92295+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
92296+ atomic_set_unchecked(&n_rcu_torture_free, 0);
92297+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
92298+ atomic_set_unchecked(&n_rcu_torture_error, 0);
92299 n_rcu_torture_barrier_error = 0;
92300 n_rcu_torture_boost_ktrerror = 0;
92301 n_rcu_torture_boost_rterror = 0;
92302 n_rcu_torture_boost_failure = 0;
92303 n_rcu_torture_boosts = 0;
92304 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
92305- atomic_set(&rcu_torture_wcount[i], 0);
92306+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
92307 for_each_possible_cpu(cpu) {
92308 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92309 per_cpu(rcu_torture_count, cpu)[i] = 0;
92310diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
92311index cc9ceca..ce075a6 100644
92312--- a/kernel/rcu/tiny.c
92313+++ b/kernel/rcu/tiny.c
92314@@ -42,7 +42,7 @@
92315 /* Forward declarations for tiny_plugin.h. */
92316 struct rcu_ctrlblk;
92317 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
92318-static void rcu_process_callbacks(struct softirq_action *unused);
92319+static void rcu_process_callbacks(void);
92320 static void __call_rcu(struct rcu_head *head,
92321 void (*func)(struct rcu_head *rcu),
92322 struct rcu_ctrlblk *rcp);
92323@@ -210,7 +210,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
92324 false));
92325 }
92326
92327-static void rcu_process_callbacks(struct softirq_action *unused)
92328+static __latent_entropy void rcu_process_callbacks(void)
92329 {
92330 __rcu_process_callbacks(&rcu_sched_ctrlblk);
92331 __rcu_process_callbacks(&rcu_bh_ctrlblk);
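
tiny.c, like tree.c below, changes the softirq handler from taking an unused struct softirq_action * to taking no arguments; plausibly the point is that once every handler shares one exact prototype, indirect calls through the softirq table are uniformly typed and easier to check. A small sketch of that dispatch pattern, with an invented two-entry action table:

#include <stdio.h>

/* With the unused parameter dropped, every handler shares this one
 * exact prototype.  Handlers and table are invented for illustration. */
typedef void (*softirq_handler)(void);

static void rcu_process_callbacks(void) { puts("rcu"); }
static void timer_softirq(void)         { puts("timer"); }

static const softirq_handler actions[] = {
        rcu_process_callbacks,
        timer_softirq,
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
                actions[i]();
        return 0;
}
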
92332diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
92333index f94e209..d2985bd 100644
92334--- a/kernel/rcu/tiny_plugin.h
92335+++ b/kernel/rcu/tiny_plugin.h
92336@@ -150,10 +150,10 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
92337 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
92338 jiffies - rcp->gp_start, rcp->qlen);
92339 dump_stack();
92340- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
92341+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
92342 3 * rcu_jiffies_till_stall_check() + 3;
92343 } else if (ULONG_CMP_GE(j, js)) {
92344- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92345+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92346 }
92347 }
92348
92349@@ -161,7 +161,7 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
92350 {
92351 rcp->ticks_this_gp = 0;
92352 rcp->gp_start = jiffies;
92353- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92354+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92355 }
92356
92357 static void check_cpu_stalls(void)
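
The ACCESS_ONCE_RW conversions in tiny_plugin.h and throughout tree.c below mark each deliberate store to data that would otherwise be read-only under PaX. A rough model of the macro pair (an assumption about their shape, not a copy of the PaX definitions): reads keep working through a const-qualified ACCESS_ONCE(), while writes must go through the _RW variant:

#include <stdio.h>

/* Approximation only: both force a volatile access, but ACCESS_ONCE()
 * yields a const-qualified lvalue, so a store through it fails to
 * compile; ACCESS_ONCE_RW() is the explicitly writable flavor the
 * patch substitutes at every legitimate store site. */
#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long jiffies_stall;

int main(void)
{
        unsigned long j = ACCESS_ONCE(jiffies_stall);   /* reads stay as-is */

        ACCESS_ONCE_RW(jiffies_stall) = j + 3;          /* writes need _RW */
        printf("%lu\n", ACCESS_ONCE(jiffies_stall));
        return 0;
}
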
92358diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
92359index 48d640c..9401d30 100644
92360--- a/kernel/rcu/tree.c
92361+++ b/kernel/rcu/tree.c
92362@@ -268,7 +268,7 @@ static void rcu_momentary_dyntick_idle(void)
92363 */
92364 rdtp = this_cpu_ptr(&rcu_dynticks);
92365 smp_mb__before_atomic(); /* Earlier stuff before QS. */
92366- atomic_add(2, &rdtp->dynticks); /* QS. */
92367+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
92368 smp_mb__after_atomic(); /* Later stuff after QS. */
92369 break;
92370 }
92371@@ -580,9 +580,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
92372 rcu_prepare_for_idle();
92373 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92374 smp_mb__before_atomic(); /* See above. */
92375- atomic_inc(&rdtp->dynticks);
92376+ atomic_inc_unchecked(&rdtp->dynticks);
92377 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
92378- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92379+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92380 rcu_dynticks_task_enter();
92381
92382 /*
92383@@ -703,10 +703,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
92384
92385 rcu_dynticks_task_exit();
92386 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
92387- atomic_inc(&rdtp->dynticks);
92388+ atomic_inc_unchecked(&rdtp->dynticks);
92389 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92390 smp_mb__after_atomic(); /* See above. */
92391- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92392+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92393 rcu_cleanup_after_idle();
92394 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
92395 if (!user && !is_idle_task(current)) {
92396@@ -840,12 +840,12 @@ void rcu_nmi_enter(void)
92397 * to be in the outermost NMI handler that interrupted an RCU-idle
92398 * period (observation due to Andy Lutomirski).
92399 */
92400- if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
92401+ if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
92402 smp_mb__before_atomic(); /* Force delay from prior write. */
92403- atomic_inc(&rdtp->dynticks);
92404+ atomic_inc_unchecked(&rdtp->dynticks);
92405 /* atomic_inc() before later RCU read-side crit sects */
92406 smp_mb__after_atomic(); /* See above. */
92407- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92408+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92409 incby = 1;
92410 }
92411 rdtp->dynticks_nmi_nesting += incby;
92412@@ -870,7 +870,7 @@ void rcu_nmi_exit(void)
92413 * to us!)
92414 */
92415 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
92416- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92417+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92418
92419 /*
92420 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
92421@@ -885,9 +885,9 @@ void rcu_nmi_exit(void)
92422 rdtp->dynticks_nmi_nesting = 0;
92423 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92424 smp_mb__before_atomic(); /* See above. */
92425- atomic_inc(&rdtp->dynticks);
92426+ atomic_inc_unchecked(&rdtp->dynticks);
92427 smp_mb__after_atomic(); /* Force delay to next write. */
92428- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92429+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92430 }
92431
92432 /**
92433@@ -900,7 +900,7 @@ void rcu_nmi_exit(void)
92434 */
92435 bool notrace __rcu_is_watching(void)
92436 {
92437- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92438+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92439 }
92440
92441 /**
92442@@ -983,7 +983,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
92443 static int dyntick_save_progress_counter(struct rcu_data *rdp,
92444 bool *isidle, unsigned long *maxj)
92445 {
92446- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
92447+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92448 rcu_sysidle_check_cpu(rdp, isidle, maxj);
92449 if ((rdp->dynticks_snap & 0x1) == 0) {
92450 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
92451@@ -991,7 +991,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
92452 } else {
92453 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
92454 rdp->mynode->gpnum))
92455- ACCESS_ONCE(rdp->gpwrap) = true;
92456+ ACCESS_ONCE_RW(rdp->gpwrap) = true;
92457 return 0;
92458 }
92459 }
92460@@ -1009,7 +1009,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92461 int *rcrmp;
92462 unsigned int snap;
92463
92464- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
92465+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92466 snap = (unsigned int)rdp->dynticks_snap;
92467
92468 /*
92469@@ -1072,10 +1072,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92470 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
92471 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
92472 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
92473- ACCESS_ONCE(rdp->cond_resched_completed) =
92474+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
92475 ACCESS_ONCE(rdp->mynode->completed);
92476 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
92477- ACCESS_ONCE(*rcrmp) =
92478+ ACCESS_ONCE_RW(*rcrmp) =
92479 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
92480 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
92481 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
92482@@ -1097,7 +1097,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
92483 rsp->gp_start = j;
92484 smp_wmb(); /* Record start time before stall time. */
92485 j1 = rcu_jiffies_till_stall_check();
92486- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
92487+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
92488 rsp->jiffies_resched = j + j1 / 2;
92489 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
92490 }
92491@@ -1156,7 +1156,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
92492 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92493 return;
92494 }
92495- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92496+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92497 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92498
92499 /*
92500@@ -1240,7 +1240,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92501
92502 raw_spin_lock_irqsave(&rnp->lock, flags);
92503 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92504- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92505+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92506 3 * rcu_jiffies_till_stall_check() + 3;
92507 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92508
92509@@ -1324,7 +1324,7 @@ void rcu_cpu_stall_reset(void)
92510 struct rcu_state *rsp;
92511
92512 for_each_rcu_flavor(rsp)
92513- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92514+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92515 }
92516
92517 /*
92518@@ -1671,7 +1671,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
92519 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
92520 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
92521 zero_cpu_stall_ticks(rdp);
92522- ACCESS_ONCE(rdp->gpwrap) = false;
92523+ ACCESS_ONCE_RW(rdp->gpwrap) = false;
92524 }
92525 return ret;
92526 }
92527@@ -1706,7 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92528 struct rcu_data *rdp;
92529 struct rcu_node *rnp = rcu_get_root(rsp);
92530
92531- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92532+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92533 rcu_bind_gp_kthread();
92534 raw_spin_lock_irq(&rnp->lock);
92535 smp_mb__after_unlock_lock();
92536@@ -1715,7 +1715,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92537 raw_spin_unlock_irq(&rnp->lock);
92538 return 0;
92539 }
92540- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92541+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92542
92543 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92544 /*
92545@@ -1756,9 +1756,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92546 rdp = this_cpu_ptr(rsp->rda);
92547 rcu_preempt_check_blocked_tasks(rnp);
92548 rnp->qsmask = rnp->qsmaskinit;
92549- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92550+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92551 WARN_ON_ONCE(rnp->completed != rsp->completed);
92552- ACCESS_ONCE(rnp->completed) = rsp->completed;
92553+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92554 if (rnp == rdp->mynode)
92555 (void)__note_gp_changes(rsp, rnp, rdp);
92556 rcu_preempt_boost_start_gp(rnp);
92557@@ -1767,7 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92558 rnp->grphi, rnp->qsmask);
92559 raw_spin_unlock_irq(&rnp->lock);
92560 cond_resched_rcu_qs();
92561- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92562+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92563 }
92564
92565 mutex_unlock(&rsp->onoff_mutex);
92566@@ -1784,7 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92567 unsigned long maxj;
92568 struct rcu_node *rnp = rcu_get_root(rsp);
92569
92570- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92571+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92572 rsp->n_force_qs++;
92573 if (fqs_state == RCU_SAVE_DYNTICK) {
92574 /* Collect dyntick-idle snapshots. */
92575@@ -1805,7 +1805,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92576 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92577 raw_spin_lock_irq(&rnp->lock);
92578 smp_mb__after_unlock_lock();
92579- ACCESS_ONCE(rsp->gp_flags) =
92580+ ACCESS_ONCE_RW(rsp->gp_flags) =
92581 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
92582 raw_spin_unlock_irq(&rnp->lock);
92583 }
92584@@ -1823,7 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92585 struct rcu_data *rdp;
92586 struct rcu_node *rnp = rcu_get_root(rsp);
92587
92588- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92589+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92590 raw_spin_lock_irq(&rnp->lock);
92591 smp_mb__after_unlock_lock();
92592 gp_duration = jiffies - rsp->gp_start;
92593@@ -1852,7 +1852,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92594 rcu_for_each_node_breadth_first(rsp, rnp) {
92595 raw_spin_lock_irq(&rnp->lock);
92596 smp_mb__after_unlock_lock();
92597- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92598+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92599 rdp = this_cpu_ptr(rsp->rda);
92600 if (rnp == rdp->mynode)
92601 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92602@@ -1860,7 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92603 nocb += rcu_future_gp_cleanup(rsp, rnp);
92604 raw_spin_unlock_irq(&rnp->lock);
92605 cond_resched_rcu_qs();
92606- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92607+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92608 }
92609 rnp = rcu_get_root(rsp);
92610 raw_spin_lock_irq(&rnp->lock);
92611@@ -1868,14 +1868,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92612 rcu_nocb_gp_set(rnp, nocb);
92613
92614 /* Declare grace period done. */
92615- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92616+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92617 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92618 rsp->fqs_state = RCU_GP_IDLE;
92619 rdp = this_cpu_ptr(rsp->rda);
92620 /* Advance CBs to reduce false positives below. */
92621 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92622 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92623- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92624+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92625 trace_rcu_grace_period(rsp->name,
92626 ACCESS_ONCE(rsp->gpnum),
92627 TPS("newreq"));
92628@@ -1910,7 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
92629 if (rcu_gp_init(rsp))
92630 break;
92631 cond_resched_rcu_qs();
92632- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92633+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92634 WARN_ON(signal_pending(current));
92635 trace_rcu_grace_period(rsp->name,
92636 ACCESS_ONCE(rsp->gpnum),
92637@@ -1954,11 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
92638 ACCESS_ONCE(rsp->gpnum),
92639 TPS("fqsend"));
92640 cond_resched_rcu_qs();
92641- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92642+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92643 } else {
92644 /* Deal with stray signal. */
92645 cond_resched_rcu_qs();
92646- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92647+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92648 WARN_ON(signal_pending(current));
92649 trace_rcu_grace_period(rsp->name,
92650 ACCESS_ONCE(rsp->gpnum),
92651@@ -2003,7 +2003,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92652 */
92653 return false;
92654 }
92655- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92656+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92657 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92658 TPS("newreq"));
92659
92660@@ -2228,7 +2228,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92661 rsp->qlen += rdp->qlen;
92662 rdp->n_cbs_orphaned += rdp->qlen;
92663 rdp->qlen_lazy = 0;
92664- ACCESS_ONCE(rdp->qlen) = 0;
92665+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92666 }
92667
92668 /*
92669@@ -2490,7 +2490,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92670 }
92671 smp_mb(); /* List handling before counting for rcu_barrier(). */
92672 rdp->qlen_lazy -= count_lazy;
92673- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92674+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92675 rdp->n_cbs_invoked += count;
92676
92677 /* Reinstate batch limit if we have worked down the excess. */
92678@@ -2647,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92679 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92680 return; /* Someone beat us to it. */
92681 }
92682- ACCESS_ONCE(rsp->gp_flags) =
92683+ ACCESS_ONCE_RW(rsp->gp_flags) =
92684 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
92685 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92686 rcu_gp_kthread_wake(rsp);
92687@@ -2693,7 +2693,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92688 /*
92689 * Do RCU core processing for the current CPU.
92690 */
92691-static void rcu_process_callbacks(struct softirq_action *unused)
92692+static void rcu_process_callbacks(void)
92693 {
92694 struct rcu_state *rsp;
92695
92696@@ -2805,7 +2805,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92697 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92698 if (debug_rcu_head_queue(head)) {
92699 /* Probable double call_rcu(), so leak the callback. */
92700- ACCESS_ONCE(head->func) = rcu_leak_callback;
92701+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92702 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92703 return;
92704 }
92705@@ -2833,7 +2833,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92706 local_irq_restore(flags);
92707 return;
92708 }
92709- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92710+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92711 if (lazy)
92712 rdp->qlen_lazy++;
92713 else
92714@@ -3106,11 +3106,11 @@ void synchronize_sched_expedited(void)
92715 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92716 * course be required on a 64-bit system.
92717 */
92718- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92719+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92720 (ulong)atomic_long_read(&rsp->expedited_done) +
92721 ULONG_MAX / 8)) {
92722 synchronize_sched();
92723- atomic_long_inc(&rsp->expedited_wrap);
92724+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92725 return;
92726 }
92727
92728@@ -3118,12 +3118,12 @@ void synchronize_sched_expedited(void)
92729 * Take a ticket. Note that atomic_inc_return() implies a
92730 * full memory barrier.
92731 */
92732- snap = atomic_long_inc_return(&rsp->expedited_start);
92733+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92734 firstsnap = snap;
92735 if (!try_get_online_cpus()) {
92736 /* CPU hotplug operation in flight, fall back to normal GP. */
92737 wait_rcu_gp(call_rcu_sched);
92738- atomic_long_inc(&rsp->expedited_normal);
92739+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92740 return;
92741 }
92742 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92743@@ -3136,7 +3136,7 @@ void synchronize_sched_expedited(void)
92744 for_each_cpu(cpu, cm) {
92745 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92746
92747- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92748+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92749 cpumask_clear_cpu(cpu, cm);
92750 }
92751 if (cpumask_weight(cm) == 0)
92752@@ -3151,14 +3151,14 @@ void synchronize_sched_expedited(void)
92753 synchronize_sched_expedited_cpu_stop,
92754 NULL) == -EAGAIN) {
92755 put_online_cpus();
92756- atomic_long_inc(&rsp->expedited_tryfail);
92757+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92758
92759 /* Check to see if someone else did our work for us. */
92760 s = atomic_long_read(&rsp->expedited_done);
92761 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92762 /* ensure test happens before caller kfree */
92763 smp_mb__before_atomic(); /* ^^^ */
92764- atomic_long_inc(&rsp->expedited_workdone1);
92765+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92766 free_cpumask_var(cm);
92767 return;
92768 }
92769@@ -3168,7 +3168,7 @@ void synchronize_sched_expedited(void)
92770 udelay(trycount * num_online_cpus());
92771 } else {
92772 wait_rcu_gp(call_rcu_sched);
92773- atomic_long_inc(&rsp->expedited_normal);
92774+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92775 free_cpumask_var(cm);
92776 return;
92777 }
92778@@ -3178,7 +3178,7 @@ void synchronize_sched_expedited(void)
92779 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92780 /* ensure test happens before caller kfree */
92781 smp_mb__before_atomic(); /* ^^^ */
92782- atomic_long_inc(&rsp->expedited_workdone2);
92783+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92784 free_cpumask_var(cm);
92785 return;
92786 }
92787@@ -3193,14 +3193,14 @@ void synchronize_sched_expedited(void)
92788 if (!try_get_online_cpus()) {
92789 /* CPU hotplug operation in flight, use normal GP. */
92790 wait_rcu_gp(call_rcu_sched);
92791- atomic_long_inc(&rsp->expedited_normal);
92792+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92793 free_cpumask_var(cm);
92794 return;
92795 }
92796- snap = atomic_long_read(&rsp->expedited_start);
92797+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92798 smp_mb(); /* ensure read is before try_stop_cpus(). */
92799 }
92800- atomic_long_inc(&rsp->expedited_stoppedcpus);
92801+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92802
92803 all_cpus_idle:
92804 free_cpumask_var(cm);
92805@@ -3212,16 +3212,16 @@ all_cpus_idle:
92806 * than we did already did their update.
92807 */
92808 do {
92809- atomic_long_inc(&rsp->expedited_done_tries);
92810+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92811 s = atomic_long_read(&rsp->expedited_done);
92812 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92813 /* ensure test happens before caller kfree */
92814 smp_mb__before_atomic(); /* ^^^ */
92815- atomic_long_inc(&rsp->expedited_done_lost);
92816+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92817 break;
92818 }
92819 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92820- atomic_long_inc(&rsp->expedited_done_exit);
92821+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92822
92823 put_online_cpus();
92824 }
92825@@ -3431,7 +3431,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92826 * ACCESS_ONCE() to prevent the compiler from speculating
92827 * the increment to precede the early-exit check.
92828 */
92829- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92830+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92831 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92832 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92833 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92834@@ -3487,7 +3487,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92835
92836 /* Increment ->n_barrier_done to prevent duplicate work. */
92837 smp_mb(); /* Keep increment after above mechanism. */
92838- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92839+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92840 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92841 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92842 smp_mb(); /* Keep increment before caller's subsequent code. */
92843@@ -3532,7 +3532,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92844 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92845 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92846 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92847- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92848+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92849 rdp->cpu = cpu;
92850 rdp->rsp = rsp;
92851 rcu_boot_init_nocb_percpu_data(rdp);
92852@@ -3565,8 +3565,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92853 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92854 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92855 rcu_sysidle_init_percpu_data(rdp->dynticks);
92856- atomic_set(&rdp->dynticks->dynticks,
92857- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92858+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92859+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92860 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92861
92862 /* Add CPU to rcu_node bitmasks. */
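
The ACCESS_ONCE() -> ACCESS_ONCE_RW() conversions above follow from grsecurity's hardened compiler.h: ACCESS_ONCE() yields a const-qualified view, so a stray write through it becomes a compile-time error, and deliberate write sites are switched to the _RW variant. A minimal sketch of the pair, assuming the usual typeof-based definition:

	#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* reads */
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* writes */

The rcu_process_callbacks() prototype change pairs with the kernel/softirq.c hunk later in this patch, which drops the unused struct softirq_action * argument from every softirq handler.
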
92863diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92864index 119de39..f07d31a 100644
92865--- a/kernel/rcu/tree.h
92866+++ b/kernel/rcu/tree.h
92867@@ -86,11 +86,11 @@ struct rcu_dynticks {
92868 long long dynticks_nesting; /* Track irq/process nesting level. */
92869 /* Process level is worth LLONG_MAX/2. */
92870 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92871- atomic_t dynticks; /* Even value for idle, else odd. */
92872+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92873 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92874 long long dynticks_idle_nesting;
92875 /* irq/process nesting level from idle. */
92876- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92877+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92878 /* "Idle" excludes userspace execution. */
92879 unsigned long dynticks_idle_jiffies;
92880 /* End of last non-NMI non-idle period. */
92881@@ -457,17 +457,17 @@ struct rcu_state {
92882 /* _rcu_barrier(). */
92883 /* End of fields guarded by barrier_mutex. */
92884
92885- atomic_long_t expedited_start; /* Starting ticket. */
92886- atomic_long_t expedited_done; /* Done ticket. */
92887- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92888- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92889- atomic_long_t expedited_workdone1; /* # done by others #1. */
92890- atomic_long_t expedited_workdone2; /* # done by others #2. */
92891- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92892- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92893- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92894- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92895- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92896+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92897+ atomic_long_t expedited_done; /* Done ticket. */
92898+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92899+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92900+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92901+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92902+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92903+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92904+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92905+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92906+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92907
92908 unsigned long jiffies_force_qs; /* Time at which to invoke */
92909 /* force_quiescent_state(). */
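
Under PaX REFCOUNT, plain atomic_t arithmetic traps on overflow to stop reference-count wraps. The expedited ticket and statistics counters above are expected to wrap, so they move to the *_unchecked types with ordinary modular semantics; note expedited_done stays atomic_long_t, since it is only read and cmpxchg'd, never incremented. A portable sketch of the opt-out type, using compiler builtins where the kernel really uses per-arch asm:

	typedef struct { long counter; } atomic_long_unchecked_t;

	/* wrapping increment: no REFCOUNT overflow trap */
	static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *v)
	{
		return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
	}
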
92910diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92911index 0a571e9..fbfd611 100644
92912--- a/kernel/rcu/tree_plugin.h
92913+++ b/kernel/rcu/tree_plugin.h
92914@@ -619,7 +619,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92915 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92916 {
92917 return !rcu_preempted_readers_exp(rnp) &&
92918- ACCESS_ONCE(rnp->expmask) == 0;
92919+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92920 }
92921
92922 /*
92923@@ -780,7 +780,7 @@ void synchronize_rcu_expedited(void)
92924
92925 /* Clean up and exit. */
92926 smp_mb(); /* ensure expedited GP seen before counter increment. */
92927- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92928+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92929 sync_rcu_preempt_exp_count + 1;
92930 unlock_mb_ret:
92931 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92932@@ -1290,7 +1290,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92933 free_cpumask_var(cm);
92934 }
92935
92936-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92937+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92938 .store = &rcu_cpu_kthread_task,
92939 .thread_should_run = rcu_cpu_kthread_should_run,
92940 .thread_fn = rcu_cpu_kthread,
92941@@ -1761,7 +1761,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92942 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92943 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
92944 cpu, ticks_value, ticks_title,
92945- atomic_read(&rdtp->dynticks) & 0xfff,
92946+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92947 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92948 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92949 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
92950@@ -1906,7 +1906,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92951 return;
92952 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92953 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92954- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92955+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92956 wake_up(&rdp_leader->nocb_wq);
92957 }
92958 }
92959@@ -1978,7 +1978,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92960 atomic_long_add(rhcount, &rdp->nocb_q_count);
92961 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
92962 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92963- ACCESS_ONCE(*old_rhpp) = rhp;
92964+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92965 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92966 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92967
92968@@ -2167,7 +2167,7 @@ wait_again:
92969 continue; /* No CBs here, try next follower. */
92970
92971 /* Move callbacks to wait-for-GP list, which is empty. */
92972- ACCESS_ONCE(rdp->nocb_head) = NULL;
92973+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92974 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92975 gotcbs = true;
92976 }
92977@@ -2288,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
92978 list = ACCESS_ONCE(rdp->nocb_follower_head);
92979 BUG_ON(!list);
92980 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92981- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92982+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92983 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92984
92985 /* Each pass through the following loop invokes a callback. */
92986@@ -2338,7 +2338,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92987 if (!rcu_nocb_need_deferred_wakeup(rdp))
92988 return;
92989 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92990- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92991+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92992 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92993 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92994 }
92995@@ -2461,7 +2461,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92996 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92997 "rcuo%c/%d", rsp->abbr, cpu);
92998 BUG_ON(IS_ERR(t));
92999- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
93000+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
93001 }
93002
93003 /*
93004@@ -2666,11 +2666,11 @@ static void rcu_sysidle_enter(int irq)
93005
93006 /* Record start of fully idle period. */
93007 j = jiffies;
93008- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
93009+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
93010 smp_mb__before_atomic();
93011- atomic_inc(&rdtp->dynticks_idle);
93012+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93013 smp_mb__after_atomic();
93014- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
93015+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
93016 }
93017
93018 /*
93019@@ -2741,9 +2741,9 @@ static void rcu_sysidle_exit(int irq)
93020
93021 /* Record end of idle period. */
93022 smp_mb__before_atomic();
93023- atomic_inc(&rdtp->dynticks_idle);
93024+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93025 smp_mb__after_atomic();
93026- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
93027+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
93028
93029 /*
93030 * If we are the timekeeping CPU, we are permitted to be non-idle
93031@@ -2788,7 +2788,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
93032 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
93033
93034 /* Pick up current idle and NMI-nesting counter and check. */
93035- cur = atomic_read(&rdtp->dynticks_idle);
93036+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
93037 if (cur & 0x1) {
93038 *isidle = false; /* We are not idle! */
93039 return;
93040@@ -2837,7 +2837,7 @@ static void rcu_sysidle(unsigned long j)
93041 case RCU_SYSIDLE_NOT:
93042
93043 /* First time all are idle, so note a short idle period. */
93044- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93045+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93046 break;
93047
93048 case RCU_SYSIDLE_SHORT:
93049@@ -2875,7 +2875,7 @@ static void rcu_sysidle_cancel(void)
93050 {
93051 smp_mb();
93052 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
93053- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
93054+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
93055 }
93056
93057 /*
93058@@ -2927,7 +2927,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
93059 smp_mb(); /* grace period precedes setting inuse. */
93060
93061 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
93062- ACCESS_ONCE(rshp->inuse) = 0;
93063+ ACCESS_ONCE_RW(rshp->inuse) = 0;
93064 }
93065
93066 /*
93067@@ -3080,7 +3080,7 @@ static void rcu_bind_gp_kthread(void)
93068 static void rcu_dynticks_task_enter(void)
93069 {
93070 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
93071- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
93072+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
93073 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
93074 }
93075
93076@@ -3088,6 +3088,6 @@ static void rcu_dynticks_task_enter(void)
93077 static void rcu_dynticks_task_exit(void)
93078 {
93079 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
93080- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
93081+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
93082 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
93083 }
93084diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
93085index fbb6240..f6c5097 100644
93086--- a/kernel/rcu/tree_trace.c
93087+++ b/kernel/rcu/tree_trace.c
93088@@ -125,7 +125,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
93089 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
93090 rdp->qs_pending);
93091 seq_printf(m, " dt=%d/%llx/%d df=%lu",
93092- atomic_read(&rdp->dynticks->dynticks),
93093+ atomic_read_unchecked(&rdp->dynticks->dynticks),
93094 rdp->dynticks->dynticks_nesting,
93095 rdp->dynticks->dynticks_nmi_nesting,
93096 rdp->dynticks_fqs);
93097@@ -186,17 +186,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
93098 struct rcu_state *rsp = (struct rcu_state *)m->private;
93099
93100 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
93101- atomic_long_read(&rsp->expedited_start),
93102+ atomic_long_read_unchecked(&rsp->expedited_start),
93103 atomic_long_read(&rsp->expedited_done),
93104- atomic_long_read(&rsp->expedited_wrap),
93105- atomic_long_read(&rsp->expedited_tryfail),
93106- atomic_long_read(&rsp->expedited_workdone1),
93107- atomic_long_read(&rsp->expedited_workdone2),
93108- atomic_long_read(&rsp->expedited_normal),
93109- atomic_long_read(&rsp->expedited_stoppedcpus),
93110- atomic_long_read(&rsp->expedited_done_tries),
93111- atomic_long_read(&rsp->expedited_done_lost),
93112- atomic_long_read(&rsp->expedited_done_exit));
93113+ atomic_long_read_unchecked(&rsp->expedited_wrap),
93114+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
93115+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
93116+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
93117+ atomic_long_read_unchecked(&rsp->expedited_normal),
93118+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
93119+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
93120+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
93121+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
93122 return 0;
93123 }
93124
93125diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
93126index e0d31a3..f4dafe3 100644
93127--- a/kernel/rcu/update.c
93128+++ b/kernel/rcu/update.c
93129@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
93130 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
93131 */
93132 if (till_stall_check < 3) {
93133- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
93134+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
93135 till_stall_check = 3;
93136 } else if (till_stall_check > 300) {
93137- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
93138+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
93139 till_stall_check = 300;
93140 }
93141 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
93142@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
93143 !ACCESS_ONCE(t->on_rq) ||
93144 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
93145 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
93146- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
93147+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
93148 list_del_init(&t->rcu_tasks_holdout_list);
93149 put_task_struct(t);
93150 return;
93151@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
93152 !is_idle_task(t)) {
93153 get_task_struct(t);
93154 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
93155- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
93156+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
93157 list_add(&t->rcu_tasks_holdout_list,
93158 &rcu_tasks_holdouts);
93159 }
93160@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
93161 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
93162 BUG_ON(IS_ERR(t));
93163 smp_mb(); /* Ensure others see full kthread. */
93164- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
93165+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
93166 mutex_unlock(&rcu_tasks_kthread_mutex);
93167 }
93168
93169diff --git a/kernel/resource.c b/kernel/resource.c
93170index 19f2357..ebe7f35 100644
93171--- a/kernel/resource.c
93172+++ b/kernel/resource.c
93173@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
93174
93175 static int __init ioresources_init(void)
93176 {
93177+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93178+#ifdef CONFIG_GRKERNSEC_PROC_USER
93179+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93180+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93181+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93182+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93183+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93184+#endif
93185+#else
93186 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93187 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93188+#endif
93189 return 0;
93190 }
93191 __initcall(ioresources_init);
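
With GRKERNSEC_PROC_ADD, /proc/ioports and /proc/iomem stop being world readable, hiding the physical address map from unprivileged users. The resulting modes, assuming the standard bit macros and procfs's mapping of a zero mode to a world-readable default for regular files:

	S_IRUSR            /* 0400: root only (GRKERNSEC_PROC_USER)            */
	S_IRUSR | S_IRGRP  /* 0440: root plus proc group (..._PROC_USERGROUP)  */
	0                  /* 0444 via the procfs default (vanilla behaviour)  */
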
93192diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
93193index eae160d..c9aa22e 100644
93194--- a/kernel/sched/auto_group.c
93195+++ b/kernel/sched/auto_group.c
93196@@ -11,7 +11,7 @@
93197
93198 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
93199 static struct autogroup autogroup_default;
93200-static atomic_t autogroup_seq_nr;
93201+static atomic_unchecked_t autogroup_seq_nr;
93202
93203 void __init autogroup_init(struct task_struct *init_task)
93204 {
93205@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
93206
93207 kref_init(&ag->kref);
93208 init_rwsem(&ag->lock);
93209- ag->id = atomic_inc_return(&autogroup_seq_nr);
93210+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
93211 ag->tg = tg;
93212 #ifdef CONFIG_RT_GROUP_SCHED
93213 /*
93214diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
93215index 8d0f35d..c16360d 100644
93216--- a/kernel/sched/completion.c
93217+++ b/kernel/sched/completion.c
93218@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
93219 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93220 * or number of jiffies left till timeout) if completed.
93221 */
93222-long __sched
93223+long __sched __intentional_overflow(-1)
93224 wait_for_completion_interruptible_timeout(struct completion *x,
93225 unsigned long timeout)
93226 {
93227@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
93228 *
93229 * Return: -ERESTARTSYS if interrupted, 0 if completed.
93230 */
93231-int __sched wait_for_completion_killable(struct completion *x)
93232+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
93233 {
93234 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
93235 if (t == -ERESTARTSYS)
93236@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
93237 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93238 * or number of jiffies left till timeout) if completed.
93239 */
93240-long __sched
93241+long __sched __intentional_overflow(-1)
93242 wait_for_completion_killable_timeout(struct completion *x,
93243 unsigned long timeout)
93244 {
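
The three completion waiters annotated above return a long that is negative for -ERESTARTSYS and otherwise a remaining-jiffies count, arithmetic the size_overflow GCC plugin would otherwise flag; __intentional_overflow(-1) whitelists the return value. A sketch of how such an annotation is commonly wired up (the guard macro name here is an assumption):

	#ifdef SIZE_OVERFLOW_PLUGIN
	# define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	# define __intentional_overflow(...)
	#endif
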
93245diff --git a/kernel/sched/core.c b/kernel/sched/core.c
93246index 62671f5..7b3505b 100644
93247--- a/kernel/sched/core.c
93248+++ b/kernel/sched/core.c
93249@@ -1847,7 +1847,7 @@ void set_numabalancing_state(bool enabled)
93250 int sysctl_numa_balancing(struct ctl_table *table, int write,
93251 void __user *buffer, size_t *lenp, loff_t *ppos)
93252 {
93253- struct ctl_table t;
93254+ ctl_table_no_const t;
93255 int err;
93256 int state = numabalancing_enabled;
93257
93258@@ -2297,8 +2297,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
93259 next->active_mm = oldmm;
93260 atomic_inc(&oldmm->mm_count);
93261 enter_lazy_tlb(oldmm, next);
93262- } else
93263+ } else {
93264 switch_mm(oldmm, mm, next);
93265+ populate_stack();
93266+ }
93267
93268 if (!prev->mm) {
93269 prev->active_mm = NULL;
93270@@ -3109,6 +3111,8 @@ int can_nice(const struct task_struct *p, const int nice)
93271 /* convert nice value [19,-20] to rlimit style value [1,40] */
93272 int nice_rlim = nice_to_rlimit(nice);
93273
93274+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
93275+
93276 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
93277 capable(CAP_SYS_NICE));
93278 }
93279@@ -3135,7 +3139,8 @@ SYSCALL_DEFINE1(nice, int, increment)
93280 nice = task_nice(current) + increment;
93281
93282 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
93283- if (increment < 0 && !can_nice(current, nice))
93284+ if (increment < 0 && (!can_nice(current, nice) ||
93285+ gr_handle_chroot_nice()))
93286 return -EPERM;
93287
93288 retval = security_task_setnice(current, nice);
93289@@ -3444,6 +3449,7 @@ recheck:
93290 if (policy != p->policy && !rlim_rtprio)
93291 return -EPERM;
93292
93293+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
93294 /* can't increase priority */
93295 if (attr->sched_priority > p->rt_priority &&
93296 attr->sched_priority > rlim_rtprio)
93297@@ -4931,6 +4937,7 @@ void idle_task_exit(void)
93298
93299 if (mm != &init_mm) {
93300 switch_mm(mm, &init_mm, current);
93301+ populate_stack();
93302 finish_arch_post_lock_switch();
93303 }
93304 mmdrop(mm);
93305@@ -5026,7 +5033,7 @@ static void migrate_tasks(unsigned int dead_cpu)
93306
93307 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
93308
93309-static struct ctl_table sd_ctl_dir[] = {
93310+static ctl_table_no_const sd_ctl_dir[] __read_only = {
93311 {
93312 .procname = "sched_domain",
93313 .mode = 0555,
93314@@ -5043,17 +5050,17 @@ static struct ctl_table sd_ctl_root[] = {
93315 {}
93316 };
93317
93318-static struct ctl_table *sd_alloc_ctl_entry(int n)
93319+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
93320 {
93321- struct ctl_table *entry =
93322+ ctl_table_no_const *entry =
93323 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
93324
93325 return entry;
93326 }
93327
93328-static void sd_free_ctl_entry(struct ctl_table **tablep)
93329+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
93330 {
93331- struct ctl_table *entry;
93332+ ctl_table_no_const *entry;
93333
93334 /*
93335 * In the intermediate directories, both the child directory and
93336@@ -5061,22 +5068,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
93337 * will always be set. In the lowest directory the names are
93338 * static strings and all have proc handlers.
93339 */
93340- for (entry = *tablep; entry->mode; entry++) {
93341- if (entry->child)
93342- sd_free_ctl_entry(&entry->child);
93343+ for (entry = tablep; entry->mode; entry++) {
93344+ if (entry->child) {
93345+ sd_free_ctl_entry(entry->child);
93346+ pax_open_kernel();
93347+ entry->child = NULL;
93348+ pax_close_kernel();
93349+ }
93350 if (entry->proc_handler == NULL)
93351 kfree(entry->procname);
93352 }
93353
93354- kfree(*tablep);
93355- *tablep = NULL;
93356+ kfree(tablep);
93357 }
93358
93359 static int min_load_idx = 0;
93360 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
93361
93362 static void
93363-set_table_entry(struct ctl_table *entry,
93364+set_table_entry(ctl_table_no_const *entry,
93365 const char *procname, void *data, int maxlen,
93366 umode_t mode, proc_handler *proc_handler,
93367 bool load_idx)
93368@@ -5096,7 +5106,7 @@ set_table_entry(struct ctl_table *entry,
93369 static struct ctl_table *
93370 sd_alloc_ctl_domain_table(struct sched_domain *sd)
93371 {
93372- struct ctl_table *table = sd_alloc_ctl_entry(14);
93373+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
93374
93375 if (table == NULL)
93376 return NULL;
93377@@ -5134,9 +5144,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
93378 return table;
93379 }
93380
93381-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
93382+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
93383 {
93384- struct ctl_table *entry, *table;
93385+ ctl_table_no_const *entry, *table;
93386 struct sched_domain *sd;
93387 int domain_num = 0, i;
93388 char buf[32];
93389@@ -5163,11 +5173,13 @@ static struct ctl_table_header *sd_sysctl_header;
93390 static void register_sched_domain_sysctl(void)
93391 {
93392 int i, cpu_num = num_possible_cpus();
93393- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
93394+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
93395 char buf[32];
93396
93397 WARN_ON(sd_ctl_dir[0].child);
93398+ pax_open_kernel();
93399 sd_ctl_dir[0].child = entry;
93400+ pax_close_kernel();
93401
93402 if (entry == NULL)
93403 return;
93404@@ -5190,8 +5202,12 @@ static void unregister_sched_domain_sysctl(void)
93405 if (sd_sysctl_header)
93406 unregister_sysctl_table(sd_sysctl_header);
93407 sd_sysctl_header = NULL;
93408- if (sd_ctl_dir[0].child)
93409- sd_free_ctl_entry(&sd_ctl_dir[0].child);
93410+ if (sd_ctl_dir[0].child) {
93411+ sd_free_ctl_entry(sd_ctl_dir[0].child);
93412+ pax_open_kernel();
93413+ sd_ctl_dir[0].child = NULL;
93414+ pax_close_kernel();
93415+ }
93416 }
93417 #else
93418 static void register_sched_domain_sysctl(void)
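
Because sd_ctl_dir is now __read_only, its two runtime writes to sd_ctl_dir[0].child are bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection on the current CPU. A sketch close to PaX's x86 variant (other architectures differ):

	static inline unsigned long native_pax_open_kernel(void)
	{
		unsigned long cr0;

		preempt_disable();
		barrier();
		cr0 = read_cr0() ^ X86_CR0_WP;	/* clear the write-protect bit */
		BUG_ON(cr0 & X86_CR0_WP);
		write_cr0(cr0);
		barrier();
		return cr0 ^ X86_CR0_WP;	/* the original WP-set value */
	}

pax_close_kernel() restores CR0.WP and re-enables preemption, so the writable window never leaks to another CPU.
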
93419diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
93420index 241213b..6a64c91 100644
93421--- a/kernel/sched/fair.c
93422+++ b/kernel/sched/fair.c
93423@@ -2092,7 +2092,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
93424
93425 static void reset_ptenuma_scan(struct task_struct *p)
93426 {
93427- ACCESS_ONCE(p->mm->numa_scan_seq)++;
93428+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
93429 p->mm->numa_scan_offset = 0;
93430 }
93431
93432@@ -7656,7 +7656,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
93433 * run_rebalance_domains is triggered when needed from the scheduler tick.
93434 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
93435 */
93436-static void run_rebalance_domains(struct softirq_action *h)
93437+static __latent_entropy void run_rebalance_domains(void)
93438 {
93439 struct rq *this_rq = this_rq();
93440 enum cpu_idle_type idle = this_rq->idle_balance ?
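
run_rebalance_domains() picks up __latent_entropy along with the new handler signature: functions carrying this attribute are instrumented by the latent_entropy GCC plugin so that their control flow and locals stir bits into an entropy accumulator, cheap seeding during early boot. The attribute boils down to (guard name assumed):

	#ifdef LATENT_ENTROPY_PLUGIN
	# define __latent_entropy __attribute__((latent_entropy))
	#else
	# define __latent_entropy
	#endif
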
93441diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
93442index dc0f435..ae2e085 100644
93443--- a/kernel/sched/sched.h
93444+++ b/kernel/sched/sched.h
93445@@ -1200,7 +1200,7 @@ struct sched_class {
93446 #ifdef CONFIG_FAIR_GROUP_SCHED
93447 void (*task_move_group) (struct task_struct *p, int on_rq);
93448 #endif
93449-};
93450+} __do_const;
93451
93452 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
93453 {
93454diff --git a/kernel/signal.c b/kernel/signal.c
93455index a390499..ebe9a21 100644
93456--- a/kernel/signal.c
93457+++ b/kernel/signal.c
93458@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
93459
93460 int print_fatal_signals __read_mostly;
93461
93462-static void __user *sig_handler(struct task_struct *t, int sig)
93463+static __sighandler_t sig_handler(struct task_struct *t, int sig)
93464 {
93465 return t->sighand->action[sig - 1].sa.sa_handler;
93466 }
93467
93468-static int sig_handler_ignored(void __user *handler, int sig)
93469+static int sig_handler_ignored(__sighandler_t handler, int sig)
93470 {
93471 /* Is it explicitly or implicitly ignored? */
93472 return handler == SIG_IGN ||
93473@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
93474
93475 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
93476 {
93477- void __user *handler;
93478+ __sighandler_t handler;
93479
93480 handler = sig_handler(t, sig);
93481
93482@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
93483 atomic_inc(&user->sigpending);
93484 rcu_read_unlock();
93485
93486+ if (!override_rlimit)
93487+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
93488+
93489 if (override_rlimit ||
93490 atomic_read(&user->sigpending) <=
93491 task_rlimit(t, RLIMIT_SIGPENDING)) {
93492@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
93493
93494 int unhandled_signal(struct task_struct *tsk, int sig)
93495 {
93496- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
93497+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
93498 if (is_global_init(tsk))
93499 return 1;
93500 if (handler != SIG_IGN && handler != SIG_DFL)
93501@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
93502 }
93503 }
93504
93505+ /* allow glibc communication via tgkill to other threads in our
93506+ thread group */
93507+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
93508+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
93509+ && gr_handle_signal(t, sig))
93510+ return -EPERM;
93511+
93512 return security_task_kill(t, info, sig, 0);
93513 }
93514
93515@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93516 return send_signal(sig, info, p, 1);
93517 }
93518
93519-static int
93520+int
93521 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93522 {
93523 return send_signal(sig, info, t, 0);
93524@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93525 unsigned long int flags;
93526 int ret, blocked, ignored;
93527 struct k_sigaction *action;
93528+ int is_unhandled = 0;
93529
93530 spin_lock_irqsave(&t->sighand->siglock, flags);
93531 action = &t->sighand->action[sig-1];
93532@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93533 }
93534 if (action->sa.sa_handler == SIG_DFL)
93535 t->signal->flags &= ~SIGNAL_UNKILLABLE;
93536+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
93537+ is_unhandled = 1;
93538 ret = specific_send_sig_info(sig, info, t);
93539 spin_unlock_irqrestore(&t->sighand->siglock, flags);
93540
93541+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
93542+ normal operation */
93543+ if (is_unhandled) {
93544+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
93545+ gr_handle_crash(t, sig);
93546+ }
93547+
93548 return ret;
93549 }
93550
93551@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93552 ret = check_kill_permission(sig, info, p);
93553 rcu_read_unlock();
93554
93555- if (!ret && sig)
93556+ if (!ret && sig) {
93557 ret = do_send_sig_info(sig, info, p, true);
93558+ if (!ret)
93559+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
93560+ }
93561
93562 return ret;
93563 }
93564@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
93565 int error = -ESRCH;
93566
93567 rcu_read_lock();
93568- p = find_task_by_vpid(pid);
93569+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93570+ /* allow glibc communication via tgkill to other threads in our
93571+ thread group */
93572+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93573+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93574+ p = find_task_by_vpid_unrestricted(pid);
93575+ else
93576+#endif
93577+ p = find_task_by_vpid(pid);
93578 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93579 error = check_kill_permission(sig, info, p);
93580 /*
93581@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93582 }
93583 seg = get_fs();
93584 set_fs(KERNEL_DS);
93585- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93586- (stack_t __force __user *) &uoss,
93587+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93588+ (stack_t __force_user *) &uoss,
93589 compat_user_stack_pointer());
93590 set_fs(seg);
93591 if (ret >= 0 && uoss_ptr) {
93592diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93593index 40190f2..8861d40 100644
93594--- a/kernel/smpboot.c
93595+++ b/kernel/smpboot.c
93596@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93597 }
93598 smpboot_unpark_thread(plug_thread, cpu);
93599 }
93600- list_add(&plug_thread->list, &hotplug_threads);
93601+ pax_list_add(&plug_thread->list, &hotplug_threads);
93602 out:
93603 mutex_unlock(&smpboot_threads_lock);
93604 put_online_cpus();
93605@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93606 {
93607 get_online_cpus();
93608 mutex_lock(&smpboot_threads_lock);
93609- list_del(&plug_thread->list);
93610+ pax_list_del(&plug_thread->list);
93611 smpboot_destroy_threads(plug_thread);
93612 mutex_unlock(&smpboot_threads_lock);
93613 put_online_cpus();
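
The smp_hotplug_thread descriptors registered here are declared __read_only elsewhere in this patch (rcu_cpu_thread_spec, softirq_threads), so the list_head embedded in them can only be linked through pax_list_add()/pax_list_del(), i.e. the stock list primitives under the write-protect toggle. A sketch, with the debug checking of the real helpers omitted:

	void pax_list_add(struct list_head *new, struct list_head *head)
	{
		pax_open_kernel();
		list_add(new, head);
		pax_close_kernel();
	}
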
93614diff --git a/kernel/softirq.c b/kernel/softirq.c
93615index 479e443..66d845e1 100644
93616--- a/kernel/softirq.c
93617+++ b/kernel/softirq.c
93618@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93619 EXPORT_SYMBOL(irq_stat);
93620 #endif
93621
93622-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93623+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93624
93625 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93626
93627@@ -270,7 +270,7 @@ restart:
93628 kstat_incr_softirqs_this_cpu(vec_nr);
93629
93630 trace_softirq_entry(vec_nr);
93631- h->action(h);
93632+ h->action();
93633 trace_softirq_exit(vec_nr);
93634 if (unlikely(prev_count != preempt_count())) {
93635 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93636@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93637 or_softirq_pending(1UL << nr);
93638 }
93639
93640-void open_softirq(int nr, void (*action)(struct softirq_action *))
93641+void __init open_softirq(int nr, void (*action)(void))
93642 {
93643 softirq_vec[nr].action = action;
93644 }
93645@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93646 }
93647 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93648
93649-static void tasklet_action(struct softirq_action *a)
93650+static void tasklet_action(void)
93651 {
93652 struct tasklet_struct *list;
93653
93654@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
93655 }
93656 }
93657
93658-static void tasklet_hi_action(struct softirq_action *a)
93659+static __latent_entropy void tasklet_hi_action(void)
93660 {
93661 struct tasklet_struct *list;
93662
93663@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
93664 .notifier_call = cpu_callback
93665 };
93666
93667-static struct smp_hotplug_thread softirq_threads = {
93668+static struct smp_hotplug_thread softirq_threads __read_only = {
93669 .store = &ksoftirqd,
93670 .thread_should_run = ksoftirqd_should_run,
93671 .thread_fn = run_ksoftirqd,
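
Three related hardenings land in kernel/softirq.c: the softirq vector becomes __read_only and page-aligned, open_softirq() is confined to __init, and handlers lose the never-used struct softirq_action * parameter, giving them the exact type void (*)(void). Every handler in the tree is converted to match; a hypothetical user would now read (MY_SOFTIRQ and the function name are placeholders):

	static __latent_entropy void my_softirq_action(void)
	{
		/* drain this CPU's pending work */
	}

	static int __init my_subsys_init(void)
	{
		open_softirq(MY_SOFTIRQ, my_softirq_action);	/* boot-time only */
		return 0;
	}

With the vector read-only after boot, an arbitrary-write primitive can no longer be turned into control flow by overwriting a softirq action pointer.
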
93672diff --git a/kernel/sys.c b/kernel/sys.c
93673index a03d9cd..55dbe9c 100644
93674--- a/kernel/sys.c
93675+++ b/kernel/sys.c
93676@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93677 error = -EACCES;
93678 goto out;
93679 }
93680+
93681+ if (gr_handle_chroot_setpriority(p, niceval)) {
93682+ error = -EACCES;
93683+ goto out;
93684+ }
93685+
93686 no_nice = security_task_setnice(p, niceval);
93687 if (no_nice) {
93688 error = no_nice;
93689@@ -365,6 +371,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93690 goto error;
93691 }
93692
93693+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93694+ goto error;
93695+
93696+ if (!gid_eq(new->gid, old->gid)) {
93697+ /* make sure we generate a learn log for what will
93698+ end up being a role transition after a full-learning
93699+ policy is generated
93700+ CAP_SETGID is required to perform a transition
93701+ we may not log a CAP_SETGID check above, e.g.
93702+ in the case where new rgid = old egid
93703+ */
93704+ gr_learn_cap(current, new, CAP_SETGID);
93705+ }
93706+
93707 if (rgid != (gid_t) -1 ||
93708 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93709 new->sgid = new->egid;
93710@@ -400,6 +420,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93711 old = current_cred();
93712
93713 retval = -EPERM;
93714+
93715+ if (gr_check_group_change(kgid, kgid, kgid))
93716+ goto error;
93717+
93718 if (ns_capable(old->user_ns, CAP_SETGID))
93719 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93720 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93721@@ -417,7 +441,7 @@ error:
93722 /*
93723 * change the user struct in a credentials set to match the new UID
93724 */
93725-static int set_user(struct cred *new)
93726+int set_user(struct cred *new)
93727 {
93728 struct user_struct *new_user;
93729
93730@@ -497,7 +521,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93731 goto error;
93732 }
93733
93734+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93735+ goto error;
93736+
93737 if (!uid_eq(new->uid, old->uid)) {
93738+ /* make sure we generate a learn log for what will
93739+ end up being a role transition after a full-learning
93740+ policy is generated
93741+ CAP_SETUID is required to perform a transition
93742+ we may not log a CAP_SETUID check above, e.g.
93743+ in the case where new ruid = old euid
93744+ */
93745+ gr_learn_cap(current, new, CAP_SETUID);
93746 retval = set_user(new);
93747 if (retval < 0)
93748 goto error;
93749@@ -547,6 +582,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93750 old = current_cred();
93751
93752 retval = -EPERM;
93753+
93754+ if (gr_check_crash_uid(kuid))
93755+ goto error;
93756+ if (gr_check_user_change(kuid, kuid, kuid))
93757+ goto error;
93758+
93759 if (ns_capable(old->user_ns, CAP_SETUID)) {
93760 new->suid = new->uid = kuid;
93761 if (!uid_eq(kuid, old->uid)) {
93762@@ -616,6 +657,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93763 goto error;
93764 }
93765
93766+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93767+ goto error;
93768+
93769 if (ruid != (uid_t) -1) {
93770 new->uid = kruid;
93771 if (!uid_eq(kruid, old->uid)) {
93772@@ -700,6 +744,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93773 goto error;
93774 }
93775
93776+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93777+ goto error;
93778+
93779 if (rgid != (gid_t) -1)
93780 new->gid = krgid;
93781 if (egid != (gid_t) -1)
93782@@ -764,12 +811,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93783 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93784 ns_capable(old->user_ns, CAP_SETUID)) {
93785 if (!uid_eq(kuid, old->fsuid)) {
93786+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93787+ goto error;
93788+
93789 new->fsuid = kuid;
93790 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93791 goto change_okay;
93792 }
93793 }
93794
93795+error:
93796 abort_creds(new);
93797 return old_fsuid;
93798
93799@@ -802,12 +853,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93800 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93801 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93802 ns_capable(old->user_ns, CAP_SETGID)) {
93803+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93804+ goto error;
93805+
93806 if (!gid_eq(kgid, old->fsgid)) {
93807 new->fsgid = kgid;
93808 goto change_okay;
93809 }
93810 }
93811
93812+error:
93813 abort_creds(new);
93814 return old_fsgid;
93815
93816@@ -1185,19 +1240,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93817 return -EFAULT;
93818
93819 down_read(&uts_sem);
93820- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93821+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93822 __OLD_UTS_LEN);
93823 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93824- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93825+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93826 __OLD_UTS_LEN);
93827 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93828- error |= __copy_to_user(&name->release, &utsname()->release,
93829+ error |= __copy_to_user(name->release, &utsname()->release,
93830 __OLD_UTS_LEN);
93831 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93832- error |= __copy_to_user(&name->version, &utsname()->version,
93833+ error |= __copy_to_user(name->version, &utsname()->version,
93834 __OLD_UTS_LEN);
93835 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93836- error |= __copy_to_user(&name->machine, &utsname()->machine,
93837+ error |= __copy_to_user(name->machine, &utsname()->machine,
93838 __OLD_UTS_LEN);
93839 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93840 up_read(&uts_sem);
93841@@ -1398,6 +1453,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93842 */
93843 new_rlim->rlim_cur = 1;
93844 }
93845+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93846+ is changed to a lower value. Since tasks can be created by the same
93847+ user in between this limit change and an execve by this task, force
93848+ a recheck only for this task by setting PF_NPROC_EXCEEDED
93849+ */
93850+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93851+ tsk->flags |= PF_NPROC_EXCEEDED;
93852 }
93853 if (!retval) {
93854 if (old_rlim)
93855diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93856index ce410bb..cd276f0 100644
93857--- a/kernel/sysctl.c
93858+++ b/kernel/sysctl.c
93859@@ -94,7 +94,6 @@
93860
93861
93862 #if defined(CONFIG_SYSCTL)
93863-
93864 /* External variables not in a header file. */
93865 extern int max_threads;
93866 extern int suid_dumpable;
93867@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93868
93869 /* Constants used for minimum and maximum */
93870 #ifdef CONFIG_LOCKUP_DETECTOR
93871-static int sixty = 60;
93872+static int sixty __read_only = 60;
93873 #endif
93874
93875-static int __maybe_unused neg_one = -1;
93876+static int __maybe_unused neg_one __read_only = -1;
93877
93878-static int zero;
93879-static int __maybe_unused one = 1;
93880-static int __maybe_unused two = 2;
93881-static int __maybe_unused four = 4;
93882-static unsigned long one_ul = 1;
93883-static int one_hundred = 100;
93884+static int zero __read_only = 0;
93885+static int __maybe_unused one __read_only = 1;
93886+static int __maybe_unused two __read_only = 2;
93887+static int __maybe_unused three __read_only = 3;
93888+static int __maybe_unused four __read_only = 4;
93889+static unsigned long one_ul __read_only = 1;
93890+static int one_hundred __read_only = 100;
93891 #ifdef CONFIG_PRINTK
93892-static int ten_thousand = 10000;
93893+static int ten_thousand __read_only = 10000;
93894 #endif
93895
93896 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93897@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93898 void __user *buffer, size_t *lenp, loff_t *ppos);
93899 #endif
93900
93901-#ifdef CONFIG_PRINTK
93902 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93903 void __user *buffer, size_t *lenp, loff_t *ppos);
93904-#endif
93905
93906 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93907 void __user *buffer, size_t *lenp, loff_t *ppos);
93908@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93909
93910 #endif
93911
93912+extern struct ctl_table grsecurity_table[];
93913+
93914 static struct ctl_table kern_table[];
93915 static struct ctl_table vm_table[];
93916 static struct ctl_table fs_table[];
93917@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93918 int sysctl_legacy_va_layout;
93919 #endif
93920
93921+#ifdef CONFIG_PAX_SOFTMODE
93922+static struct ctl_table pax_table[] = {
93923+ {
93924+ .procname = "softmode",
93925+ .data = &pax_softmode,
93926+ .maxlen = sizeof(unsigned int),
93927+ .mode = 0600,
93928+ .proc_handler = &proc_dointvec,
93929+ },
93930+
93931+ { }
93932+};
93933+#endif
93934+
93935 /* The default sysctl tables: */
93936
93937 static struct ctl_table sysctl_base_table[] = {
93938@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93939 #endif
93940
93941 static struct ctl_table kern_table[] = {
93942+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93943+ {
93944+ .procname = "grsecurity",
93945+ .mode = 0500,
93946+ .child = grsecurity_table,
93947+ },
93948+#endif
93949+
93950+#ifdef CONFIG_PAX_SOFTMODE
93951+ {
93952+ .procname = "pax",
93953+ .mode = 0500,
93954+ .child = pax_table,
93955+ },
93956+#endif
93957+
93958 {
93959 .procname = "sched_child_runs_first",
93960 .data = &sysctl_sched_child_runs_first,
93961@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93962 .data = &modprobe_path,
93963 .maxlen = KMOD_PATH_LEN,
93964 .mode = 0644,
93965- .proc_handler = proc_dostring,
93966+ .proc_handler = proc_dostring_modpriv,
93967 },
93968 {
93969 .procname = "modules_disabled",
93970@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93971 .extra1 = &zero,
93972 .extra2 = &one,
93973 },
93974+#endif
93975 {
93976 .procname = "kptr_restrict",
93977 .data = &kptr_restrict,
93978 .maxlen = sizeof(int),
93979 .mode = 0644,
93980 .proc_handler = proc_dointvec_minmax_sysadmin,
93981+#ifdef CONFIG_GRKERNSEC_HIDESYM
93982+ .extra1 = &two,
93983+#else
93984 .extra1 = &zero,
93985+#endif
93986 .extra2 = &two,
93987 },
93988-#endif
93989 {
93990 .procname = "ngroups_max",
93991 .data = &ngroups_max,
93992@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93993 */
93994 {
93995 .procname = "perf_event_paranoid",
93996- .data = &sysctl_perf_event_paranoid,
93997- .maxlen = sizeof(sysctl_perf_event_paranoid),
93998+ .data = &sysctl_perf_event_legitimately_concerned,
93999+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
94000 .mode = 0644,
94001- .proc_handler = proc_dointvec,
94002+ /* go ahead, be a hero */
94003+ .proc_handler = proc_dointvec_minmax_sysadmin,
94004+ .extra1 = &neg_one,
94005+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
94006+ .extra2 = &three,
94007+#else
94008+ .extra2 = &two,
94009+#endif
94010 },
94011 {
94012 .procname = "perf_event_mlock_kb",
94013@@ -1348,6 +1389,13 @@ static struct ctl_table vm_table[] = {
94014 .proc_handler = proc_dointvec_minmax,
94015 .extra1 = &zero,
94016 },
94017+ {
94018+ .procname = "heap_stack_gap",
94019+ .data = &sysctl_heap_stack_gap,
94020+ .maxlen = sizeof(sysctl_heap_stack_gap),
94021+ .mode = 0644,
94022+ .proc_handler = proc_doulongvec_minmax,
94023+ },
94024 #else
94025 {
94026 .procname = "nr_trim_pages",
94027@@ -1830,6 +1878,16 @@ int proc_dostring(struct ctl_table *table, int write,
94028 (char __user *)buffer, lenp, ppos);
94029 }
94030
94031+int proc_dostring_modpriv(struct ctl_table *table, int write,
94032+ void __user *buffer, size_t *lenp, loff_t *ppos)
94033+{
94034+ if (write && !capable(CAP_SYS_MODULE))
94035+ return -EPERM;
94036+
94037+ return _proc_do_string(table->data, table->maxlen, write,
94038+ buffer, lenp, ppos);
94039+}
94040+
94041 static size_t proc_skip_spaces(char **buf)
94042 {
94043 size_t ret;
94044@@ -1935,6 +1993,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
94045 len = strlen(tmp);
94046 if (len > *size)
94047 len = *size;
94048+ if (len > sizeof(tmp))
94049+ len = sizeof(tmp);
94050 if (copy_to_user(*buf, tmp, len))
94051 return -EFAULT;
94052 *size -= len;
94053@@ -2112,7 +2172,7 @@ int proc_dointvec(struct ctl_table *table, int write,
94054 static int proc_taint(struct ctl_table *table, int write,
94055 void __user *buffer, size_t *lenp, loff_t *ppos)
94056 {
94057- struct ctl_table t;
94058+ ctl_table_no_const t;
94059 unsigned long tmptaint = get_taint();
94060 int err;
94061
94062@@ -2140,7 +2200,6 @@ static int proc_taint(struct ctl_table *table, int write,
94063 return err;
94064 }
94065
94066-#ifdef CONFIG_PRINTK
94067 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94068 void __user *buffer, size_t *lenp, loff_t *ppos)
94069 {
94070@@ -2149,7 +2208,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94071
94072 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
94073 }
94074-#endif
94075
94076 struct do_proc_dointvec_minmax_conv_param {
94077 int *min;
94078@@ -2709,6 +2767,12 @@ int proc_dostring(struct ctl_table *table, int write,
94079 return -ENOSYS;
94080 }
94081
94082+int proc_dostring_modpriv(struct ctl_table *table, int write,
94083+ void __user *buffer, size_t *lenp, loff_t *ppos)
94084+{
94085+ return -ENOSYS;
94086+}
94087+
94088 int proc_dointvec(struct ctl_table *table, int write,
94089 void __user *buffer, size_t *lenp, loff_t *ppos)
94090 {
94091@@ -2765,5 +2829,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94092 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94093 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94094 EXPORT_SYMBOL(proc_dostring);
94095+EXPORT_SYMBOL(proc_dostring_modpriv);
94096 EXPORT_SYMBOL(proc_doulongvec_minmax);
94097 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
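
The sysctl hunks lean on ctl_table_no_const: struct ctl_table is constified tree-wide, and the few tables genuinely built or patched at runtime (proc_taint()'s local copy, the scheduler-domain tables in kernel/sched/core.c) opt out through a typedef of the form:

	typedef struct ctl_table __no_const ctl_table_no_const;

where __no_const is the constify plugin's opt-out attribute. The same file also raises the floor of kptr_restrict to 2 under GRKERNSEC_HIDESYM, hiding %pK pointers even from root, and gives perf_event_paranoid a level-3 ceiling under GRKERNSEC_PERF_HARDEN, at which unprivileged perf_event_open() is refused outright.
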
94098diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94099index 21f82c2..c1984e5 100644
94100--- a/kernel/taskstats.c
94101+++ b/kernel/taskstats.c
94102@@ -28,9 +28,12 @@
94103 #include <linux/fs.h>
94104 #include <linux/file.h>
94105 #include <linux/pid_namespace.h>
94106+#include <linux/grsecurity.h>
94107 #include <net/genetlink.h>
94108 #include <linux/atomic.h>
94109
94110+extern int gr_is_taskstats_denied(int pid);
94111+
94112 /*
94113 * Maximum length of a cpumask that can be specified in
94114 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94115@@ -567,6 +570,9 @@ err:
94116
94117 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94118 {
94119+ if (gr_is_taskstats_denied(current->pid))
94120+ return -EACCES;
94121+
94122 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
94123 return cmd_attr_register_cpumask(info);
94124 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
94125diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
94126index 1b001ed..55ef9e4 100644
94127--- a/kernel/time/alarmtimer.c
94128+++ b/kernel/time/alarmtimer.c
94129@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
94130 struct platform_device *pdev;
94131 int error = 0;
94132 int i;
94133- struct k_clock alarm_clock = {
94134+ static struct k_clock alarm_clock = {
94135 .clock_getres = alarm_clock_getres,
94136 .clock_get = alarm_clock_get,
94137 .timer_create = alarm_timer_create,
94138diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
94139index bee0c1f..a23fe2d 100644
94140--- a/kernel/time/hrtimer.c
94141+++ b/kernel/time/hrtimer.c
94142@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
94143 local_irq_restore(flags);
94144 }
94145
94146-static void run_hrtimer_softirq(struct softirq_action *h)
94147+static __latent_entropy void run_hrtimer_softirq(void)
94148 {
94149 hrtimer_peek_ahead_timers();
94150 }
94151diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
94152index 0075da7..63cc872 100644
94153--- a/kernel/time/posix-cpu-timers.c
94154+++ b/kernel/time/posix-cpu-timers.c
94155@@ -1449,14 +1449,14 @@ struct k_clock clock_posix_cpu = {
94156
94157 static __init int init_posix_cpu_timers(void)
94158 {
94159- struct k_clock process = {
94160+ static struct k_clock process = {
94161 .clock_getres = process_cpu_clock_getres,
94162 .clock_get = process_cpu_clock_get,
94163 .timer_create = process_cpu_timer_create,
94164 .nsleep = process_cpu_nsleep,
94165 .nsleep_restart = process_cpu_nsleep_restart,
94166 };
94167- struct k_clock thread = {
94168+ static struct k_clock thread = {
94169 .clock_getres = thread_cpu_clock_getres,
94170 .clock_get = thread_cpu_clock_get,
94171 .timer_create = thread_cpu_timer_create,
94172diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
94173index 31ea01f..7fc61ef 100644
94174--- a/kernel/time/posix-timers.c
94175+++ b/kernel/time/posix-timers.c
94176@@ -43,6 +43,7 @@
94177 #include <linux/hash.h>
94178 #include <linux/posix-clock.h>
94179 #include <linux/posix-timers.h>
94180+#include <linux/grsecurity.h>
94181 #include <linux/syscalls.h>
94182 #include <linux/wait.h>
94183 #include <linux/workqueue.h>
94184@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
94185 * which we beg off on and pass to do_sys_settimeofday().
94186 */
94187
94188-static struct k_clock posix_clocks[MAX_CLOCKS];
94189+static struct k_clock *posix_clocks[MAX_CLOCKS];
94190
94191 /*
94192 * These ones are defined below.
94193@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
94194 */
94195 static __init int init_posix_timers(void)
94196 {
94197- struct k_clock clock_realtime = {
94198+ static struct k_clock clock_realtime = {
94199 .clock_getres = hrtimer_get_res,
94200 .clock_get = posix_clock_realtime_get,
94201 .clock_set = posix_clock_realtime_set,
94202@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
94203 .timer_get = common_timer_get,
94204 .timer_del = common_timer_del,
94205 };
94206- struct k_clock clock_monotonic = {
94207+ static struct k_clock clock_monotonic = {
94208 .clock_getres = hrtimer_get_res,
94209 .clock_get = posix_ktime_get_ts,
94210 .nsleep = common_nsleep,
94211@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
94212 .timer_get = common_timer_get,
94213 .timer_del = common_timer_del,
94214 };
94215- struct k_clock clock_monotonic_raw = {
94216+ static struct k_clock clock_monotonic_raw = {
94217 .clock_getres = hrtimer_get_res,
94218 .clock_get = posix_get_monotonic_raw,
94219 };
94220- struct k_clock clock_realtime_coarse = {
94221+ static struct k_clock clock_realtime_coarse = {
94222 .clock_getres = posix_get_coarse_res,
94223 .clock_get = posix_get_realtime_coarse,
94224 };
94225- struct k_clock clock_monotonic_coarse = {
94226+ static struct k_clock clock_monotonic_coarse = {
94227 .clock_getres = posix_get_coarse_res,
94228 .clock_get = posix_get_monotonic_coarse,
94229 };
94230- struct k_clock clock_tai = {
94231+ static struct k_clock clock_tai = {
94232 .clock_getres = hrtimer_get_res,
94233 .clock_get = posix_get_tai,
94234 .nsleep = common_nsleep,
94235@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
94236 .timer_get = common_timer_get,
94237 .timer_del = common_timer_del,
94238 };
94239- struct k_clock clock_boottime = {
94240+ static struct k_clock clock_boottime = {
94241 .clock_getres = hrtimer_get_res,
94242 .clock_get = posix_get_boottime,
94243 .nsleep = common_nsleep,
94244@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
94245 return;
94246 }
94247
94248- posix_clocks[clock_id] = *new_clock;
94249+ posix_clocks[clock_id] = new_clock;
94250 }
94251 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
94252
94253@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
94254 return (id & CLOCKFD_MASK) == CLOCKFD ?
94255 &clock_posix_dynamic : &clock_posix_cpu;
94256
94257- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
94258+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
94259 return NULL;
94260- return &posix_clocks[id];
94261+ return posix_clocks[id];
94262 }
94263
94264 static int common_timer_create(struct k_itimer *new_timer)
94265@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
94266 struct k_clock *kc = clockid_to_kclock(which_clock);
94267 struct k_itimer *new_timer;
94268 int error, new_timer_id;
94269- sigevent_t event;
94270+ sigevent_t event = { };
94271 int it_id_set = IT_ID_NOT_SET;
94272
94273 if (!kc)
94274@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
94275 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
94276 return -EFAULT;
94277
94278+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
94279+ have their clock_set fptr set to a nosettime dummy function.
94280+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
94281+ call common_clock_set, which calls do_sys_settimeofday, which
94282+ we hook.
94283+ */
94284+
94285 return kc->clock_set(which_clock, &new_tp);
94286 }
94287
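
The posix-timers changes above follow one theme: every k_clock that used to be built on the stack and copied into a writable posix_clocks[] array becomes a static object, and the array holds pointers instead of copies. With the PaX constify plugin, structures made up of function pointers end up read-only, so registration can no longer copy into the table; clockid_to_kclock() gains a NULL check because unregistered slots are now empty pointers rather than zeroed structs. (The separate `sigevent_t event = { };` hunk zero-fills a stack structure that is only partially initialized when no sigevent is passed in.) A minimal userspace sketch of the pointer-table pattern, with simplified names:

#include <stddef.h>

struct clock_ops { int (*clock_getres)(void); };  /* stand-in for struct k_clock */

#define MAX_CLOCKS 16
static const struct clock_ops *clock_table[MAX_CLOCKS];  /* pointers, not copies */

static void register_clock(int id, const struct clock_ops *ops)
{
	if (id >= 0 && id < MAX_CLOCKS)
		clock_table[id] = ops;   /* *ops itself may live in read-only memory */
}

static const struct clock_ops *lookup_clock(int id)
{
	if (id < 0 || id >= MAX_CLOCKS ||
	    !clock_table[id] || !clock_table[id]->clock_getres)
		return NULL;             /* empty or incompletely registered slot */
	return clock_table[id];
}
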
94288diff --git a/kernel/time/time.c b/kernel/time/time.c
94289index 2c85b77..6530536 100644
94290--- a/kernel/time/time.c
94291+++ b/kernel/time/time.c
94292@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
94293 return error;
94294
94295 if (tz) {
94296+ /* do_settimeofday(), reached below when tv is set, already logs,
94297+ so only log here for a timezone-only change to avoid a duplicate */
94298+ if (!tv)
94299+ gr_log_timechange();
94300+
94301 sys_tz = *tz;
94302 update_vsyscall_tz();
94303 if (firsttime) {
94304diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94305index 91db941..a371671 100644
94306--- a/kernel/time/timekeeping.c
94307+++ b/kernel/time/timekeeping.c
94308@@ -15,6 +15,7 @@
94309 #include <linux/init.h>
94310 #include <linux/mm.h>
94311 #include <linux/sched.h>
94312+#include <linux/grsecurity.h>
94313 #include <linux/syscore_ops.h>
94314 #include <linux/clocksource.h>
94315 #include <linux/jiffies.h>
94316@@ -802,6 +803,8 @@ int do_settimeofday64(const struct timespec64 *ts)
94317 if (!timespec64_valid_strict(ts))
94318 return -EINVAL;
94319
94320+ gr_log_timechange();
94321+
94322 raw_spin_lock_irqsave(&timekeeper_lock, flags);
94323 write_seqcount_begin(&tk_core.seq);
94324
94325diff --git a/kernel/time/timer.c b/kernel/time/timer.c
94326index 2d3f5c5..7ed7dc5 100644
94327--- a/kernel/time/timer.c
94328+++ b/kernel/time/timer.c
94329@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
94330 /*
94331 * This function runs timers and the timer-tq in bottom half context.
94332 */
94333-static void run_timer_softirq(struct softirq_action *h)
94334+static __latent_entropy void run_timer_softirq(void)
94335 {
94336 struct tvec_base *base = __this_cpu_read(tvec_bases);
94337
94338@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
94339 *
94340 * In all cases the return value is guaranteed to be non-negative.
94341 */
94342-signed long __sched schedule_timeout(signed long timeout)
94343+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
94344 {
94345 struct timer_list timer;
94346 unsigned long expire;
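
The __intentional_overflow(-1) annotation on schedule_timeout() is consumed by the PaX size_overflow GCC plugin: it exempts the function's arithmetic from overflow instrumentation because wrapping here is deliberate. schedule_timeout() computes `expire = jiffies + timeout`, and callers routinely pass MAX_SCHEDULE_TIMEOUT (LONG_MAX) to mean "sleep until woken", so the addition is expected to wrap. A sketch of the intentional wrap (the kernel builds with -fno-strict-overflow, which makes this defined behaviour there):

#include <limits.h>

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

/* expire may wrap past LONG_MAX by design; the annotation tells the
 * size_overflow plugin not to treat that as a bug. */
static long remaining_jiffies(long now, long timeout, long woken_at)
{
	long expire = now + timeout;   /* intentional signed wrap */
	return expire - woken_at;      /* <= 0 means the timeout elapsed */
}
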
94347diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94348index 61ed862..3b52c65 100644
94349--- a/kernel/time/timer_list.c
94350+++ b/kernel/time/timer_list.c
94351@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94352
94353 static void print_name_offset(struct seq_file *m, void *sym)
94354 {
94355+#ifdef CONFIG_GRKERNSEC_HIDESYM
94356+ SEQ_printf(m, "<%p>", NULL);
94357+#else
94358 char symname[KSYM_NAME_LEN];
94359
94360 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94361 SEQ_printf(m, "<%pK>", sym);
94362 else
94363 SEQ_printf(m, "%s", symname);
94364+#endif
94365 }
94366
94367 static void
94368@@ -119,7 +123,11 @@ next_one:
94369 static void
94370 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94371 {
94372+#ifdef CONFIG_GRKERNSEC_HIDESYM
94373+ SEQ_printf(m, " .base: %p\n", NULL);
94374+#else
94375 SEQ_printf(m, " .base: %pK\n", base);
94376+#endif
94377 SEQ_printf(m, " .index: %d\n",
94378 base->index);
94379 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94380@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
94381 {
94382 struct proc_dir_entry *pe;
94383
94384+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94385+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94386+#else
94387 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94388+#endif
94389 if (!pe)
94390 return -ENOMEM;
94391 return 0;
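
Both timer_list.c hunks follow the GRKERNSEC_HIDESYM recipe: when the option is enabled, anything that would reveal a kernel address prints a censored placeholder instead, and under GRKERNSEC_PROC_ADD the /proc entry is created 0400 (root-only) rather than 0444. A userspace model of the print helper (lookup_symbol_name() is stubbed out here, and the fallback behaviour is simplified):

#include <stdio.h>

static int lookup_symbol_name(unsigned long addr, char *buf)
{
	(void)addr; (void)buf;
	return -1;   /* stub: pretend resolution failed */
}

/* Resolve to a symbol name if possible; never print the raw address
 * when symbols are hidden. */
static void print_name_or_censored(unsigned long addr, int hide_syms)
{
	char symname[128];

	if (hide_syms || lookup_symbol_name(addr, symname) < 0)
		printf("<%p>", (void *)0);   /* leak nothing */
	else
		printf("%s", symname);
}
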
94392diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94393index 1fb08f2..ca4bb1e 100644
94394--- a/kernel/time/timer_stats.c
94395+++ b/kernel/time/timer_stats.c
94396@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94397 static unsigned long nr_entries;
94398 static struct entry entries[MAX_ENTRIES];
94399
94400-static atomic_t overflow_count;
94401+static atomic_unchecked_t overflow_count;
94402
94403 /*
94404 * The entries are in a hash-table, for fast lookup:
94405@@ -140,7 +140,7 @@ static void reset_entries(void)
94406 nr_entries = 0;
94407 memset(entries, 0, sizeof(entries));
94408 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94409- atomic_set(&overflow_count, 0);
94410+ atomic_set_unchecked(&overflow_count, 0);
94411 }
94412
94413 static struct entry *alloc_entry(void)
94414@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94415 if (likely(entry))
94416 entry->count++;
94417 else
94418- atomic_inc(&overflow_count);
94419+ atomic_inc_unchecked(&overflow_count);
94420
94421 out_unlock:
94422 raw_spin_unlock_irqrestore(lock, flags);
94423@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94424
94425 static void print_name_offset(struct seq_file *m, unsigned long addr)
94426 {
94427+#ifdef CONFIG_GRKERNSEC_HIDESYM
94428+ seq_printf(m, "<%p>", NULL);
94429+#else
94430 char symname[KSYM_NAME_LEN];
94431
94432 if (lookup_symbol_name(addr, symname) < 0)
94433- seq_printf(m, "<%p>", (void *)addr);
94434+ seq_printf(m, "<%pK>", (void *)addr);
94435 else
94436 seq_printf(m, "%s", symname);
94437+#endif
94438 }
94439
94440 static int tstats_show(struct seq_file *m, void *v)
94441@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
94442
94443 seq_puts(m, "Timer Stats Version: v0.3\n");
94444 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94445- if (atomic_read(&overflow_count))
94446- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
94447+ if (atomic_read_unchecked(&overflow_count))
94448+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
94449 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
94450
94451 for (i = 0; i < nr_entries; i++) {
94452@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
94453 {
94454 struct proc_dir_entry *pe;
94455
94456+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94457+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
94458+#else
94459 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
94460+#endif
94461 if (!pe)
94462 return -ENOMEM;
94463 return 0;
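
The atomic_t to atomic_unchecked_t conversions here (and the many like them in kernel/trace below) are the flip side of PaX REFCOUNT: once REFCOUNT makes every atomic_t trap on overflow to kill refcount-overflow exploits, counters that may legitimately wrap, such as a pure statistic like overflow_count, must opt out via the _unchecked variants. A userspace model of the opt-out type (the kernel versions are arch-specific; this only shows the intent):

#include <stdatomic.h>

/* Wrapping is acceptable for statistics, so no overflow trap applies. */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *a)
{
	atomic_fetch_add_explicit(&a->counter, 1, memory_order_relaxed);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *a)
{
	return atomic_load_explicit(&a->counter, memory_order_relaxed);
}

static inline void atomic_set_unchecked(atomic_unchecked_t *a, int v)
{
	atomic_store_explicit(&a->counter, v, memory_order_relaxed);
}
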
94464diff --git a/kernel/torture.c b/kernel/torture.c
94465index dd70993..0bf694b 100644
94466--- a/kernel/torture.c
94467+++ b/kernel/torture.c
94468@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
94469 mutex_lock(&fullstop_mutex);
94470 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
94471 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
94472- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
94473+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
94474 } else {
94475 pr_warn("Concurrent rmmod and shutdown illegal!\n");
94476 }
94477@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
94478 if (!torture_must_stop()) {
94479 if (stutter > 1) {
94480 schedule_timeout_interruptible(stutter - 1);
94481- ACCESS_ONCE(stutter_pause_test) = 2;
94482+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
94483 }
94484 schedule_timeout_interruptible(1);
94485- ACCESS_ONCE(stutter_pause_test) = 1;
94486+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
94487 }
94488 if (!torture_must_stop())
94489 schedule_timeout_interruptible(stutter);
94490- ACCESS_ONCE(stutter_pause_test) = 0;
94491+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
94492 torture_shutdown_absorb("torture_stutter");
94493 } while (!torture_must_stop());
94494 torture_kthread_stopping("torture_stutter");
94495@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
94496 schedule_timeout_uninterruptible(10);
94497 return true;
94498 }
94499- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
94500+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
94501 mutex_unlock(&fullstop_mutex);
94502 torture_shutdown_cleanup();
94503 torture_shuffle_cleanup();
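
ACCESS_ONCE() to ACCESS_ONCE_RW() is another constify-driven rename that recurs through the rest of the patch (kernel/workqueue.c and lib/average.c below use it too). grsecurity redefines ACCESS_ONCE() with a const-qualified cast so that stray writes through it fail to compile once data has been made read-only; writes that are genuinely intended must say so with ACCESS_ONCE_RW(). A sketch of the two definitions, assuming the grsecurity form:

/* Reads go through a const view; writes must opt in explicitly. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))
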
94504diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
94505index 483cecf..ac46091 100644
94506--- a/kernel/trace/blktrace.c
94507+++ b/kernel/trace/blktrace.c
94508@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
94509 struct blk_trace *bt = filp->private_data;
94510 char buf[16];
94511
94512- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
94513+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
94514
94515 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
94516 }
94517@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
94518 return 1;
94519
94520 bt = buf->chan->private_data;
94521- atomic_inc(&bt->dropped);
94522+ atomic_inc_unchecked(&bt->dropped);
94523 return 0;
94524 }
94525
94526@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
94527
94528 bt->dir = dir;
94529 bt->dev = dev;
94530- atomic_set(&bt->dropped, 0);
94531+ atomic_set_unchecked(&bt->dropped, 0);
94532 INIT_LIST_HEAD(&bt->running_list);
94533
94534 ret = -EIO;
94535diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
94536index 4f22802..bd268b1 100644
94537--- a/kernel/trace/ftrace.c
94538+++ b/kernel/trace/ftrace.c
94539@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94540 if (unlikely(ftrace_disabled))
94541 return 0;
94542
94543+ ret = ftrace_arch_code_modify_prepare();
94544+ FTRACE_WARN_ON(ret);
94545+ if (ret)
94546+ return 0;
94547+
94548 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
94549+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
94550 if (ret) {
94551 ftrace_bug(ret, rec);
94552- return 0;
94553 }
94554- return 1;
94555+ return ret ? 0 : 1;
94556 }
94557
94558 /*
94559@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
94560 if (!count)
94561 return 0;
94562
94563+ pax_open_kernel();
94564 sort(start, count, sizeof(*start),
94565 ftrace_cmp_ips, ftrace_swap_ips);
94566+ pax_close_kernel();
94567
94568 start_pg = ftrace_allocate_pages(count);
94569 if (!start_pg)
94570@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
94571
94572 if (t->ret_stack == NULL) {
94573 atomic_set(&t->tracing_graph_pause, 0);
94574- atomic_set(&t->trace_overrun, 0);
94575+ atomic_set_unchecked(&t->trace_overrun, 0);
94576 t->curr_ret_stack = -1;
94577 /* Make sure the tasks see the -1 first: */
94578 smp_wmb();
94579@@ -5876,7 +5883,7 @@ static void
94580 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
94581 {
94582 atomic_set(&t->tracing_graph_pause, 0);
94583- atomic_set(&t->trace_overrun, 0);
94584+ atomic_set_unchecked(&t->trace_overrun, 0);
94585 t->ftrace_timestamp = 0;
94586 /* make curr_ret_stack visible before we add the ret_stack */
94587 smp_wmb();
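
Two hardening interactions show up in ftrace.c. First, ftrace_code_disable() now brackets ftrace_make_nop() with ftrace_arch_code_modify_prepare()/post_process(), since patching instructions requires the text to be made writable first. Second, the mcount location table sorted by ftrace_process_locs() lives in memory that KERNEXEC keeps read-only, so sort() runs inside a pax_open_kernel()/pax_close_kernel() window. A userspace analogue of that window using mprotect() (the region must be page-aligned; names are stand-ins):

#include <stddef.h>
#include <sys/mman.h>

/* Temporarily make a read-only, page-aligned region writable around one
 * deliberate modification, then seal it again. */
static void write_protected(void *page, size_t len, void (*mutate)(void *))
{
	mprotect(page, len, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
	mutate(page);
	mprotect(page, len, PROT_READ);               /* pax_close_kernel() */
}
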
94588diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94589index 5040d44..d43b2b9 100644
94590--- a/kernel/trace/ring_buffer.c
94591+++ b/kernel/trace/ring_buffer.c
94592@@ -348,9 +348,9 @@ struct buffer_data_page {
94593 */
94594 struct buffer_page {
94595 struct list_head list; /* list of buffer pages */
94596- local_t write; /* index for next write */
94597+ local_unchecked_t write; /* index for next write */
94598 unsigned read; /* index for next read */
94599- local_t entries; /* entries on this page */
94600+ local_unchecked_t entries; /* entries on this page */
94601 unsigned long real_end; /* real end of data */
94602 struct buffer_data_page *page; /* Actual data page */
94603 };
94604@@ -471,11 +471,11 @@ struct ring_buffer_per_cpu {
94605 unsigned long last_overrun;
94606 local_t entries_bytes;
94607 local_t entries;
94608- local_t overrun;
94609- local_t commit_overrun;
94610- local_t dropped_events;
94611+ local_unchecked_t overrun;
94612+ local_unchecked_t commit_overrun;
94613+ local_unchecked_t dropped_events;
94614 local_t committing;
94615- local_t commits;
94616+ local_unchecked_t commits;
94617 unsigned long read;
94618 unsigned long read_bytes;
94619 u64 write_stamp;
94620@@ -1045,8 +1045,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94621 *
94622 * We add a counter to the write field to denote this.
94623 */
94624- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94625- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94626+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94627+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94628
94629 /*
94630 * Just make sure we have seen our old_write and synchronize
94631@@ -1074,8 +1074,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94632 * cmpxchg to only update if an interrupt did not already
94633 * do it for us. If the cmpxchg fails, we don't care.
94634 */
94635- (void)local_cmpxchg(&next_page->write, old_write, val);
94636- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94637+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94638+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94639
94640 /*
94641 * No need to worry about races with clearing out the commit.
94642@@ -1443,12 +1443,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94643
94644 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94645 {
94646- return local_read(&bpage->entries) & RB_WRITE_MASK;
94647+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94648 }
94649
94650 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94651 {
94652- return local_read(&bpage->write) & RB_WRITE_MASK;
94653+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94654 }
94655
94656 static int
94657@@ -1543,7 +1543,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94658 * bytes consumed in ring buffer from here.
94659 * Increment overrun to account for the lost events.
94660 */
94661- local_add(page_entries, &cpu_buffer->overrun);
94662+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94663 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94664 }
94665
94666@@ -2105,7 +2105,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94667 * it is our responsibility to update
94668 * the counters.
94669 */
94670- local_add(entries, &cpu_buffer->overrun);
94671+ local_add_unchecked(entries, &cpu_buffer->overrun);
94672 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94673
94674 /*
94675@@ -2255,7 +2255,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94676 if (tail == BUF_PAGE_SIZE)
94677 tail_page->real_end = 0;
94678
94679- local_sub(length, &tail_page->write);
94680+ local_sub_unchecked(length, &tail_page->write);
94681 return;
94682 }
94683
94684@@ -2290,7 +2290,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94685 rb_event_set_padding(event);
94686
94687 /* Set the write back to the previous setting */
94688- local_sub(length, &tail_page->write);
94689+ local_sub_unchecked(length, &tail_page->write);
94690 return;
94691 }
94692
94693@@ -2302,7 +2302,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94694
94695 /* Set write to end of buffer */
94696 length = (tail + length) - BUF_PAGE_SIZE;
94697- local_sub(length, &tail_page->write);
94698+ local_sub_unchecked(length, &tail_page->write);
94699 }
94700
94701 /*
94702@@ -2328,7 +2328,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94703 * about it.
94704 */
94705 if (unlikely(next_page == commit_page)) {
94706- local_inc(&cpu_buffer->commit_overrun);
94707+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94708 goto out_reset;
94709 }
94710
94711@@ -2358,7 +2358,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94712 * this is easy, just stop here.
94713 */
94714 if (!(buffer->flags & RB_FL_OVERWRITE)) {
94715- local_inc(&cpu_buffer->dropped_events);
94716+ local_inc_unchecked(&cpu_buffer->dropped_events);
94717 goto out_reset;
94718 }
94719
94720@@ -2384,7 +2384,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94721 cpu_buffer->tail_page) &&
94722 (cpu_buffer->commit_page ==
94723 cpu_buffer->reader_page))) {
94724- local_inc(&cpu_buffer->commit_overrun);
94725+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94726 goto out_reset;
94727 }
94728 }
94729@@ -2432,7 +2432,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94730 length += RB_LEN_TIME_EXTEND;
94731
94732 tail_page = cpu_buffer->tail_page;
94733- write = local_add_return(length, &tail_page->write);
94734+ write = local_add_return_unchecked(length, &tail_page->write);
94735
94736 /* set write to only the index of the write */
94737 write &= RB_WRITE_MASK;
94738@@ -2456,7 +2456,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94739 kmemcheck_annotate_bitfield(event, bitfield);
94740 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94741
94742- local_inc(&tail_page->entries);
94743+ local_inc_unchecked(&tail_page->entries);
94744
94745 /*
94746 * If this is the first commit on the page, then update
94747@@ -2489,7 +2489,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94748
94749 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94750 unsigned long write_mask =
94751- local_read(&bpage->write) & ~RB_WRITE_MASK;
94752+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94753 unsigned long event_length = rb_event_length(event);
94754 /*
94755 * This is on the tail page. It is possible that
94756@@ -2499,7 +2499,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94757 */
94758 old_index += write_mask;
94759 new_index += write_mask;
94760- index = local_cmpxchg(&bpage->write, old_index, new_index);
94761+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94762 if (index == old_index) {
94763 /* update counters */
94764 local_sub(event_length, &cpu_buffer->entries_bytes);
94765@@ -2514,7 +2514,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94766 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
94767 {
94768 local_inc(&cpu_buffer->committing);
94769- local_inc(&cpu_buffer->commits);
94770+ local_inc_unchecked(&cpu_buffer->commits);
94771 }
94772
94773 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94774@@ -2526,7 +2526,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94775 return;
94776
94777 again:
94778- commits = local_read(&cpu_buffer->commits);
94779+ commits = local_read_unchecked(&cpu_buffer->commits);
94780 /* synchronize with interrupts */
94781 barrier();
94782 if (local_read(&cpu_buffer->committing) == 1)
94783@@ -2542,7 +2542,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94784 * updating of the commit page and the clearing of the
94785 * committing counter.
94786 */
94787- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
94788+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
94789 !local_read(&cpu_buffer->committing)) {
94790 local_inc(&cpu_buffer->committing);
94791 goto again;
94792@@ -2572,7 +2572,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
94793 barrier();
94794 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
94795 local_dec(&cpu_buffer->committing);
94796- local_dec(&cpu_buffer->commits);
94797+ local_dec_unchecked(&cpu_buffer->commits);
94798 return NULL;
94799 }
94800 #endif
94801@@ -2902,7 +2902,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94802
94803 /* Do the likely case first */
94804 if (likely(bpage->page == (void *)addr)) {
94805- local_dec(&bpage->entries);
94806+ local_dec_unchecked(&bpage->entries);
94807 return;
94808 }
94809
94810@@ -2914,7 +2914,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94811 start = bpage;
94812 do {
94813 if (bpage->page == (void *)addr) {
94814- local_dec(&bpage->entries);
94815+ local_dec_unchecked(&bpage->entries);
94816 return;
94817 }
94818 rb_inc_page(cpu_buffer, &bpage);
94819@@ -3198,7 +3198,7 @@ static inline unsigned long
94820 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94821 {
94822 return local_read(&cpu_buffer->entries) -
94823- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94824+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94825 }
94826
94827 /**
94828@@ -3287,7 +3287,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94829 return 0;
94830
94831 cpu_buffer = buffer->buffers[cpu];
94832- ret = local_read(&cpu_buffer->overrun);
94833+ ret = local_read_unchecked(&cpu_buffer->overrun);
94834
94835 return ret;
94836 }
94837@@ -3310,7 +3310,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94838 return 0;
94839
94840 cpu_buffer = buffer->buffers[cpu];
94841- ret = local_read(&cpu_buffer->commit_overrun);
94842+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94843
94844 return ret;
94845 }
94846@@ -3332,7 +3332,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
94847 return 0;
94848
94849 cpu_buffer = buffer->buffers[cpu];
94850- ret = local_read(&cpu_buffer->dropped_events);
94851+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
94852
94853 return ret;
94854 }
94855@@ -3395,7 +3395,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94856 /* if you care about this being correct, lock the buffer */
94857 for_each_buffer_cpu(buffer, cpu) {
94858 cpu_buffer = buffer->buffers[cpu];
94859- overruns += local_read(&cpu_buffer->overrun);
94860+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94861 }
94862
94863 return overruns;
94864@@ -3566,8 +3566,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94865 /*
94866 * Reset the reader page to size zero.
94867 */
94868- local_set(&cpu_buffer->reader_page->write, 0);
94869- local_set(&cpu_buffer->reader_page->entries, 0);
94870+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94871+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94872 local_set(&cpu_buffer->reader_page->page->commit, 0);
94873 cpu_buffer->reader_page->real_end = 0;
94874
94875@@ -3601,7 +3601,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94876 * want to compare with the last_overrun.
94877 */
94878 smp_mb();
94879- overwrite = local_read(&(cpu_buffer->overrun));
94880+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94881
94882 /*
94883 * Here's the tricky part.
94884@@ -4173,8 +4173,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94885
94886 cpu_buffer->head_page
94887 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94888- local_set(&cpu_buffer->head_page->write, 0);
94889- local_set(&cpu_buffer->head_page->entries, 0);
94890+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94891+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94892 local_set(&cpu_buffer->head_page->page->commit, 0);
94893
94894 cpu_buffer->head_page->read = 0;
94895@@ -4184,18 +4184,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94896
94897 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94898 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94899- local_set(&cpu_buffer->reader_page->write, 0);
94900- local_set(&cpu_buffer->reader_page->entries, 0);
94901+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94902+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94903 local_set(&cpu_buffer->reader_page->page->commit, 0);
94904 cpu_buffer->reader_page->read = 0;
94905
94906 local_set(&cpu_buffer->entries_bytes, 0);
94907- local_set(&cpu_buffer->overrun, 0);
94908- local_set(&cpu_buffer->commit_overrun, 0);
94909- local_set(&cpu_buffer->dropped_events, 0);
94910+ local_set_unchecked(&cpu_buffer->overrun, 0);
94911+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94912+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
94913 local_set(&cpu_buffer->entries, 0);
94914 local_set(&cpu_buffer->committing, 0);
94915- local_set(&cpu_buffer->commits, 0);
94916+ local_set_unchecked(&cpu_buffer->commits, 0);
94917 cpu_buffer->read = 0;
94918 cpu_buffer->read_bytes = 0;
94919
94920@@ -4596,8 +4596,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94921 rb_init_page(bpage);
94922 bpage = reader->page;
94923 reader->page = *data_page;
94924- local_set(&reader->write, 0);
94925- local_set(&reader->entries, 0);
94926+ local_set_unchecked(&reader->write, 0);
94927+ local_set_unchecked(&reader->entries, 0);
94928 reader->read = 0;
94929 *data_page = bpage;
94930
94931diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94932index 62c6506..5c25989 100644
94933--- a/kernel/trace/trace.c
94934+++ b/kernel/trace/trace.c
94935@@ -3500,7 +3500,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94936 return 0;
94937 }
94938
94939-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94940+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94941 {
94942 /* do nothing if flag is already set */
94943 if (!!(trace_flags & mask) == !!enabled)
94944diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94945index dd8205a..1aae87a 100644
94946--- a/kernel/trace/trace.h
94947+++ b/kernel/trace/trace.h
94948@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94949 void trace_printk_init_buffers(void);
94950 void trace_printk_start_comm(void);
94951 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94952-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94953+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94954
94955 /*
94956 * Normal trace_printk() and friends allocates special buffers
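
The matching trace.c/trace.h hunks widen set_tracer_flag()'s mask parameter from unsigned int to unsigned long so that it matches the width of the trace_flags word it is tested against, presumably to satisfy the size_overflow plugin's type checks; mixing the two widths invites implicit truncation. In miniature:

static unsigned long trace_flags;   /* the flag word is a long */

/* Passing the mask as unsigned int would silently truncate any flag
 * above bit 31 on 64-bit; matching widths avoids that. */
static int flag_already_set(unsigned long mask, int enabled)
{
	return !!(trace_flags & mask) == !!enabled;
}
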
94957diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94958index 57b67b1..66082a9 100644
94959--- a/kernel/trace/trace_clock.c
94960+++ b/kernel/trace/trace_clock.c
94961@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94962 return now;
94963 }
94964
94965-static atomic64_t trace_counter;
94966+static atomic64_unchecked_t trace_counter;
94967
94968 /*
94969 * trace_clock_counter(): simply an atomic counter.
94970@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94971 */
94972 u64 notrace trace_clock_counter(void)
94973 {
94974- return atomic64_add_return(1, &trace_counter);
94975+ return atomic64_inc_return_unchecked(&trace_counter);
94976 }
94977diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94978index db54dda..b9e4f03 100644
94979--- a/kernel/trace/trace_events.c
94980+++ b/kernel/trace/trace_events.c
94981@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94982 return 0;
94983 }
94984
94985-struct ftrace_module_file_ops;
94986 static void __add_event_to_tracers(struct ftrace_event_call *call);
94987
94988 /* Add an additional event_call dynamically */
94989diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94990index 2d25ad1..5bfd931 100644
94991--- a/kernel/trace/trace_functions_graph.c
94992+++ b/kernel/trace/trace_functions_graph.c
94993@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94994
94995 /* The return trace stack is full */
94996 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94997- atomic_inc(&current->trace_overrun);
94998+ atomic_inc_unchecked(&current->trace_overrun);
94999 return -EBUSY;
95000 }
95001
95002@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
95003 *ret = current->ret_stack[index].ret;
95004 trace->func = current->ret_stack[index].func;
95005 trace->calltime = current->ret_stack[index].calltime;
95006- trace->overrun = atomic_read(&current->trace_overrun);
95007+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
95008 trace->depth = index;
95009 }
95010
95011diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95012index 7a9ba62..2e0e4a1 100644
95013--- a/kernel/trace/trace_mmiotrace.c
95014+++ b/kernel/trace/trace_mmiotrace.c
95015@@ -24,7 +24,7 @@ struct header_iter {
95016 static struct trace_array *mmio_trace_array;
95017 static bool overrun_detected;
95018 static unsigned long prev_overruns;
95019-static atomic_t dropped_count;
95020+static atomic_unchecked_t dropped_count;
95021
95022 static void mmio_reset_data(struct trace_array *tr)
95023 {
95024@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
95025
95026 static unsigned long count_overruns(struct trace_iterator *iter)
95027 {
95028- unsigned long cnt = atomic_xchg(&dropped_count, 0);
95029+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95030 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
95031
95032 if (over > prev_overruns)
95033@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95034 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95035 sizeof(*entry), 0, pc);
95036 if (!event) {
95037- atomic_inc(&dropped_count);
95038+ atomic_inc_unchecked(&dropped_count);
95039 return;
95040 }
95041 entry = ring_buffer_event_data(event);
95042@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95043 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95044 sizeof(*entry), 0, pc);
95045 if (!event) {
95046- atomic_inc(&dropped_count);
95047+ atomic_inc_unchecked(&dropped_count);
95048 return;
95049 }
95050 entry = ring_buffer_event_data(event);
95051diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95052index 692bf71..6d9a9cd 100644
95053--- a/kernel/trace/trace_output.c
95054+++ b/kernel/trace/trace_output.c
95055@@ -751,14 +751,16 @@ int register_ftrace_event(struct trace_event *event)
95056 goto out;
95057 }
95058
95059+ pax_open_kernel();
95060 if (event->funcs->trace == NULL)
95061- event->funcs->trace = trace_nop_print;
95062+ *(void **)&event->funcs->trace = trace_nop_print;
95063 if (event->funcs->raw == NULL)
95064- event->funcs->raw = trace_nop_print;
95065+ *(void **)&event->funcs->raw = trace_nop_print;
95066 if (event->funcs->hex == NULL)
95067- event->funcs->hex = trace_nop_print;
95068+ *(void **)&event->funcs->hex = trace_nop_print;
95069 if (event->funcs->binary == NULL)
95070- event->funcs->binary = trace_nop_print;
95071+ *(void **)&event->funcs->binary = trace_nop_print;
95072+ pax_close_kernel();
95073
95074 key = event->type & (EVENT_HASHSIZE - 1);
95075
95076diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
95077index e694c9f..6775a38 100644
95078--- a/kernel/trace/trace_seq.c
95079+++ b/kernel/trace/trace_seq.c
95080@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
95081 return 0;
95082 }
95083
95084- seq_buf_path(&s->seq, path, "\n");
95085+ seq_buf_path(&s->seq, path, "\n\\");
95086
95087 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
95088 s->seq.len = save_len;
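
The one-character change to trace_seq_path() widens the escape set passed to seq_buf_path() from "\n" to "\n\\": a crafted file name containing literal backslashes could otherwise forge escape sequences and spoof entries in trace output. A userspace model of escaping against a caller-supplied set (the hex-escape form is illustrative):

#include <stdio.h>
#include <string.h>

/* Print s, hex-escaping every byte found in the escape set. */
static void print_escaped(const char *s, const char *escape_set)
{
	for (; *s; s++) {
		if (strchr(escape_set, *s))
			printf("\\x%02x", (unsigned char)*s);
		else
			putchar(*s);
	}
}

/* usage: print_escaped(path, "\n\\");  -- newline and backslash */
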
95089diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95090index c3e4fcf..ef6cc43 100644
95091--- a/kernel/trace/trace_stack.c
95092+++ b/kernel/trace/trace_stack.c
95093@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
95094 return;
95095
95096 /* we do not handle interrupt stacks yet */
95097- if (!object_is_on_stack(stack))
95098+ if (!object_starts_on_stack(stack))
95099 return;
95100
95101 local_irq_save(flags);
95102diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
95103index f97f6e3..d367b48 100644
95104--- a/kernel/trace/trace_syscalls.c
95105+++ b/kernel/trace/trace_syscalls.c
95106@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
95107 int num;
95108
95109 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95110+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95111+ return -EINVAL;
95112
95113 mutex_lock(&syscall_trace_lock);
95114 if (!sys_perf_refcount_enter)
95115@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
95116 int num;
95117
95118 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95119+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95120+ return;
95121
95122 mutex_lock(&syscall_trace_lock);
95123 sys_perf_refcount_enter--;
95124@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
95125 int num;
95126
95127 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95128+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95129+ return -EINVAL;
95130
95131 mutex_lock(&syscall_trace_lock);
95132 if (!sys_perf_refcount_exit)
95133@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
95134 int num;
95135
95136 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95137+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95138+ return;
95139
95140 mutex_lock(&syscall_trace_lock);
95141 sys_perf_refcount_exit--;
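
All four perf syscall-tracing hooks gain the same guard: syscall_nr comes from event metadata and is used directly as an index into NR_syscalls-sized bitmaps, so a negative or out-of-range value is rejected (WARN_ON_ONCE plus early return) before it can read or write out of bounds. The pattern in isolation (NR_SYSCALLS and the table are illustrative):

#include <errno.h>

#define NR_SYSCALLS 436   /* hypothetical table size */

static unsigned char enabled[NR_SYSCALLS];

static int enable_syscall_event(int num)
{
	if (num < 0 || num >= NR_SYSCALLS)
		return -EINVAL;   /* never index the table out of range */
	enabled[num] = 1;
	return 0;
}
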
95142diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
95143index 4109f83..fe1f830 100644
95144--- a/kernel/user_namespace.c
95145+++ b/kernel/user_namespace.c
95146@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
95147 !kgid_has_mapping(parent_ns, group))
95148 return -EPERM;
95149
95150+#ifdef CONFIG_GRKERNSEC
95151+ /*
95152+ * This doesn't really inspire confidence:
95153+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
95154+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
95155+ * Unprivileged user namespaces increase the kernel attack
95156+ * surface in areas developers previously cared little about
95157+ * ("low importance due to requiring 'root' capability").
95158+ * To be removed when this code receives *proper* review.
95159+ */
95160+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
95161+ !capable(CAP_SETGID))
95162+ return -EPERM;
95163+#endif
95164+
95165 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
95166 if (!ns)
95167 return -ENOMEM;
95168@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
95169 if (atomic_read(&current->mm->mm_users) > 1)
95170 return -EINVAL;
95171
95172- if (current->fs->users != 1)
95173+ if (atomic_read(&current->fs->users) != 1)
95174 return -EINVAL;
95175
95176 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
95177diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
95178index c8eac43..4b5f08f 100644
95179--- a/kernel/utsname_sysctl.c
95180+++ b/kernel/utsname_sysctl.c
95181@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
95182 static int proc_do_uts_string(struct ctl_table *table, int write,
95183 void __user *buffer, size_t *lenp, loff_t *ppos)
95184 {
95185- struct ctl_table uts_table;
95186+ ctl_table_no_const uts_table;
95187 int r;
95188 memcpy(&uts_table, table, sizeof(uts_table));
95189 uts_table.data = get_uts(table, write);
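
proc_do_uts_string() builds a temporary ctl_table on the stack and then rewrites its data pointer; under the constify plugin the struct ctl_table type is forced const, so the stack copy is declared with grsecurity's non-const alias ctl_table_no_const instead. A userspace model of the idea (here the alias is a plain typedef; in grsecurity it carries a plugin attribute exempting it from constification):

#include <string.h>

struct ctl_table { const char *procname; void *data; };   /* simplified */

typedef struct ctl_table ctl_table_no_const;   /* writable-copy alias */

static void call_with_patched_data(const struct ctl_table *tmpl, void *new_data)
{
	ctl_table_no_const tmp;

	memcpy(&tmp, tmpl, sizeof(tmp));
	tmp.data = new_data;     /* fine: tmp is an ordinary stack object */
	/* ... hand &tmp to the generic handler ... */
}
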
95190diff --git a/kernel/watchdog.c b/kernel/watchdog.c
95191index 3174bf8..3553520 100644
95192--- a/kernel/watchdog.c
95193+++ b/kernel/watchdog.c
95194@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
95195 static void watchdog_nmi_disable(unsigned int cpu) { return; }
95196 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
95197
95198-static struct smp_hotplug_thread watchdog_threads = {
95199+static struct smp_hotplug_thread watchdog_threads __read_only = {
95200 .store = &softlockup_watchdog,
95201 .thread_should_run = watchdog_should_run,
95202 .thread_fn = watchdog,
95203diff --git a/kernel/workqueue.c b/kernel/workqueue.c
95204index 41ff75b..5ad683a 100644
95205--- a/kernel/workqueue.c
95206+++ b/kernel/workqueue.c
95207@@ -4564,7 +4564,7 @@ static void rebind_workers(struct worker_pool *pool)
95208 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
95209 worker_flags |= WORKER_REBOUND;
95210 worker_flags &= ~WORKER_UNBOUND;
95211- ACCESS_ONCE(worker->flags) = worker_flags;
95212+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
95213 }
95214
95215 spin_unlock_irq(&pool->lock);
95216diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95217index c5cefb3..a4241e3 100644
95218--- a/lib/Kconfig.debug
95219+++ b/lib/Kconfig.debug
95220@@ -923,7 +923,7 @@ config DEBUG_MUTEXES
95221
95222 config DEBUG_WW_MUTEX_SLOWPATH
95223 bool "Wait/wound mutex debugging: Slowpath testing"
95224- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95225+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95226 select DEBUG_LOCK_ALLOC
95227 select DEBUG_SPINLOCK
95228 select DEBUG_MUTEXES
95229@@ -940,7 +940,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
95230
95231 config DEBUG_LOCK_ALLOC
95232 bool "Lock debugging: detect incorrect freeing of live locks"
95233- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95234+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95235 select DEBUG_SPINLOCK
95236 select DEBUG_MUTEXES
95237 select LOCKDEP
95238@@ -954,7 +954,7 @@ config DEBUG_LOCK_ALLOC
95239
95240 config PROVE_LOCKING
95241 bool "Lock debugging: prove locking correctness"
95242- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95243+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95244 select LOCKDEP
95245 select DEBUG_SPINLOCK
95246 select DEBUG_MUTEXES
95247@@ -1005,7 +1005,7 @@ config LOCKDEP
95248
95249 config LOCK_STAT
95250 bool "Lock usage statistics"
95251- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95252+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95253 select LOCKDEP
95254 select DEBUG_SPINLOCK
95255 select DEBUG_MUTEXES
95256@@ -1467,6 +1467,7 @@ config LATENCYTOP
95257 depends on DEBUG_KERNEL
95258 depends on STACKTRACE_SUPPORT
95259 depends on PROC_FS
95260+ depends on !GRKERNSEC_HIDESYM
95261 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
95262 select KALLSYMS
95263 select KALLSYMS_ALL
95264@@ -1483,7 +1484,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95265 config DEBUG_STRICT_USER_COPY_CHECKS
95266 bool "Strict user copy size checks"
95267 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95268- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
95269+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
95270 help
95271 Enabling this option turns a certain set of sanity checks for user
95272 copy operations into compile time failures.
95273@@ -1614,7 +1615,7 @@ endmenu # runtime tests
95274
95275 config PROVIDE_OHCI1394_DMA_INIT
95276 bool "Remote debugging over FireWire early on boot"
95277- depends on PCI && X86
95278+ depends on PCI && X86 && !GRKERNSEC
95279 help
95280 If you want to debug problems which hang or crash the kernel early
95281 on boot and the crashing machine has a FireWire port, you can use
95282diff --git a/lib/Makefile b/lib/Makefile
95283index 58f74d2..08e011f 100644
95284--- a/lib/Makefile
95285+++ b/lib/Makefile
95286@@ -59,7 +59,7 @@ obj-$(CONFIG_BTREE) += btree.o
95287 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
95288 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
95289 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
95290-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
95291+obj-y += list_debug.o
95292 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
95293
95294 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
95295diff --git a/lib/average.c b/lib/average.c
95296index 114d1be..ab0350c 100644
95297--- a/lib/average.c
95298+++ b/lib/average.c
95299@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
95300 {
95301 unsigned long internal = ACCESS_ONCE(avg->internal);
95302
95303- ACCESS_ONCE(avg->internal) = internal ?
95304+ ACCESS_ONCE_RW(avg->internal) = internal ?
95305 (((internal << avg->weight) - internal) +
95306 (val << avg->factor)) >> avg->weight :
95307 (val << avg->factor);
95308diff --git a/lib/bitmap.c b/lib/bitmap.c
95309index d456f4c1..29a0308 100644
95310--- a/lib/bitmap.c
95311+++ b/lib/bitmap.c
95312@@ -264,7 +264,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
95313 }
95314 EXPORT_SYMBOL(__bitmap_subset);
95315
95316-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
95317+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
95318 {
95319 unsigned int k, lim = bits/BITS_PER_LONG;
95320 int w = 0;
95321@@ -391,7 +391,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95322 {
95323 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95324 u32 chunk;
95325- const char __user __force *ubuf = (const char __user __force *)buf;
95326+ const char __user *ubuf = (const char __force_user *)buf;
95327
95328 bitmap_zero(maskp, nmaskbits);
95329
95330@@ -476,7 +476,7 @@ int bitmap_parse_user(const char __user *ubuf,
95331 {
95332 if (!access_ok(VERIFY_READ, ubuf, ulen))
95333 return -EFAULT;
95334- return __bitmap_parse((const char __force *)ubuf,
95335+ return __bitmap_parse((const char __force_kernel *)ubuf,
95336 ulen, 1, maskp, nmaskbits);
95337
95338 }
95339@@ -535,7 +535,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
95340 {
95341 unsigned a, b;
95342 int c, old_c, totaldigits;
95343- const char __user __force *ubuf = (const char __user __force *)buf;
95344+ const char __user *ubuf = (const char __force_user *)buf;
95345 int exp_digit, in_range;
95346
95347 totaldigits = c = 0;
95348@@ -630,7 +630,7 @@ int bitmap_parselist_user(const char __user *ubuf,
95349 {
95350 if (!access_ok(VERIFY_READ, ubuf, ulen))
95351 return -EFAULT;
95352- return __bitmap_parselist((const char __force *)ubuf,
95353+ return __bitmap_parselist((const char __force_kernel *)ubuf,
95354 ulen, 1, maskp, nmaskbits);
95355 }
95356 EXPORT_SYMBOL(bitmap_parselist_user);
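
The bitmap.c hunks tighten the sparse address-space annotations used by PaX UDEREF/USERCOPY: __bitmap_parse() can read either a kernel or a user buffer, and each cast across the user/kernel boundary is now spelled with the grsecurity-specific __force_user/__force_kernel markers so every crossing is explicit and greppable. A sketch of how such annotations are typically declared for sparse (they erase to nothing for the real compiler):

#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
# define __force_user  __attribute__((force, address_space(1)))
#else
# define __user
# define __force_user
#endif

/* The one sanctioned way to view a kernel buffer as a user pointer:
 * the cast is explicit, so sparse flags any other crossing. */
static const char __user *as_user_ptr(const char *kbuf)
{
	return (const char __force_user *)kbuf;
}
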
95357diff --git a/lib/bug.c b/lib/bug.c
95358index 0c3bd95..5a615a1 100644
95359--- a/lib/bug.c
95360+++ b/lib/bug.c
95361@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95362 return BUG_TRAP_TYPE_NONE;
95363
95364 bug = find_bug(bugaddr);
95365+ if (!bug)
95366+ return BUG_TRAP_TYPE_NONE;
95367
95368 file = NULL;
95369 line = 0;
95370diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95371index 547f7f9..a6d4ba0 100644
95372--- a/lib/debugobjects.c
95373+++ b/lib/debugobjects.c
95374@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95375 if (limit > 4)
95376 return;
95377
95378- is_on_stack = object_is_on_stack(addr);
95379+ is_on_stack = object_starts_on_stack(addr);
95380 if (is_on_stack == onstack)
95381 return;
95382
95383diff --git a/lib/div64.c b/lib/div64.c
95384index 4382ad7..08aa558 100644
95385--- a/lib/div64.c
95386+++ b/lib/div64.c
95387@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
95388 EXPORT_SYMBOL(__div64_32);
95389
95390 #ifndef div_s64_rem
95391-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95392+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95393 {
95394 u64 quotient;
95395
95396@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
95397 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
95398 */
95399 #ifndef div64_u64
95400-u64 div64_u64(u64 dividend, u64 divisor)
95401+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
95402 {
95403 u32 high = divisor >> 32;
95404 u64 quot;
95405diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95406index 9722bd2..0d826f4 100644
95407--- a/lib/dma-debug.c
95408+++ b/lib/dma-debug.c
95409@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
95410
95411 void dma_debug_add_bus(struct bus_type *bus)
95412 {
95413- struct notifier_block *nb;
95414+ notifier_block_no_const *nb;
95415
95416 if (dma_debug_disabled())
95417 return;
95418@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
95419
95420 static void check_for_stack(struct device *dev, void *addr)
95421 {
95422- if (object_is_on_stack(addr))
95423+ if (object_starts_on_stack(addr))
95424 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
95425 "stack [addr=%p]\n", addr);
95426 }
95427diff --git a/lib/inflate.c b/lib/inflate.c
95428index 013a761..c28f3fc 100644
95429--- a/lib/inflate.c
95430+++ b/lib/inflate.c
95431@@ -269,7 +269,7 @@ static void free(void *where)
95432 malloc_ptr = free_mem_ptr;
95433 }
95434 #else
95435-#define malloc(a) kmalloc(a, GFP_KERNEL)
95436+#define malloc(a) kmalloc((a), GFP_KERNEL)
95437 #define free(a) kfree(a)
95438 #endif
95439
95440diff --git a/lib/ioremap.c b/lib/ioremap.c
95441index 0c9216c..863bd89 100644
95442--- a/lib/ioremap.c
95443+++ b/lib/ioremap.c
95444@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
95445 unsigned long next;
95446
95447 phys_addr -= addr;
95448- pmd = pmd_alloc(&init_mm, pud, addr);
95449+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
95450 if (!pmd)
95451 return -ENOMEM;
95452 do {
95453@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
95454 unsigned long next;
95455
95456 phys_addr -= addr;
95457- pud = pud_alloc(&init_mm, pgd, addr);
95458+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
95459 if (!pud)
95460 return -ENOMEM;
95461 do {
95462diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95463index bd2bea9..6b3c95e 100644
95464--- a/lib/is_single_threaded.c
95465+++ b/lib/is_single_threaded.c
95466@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95467 struct task_struct *p, *t;
95468 bool ret;
95469
95470+ if (!mm)
95471+ return true;
95472+
95473 if (atomic_read(&task->signal->live) != 1)
95474 return false;
95475
95476diff --git a/lib/kobject.c b/lib/kobject.c
95477index 03d4ab3..46f6374 100644
95478--- a/lib/kobject.c
95479+++ b/lib/kobject.c
95480@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
95481
95482
95483 static DEFINE_SPINLOCK(kobj_ns_type_lock);
95484-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
95485+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
95486
95487-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95488+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95489 {
95490 enum kobj_ns_type type = ops->type;
95491 int error;
95492diff --git a/lib/list_debug.c b/lib/list_debug.c
95493index c24c2f7..f0296f4 100644
95494--- a/lib/list_debug.c
95495+++ b/lib/list_debug.c
95496@@ -11,7 +11,9 @@
95497 #include <linux/bug.h>
95498 #include <linux/kernel.h>
95499 #include <linux/rculist.h>
95500+#include <linux/mm.h>
95501
95502+#ifdef CONFIG_DEBUG_LIST
95503 /*
95504 * Insert a new entry between two known consecutive entries.
95505 *
95506@@ -19,21 +21,40 @@
95507 * the prev/next entries already!
95508 */
95509
95510+static bool __list_add_debug(struct list_head *new,
95511+ struct list_head *prev,
95512+ struct list_head *next)
95513+{
95514+ if (unlikely(next->prev != prev)) {
95515+ printk(KERN_ERR "list_add corruption. next->prev should be "
95516+ "prev (%p), but was %p. (next=%p).\n",
95517+ prev, next->prev, next);
95518+ BUG();
95519+ return false;
95520+ }
95521+ if (unlikely(prev->next != next)) {
95522+ printk(KERN_ERR "list_add corruption. prev->next should be "
95523+ "next (%p), but was %p. (prev=%p).\n",
95524+ next, prev->next, prev);
95525+ BUG();
95526+ return false;
95527+ }
95528+ if (unlikely(new == prev || new == next)) {
95529+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
95530+ new, prev, next);
95531+ BUG();
95532+ return false;
95533+ }
95534+ return true;
95535+}
95536+
95537 void __list_add(struct list_head *new,
95538- struct list_head *prev,
95539- struct list_head *next)
95540+ struct list_head *prev,
95541+ struct list_head *next)
95542 {
95543- WARN(next->prev != prev,
95544- "list_add corruption. next->prev should be "
95545- "prev (%p), but was %p. (next=%p).\n",
95546- prev, next->prev, next);
95547- WARN(prev->next != next,
95548- "list_add corruption. prev->next should be "
95549- "next (%p), but was %p. (prev=%p).\n",
95550- next, prev->next, prev);
95551- WARN(new == prev || new == next,
95552- "list_add double add: new=%p, prev=%p, next=%p.\n",
95553- new, prev, next);
95554+ if (!__list_add_debug(new, prev, next))
95555+ return;
95556+
95557 next->prev = new;
95558 new->next = next;
95559 new->prev = prev;
95560@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
95561 }
95562 EXPORT_SYMBOL(__list_add);
95563
95564-void __list_del_entry(struct list_head *entry)
95565+static bool __list_del_entry_debug(struct list_head *entry)
95566 {
95567 struct list_head *prev, *next;
95568
95569 prev = entry->prev;
95570 next = entry->next;
95571
95572- if (WARN(next == LIST_POISON1,
95573- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95574- entry, LIST_POISON1) ||
95575- WARN(prev == LIST_POISON2,
95576- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95577- entry, LIST_POISON2) ||
95578- WARN(prev->next != entry,
95579- "list_del corruption. prev->next should be %p, "
95580- "but was %p\n", entry, prev->next) ||
95581- WARN(next->prev != entry,
95582- "list_del corruption. next->prev should be %p, "
95583- "but was %p\n", entry, next->prev))
95584+ if (unlikely(next == LIST_POISON1)) {
95585+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95586+ entry, LIST_POISON1);
95587+ BUG();
95588+ return false;
95589+ }
95590+ if (unlikely(prev == LIST_POISON2)) {
95591+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95592+ entry, LIST_POISON2);
95593+ BUG();
95594+ return false;
95595+ }
95596+ if (unlikely(entry->prev->next != entry)) {
95597+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
95598+ "but was %p\n", entry, prev->next);
95599+ BUG();
95600+ return false;
95601+ }
95602+ if (unlikely(entry->next->prev != entry)) {
95603+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
95604+ "but was %p\n", entry, next->prev);
95605+ BUG();
95606+ return false;
95607+ }
95608+ return true;
95609+}
95610+
95611+void __list_del_entry(struct list_head *entry)
95612+{
95613+ if (!__list_del_entry_debug(entry))
95614 return;
95615
95616- __list_del(prev, next);
95617+ __list_del(entry->prev, entry->next);
95618 }
95619 EXPORT_SYMBOL(__list_del_entry);
95620
95621@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
95622 void __list_add_rcu(struct list_head *new,
95623 struct list_head *prev, struct list_head *next)
95624 {
95625- WARN(next->prev != prev,
95626- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
95627- prev, next->prev, next);
95628- WARN(prev->next != next,
95629- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
95630- next, prev->next, prev);
95631+ if (!__list_add_debug(new, prev, next))
95632+ return;
95633+
95634 new->next = next;
95635 new->prev = prev;
95636 rcu_assign_pointer(list_next_rcu(prev), new);
95637 next->prev = new;
95638 }
95639 EXPORT_SYMBOL(__list_add_rcu);
95640+#endif
95641+
95642+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
95643+{
95644+#ifdef CONFIG_DEBUG_LIST
95645+ if (!__list_add_debug(new, prev, next))
95646+ return;
95647+#endif
95648+
95649+ pax_open_kernel();
95650+ next->prev = new;
95651+ new->next = next;
95652+ new->prev = prev;
95653+ prev->next = new;
95654+ pax_close_kernel();
95655+}
95656+EXPORT_SYMBOL(__pax_list_add);
95657+
95658+void pax_list_del(struct list_head *entry)
95659+{
95660+#ifdef CONFIG_DEBUG_LIST
95661+ if (!__list_del_entry_debug(entry))
95662+ return;
95663+#endif
95664+
95665+ pax_open_kernel();
95666+ __list_del(entry->prev, entry->next);
95667+ entry->next = LIST_POISON1;
95668+ entry->prev = LIST_POISON2;
95669+ pax_close_kernel();
95670+}
95671+EXPORT_SYMBOL(pax_list_del);
95672+
95673+void pax_list_del_init(struct list_head *entry)
95674+{
95675+ pax_open_kernel();
95676+ __list_del(entry->prev, entry->next);
95677+ INIT_LIST_HEAD(entry);
95678+ pax_close_kernel();
95679+}
95680+EXPORT_SYMBOL(pax_list_del_init);
95681+
95682+void __pax_list_add_rcu(struct list_head *new,
95683+ struct list_head *prev, struct list_head *next)
95684+{
95685+#ifdef CONFIG_DEBUG_LIST
95686+ if (!__list_add_debug(new, prev, next))
95687+ return;
95688+#endif
95689+
95690+ pax_open_kernel();
95691+ new->next = next;
95692+ new->prev = prev;
95693+ rcu_assign_pointer(list_next_rcu(prev), new);
95694+ next->prev = new;
95695+ pax_close_kernel();
95696+}
95697+EXPORT_SYMBOL(__pax_list_add_rcu);
95698+
95699+void pax_list_del_rcu(struct list_head *entry)
95700+{
95701+#ifdef CONFIG_DEBUG_LIST
95702+ if (!__list_del_entry_debug(entry))
95703+ return;
95704+#endif
95705+
95706+ pax_open_kernel();
95707+ __list_del(entry->prev, entry->next);
95708+ entry->next = LIST_POISON1;
95709+ entry->prev = LIST_POISON2;
95710+ pax_close_kernel();
95711+}
95712+EXPORT_SYMBOL(pax_list_del_rcu);
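
The list_debug.c rewrite does two things: the DEBUG_LIST sanity checks are hardened from WARN() to BUG(), and a family of pax_list_* helpers is added so that list_heads embedded in constified, read-only objects can still be linked and unlinked inside a pax_open_kernel() window (which is also why lib/Makefile below builds list_debug.o unconditionally). Escalating to BUG() matters because a corrupted list is a classic write primitive; a sketch of why continuing after a failed check is dangerous:

struct list_head { struct list_head *next, *prev; };

/* If an attacker controls entry->prev and entry->next, plain unlinking
 * performs two attacker-chosen writes: */
static void unchecked_unlink(struct list_head *entry)
{
	entry->next->prev = entry->prev;  /* write to attacker-chosen address */
	entry->prev->next = entry->next;  /* and a second one */
}

/* Hence the checks BUG() instead of WARN(): once the invariants fail,
 * letting execution continue completes the attacker's write. */
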
95713diff --git a/lib/lockref.c b/lib/lockref.c
95714index ecb9a66..a044fc5 100644
95715--- a/lib/lockref.c
95716+++ b/lib/lockref.c
95717@@ -48,13 +48,13 @@
95718 void lockref_get(struct lockref *lockref)
95719 {
95720 CMPXCHG_LOOP(
95721- new.count++;
95722+ __lockref_inc(&new);
95723 ,
95724 return;
95725 );
95726
95727 spin_lock(&lockref->lock);
95728- lockref->count++;
95729+ __lockref_inc(lockref);
95730 spin_unlock(&lockref->lock);
95731 }
95732 EXPORT_SYMBOL(lockref_get);
95733@@ -69,8 +69,8 @@ int lockref_get_not_zero(struct lockref *lockref)
95734 int retval;
95735
95736 CMPXCHG_LOOP(
95737- new.count++;
95738- if (old.count <= 0)
95739+ __lockref_inc(&new);
95740+ if (__lockref_read(&old) <= 0)
95741 return 0;
95742 ,
95743 return 1;
95744@@ -78,8 +78,8 @@ int lockref_get_not_zero(struct lockref *lockref)
95745
95746 spin_lock(&lockref->lock);
95747 retval = 0;
95748- if (lockref->count > 0) {
95749- lockref->count++;
95750+ if (__lockref_read(lockref) > 0) {
95751+ __lockref_inc(lockref);
95752 retval = 1;
95753 }
95754 spin_unlock(&lockref->lock);
95755@@ -96,17 +96,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95756 int lockref_get_or_lock(struct lockref *lockref)
95757 {
95758 CMPXCHG_LOOP(
95759- new.count++;
95760- if (old.count <= 0)
95761+ __lockref_inc(&new);
95762+ if (__lockref_read(&old) <= 0)
95763 break;
95764 ,
95765 return 1;
95766 );
95767
95768 spin_lock(&lockref->lock);
95769- if (lockref->count <= 0)
95770+ if (__lockref_read(lockref) <= 0)
95771 return 0;
95772- lockref->count++;
95773+ __lockref_inc(lockref);
95774 spin_unlock(&lockref->lock);
95775 return 1;
95776 }
95777@@ -122,11 +122,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95778 int lockref_put_return(struct lockref *lockref)
95779 {
95780 CMPXCHG_LOOP(
95781- new.count--;
95782- if (old.count <= 0)
95783+ __lockref_dec(&new);
95784+ if (__lockref_read(&old) <= 0)
95785 return -1;
95786 ,
95787- return new.count;
95788+ return __lockref_read(&new);
95789 );
95790 return -1;
95791 }
95792@@ -140,17 +140,17 @@ EXPORT_SYMBOL(lockref_put_return);
95793 int lockref_put_or_lock(struct lockref *lockref)
95794 {
95795 CMPXCHG_LOOP(
95796- new.count--;
95797- if (old.count <= 1)
95798+ __lockref_dec(&new);
95799+ if (__lockref_read(&old) <= 1)
95800 break;
95801 ,
95802 return 1;
95803 );
95804
95805 spin_lock(&lockref->lock);
95806- if (lockref->count <= 1)
95807+ if (__lockref_read(lockref) <= 1)
95808 return 0;
95809- lockref->count--;
95810+ __lockref_dec(lockref);
95811 spin_unlock(&lockref->lock);
95812 return 1;
95813 }
95814@@ -163,7 +163,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
95815 void lockref_mark_dead(struct lockref *lockref)
95816 {
95817 assert_spin_locked(&lockref->lock);
95818- lockref->count = -128;
95819+ __lockref_set(lockref, -128);
95820 }
95821 EXPORT_SYMBOL(lockref_mark_dead);
95822
95823@@ -177,8 +177,8 @@ int lockref_get_not_dead(struct lockref *lockref)
95824 int retval;
95825
95826 CMPXCHG_LOOP(
95827- new.count++;
95828- if (old.count < 0)
95829+ __lockref_inc(&new);
95830+ if (__lockref_read(&old) < 0)
95831 return 0;
95832 ,
95833 return 1;
95834@@ -186,8 +186,8 @@ int lockref_get_not_dead(struct lockref *lockref)
95835
95836 spin_lock(&lockref->lock);
95837 retval = 0;
95838- if (lockref->count >= 0) {
95839- lockref->count++;
95840+ if (__lockref_read(lockref) >= 0) {
95841+ __lockref_inc(lockref);
95842 retval = 1;
95843 }
95844 spin_unlock(&lockref->lock);
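Every direct access to lockref->count in the hunks above is routed through __lockref_read()/__lockref_inc()/__lockref_dec()/__lockref_set(), so the count can become an overflow-checked atomic under PAX_REFCOUNT without touching each call site again. A sketch of plausible accessor definitions, assuming the count field is converted to an atomic_t (the real definitions live in the patched include/linux/lockref.h):

static inline int __lockref_read(struct lockref *lockref)
{
	return atomic_read(&lockref->count);
}

static inline void __lockref_inc(struct lockref *lockref)
{
	atomic_inc(&lockref->count);	/* overflow-trapped under PAX_REFCOUNT */
}

static inline void __lockref_dec(struct lockref *lockref)
{
	atomic_dec(&lockref->count);
}

static inline void __lockref_set(struct lockref *lockref, int val)
{
	atomic_set(&lockref->count, val);
}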
95845diff --git a/lib/nlattr.c b/lib/nlattr.c
95846index f5907d2..36072be 100644
95847--- a/lib/nlattr.c
95848+++ b/lib/nlattr.c
95849@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
95850 {
95851 int minlen = min_t(int, count, nla_len(src));
95852
95853+ BUG_ON(minlen < 0);
95854+
95855 memcpy(dest, nla_data(src), minlen);
95856 if (count > minlen)
95857 memset(dest + minlen, 0, count - minlen);
95858diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95859index 6111bcb..02e816b 100644
95860--- a/lib/percpu-refcount.c
95861+++ b/lib/percpu-refcount.c
95862@@ -31,7 +31,7 @@
95863 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95864 */
95865
95866-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95867+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95868
95869 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95870
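Halving PERCPU_COUNT_BIAS from 1 << (BITS_PER_LONG - 1) to 1 << (BITS_PER_LONG - 2) keeps the biased counter out of the sign bit: under PAX_REFCOUNT an atomic_long_t that reads as negative is treated as an overflow, so the bias must stay in the positive range. A quick userspace demonstration of the difference (64-bit build and two's complement assumed):

#include <stdio.h>

int main(void)
{
	unsigned long old_bias = 1UL << 63;	/* sign bit set */
	unsigned long new_bias = 1UL << 62;	/* still positive as a long */

	/* An overflow-checking atomic looks at the signed value: */
	printf("old bias as long: %ld\n", (long)old_bias);	/* negative */
	printf("new bias as long: %ld\n", (long)new_bias);	/* positive */
	return 0;
}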
95871diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95872index 3d2aa27..a472f20 100644
95873--- a/lib/radix-tree.c
95874+++ b/lib/radix-tree.c
95875@@ -67,7 +67,7 @@ struct radix_tree_preload {
95876 int nr;
95877 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95878 };
95879-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95880+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95881
95882 static inline void *ptr_to_indirect(void *ptr)
95883 {
95884diff --git a/lib/random32.c b/lib/random32.c
95885index 0bee183..526f12f 100644
95886--- a/lib/random32.c
95887+++ b/lib/random32.c
95888@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95889 }
95890 #endif
95891
95892-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95893+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95894
95895 /**
95896 * prandom_u32_state - seeded pseudo-random number generator.
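The __latent_entropy marker on net_rand_state asks the gcc latent_entropy plugin to seed the per-cpu PRNG state with entropy accumulated during the build and boot rather than leaving it predictably zeroed. In plugin-enabled builds the annotation reduces to roughly the following (sketch; without the plugin it compiles away to nothing):

#ifdef LATENT_ENTROPY_PLUGIN
# define __latent_entropy __attribute__((latent_entropy))
#else
# define __latent_entropy
#endif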
95897diff --git a/lib/rbtree.c b/lib/rbtree.c
95898index c16c81a..4dcbda1 100644
95899--- a/lib/rbtree.c
95900+++ b/lib/rbtree.c
95901@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95902 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95903
95904 static const struct rb_augment_callbacks dummy_callbacks = {
95905- dummy_propagate, dummy_copy, dummy_rotate
95906+ .propagate = dummy_propagate,
95907+ .copy = dummy_copy,
95908+ .rotate = dummy_rotate
95909 };
95910
95911 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95912diff --git a/lib/show_mem.c b/lib/show_mem.c
95913index adc98e18..0ce83c2 100644
95914--- a/lib/show_mem.c
95915+++ b/lib/show_mem.c
95916@@ -49,6 +49,6 @@ void show_mem(unsigned int filter)
95917 quicklist_total_size());
95918 #endif
95919 #ifdef CONFIG_MEMORY_FAILURE
95920- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95921+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95922 #endif
95923 }
95924diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95925index e0af6ff..fcc9f15 100644
95926--- a/lib/strncpy_from_user.c
95927+++ b/lib/strncpy_from_user.c
95928@@ -22,7 +22,7 @@
95929 */
95930 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95931 {
95932- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95933+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95934 long res = 0;
95935
95936 /*
95937diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95938index a28df52..3d55877 100644
95939--- a/lib/strnlen_user.c
95940+++ b/lib/strnlen_user.c
95941@@ -26,7 +26,7 @@
95942 */
95943 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95944 {
95945- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95946+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95947 long align, res = 0;
95948 unsigned long c;
95949
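In both do_strncpy_from_user() and do_strnlen_user() the word-at-a-time constant block becomes `static const`: in unoptimized terms a plain `const` local is still materialized in the stack frame on every call, while `static const` places a single copy in .rodata. The effect in isolation (struct layout here is a stand-in for struct word_at_a_time, not the real definition):

struct wat { unsigned long low_bits, high_bits; };

unsigned long on_stack(void)
{
	const struct wat c = { 0x0101010101010101UL, 0x8080808080808080UL };
	return c.low_bits + c.high_bits;	/* c may be built on the stack each call */
}

unsigned long in_rodata(void)
{
	static const struct wat c = { 0x0101010101010101UL, 0x8080808080808080UL };
	return c.low_bits + c.high_bits;	/* one copy, in .rodata */
}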
95950diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95951index 4abda07..b9d3765 100644
95952--- a/lib/swiotlb.c
95953+++ b/lib/swiotlb.c
95954@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95955
95956 void
95957 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95958- dma_addr_t dev_addr)
95959+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95960 {
95961 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95962
95963diff --git a/lib/usercopy.c b/lib/usercopy.c
95964index 4f5b1dd..7cab418 100644
95965--- a/lib/usercopy.c
95966+++ b/lib/usercopy.c
95967@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95968 WARN(1, "Buffer overflow detected!\n");
95969 }
95970 EXPORT_SYMBOL(copy_from_user_overflow);
95971+
95972+void copy_to_user_overflow(void)
95973+{
95974+ WARN(1, "Buffer overflow detected!\n");
95975+}
95976+EXPORT_SYMBOL(copy_to_user_overflow);
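copy_to_user_overflow() is the warning sink for the compile-time object-size checks in the uaccess headers, symmetric to the existing copy_from_user_overflow(). A hedged sketch of the kind of call site that invokes it, assuming the __builtin_object_size()-based pattern used by the checked uaccess wrappers (_copy_to_user() stands in for the unchecked arch primitive):

static inline unsigned long
checked_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	size_t sz = __builtin_object_size(from, 0);

	if (unlikely(sz != (size_t)-1 && sz < n)) {
		copy_to_user_overflow();	/* source object smaller than n */
		return n;			/* report nothing copied out */
	}
	return _copy_to_user(to, from, n);
}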
95977diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95978index b235c96..343ffc1 100644
95979--- a/lib/vsprintf.c
95980+++ b/lib/vsprintf.c
95981@@ -16,6 +16,9 @@
95982 * - scnprintf and vscnprintf
95983 */
95984
95985+#ifdef CONFIG_GRKERNSEC_HIDESYM
95986+#define __INCLUDED_BY_HIDESYM 1
95987+#endif
95988 #include <stdarg.h>
95989 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95990 #include <linux/types.h>
95991@@ -626,7 +629,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95992 #ifdef CONFIG_KALLSYMS
95993 if (*fmt == 'B')
95994 sprint_backtrace(sym, value);
95995- else if (*fmt != 'f' && *fmt != 's')
95996+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95997 sprint_symbol(sym, value);
95998 else
95999 sprint_symbol_no_offset(sym, value);
96000@@ -1322,7 +1325,11 @@ char *address_val(char *buf, char *end, const void *addr,
96001 return number(buf, end, num, spec);
96002 }
96003
96004+#ifdef CONFIG_GRKERNSEC_HIDESYM
96005+int kptr_restrict __read_mostly = 2;
96006+#else
96007 int kptr_restrict __read_mostly;
96008+#endif
96009
96010 /*
96011 * Show a '%p' thing. A kernel extension is that the '%p' is followed
96012@@ -1333,8 +1340,10 @@ int kptr_restrict __read_mostly;
96013 *
96014 * - 'F' For symbolic function descriptor pointers with offset
96015 * - 'f' For simple symbolic function names without offset
96016+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
96017 * - 'S' For symbolic direct pointers with offset
96018 * - 's' For symbolic direct pointers without offset
96019+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
96020 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
96021 * - 'B' For backtraced symbolic direct pointers with offset
96022 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
96023@@ -1417,12 +1426,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96024
96025 if (!ptr && *fmt != 'K') {
96026 /*
96027- * Print (null) with the same width as a pointer so it makes
96028+ * Print (nil) with the same width as a pointer so it makes
96029 * tabular output look nice.
96030 */
96031 if (spec.field_width == -1)
96032 spec.field_width = default_width;
96033- return string(buf, end, "(null)", spec);
96034+ return string(buf, end, "(nil)", spec);
96035 }
96036
96037 switch (*fmt) {
96038@@ -1432,6 +1441,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96039 /* Fallthrough */
96040 case 'S':
96041 case 's':
96042+#ifdef CONFIG_GRKERNSEC_HIDESYM
96043+ break;
96044+#else
96045+ return symbol_string(buf, end, ptr, spec, fmt);
96046+#endif
96047+ case 'X':
96048+ ptr = dereference_function_descriptor(ptr);
96049+ case 'A':
96050 case 'B':
96051 return symbol_string(buf, end, ptr, spec, fmt);
96052 case 'R':
96053@@ -1496,6 +1513,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96054 va_end(va);
96055 return buf;
96056 }
96057+ case 'P':
96058+ break;
96059 case 'K':
96060 /*
96061 * %pK cannot be used in IRQ context because its test
96062@@ -1553,6 +1572,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96063 ((const struct file *)ptr)->f_path.dentry,
96064 spec, fmt);
96065 }
96066+
96067+#ifdef CONFIG_GRKERNSEC_HIDESYM
96068+	/* 'P' = pointers approved for copying to userland,
96069+	   as in the /proc/kallsyms case: we display nothing to
96070+	   non-root users and the real contents to root users.
96071+	   'X' = approved simple symbols.
96072+	   Also ignore 'K' pointers, since we force their NULLing
96073+	   for non-root users above.
96074+	 */
96075+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
96076+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
96077+ dump_stack();
96078+ ptr = NULL;
96079+ }
96080+#endif
96081+
96082 spec.flags |= SMALL;
96083 if (spec.field_width == -1) {
96084 spec.field_width = default_width;
96085@@ -2254,11 +2289,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96086 typeof(type) value; \
96087 if (sizeof(type) == 8) { \
96088 args = PTR_ALIGN(args, sizeof(u32)); \
96089- *(u32 *)&value = *(u32 *)args; \
96090- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
96091+ *(u32 *)&value = *(const u32 *)args; \
96092+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
96093 } else { \
96094 args = PTR_ALIGN(args, sizeof(type)); \
96095- value = *(typeof(type) *)args; \
96096+ value = *(const typeof(type) *)args; \
96097 } \
96098 args += sizeof(type); \
96099 value; \
96100@@ -2321,7 +2356,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96101 case FORMAT_TYPE_STR: {
96102 const char *str_arg = args;
96103 args += strlen(str_arg) + 1;
96104- str = string(str, end, (char *)str_arg, spec);
96105+ str = string(str, end, str_arg, spec);
96106 break;
96107 }
96108
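The vsprintf changes introduce two HIDESYM-approved pointer formats: '%pA' behaves like '%pS' (symbol plus offset) and '%pX' like '%ps'/'%pf' (plain symbol name, after function-descriptor dereference), except that they keep working when GRKERNSEC_HIDESYM suppresses the generic symbol formats; '%pP' marks a raw pointer as approved for user-visible output so the infoleak detector skips it. Typical usage, mirroring the kmemleak hunk later in this patch:

/* Illustration only: print a backtrace entry so that HIDESYM kernels
 * still show the symbol while the raw address remains policy-controlled. */
void *ptr = (void *)object_trace_entry;	/* hypothetical saved return address */
seq_printf(seq, "  [<%pP>] %pA\n", ptr, ptr);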
96109diff --git a/localversion-grsec b/localversion-grsec
96110new file mode 100644
96111index 0000000..7cd6065
96112--- /dev/null
96113+++ b/localversion-grsec
96114@@ -0,0 +1 @@
96115+-grsec
96116diff --git a/mm/Kconfig b/mm/Kconfig
96117index a03131b..1b1bafb 100644
96118--- a/mm/Kconfig
96119+++ b/mm/Kconfig
96120@@ -342,10 +342,11 @@ config KSM
96121 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
96122
96123 config DEFAULT_MMAP_MIN_ADDR
96124- int "Low address space to protect from user allocation"
96125+ int "Low address space to protect from user allocation"
96126 depends on MMU
96127- default 4096
96128- help
96129+ default 32768 if ALPHA || ARM || PARISC || SPARC32
96130+ default 65536
96131+ help
96132 This is the portion of low virtual memory which should be protected
96133 from userspace allocation. Keeping a user from writing to low pages
96134 can help reduce the impact of kernel NULL pointer bugs.
96135@@ -376,7 +377,7 @@ config MEMORY_FAILURE
96136
96137 config HWPOISON_INJECT
96138 tristate "HWPoison pages injector"
96139- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
96140+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
96141 select PROC_PAGE_MONITOR
96142
96143 config NOMMU_INITIAL_TRIM_EXCESS
96144diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
96145index 957d3da..1d34e20 100644
96146--- a/mm/Kconfig.debug
96147+++ b/mm/Kconfig.debug
96148@@ -10,6 +10,7 @@ config PAGE_EXTENSION
96149 config DEBUG_PAGEALLOC
96150 bool "Debug page memory allocations"
96151 depends on DEBUG_KERNEL
96152+ depends on !PAX_MEMORY_SANITIZE
96153 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
96154 depends on !KMEMCHECK
96155 select PAGE_EXTENSION
96156diff --git a/mm/backing-dev.c b/mm/backing-dev.c
96157index 6dc4580..e031ec1 100644
96158--- a/mm/backing-dev.c
96159+++ b/mm/backing-dev.c
96160@@ -12,7 +12,7 @@
96161 #include <linux/device.h>
96162 #include <trace/events/writeback.h>
96163
96164-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
96165+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
96166
96167 struct backing_dev_info noop_backing_dev_info = {
96168 .name = "noop",
96169@@ -474,7 +474,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
96170 return err;
96171
96172 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
96173- atomic_long_inc_return(&bdi_seq));
96174+ atomic_long_inc_return_unchecked(&bdi_seq));
96175 if (err) {
96176 bdi_destroy(bdi);
96177 return err;
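bdi_seq is only a name uniquifier, so wraparound is harmless; converting it to atomic_long_unchecked_t opts it out of PAX_REFCOUNT's overflow trapping, which is reserved for true reference counts. A sketch of the type split, assuming the PaX scheme (the __sync builtin is a portable stand-in for the kernel's arch-specific asm):

typedef struct {
	long counter;
} atomic_long_unchecked_t;

static inline long
atomic_long_inc_return_unchecked(atomic_long_unchecked_t *v)
{
	/* plain wrapping add, no overflow detection on purpose */
	return __sync_add_and_fetch(&v->counter, 1);
}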
96178diff --git a/mm/filemap.c b/mm/filemap.c
96179index ad72420..0a20ef2 100644
96180--- a/mm/filemap.c
96181+++ b/mm/filemap.c
96182@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
96183 struct address_space *mapping = file->f_mapping;
96184
96185 if (!mapping->a_ops->readpage)
96186- return -ENOEXEC;
96187+ return -ENODEV;
96188 file_accessed(file);
96189 vma->vm_ops = &generic_file_vm_ops;
96190 return 0;
96191@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
96192 *pos = i_size_read(inode);
96193
96194 if (limit != RLIM_INFINITY) {
96195+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
96196 if (*pos >= limit) {
96197 send_sig(SIGXFSZ, current, 0);
96198 return -EFBIG;
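gr_learn_resource() feeds grsecurity's RBAC learning mode: before the RLIMIT_FSIZE check can fail, the requested file size is recorded so that gradm can later generate a policy from the observed high-water marks. When the feature is compiled out it reduces to a no-op stub of this shape (signature as used throughout the patch; body is the assumed disabled stub):

static inline void gr_learn_resource(const struct task_struct *task,
				     const int res,
				     const unsigned long wanted,
				     const int gt)
{
}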
96199diff --git a/mm/gup.c b/mm/gup.c
96200index a6e24e2..72dd2cf 100644
96201--- a/mm/gup.c
96202+++ b/mm/gup.c
96203@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
96204 unsigned int fault_flags = 0;
96205 int ret;
96206
96207- /* For mlock, just skip the stack guard page. */
96208- if ((*flags & FOLL_MLOCK) &&
96209- (stack_guard_page_start(vma, address) ||
96210- stack_guard_page_end(vma, address + PAGE_SIZE)))
96211- return -ENOENT;
96212 if (*flags & FOLL_WRITE)
96213 fault_flags |= FAULT_FLAG_WRITE;
96214 if (nonblocking)
96215@@ -435,14 +430,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96216 if (!(gup_flags & FOLL_FORCE))
96217 gup_flags |= FOLL_NUMA;
96218
96219- do {
96220+ while (nr_pages) {
96221 struct page *page;
96222 unsigned int foll_flags = gup_flags;
96223 unsigned int page_increm;
96224
96225 /* first iteration or cross vma bound */
96226 if (!vma || start >= vma->vm_end) {
96227- vma = find_extend_vma(mm, start);
96228+ vma = find_vma(mm, start);
96229 if (!vma && in_gate_area(mm, start)) {
96230 int ret;
96231 ret = get_gate_page(mm, start & PAGE_MASK,
96232@@ -454,7 +449,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96233 goto next_page;
96234 }
96235
96236- if (!vma || check_vma_flags(vma, gup_flags))
96237+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
96238 return i ? : -EFAULT;
96239 if (is_vm_hugetlb_page(vma)) {
96240 i = follow_hugetlb_page(mm, vma, pages, vmas,
96241@@ -509,7 +504,7 @@ next_page:
96242 i += page_increm;
96243 start += page_increm * PAGE_SIZE;
96244 nr_pages -= page_increm;
96245- } while (nr_pages);
96246+ }
96247 return i;
96248 }
96249 EXPORT_SYMBOL(__get_user_pages);
96250diff --git a/mm/highmem.c b/mm/highmem.c
96251index 123bcd3..0de52ba 100644
96252--- a/mm/highmem.c
96253+++ b/mm/highmem.c
96254@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
96255 * So no dangers, even with speculative execution.
96256 */
96257 page = pte_page(pkmap_page_table[i]);
96258+ pax_open_kernel();
96259 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
96260-
96261+ pax_close_kernel();
96262 set_page_address(page, NULL);
96263 need_flush = 1;
96264 }
96265@@ -259,9 +260,11 @@ start:
96266 }
96267 }
96268 vaddr = PKMAP_ADDR(last_pkmap_nr);
96269+
96270+ pax_open_kernel();
96271 set_pte_at(&init_mm, vaddr,
96272 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
96273-
96274+ pax_close_kernel();
96275 pkmap_count[last_pkmap_nr] = 1;
96276 set_page_address(page, (void *)vaddr);
96277
96278diff --git a/mm/hugetlb.c b/mm/hugetlb.c
96279index c41b2a0..100cf92 100644
96280--- a/mm/hugetlb.c
96281+++ b/mm/hugetlb.c
96282@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96283 struct ctl_table *table, int write,
96284 void __user *buffer, size_t *length, loff_t *ppos)
96285 {
96286+ ctl_table_no_const t;
96287 struct hstate *h = &default_hstate;
96288 unsigned long tmp = h->max_huge_pages;
96289 int ret;
96290@@ -2267,9 +2268,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96291 if (!hugepages_supported())
96292 return -ENOTSUPP;
96293
96294- table->data = &tmp;
96295- table->maxlen = sizeof(unsigned long);
96296- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96297+ t = *table;
96298+ t.data = &tmp;
96299+ t.maxlen = sizeof(unsigned long);
96300+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
96301 if (ret)
96302 goto out;
96303
96304@@ -2304,6 +2306,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96305 struct hstate *h = &default_hstate;
96306 unsigned long tmp;
96307 int ret;
96308+ ctl_table_no_const hugetlb_table;
96309
96310 if (!hugepages_supported())
96311 return -ENOTSUPP;
96312@@ -2313,9 +2316,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96313 if (write && hstate_is_gigantic(h))
96314 return -EINVAL;
96315
96316- table->data = &tmp;
96317- table->maxlen = sizeof(unsigned long);
96318- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96319+ hugetlb_table = *table;
96320+ hugetlb_table.data = &tmp;
96321+ hugetlb_table.maxlen = sizeof(unsigned long);
96322+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
96323 if (ret)
96324 goto out;
96325
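Both hugetlb sysctl handlers stop writing through the registered ctl_table, which the constify plugin places in read-only memory, and instead mutate a stack copy typed ctl_table_no_const. The pattern generalizes to any handler that used table->data as scratch space (sketch; ctl_table_no_const is assumed to be a non-constified alias of struct ctl_table):

static int example_handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *length, loff_t *ppos)
{
	ctl_table_no_const t = *table;	/* writable copy on the stack */
	unsigned long tmp = 0;

	t.data = &tmp;			/* safe: only the copy is touched */
	t.maxlen = sizeof(tmp);
	return proc_doulongvec_minmax(&t, write, buffer, length, ppos);
}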
96326@@ -2800,6 +2804,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
96327 i_mmap_unlock_write(mapping);
96328 }
96329
96330+#ifdef CONFIG_PAX_SEGMEXEC
96331+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
96332+{
96333+ struct mm_struct *mm = vma->vm_mm;
96334+ struct vm_area_struct *vma_m;
96335+ unsigned long address_m;
96336+ pte_t *ptep_m;
96337+
96338+ vma_m = pax_find_mirror_vma(vma);
96339+ if (!vma_m)
96340+ return;
96341+
96342+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96343+ address_m = address + SEGMEXEC_TASK_SIZE;
96344+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
96345+ get_page(page_m);
96346+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
96347+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
96348+}
96349+#endif
96350+
96351 /*
96352 * Hugetlb_cow() should be called with page lock of the original hugepage held.
96353 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
96354@@ -2912,6 +2937,11 @@ retry_avoidcopy:
96355 make_huge_pte(vma, new_page, 1));
96356 page_remove_rmap(old_page);
96357 hugepage_add_new_anon_rmap(new_page, vma, address);
96358+
96359+#ifdef CONFIG_PAX_SEGMEXEC
96360+ pax_mirror_huge_pte(vma, address, new_page);
96361+#endif
96362+
96363 /* Make the old page be freed below */
96364 new_page = old_page;
96365 }
96366@@ -3072,6 +3102,10 @@ retry:
96367 && (vma->vm_flags & VM_SHARED)));
96368 set_huge_pte_at(mm, address, ptep, new_pte);
96369
96370+#ifdef CONFIG_PAX_SEGMEXEC
96371+ pax_mirror_huge_pte(vma, address, page);
96372+#endif
96373+
96374 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
96375 /* Optimization, do the COW without a second fault */
96376 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
96377@@ -3139,6 +3173,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96378 struct address_space *mapping;
96379 int need_wait_lock = 0;
96380
96381+#ifdef CONFIG_PAX_SEGMEXEC
96382+ struct vm_area_struct *vma_m;
96383+#endif
96384+
96385 address &= huge_page_mask(h);
96386
96387 ptep = huge_pte_offset(mm, address);
96388@@ -3152,6 +3190,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96389 VM_FAULT_SET_HINDEX(hstate_index(h));
96390 }
96391
96392+#ifdef CONFIG_PAX_SEGMEXEC
96393+ vma_m = pax_find_mirror_vma(vma);
96394+ if (vma_m) {
96395+ unsigned long address_m;
96396+
96397+ if (vma->vm_start > vma_m->vm_start) {
96398+ address_m = address;
96399+ address -= SEGMEXEC_TASK_SIZE;
96400+ vma = vma_m;
96401+ h = hstate_vma(vma);
96402+ } else
96403+ address_m = address + SEGMEXEC_TASK_SIZE;
96404+
96405+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
96406+ return VM_FAULT_OOM;
96407+ address_m &= HPAGE_MASK;
96408+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
96409+ }
96410+#endif
96411+
96412 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
96413 if (!ptep)
96414 return VM_FAULT_OOM;
96415diff --git a/mm/internal.h b/mm/internal.h
96416index a96da5b..42ebd54 100644
96417--- a/mm/internal.h
96418+++ b/mm/internal.h
96419@@ -156,6 +156,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
96420
96421 extern int __isolate_free_page(struct page *page, unsigned int order);
96422 extern void __free_pages_bootmem(struct page *page, unsigned int order);
96423+extern void free_compound_page(struct page *page);
96424 extern void prep_compound_page(struct page *page, unsigned long order);
96425 #ifdef CONFIG_MEMORY_FAILURE
96426 extern bool is_free_buddy_page(struct page *page);
96427@@ -411,7 +412,7 @@ extern u32 hwpoison_filter_enable;
96428
96429 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
96430 unsigned long, unsigned long,
96431- unsigned long, unsigned long);
96432+ unsigned long, unsigned long) __intentional_overflow(-1);
96433
96434 extern void set_pageblock_order(void);
96435 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
96436diff --git a/mm/kmemleak.c b/mm/kmemleak.c
96437index 5405aff..483406d 100644
96438--- a/mm/kmemleak.c
96439+++ b/mm/kmemleak.c
96440@@ -365,7 +365,7 @@ static void print_unreferenced(struct seq_file *seq,
96441
96442 for (i = 0; i < object->trace_len; i++) {
96443 void *ptr = (void *)object->trace[i];
96444- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
96445+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
96446 }
96447 }
96448
96449@@ -1911,7 +1911,7 @@ static int __init kmemleak_late_init(void)
96450 return -ENOMEM;
96451 }
96452
96453- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
96454+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
96455 &kmemleak_fops);
96456 if (!dentry)
96457 pr_warning("Failed to create the debugfs kmemleak file\n");
96458diff --git a/mm/maccess.c b/mm/maccess.c
96459index d53adf9..03a24bf 100644
96460--- a/mm/maccess.c
96461+++ b/mm/maccess.c
96462@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
96463 set_fs(KERNEL_DS);
96464 pagefault_disable();
96465 ret = __copy_from_user_inatomic(dst,
96466- (__force const void __user *)src, size);
96467+ (const void __force_user *)src, size);
96468 pagefault_enable();
96469 set_fs(old_fs);
96470
96471@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
96472
96473 set_fs(KERNEL_DS);
96474 pagefault_disable();
96475- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
96476+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
96477 pagefault_enable();
96478 set_fs(old_fs);
96479
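maccess.c switches its casts from `(__force const void __user *)` to `(const void __force_user *)`. grsecurity splits sparse's catch-all __force into per-address-space variants so that a deliberate kernel-to-user cast cannot silently also launder a user-to-kernel one. A hedged sketch of the annotation split (checker builds only):

#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif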
96480diff --git a/mm/madvise.c b/mm/madvise.c
96481index d551475..8fdd7f3 100644
96482--- a/mm/madvise.c
96483+++ b/mm/madvise.c
96484@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
96485 pgoff_t pgoff;
96486 unsigned long new_flags = vma->vm_flags;
96487
96488+#ifdef CONFIG_PAX_SEGMEXEC
96489+ struct vm_area_struct *vma_m;
96490+#endif
96491+
96492 switch (behavior) {
96493 case MADV_NORMAL:
96494 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
96495@@ -126,6 +130,13 @@ success:
96496 /*
96497 * vm_flags is protected by the mmap_sem held in write mode.
96498 */
96499+
96500+#ifdef CONFIG_PAX_SEGMEXEC
96501+ vma_m = pax_find_mirror_vma(vma);
96502+ if (vma_m)
96503+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
96504+#endif
96505+
96506 vma->vm_flags = new_flags;
96507
96508 out:
96509@@ -277,11 +288,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96510 struct vm_area_struct **prev,
96511 unsigned long start, unsigned long end)
96512 {
96513+
96514+#ifdef CONFIG_PAX_SEGMEXEC
96515+ struct vm_area_struct *vma_m;
96516+#endif
96517+
96518 *prev = vma;
96519 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96520 return -EINVAL;
96521
96522 zap_page_range(vma, start, end - start, NULL);
96523+
96524+#ifdef CONFIG_PAX_SEGMEXEC
96525+ vma_m = pax_find_mirror_vma(vma);
96526+ if (vma_m) {
96527+ if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96528+ return -EINVAL;
96529+
96530+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
96531+ }
96532+#endif
96533+
96534 return 0;
96535 }
96536
96537@@ -484,6 +511,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
96538 if (end < start)
96539 return error;
96540
96541+#ifdef CONFIG_PAX_SEGMEXEC
96542+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
96543+ if (end > SEGMEXEC_TASK_SIZE)
96544+ return error;
96545+ } else
96546+#endif
96547+
96548+ if (end > TASK_SIZE)
96549+ return error;
96550+
96551 error = 0;
96552 if (end == start)
96553 return error;
96554diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96555index d487f8d..39ebbf6 100644
96556--- a/mm/memory-failure.c
96557+++ b/mm/memory-failure.c
96558@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96559
96560 int sysctl_memory_failure_recovery __read_mostly = 1;
96561
96562-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96563+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96564
96565 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
96566
96567@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
96568 pfn, t->comm, t->pid);
96569 si.si_signo = SIGBUS;
96570 si.si_errno = 0;
96571- si.si_addr = (void *)addr;
96572+ si.si_addr = (void __user *)addr;
96573 #ifdef __ARCH_SI_TRAPNO
96574 si.si_trapno = trapno;
96575 #endif
96576@@ -779,7 +779,7 @@ static struct page_state {
96577 unsigned long res;
96578 char *msg;
96579 int (*action)(struct page *p, unsigned long pfn);
96580-} error_states[] = {
96581+} __do_const error_states[] = {
96582 { reserved, reserved, "reserved kernel", me_kernel },
96583 /*
96584 * free pages are specially detected outside this table:
96585@@ -1087,7 +1087,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96586 nr_pages = 1 << compound_order(hpage);
96587 else /* normal page or thp */
96588 nr_pages = 1;
96589- atomic_long_add(nr_pages, &num_poisoned_pages);
96590+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
96591
96592 /*
96593 * We need/can do nothing about count=0 pages.
96594@@ -1116,7 +1116,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96595 if (PageHWPoison(hpage)) {
96596 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
96597 || (p != hpage && TestSetPageHWPoison(hpage))) {
96598- atomic_long_sub(nr_pages, &num_poisoned_pages);
96599+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96600 unlock_page(hpage);
96601 return 0;
96602 }
96603@@ -1184,14 +1184,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96604 */
96605 if (!PageHWPoison(p)) {
96606 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
96607- atomic_long_sub(nr_pages, &num_poisoned_pages);
96608+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96609 put_page(hpage);
96610 res = 0;
96611 goto out;
96612 }
96613 if (hwpoison_filter(p)) {
96614 if (TestClearPageHWPoison(p))
96615- atomic_long_sub(nr_pages, &num_poisoned_pages);
96616+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96617 unlock_page(hpage);
96618 put_page(hpage);
96619 return 0;
96620@@ -1421,7 +1421,7 @@ int unpoison_memory(unsigned long pfn)
96621 return 0;
96622 }
96623 if (TestClearPageHWPoison(p))
96624- atomic_long_dec(&num_poisoned_pages);
96625+ atomic_long_dec_unchecked(&num_poisoned_pages);
96626 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
96627 return 0;
96628 }
96629@@ -1435,7 +1435,7 @@ int unpoison_memory(unsigned long pfn)
96630 */
96631 if (TestClearPageHWPoison(page)) {
96632 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
96633- atomic_long_sub(nr_pages, &num_poisoned_pages);
96634+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96635 freeit = 1;
96636 if (PageHuge(page))
96637 clear_page_hwpoison_huge_page(page);
96638@@ -1560,11 +1560,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
96639 if (PageHuge(page)) {
96640 set_page_hwpoison_huge_page(hpage);
96641 dequeue_hwpoisoned_huge_page(hpage);
96642- atomic_long_add(1 << compound_order(hpage),
96643+ atomic_long_add_unchecked(1 << compound_order(hpage),
96644 &num_poisoned_pages);
96645 } else {
96646 SetPageHWPoison(page);
96647- atomic_long_inc(&num_poisoned_pages);
96648+ atomic_long_inc_unchecked(&num_poisoned_pages);
96649 }
96650 }
96651 return ret;
96652@@ -1603,7 +1603,7 @@ static int __soft_offline_page(struct page *page, int flags)
96653 put_page(page);
96654 pr_info("soft_offline: %#lx: invalidated\n", pfn);
96655 SetPageHWPoison(page);
96656- atomic_long_inc(&num_poisoned_pages);
96657+ atomic_long_inc_unchecked(&num_poisoned_pages);
96658 return 0;
96659 }
96660
96661@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
96662 if (!is_free_buddy_page(page))
96663 pr_info("soft offline: %#lx: page leaked\n",
96664 pfn);
96665- atomic_long_inc(&num_poisoned_pages);
96666+ atomic_long_inc_unchecked(&num_poisoned_pages);
96667 }
96668 } else {
96669 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96670@@ -1722,11 +1722,11 @@ int soft_offline_page(struct page *page, int flags)
96671 if (PageHuge(page)) {
96672 set_page_hwpoison_huge_page(hpage);
96673 dequeue_hwpoisoned_huge_page(hpage);
96674- atomic_long_add(1 << compound_order(hpage),
96675+ atomic_long_add_unchecked(1 << compound_order(hpage),
96676 &num_poisoned_pages);
96677 } else {
96678 SetPageHWPoison(page);
96679- atomic_long_inc(&num_poisoned_pages);
96680+ atomic_long_inc_unchecked(&num_poisoned_pages);
96681 }
96682 }
96683 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
96684diff --git a/mm/memory.c b/mm/memory.c
96685index 97839f5..4bc5530 100644
96686--- a/mm/memory.c
96687+++ b/mm/memory.c
96688@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96689 free_pte_range(tlb, pmd, addr);
96690 } while (pmd++, addr = next, addr != end);
96691
96692+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96693 start &= PUD_MASK;
96694 if (start < floor)
96695 return;
96696@@ -429,6 +430,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96697 pud_clear(pud);
96698 pmd_free_tlb(tlb, pmd, start);
96699 mm_dec_nr_pmds(tlb->mm);
96700+#endif
96701 }
96702
96703 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96704@@ -448,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96705 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96706 } while (pud++, addr = next, addr != end);
96707
96708+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96709 start &= PGDIR_MASK;
96710 if (start < floor)
96711 return;
96712@@ -462,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96713 pud = pud_offset(pgd, start);
96714 pgd_clear(pgd);
96715 pud_free_tlb(tlb, pud, start);
96716+#endif
96717+
96718 }
96719
96720 /*
96721@@ -691,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96722 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96723 */
96724 if (vma->vm_ops)
96725- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96726+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96727 vma->vm_ops->fault);
96728 if (vma->vm_file)
96729- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96730+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96731 vma->vm_file->f_op->mmap);
96732 dump_stack();
96733 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96734@@ -1464,6 +1469,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96735 page_add_file_rmap(page);
96736 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96737
96738+#ifdef CONFIG_PAX_SEGMEXEC
96739+ pax_mirror_file_pte(vma, addr, page, ptl);
96740+#endif
96741+
96742 retval = 0;
96743 pte_unmap_unlock(pte, ptl);
96744 return retval;
96745@@ -1508,9 +1517,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96746 if (!page_count(page))
96747 return -EINVAL;
96748 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96749+
96750+#ifdef CONFIG_PAX_SEGMEXEC
96751+ struct vm_area_struct *vma_m;
96752+#endif
96753+
96754 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96755 BUG_ON(vma->vm_flags & VM_PFNMAP);
96756 vma->vm_flags |= VM_MIXEDMAP;
96757+
96758+#ifdef CONFIG_PAX_SEGMEXEC
96759+ vma_m = pax_find_mirror_vma(vma);
96760+ if (vma_m)
96761+ vma_m->vm_flags |= VM_MIXEDMAP;
96762+#endif
96763+
96764 }
96765 return insert_page(vma, addr, page, vma->vm_page_prot);
96766 }
96767@@ -1593,6 +1614,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96768 unsigned long pfn)
96769 {
96770 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96771+ BUG_ON(vma->vm_mirror);
96772
96773 if (addr < vma->vm_start || addr >= vma->vm_end)
96774 return -EFAULT;
96775@@ -1840,7 +1862,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96776
96777 BUG_ON(pud_huge(*pud));
96778
96779- pmd = pmd_alloc(mm, pud, addr);
96780+ pmd = (mm == &init_mm) ?
96781+ pmd_alloc_kernel(mm, pud, addr) :
96782+ pmd_alloc(mm, pud, addr);
96783 if (!pmd)
96784 return -ENOMEM;
96785 do {
96786@@ -1860,7 +1884,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96787 unsigned long next;
96788 int err;
96789
96790- pud = pud_alloc(mm, pgd, addr);
96791+ pud = (mm == &init_mm) ?
96792+ pud_alloc_kernel(mm, pgd, addr) :
96793+ pud_alloc(mm, pgd, addr);
96794 if (!pud)
96795 return -ENOMEM;
96796 do {
96797@@ -1982,6 +2008,185 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96798 return ret;
96799 }
96800
96801+#ifdef CONFIG_PAX_SEGMEXEC
96802+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96803+{
96804+ struct mm_struct *mm = vma->vm_mm;
96805+ spinlock_t *ptl;
96806+ pte_t *pte, entry;
96807+
96808+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96809+ entry = *pte;
96810+ if (!pte_present(entry)) {
96811+ if (!pte_none(entry)) {
96812+ free_swap_and_cache(pte_to_swp_entry(entry));
96813+ pte_clear_not_present_full(mm, address, pte, 0);
96814+ }
96815+ } else {
96816+ struct page *page;
96817+
96818+ flush_cache_page(vma, address, pte_pfn(entry));
96819+ entry = ptep_clear_flush(vma, address, pte);
96820+ BUG_ON(pte_dirty(entry));
96821+ page = vm_normal_page(vma, address, entry);
96822+ if (page) {
96823+ update_hiwater_rss(mm);
96824+ if (PageAnon(page))
96825+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96826+ else
96827+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96828+ page_remove_rmap(page);
96829+ page_cache_release(page);
96830+ }
96831+ }
96832+ pte_unmap_unlock(pte, ptl);
96833+}
96834+
96835+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96836+ *
96837+ * the ptl of the lower mapped page is held on entry and is not released on exit
96838+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96839+ */
96840+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96841+{
96842+ struct mm_struct *mm = vma->vm_mm;
96843+ unsigned long address_m;
96844+ spinlock_t *ptl_m;
96845+ struct vm_area_struct *vma_m;
96846+ pmd_t *pmd_m;
96847+ pte_t *pte_m, entry_m;
96848+
96849+ BUG_ON(!page_m || !PageAnon(page_m));
96850+
96851+ vma_m = pax_find_mirror_vma(vma);
96852+ if (!vma_m)
96853+ return;
96854+
96855+ BUG_ON(!PageLocked(page_m));
96856+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96857+ address_m = address + SEGMEXEC_TASK_SIZE;
96858+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96859+ pte_m = pte_offset_map(pmd_m, address_m);
96860+ ptl_m = pte_lockptr(mm, pmd_m);
96861+ if (ptl != ptl_m) {
96862+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96863+ if (!pte_none(*pte_m))
96864+ goto out;
96865+ }
96866+
96867+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96868+ page_cache_get(page_m);
96869+ page_add_anon_rmap(page_m, vma_m, address_m);
96870+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96871+ set_pte_at(mm, address_m, pte_m, entry_m);
96872+ update_mmu_cache(vma_m, address_m, pte_m);
96873+out:
96874+ if (ptl != ptl_m)
96875+ spin_unlock(ptl_m);
96876+ pte_unmap(pte_m);
96877+ unlock_page(page_m);
96878+}
96879+
96880+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96881+{
96882+ struct mm_struct *mm = vma->vm_mm;
96883+ unsigned long address_m;
96884+ spinlock_t *ptl_m;
96885+ struct vm_area_struct *vma_m;
96886+ pmd_t *pmd_m;
96887+ pte_t *pte_m, entry_m;
96888+
96889+ BUG_ON(!page_m || PageAnon(page_m));
96890+
96891+ vma_m = pax_find_mirror_vma(vma);
96892+ if (!vma_m)
96893+ return;
96894+
96895+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96896+ address_m = address + SEGMEXEC_TASK_SIZE;
96897+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96898+ pte_m = pte_offset_map(pmd_m, address_m);
96899+ ptl_m = pte_lockptr(mm, pmd_m);
96900+ if (ptl != ptl_m) {
96901+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96902+ if (!pte_none(*pte_m))
96903+ goto out;
96904+ }
96905+
96906+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96907+ page_cache_get(page_m);
96908+ page_add_file_rmap(page_m);
96909+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96910+ set_pte_at(mm, address_m, pte_m, entry_m);
96911+ update_mmu_cache(vma_m, address_m, pte_m);
96912+out:
96913+ if (ptl != ptl_m)
96914+ spin_unlock(ptl_m);
96915+ pte_unmap(pte_m);
96916+}
96917+
96918+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96919+{
96920+ struct mm_struct *mm = vma->vm_mm;
96921+ unsigned long address_m;
96922+ spinlock_t *ptl_m;
96923+ struct vm_area_struct *vma_m;
96924+ pmd_t *pmd_m;
96925+ pte_t *pte_m, entry_m;
96926+
96927+ vma_m = pax_find_mirror_vma(vma);
96928+ if (!vma_m)
96929+ return;
96930+
96931+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96932+ address_m = address + SEGMEXEC_TASK_SIZE;
96933+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96934+ pte_m = pte_offset_map(pmd_m, address_m);
96935+ ptl_m = pte_lockptr(mm, pmd_m);
96936+ if (ptl != ptl_m) {
96937+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96938+ if (!pte_none(*pte_m))
96939+ goto out;
96940+ }
96941+
96942+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96943+ set_pte_at(mm, address_m, pte_m, entry_m);
96944+out:
96945+ if (ptl != ptl_m)
96946+ spin_unlock(ptl_m);
96947+ pte_unmap(pte_m);
96948+}
96949+
96950+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96951+{
96952+ struct page *page_m;
96953+ pte_t entry;
96954+
96955+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96956+ goto out;
96957+
96958+ entry = *pte;
96959+ page_m = vm_normal_page(vma, address, entry);
96960+ if (!page_m)
96961+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96962+ else if (PageAnon(page_m)) {
96963+ if (pax_find_mirror_vma(vma)) {
96964+ pte_unmap_unlock(pte, ptl);
96965+ lock_page(page_m);
96966+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96967+ if (pte_same(entry, *pte))
96968+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96969+ else
96970+ unlock_page(page_m);
96971+ }
96972+ } else
96973+ pax_mirror_file_pte(vma, address, page_m, ptl);
96974+
96975+out:
96976+ pte_unmap_unlock(pte, ptl);
96977+}
96978+#endif
96979+
96980 /*
96981 * This routine handles present pages, when users try to write
96982 * to a shared page. It is done by copying the page to a new address
96983@@ -2172,6 +2377,12 @@ gotten:
96984 */
96985 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96986 if (likely(pte_same(*page_table, orig_pte))) {
96987+
96988+#ifdef CONFIG_PAX_SEGMEXEC
96989+ if (pax_find_mirror_vma(vma))
96990+ BUG_ON(!trylock_page(new_page));
96991+#endif
96992+
96993 if (old_page) {
96994 if (!PageAnon(old_page)) {
96995 dec_mm_counter_fast(mm, MM_FILEPAGES);
96996@@ -2225,6 +2436,10 @@ gotten:
96997 page_remove_rmap(old_page);
96998 }
96999
97000+#ifdef CONFIG_PAX_SEGMEXEC
97001+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97002+#endif
97003+
97004 /* Free the old page.. */
97005 new_page = old_page;
97006 ret |= VM_FAULT_WRITE;
97007@@ -2483,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97008 swap_free(entry);
97009 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
97010 try_to_free_swap(page);
97011+
97012+#ifdef CONFIG_PAX_SEGMEXEC
97013+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
97014+#endif
97015+
97016 unlock_page(page);
97017 if (page != swapcache) {
97018 /*
97019@@ -2506,6 +2726,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97020
97021 /* No need to invalidate - it was non-present before */
97022 update_mmu_cache(vma, address, page_table);
97023+
97024+#ifdef CONFIG_PAX_SEGMEXEC
97025+ pax_mirror_anon_pte(vma, address, page, ptl);
97026+#endif
97027+
97028 unlock:
97029 pte_unmap_unlock(page_table, ptl);
97030 out:
97031@@ -2525,40 +2750,6 @@ out_release:
97032 }
97033
97034 /*
97035- * This is like a special single-page "expand_{down|up}wards()",
97036- * except we must first make sure that 'address{-|+}PAGE_SIZE'
97037- * doesn't hit another vma.
97038- */
97039-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
97040-{
97041- address &= PAGE_MASK;
97042- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
97043- struct vm_area_struct *prev = vma->vm_prev;
97044-
97045- /*
97046- * Is there a mapping abutting this one below?
97047- *
97048- * That's only ok if it's the same stack mapping
97049- * that has gotten split..
97050- */
97051- if (prev && prev->vm_end == address)
97052- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
97053-
97054- return expand_downwards(vma, address - PAGE_SIZE);
97055- }
97056- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
97057- struct vm_area_struct *next = vma->vm_next;
97058-
97059- /* As VM_GROWSDOWN but s/below/above/ */
97060- if (next && next->vm_start == address + PAGE_SIZE)
97061- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
97062-
97063- return expand_upwards(vma, address + PAGE_SIZE);
97064- }
97065- return 0;
97066-}
97067-
97068-/*
97069 * We enter with non-exclusive mmap_sem (to exclude vma changes,
97070 * but allow concurrent faults), and pte mapped but not yet locked.
97071 * We return with mmap_sem still held, but pte unmapped and unlocked.
97072@@ -2568,27 +2759,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97073 unsigned int flags)
97074 {
97075 struct mem_cgroup *memcg;
97076- struct page *page;
97077+ struct page *page = NULL;
97078 spinlock_t *ptl;
97079 pte_t entry;
97080
97081- pte_unmap(page_table);
97082-
97083- /* Check if we need to add a guard page to the stack */
97084- if (check_stack_guard_page(vma, address) < 0)
97085- return VM_FAULT_SIGSEGV;
97086-
97087- /* Use the zero-page for reads */
97088 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
97089 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
97090 vma->vm_page_prot));
97091- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
97092+ ptl = pte_lockptr(mm, pmd);
97093+ spin_lock(ptl);
97094 if (!pte_none(*page_table))
97095 goto unlock;
97096 goto setpte;
97097 }
97098
97099 /* Allocate our own private page. */
97100+ pte_unmap(page_table);
97101+
97102 if (unlikely(anon_vma_prepare(vma)))
97103 goto oom;
97104 page = alloc_zeroed_user_highpage_movable(vma, address);
97105@@ -2612,6 +2799,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97106 if (!pte_none(*page_table))
97107 goto release;
97108
97109+#ifdef CONFIG_PAX_SEGMEXEC
97110+ if (pax_find_mirror_vma(vma))
97111+ BUG_ON(!trylock_page(page));
97112+#endif
97113+
97114 inc_mm_counter_fast(mm, MM_ANONPAGES);
97115 page_add_new_anon_rmap(page, vma, address);
97116 mem_cgroup_commit_charge(page, memcg, false);
97117@@ -2621,6 +2813,12 @@ setpte:
97118
97119 /* No need to invalidate - it was non-present before */
97120 update_mmu_cache(vma, address, page_table);
97121+
97122+#ifdef CONFIG_PAX_SEGMEXEC
97123+ if (page)
97124+ pax_mirror_anon_pte(vma, address, page, ptl);
97125+#endif
97126+
97127 unlock:
97128 pte_unmap_unlock(page_table, ptl);
97129 return 0;
97130@@ -2853,6 +3051,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97131 return ret;
97132 }
97133 do_set_pte(vma, address, fault_page, pte, false, false);
97134+
97135+#ifdef CONFIG_PAX_SEGMEXEC
97136+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97137+#endif
97138+
97139 unlock_page(fault_page);
97140 unlock_out:
97141 pte_unmap_unlock(pte, ptl);
97142@@ -2904,7 +3107,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97143 }
97144 goto uncharge_out;
97145 }
97146+
97147+#ifdef CONFIG_PAX_SEGMEXEC
97148+ if (pax_find_mirror_vma(vma))
97149+ BUG_ON(!trylock_page(new_page));
97150+#endif
97151+
97152 do_set_pte(vma, address, new_page, pte, true, true);
97153+
97154+#ifdef CONFIG_PAX_SEGMEXEC
97155+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97156+#endif
97157+
97158 mem_cgroup_commit_charge(new_page, memcg, false);
97159 lru_cache_add_active_or_unevictable(new_page, vma);
97160 pte_unmap_unlock(pte, ptl);
97161@@ -2962,6 +3176,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97162 return ret;
97163 }
97164 do_set_pte(vma, address, fault_page, pte, true, false);
97165+
97166+#ifdef CONFIG_PAX_SEGMEXEC
97167+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97168+#endif
97169+
97170 pte_unmap_unlock(pte, ptl);
97171
97172 if (set_page_dirty(fault_page))
97173@@ -3185,6 +3404,12 @@ static int handle_pte_fault(struct mm_struct *mm,
97174 if (flags & FAULT_FLAG_WRITE)
97175 flush_tlb_fix_spurious_fault(vma, address);
97176 }
97177+
97178+#ifdef CONFIG_PAX_SEGMEXEC
97179+ pax_mirror_pte(vma, address, pte, pmd, ptl);
97180+ return 0;
97181+#endif
97182+
97183 unlock:
97184 pte_unmap_unlock(pte, ptl);
97185 return 0;
97186@@ -3204,9 +3429,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97187 pmd_t *pmd;
97188 pte_t *pte;
97189
97190+#ifdef CONFIG_PAX_SEGMEXEC
97191+ struct vm_area_struct *vma_m;
97192+#endif
97193+
97194 if (unlikely(is_vm_hugetlb_page(vma)))
97195 return hugetlb_fault(mm, vma, address, flags);
97196
97197+#ifdef CONFIG_PAX_SEGMEXEC
97198+ vma_m = pax_find_mirror_vma(vma);
97199+ if (vma_m) {
97200+ unsigned long address_m;
97201+ pgd_t *pgd_m;
97202+ pud_t *pud_m;
97203+ pmd_t *pmd_m;
97204+
97205+ if (vma->vm_start > vma_m->vm_start) {
97206+ address_m = address;
97207+ address -= SEGMEXEC_TASK_SIZE;
97208+ vma = vma_m;
97209+ } else
97210+ address_m = address + SEGMEXEC_TASK_SIZE;
97211+
97212+ pgd_m = pgd_offset(mm, address_m);
97213+ pud_m = pud_alloc(mm, pgd_m, address_m);
97214+ if (!pud_m)
97215+ return VM_FAULT_OOM;
97216+ pmd_m = pmd_alloc(mm, pud_m, address_m);
97217+ if (!pmd_m)
97218+ return VM_FAULT_OOM;
97219+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
97220+ return VM_FAULT_OOM;
97221+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
97222+ }
97223+#endif
97224+
97225 pgd = pgd_offset(mm, address);
97226 pud = pud_alloc(mm, pgd, address);
97227 if (!pud)
97228@@ -3341,6 +3598,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97229 spin_unlock(&mm->page_table_lock);
97230 return 0;
97231 }
97232+
97233+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97234+{
97235+ pud_t *new = pud_alloc_one(mm, address);
97236+ if (!new)
97237+ return -ENOMEM;
97238+
97239+ smp_wmb(); /* See comment in __pte_alloc */
97240+
97241+ spin_lock(&mm->page_table_lock);
97242+ if (pgd_present(*pgd)) /* Another has populated it */
97243+ pud_free(mm, new);
97244+ else
97245+ pgd_populate_kernel(mm, pgd, new);
97246+ spin_unlock(&mm->page_table_lock);
97247+ return 0;
97248+}
97249 #endif /* __PAGETABLE_PUD_FOLDED */
97250
97251 #ifndef __PAGETABLE_PMD_FOLDED
97252@@ -3373,6 +3647,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
97253 spin_unlock(&mm->page_table_lock);
97254 return 0;
97255 }
97256+
97257+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
97258+{
97259+ pmd_t *new = pmd_alloc_one(mm, address);
97260+ if (!new)
97261+ return -ENOMEM;
97262+
97263+ smp_wmb(); /* See comment in __pte_alloc */
97264+
97265+ spin_lock(&mm->page_table_lock);
97266+#ifndef __ARCH_HAS_4LEVEL_HACK
97267+ if (!pud_present(*pud)) {
97268+ mm_inc_nr_pmds(mm);
97269+ pud_populate_kernel(mm, pud, new);
97270+ } else /* Another has populated it */
97271+ pmd_free(mm, new);
97272+#else
97273+ if (!pgd_present(*pud)) {
97274+ mm_inc_nr_pmds(mm);
97275+ pgd_populate_kernel(mm, pud, new);
97276+ } else /* Another has populated it */
97277+ pmd_free(mm, new);
97278+#endif /* __ARCH_HAS_4LEVEL_HACK */
97279+ spin_unlock(&mm->page_table_lock);
97280+ return 0;
97281+}
97282 #endif /* __PAGETABLE_PMD_FOLDED */
97283
97284 static int __follow_pte(struct mm_struct *mm, unsigned long address,
97285@@ -3482,8 +3782,8 @@ out:
97286 return ret;
97287 }
97288
97289-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97290- void *buf, int len, int write)
97291+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97292+ void *buf, size_t len, int write)
97293 {
97294 resource_size_t phys_addr;
97295 unsigned long prot = 0;
97296@@ -3509,8 +3809,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
97297 * Access another process' address space as given in mm. If non-NULL, use the
97298 * given task for page fault accounting.
97299 */
97300-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97301- unsigned long addr, void *buf, int len, int write)
97302+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97303+ unsigned long addr, void *buf, size_t len, int write)
97304 {
97305 struct vm_area_struct *vma;
97306 void *old_buf = buf;
97307@@ -3518,7 +3818,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97308 down_read(&mm->mmap_sem);
97309 /* ignore errors, just check how much was successfully transferred */
97310 while (len) {
97311- int bytes, ret, offset;
97312+ ssize_t bytes, ret, offset;
97313 void *maddr;
97314 struct page *page = NULL;
97315
97316@@ -3579,8 +3879,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97317 *
97318 * The caller must hold a reference on @mm.
97319 */
97320-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97321- void *buf, int len, int write)
97322+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
97323+ void *buf, size_t len, int write)
97324 {
97325 return __access_remote_vm(NULL, mm, addr, buf, len, write);
97326 }
97327@@ -3590,11 +3890,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97328 * Source/target buffer must be kernel space,
97329 * Do not walk the page table directly, use get_user_pages
97330 */
97331-int access_process_vm(struct task_struct *tsk, unsigned long addr,
97332- void *buf, int len, int write)
97333+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
97334+ void *buf, size_t len, int write)
97335 {
97336 struct mm_struct *mm;
97337- int ret;
97338+ ssize_t ret;
97339
97340 mm = get_task_mm(tsk);
97341 if (!mm)
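The memory.c additions implement the fault-side half of PaX SEGMEXEC: the user address space is split in two, every mapping in the lower half has a mirror vma at the same offset in the upper half, and the pax_mirror_{anon,file,pfn}_pte() helpers replicate each newly installed PTE into the mirror so that instruction fetches (served from the upper segment) and data accesses stay consistent. The address arithmetic all the helpers share, as a sketch (the value of SEGMEXEC_TASK_SIZE is an illustrative assumption; on i386 it is half of the normal task size):

#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)	/* illustrative value */

static unsigned long pax_mirror_address(unsigned long address)
{
	BUG_ON(address >= SEGMEXEC_TASK_SIZE);	/* must be in the lower half */
	return address + SEGMEXEC_TASK_SIZE;	/* its twin in the upper half */
}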
97342diff --git a/mm/mempolicy.c b/mm/mempolicy.c
97343index 4721046..6ae2056 100644
97344--- a/mm/mempolicy.c
97345+++ b/mm/mempolicy.c
97346@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97347 unsigned long vmstart;
97348 unsigned long vmend;
97349
97350+#ifdef CONFIG_PAX_SEGMEXEC
97351+ struct vm_area_struct *vma_m;
97352+#endif
97353+
97354 vma = find_vma(mm, start);
97355 if (!vma || vma->vm_start > start)
97356 return -EFAULT;
97357@@ -746,6 +750,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97358 err = vma_replace_policy(vma, new_pol);
97359 if (err)
97360 goto out;
97361+
97362+#ifdef CONFIG_PAX_SEGMEXEC
97363+ vma_m = pax_find_mirror_vma(vma);
97364+ if (vma_m) {
97365+ err = vma_replace_policy(vma_m, new_pol);
97366+ if (err)
97367+ goto out;
97368+ }
97369+#endif
97370+
97371 }
97372
97373 out:
97374@@ -1160,6 +1174,17 @@ static long do_mbind(unsigned long start, unsigned long len,
97375
97376 if (end < start)
97377 return -EINVAL;
97378+
97379+#ifdef CONFIG_PAX_SEGMEXEC
97380+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97381+ if (end > SEGMEXEC_TASK_SIZE)
97382+ return -EINVAL;
97383+ } else
97384+#endif
97385+
97386+ if (end > TASK_SIZE)
97387+ return -EINVAL;
97388+
97389 if (end == start)
97390 return 0;
97391
97392@@ -1385,8 +1410,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97393 */
97394 tcred = __task_cred(task);
97395 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97396- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97397- !capable(CAP_SYS_NICE)) {
97398+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97399 rcu_read_unlock();
97400 err = -EPERM;
97401 goto out_put;
97402@@ -1417,6 +1441,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97403 goto out;
97404 }
97405
97406+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97407+ if (mm != current->mm &&
97408+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
97409+ mmput(mm);
97410+ err = -EPERM;
97411+ goto out;
97412+ }
97413+#endif
97414+
97415 err = do_migrate_pages(mm, old, new,
97416 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
97417
97418diff --git a/mm/migrate.c b/mm/migrate.c
97419index 85e0426..be49beb 100644
97420--- a/mm/migrate.c
97421+++ b/mm/migrate.c
97422@@ -1472,8 +1472,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
97423 */
97424 tcred = __task_cred(task);
97425 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97426- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97427- !capable(CAP_SYS_NICE)) {
97428+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97429 rcu_read_unlock();
97430 err = -EPERM;
97431 goto out;
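
/*
 * [Editor's sketch, not part of the patch] Effect of dropping the
 * uid-vs-uid clause in the two credential checks above: a mere match of
 * real uids no longer grants access. Plain integers stand in for
 * kuid_t/uid_eq(); struct ids is illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct ids { unsigned uid, euid, suid; };

static bool allow_old(struct ids c, struct ids t, bool cap_sys_nice)
{
        return c.euid == t.suid || c.euid == t.uid ||
               c.uid  == t.suid || c.uid  == t.uid || cap_sys_nice;
}

static bool allow_new(struct ids c, struct ids t, bool cap_sys_nice)
{
        return c.euid == t.suid || c.euid == t.uid ||
               c.uid  == t.suid || cap_sys_nice;
}

int main(void)
{
        /* caller runs a setuid-2000 binary; the target is a setuid-3000
           task started by the same real user 1000 */
        struct ids caller = { .uid = 1000, .euid = 2000, .suid = 2000 };
        struct ids target = { .uid = 1000, .euid = 3000, .suid = 3000 };

        printf("old: %d new: %d\n",
               allow_old(caller, target, false),   /* 1: uid==uid sufficed */
               allow_new(caller, target, false));  /* 0: now denied */
        return 0;
}
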
97432diff --git a/mm/mlock.c b/mm/mlock.c
97433index 8a54cd2..92f1747 100644
97434--- a/mm/mlock.c
97435+++ b/mm/mlock.c
97436@@ -14,6 +14,7 @@
97437 #include <linux/pagevec.h>
97438 #include <linux/mempolicy.h>
97439 #include <linux/syscalls.h>
97440+#include <linux/security.h>
97441 #include <linux/sched.h>
97442 #include <linux/export.h>
97443 #include <linux/rmap.h>
97444@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
97445 {
97446 unsigned long nstart, end, tmp;
97447 struct vm_area_struct * vma, * prev;
97448- int error;
97449+ int error = 0;
97450
97451 VM_BUG_ON(start & ~PAGE_MASK);
97452 VM_BUG_ON(len != PAGE_ALIGN(len));
97453@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
97454 return -EINVAL;
97455 if (end == start)
97456 return 0;
97457+ if (end > TASK_SIZE)
97458+ return -EINVAL;
97459+
97460 vma = find_vma(current->mm, start);
97461 if (!vma || vma->vm_start > start)
97462 return -ENOMEM;
97463@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
97464 for (nstart = start ; ; ) {
97465 vm_flags_t newflags;
97466
97467+#ifdef CONFIG_PAX_SEGMEXEC
97468+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97469+ break;
97470+#endif
97471+
97472 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
97473
97474 newflags = vma->vm_flags & ~VM_LOCKED;
97475@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
97476 locked += current->mm->locked_vm;
97477
97478 /* check against resource limits */
97479+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
97480 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
97481 error = do_mlock(start, len, 1);
97482
97483@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
97484 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
97485 vm_flags_t newflags;
97486
97487+#ifdef CONFIG_PAX_SEGMEXEC
97488+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97489+ break;
97490+#endif
97491+
97492 newflags = vma->vm_flags & ~VM_LOCKED;
97493 if (flags & MCL_CURRENT)
97494 newflags |= VM_LOCKED;
97495@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
97496 lock_limit >>= PAGE_SHIFT;
97497
97498 ret = -ENOMEM;
97499+
97500+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
97501+
97502 down_write(&current->mm->mmap_sem);
97503-
97504 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
97505 capable(CAP_IPC_LOCK))
97506 ret = do_mlockall(flags);
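
/*
 * [Editor's sketch, not part of the patch] The page arithmetic behind the
 * limit tests that gr_learn_resource() is inserted next to above: the
 * rlimit and the usage are compared in pages, while the learning hook is
 * fed the byte count. PAGE_SHIFT 12 (4 KiB pages) is a sketch assumption.
 */
#include <stdbool.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12

static bool within_memlock_limit(size_t locked_bytes, size_t rlim_bytes,
                                 bool cap_ipc_lock)
{
        size_t locked_pages = locked_bytes >> SKETCH_PAGE_SHIFT;
        size_t limit_pages  = rlim_bytes   >> SKETCH_PAGE_SHIFT;

        return locked_pages <= limit_pages || cap_ipc_lock;
}
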
97507diff --git a/mm/mm_init.c b/mm/mm_init.c
97508index 5f420f7..dd42fb1b 100644
97509--- a/mm/mm_init.c
97510+++ b/mm/mm_init.c
97511@@ -177,7 +177,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
97512 return NOTIFY_OK;
97513 }
97514
97515-static struct notifier_block compute_batch_nb __meminitdata = {
97516+static struct notifier_block compute_batch_nb __meminitconst = {
97517 .notifier_call = mm_compute_batch_notifier,
97518 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
97519 };
97520diff --git a/mm/mmap.c b/mm/mmap.c
97521index 9ec50a3..0476e2d 100644
97522--- a/mm/mmap.c
97523+++ b/mm/mmap.c
97524@@ -41,6 +41,7 @@
97525 #include <linux/notifier.h>
97526 #include <linux/memory.h>
97527 #include <linux/printk.h>
97528+#include <linux/random.h>
97529
97530 #include <asm/uaccess.h>
97531 #include <asm/cacheflush.h>
97532@@ -57,6 +58,16 @@
97533 #define arch_rebalance_pgtables(addr, len) (addr)
97534 #endif
97535
97536+static inline void verify_mm_writelocked(struct mm_struct *mm)
97537+{
97538+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
97539+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97540+ up_read(&mm->mmap_sem);
97541+ BUG();
97542+ }
97543+#endif
97544+}
97545+
97546 static void unmap_region(struct mm_struct *mm,
97547 struct vm_area_struct *vma, struct vm_area_struct *prev,
97548 unsigned long start, unsigned long end);
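
/*
 * [Editor's sketch, not part of the patch] The verify_mm_writelocked()
 * trick above, restated with a userspace rwlock: if a read lock can be
 * acquired, no writer holds the lock, so the "caller must hold it for
 * writing" invariant was violated. (POSIX rwlocks may report a self-
 * deadlock first; the kernel rwsem variant has no such wrinkle. Link
 * with -lpthread.)
 */
#include <assert.h>
#include <pthread.h>

static void verify_writelocked(pthread_rwlock_t *l)
{
        if (pthread_rwlock_tryrdlock(l) == 0) {  /* success => no writer */
                pthread_rwlock_unlock(l);
                assert(!"lock not held for writing");
        }
}
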
97549@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
97550 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
97551 *
97552 */
97553-pgprot_t protection_map[16] = {
97554+pgprot_t protection_map[16] __read_only = {
97555 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
97556 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
97557 };
97558
97559-pgprot_t vm_get_page_prot(unsigned long vm_flags)
97560+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
97561 {
97562- return __pgprot(pgprot_val(protection_map[vm_flags &
97563+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
97564 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
97565 pgprot_val(arch_vm_get_page_prot(vm_flags)));
97566+
97567+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97568+ if (!(__supported_pte_mask & _PAGE_NX) &&
97569+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
97570+ (vm_flags & (VM_READ | VM_WRITE)))
97571+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
97572+#endif
97573+
97574+ return prot;
97575 }
97576 EXPORT_SYMBOL(vm_get_page_prot);
97577
97578@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
97579 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
97580 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
97581 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
97582+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
97583 /*
97584 * Make sure vm_committed_as in one cacheline and not cacheline shared with
97585 * other variables. It can be updated by several CPUs frequently.
97586@@ -271,6 +292,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
97587 struct vm_area_struct *next = vma->vm_next;
97588
97589 might_sleep();
97590+ BUG_ON(vma->vm_mirror);
97591 if (vma->vm_ops && vma->vm_ops->close)
97592 vma->vm_ops->close(vma);
97593 if (vma->vm_file)
97594@@ -284,6 +306,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
97595
97596 SYSCALL_DEFINE1(brk, unsigned long, brk)
97597 {
97598+ unsigned long rlim;
97599 unsigned long retval;
97600 unsigned long newbrk, oldbrk;
97601 struct mm_struct *mm = current->mm;
97602@@ -314,7 +337,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97603 * segment grow beyond its set limit in the case where the limit is
97604 * not page aligned -Ram Gupta
97605 */
97606- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
97607+ rlim = rlimit(RLIMIT_DATA);
97608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97609+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
97610+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
97611+ rlim = 4096 * PAGE_SIZE;
97612+#endif
97613+ if (check_data_rlimit(rlim, brk, mm->start_brk,
97614 mm->end_data, mm->start_data))
97615 goto out;
97616
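
/*
 * [Editor's note, not part of the patch] Sanity-checking the comment in
 * the hunk above: with the usual 4 KiB pages, 4096 * PAGE_SIZE is exactly
 * the advertised 16 MiB floor. The macro is a sketch-local assumption.
 */
#define SKETCH_PAGE_SIZE 4096UL
_Static_assert(4096UL * SKETCH_PAGE_SIZE == 16UL << 20,
               "4096 pages of 4 KiB == 16 MiB");
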
97617@@ -967,6 +996,12 @@ static int
97618 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
97619 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97620 {
97621+
97622+#ifdef CONFIG_PAX_SEGMEXEC
97623+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
97624+ return 0;
97625+#endif
97626+
97627 if (is_mergeable_vma(vma, file, vm_flags) &&
97628 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97629 if (vma->vm_pgoff == vm_pgoff)
97630@@ -986,6 +1021,12 @@ static int
97631 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97632 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97633 {
97634+
97635+#ifdef CONFIG_PAX_SEGMEXEC
97636+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
97637+ return 0;
97638+#endif
97639+
97640 if (is_mergeable_vma(vma, file, vm_flags) &&
97641 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97642 pgoff_t vm_pglen;
97643@@ -1035,6 +1076,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97644 struct vm_area_struct *area, *next;
97645 int err;
97646
97647+#ifdef CONFIG_PAX_SEGMEXEC
97648+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97649+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97650+
97651+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97652+#endif
97653+
97654 /*
97655 * We later require that vma->vm_flags == vm_flags,
97656 * so this tests vma->vm_flags & VM_SPECIAL, too.
97657@@ -1050,6 +1098,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97658 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97659 next = next->vm_next;
97660
97661+#ifdef CONFIG_PAX_SEGMEXEC
97662+ if (prev)
97663+ prev_m = pax_find_mirror_vma(prev);
97664+ if (area)
97665+ area_m = pax_find_mirror_vma(area);
97666+ if (next)
97667+ next_m = pax_find_mirror_vma(next);
97668+#endif
97669+
97670 /*
97671 * Can it merge with the predecessor?
97672 */
97673@@ -1069,9 +1126,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97674 /* cases 1, 6 */
97675 err = vma_adjust(prev, prev->vm_start,
97676 next->vm_end, prev->vm_pgoff, NULL);
97677- } else /* cases 2, 5, 7 */
97678+
97679+#ifdef CONFIG_PAX_SEGMEXEC
97680+ if (!err && prev_m)
97681+ err = vma_adjust(prev_m, prev_m->vm_start,
97682+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97683+#endif
97684+
97685+ } else { /* cases 2, 5, 7 */
97686 err = vma_adjust(prev, prev->vm_start,
97687 end, prev->vm_pgoff, NULL);
97688+
97689+#ifdef CONFIG_PAX_SEGMEXEC
97690+ if (!err && prev_m)
97691+ err = vma_adjust(prev_m, prev_m->vm_start,
97692+ end_m, prev_m->vm_pgoff, NULL);
97693+#endif
97694+
97695+ }
97696 if (err)
97697 return NULL;
97698 khugepaged_enter_vma_merge(prev, vm_flags);
97699@@ -1085,12 +1157,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97700 mpol_equal(policy, vma_policy(next)) &&
97701 can_vma_merge_before(next, vm_flags,
97702 anon_vma, file, pgoff+pglen)) {
97703- if (prev && addr < prev->vm_end) /* case 4 */
97704+ if (prev && addr < prev->vm_end) { /* case 4 */
97705 err = vma_adjust(prev, prev->vm_start,
97706 addr, prev->vm_pgoff, NULL);
97707- else /* cases 3, 8 */
97708+
97709+#ifdef CONFIG_PAX_SEGMEXEC
97710+ if (!err && prev_m)
97711+ err = vma_adjust(prev_m, prev_m->vm_start,
97712+ addr_m, prev_m->vm_pgoff, NULL);
97713+#endif
97714+
97715+ } else { /* cases 3, 8 */
97716 err = vma_adjust(area, addr, next->vm_end,
97717 next->vm_pgoff - pglen, NULL);
97718+
97719+#ifdef CONFIG_PAX_SEGMEXEC
97720+ if (!err && area_m)
97721+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97722+ next_m->vm_pgoff - pglen, NULL);
97723+#endif
97724+
97725+ }
97726 if (err)
97727 return NULL;
97728 khugepaged_enter_vma_merge(area, vm_flags);
97729@@ -1199,8 +1286,10 @@ none:
97730 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97731 struct file *file, long pages)
97732 {
97733- const unsigned long stack_flags
97734- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97735+
97736+#ifdef CONFIG_PAX_RANDMMAP
97737+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97738+#endif
97739
97740 mm->total_vm += pages;
97741
97742@@ -1208,7 +1297,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97743 mm->shared_vm += pages;
97744 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97745 mm->exec_vm += pages;
97746- } else if (flags & stack_flags)
97747+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97748 mm->stack_vm += pages;
97749 }
97750 #endif /* CONFIG_PROC_FS */
97751@@ -1238,6 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97752 locked += mm->locked_vm;
97753 lock_limit = rlimit(RLIMIT_MEMLOCK);
97754 lock_limit >>= PAGE_SHIFT;
97755+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97756 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97757 return -EAGAIN;
97758 }
97759@@ -1264,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97760 * (the exception is when the underlying filesystem is noexec
97761 * mounted, in which case we dont add PROT_EXEC.)
97762 */
97763- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97764+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97765 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97766 prot |= PROT_EXEC;
97767
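
/*
 * [Editor's sketch, not part of the patch] The personality hunk above now
 * grants the implied PROT_EXEC to write-only mappings as well as readable
 * ones. Simplified predicate; the MNT_NOEXEC mount check is omitted.
 */
#include <sys/mman.h>

static int apply_read_implies_exec(int prot, int read_implies_exec)
{
        /* the pre-patch condition was just (prot & PROT_READ) */
        if ((prot & (PROT_READ | PROT_WRITE)) && read_implies_exec)
                prot |= PROT_EXEC;
        return prot;
}
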
97768@@ -1290,7 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97769 /* Obtain the address to map to. we verify (or select) it and ensure
97770 * that it represents a valid section of the address space.
97771 */
97772- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97773+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97774 if (addr & ~PAGE_MASK)
97775 return addr;
97776
97777@@ -1301,6 +1391,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97778 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97779 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97780
97781+#ifdef CONFIG_PAX_MPROTECT
97782+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97783+
97784+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97785+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97786+ mm->binfmt->handle_mmap)
97787+ mm->binfmt->handle_mmap(file);
97788+#endif
97789+
97790+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97791+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97792+ gr_log_rwxmmap(file);
97793+
97794+#ifdef CONFIG_PAX_EMUPLT
97795+ vm_flags &= ~VM_EXEC;
97796+#else
97797+ return -EPERM;
97798+#endif
97799+
97800+ }
97801+
97802+ if (!(vm_flags & VM_EXEC))
97803+ vm_flags &= ~VM_MAYEXEC;
97804+#else
97805+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97806+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97807+#endif
97808+ else
97809+ vm_flags &= ~VM_MAYWRITE;
97810+ }
97811+#endif
97812+
97813+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97814+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97815+ vm_flags &= ~VM_PAGEEXEC;
97816+#endif
97817+
97818 if (flags & MAP_LOCKED)
97819 if (!can_do_mlock())
97820 return -EPERM;
97821@@ -1388,6 +1515,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97822 vm_flags |= VM_NORESERVE;
97823 }
97824
97825+ if (!gr_acl_handle_mmap(file, prot))
97826+ return -EACCES;
97827+
97828 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97829 if (!IS_ERR_VALUE(addr) &&
97830 ((vm_flags & VM_LOCKED) ||
97831@@ -1481,7 +1611,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97832 vm_flags_t vm_flags = vma->vm_flags;
97833
97834 /* If it was private or non-writable, the write bit is already clear */
97835- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97836+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97837 return 0;
97838
97839 /* The backer wishes to know when pages are first written to? */
97840@@ -1532,7 +1662,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97841 struct rb_node **rb_link, *rb_parent;
97842 unsigned long charged = 0;
97843
97844+#ifdef CONFIG_PAX_SEGMEXEC
97845+ struct vm_area_struct *vma_m = NULL;
97846+#endif
97847+
97848+ /*
97849+ * mm->mmap_sem is required to protect against another thread
97850+ * changing the mappings in case we sleep.
97851+ */
97852+ verify_mm_writelocked(mm);
97853+
97854 /* Check against address space limit. */
97855+
97856+#ifdef CONFIG_PAX_RANDMMAP
97857+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97858+#endif
97859+
97860 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97861 unsigned long nr_pages;
97862
97863@@ -1551,11 +1696,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97864
97865 /* Clear old maps */
97866 error = -ENOMEM;
97867-munmap_back:
97868 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97869 if (do_munmap(mm, addr, len))
97870 return -ENOMEM;
97871- goto munmap_back;
97872+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97873 }
97874
97875 /*
97876@@ -1586,6 +1730,16 @@ munmap_back:
97877 goto unacct_error;
97878 }
97879
97880+#ifdef CONFIG_PAX_SEGMEXEC
97881+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97882+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97883+ if (!vma_m) {
97884+ error = -ENOMEM;
97885+ goto free_vma;
97886+ }
97887+ }
97888+#endif
97889+
97890 vma->vm_mm = mm;
97891 vma->vm_start = addr;
97892 vma->vm_end = addr + len;
97893@@ -1616,6 +1770,13 @@ munmap_back:
97894 if (error)
97895 goto unmap_and_free_vma;
97896
97897+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97898+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97899+ vma->vm_flags |= VM_PAGEEXEC;
97900+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97901+ }
97902+#endif
97903+
97904 /* Can addr have changed??
97905 *
97906 * Answer: Yes, several device drivers can do it in their
97907@@ -1634,6 +1795,12 @@ munmap_back:
97908 }
97909
97910 vma_link(mm, vma, prev, rb_link, rb_parent);
97911+
97912+#ifdef CONFIG_PAX_SEGMEXEC
97913+ if (vma_m)
97914+ BUG_ON(pax_mirror_vma(vma_m, vma));
97915+#endif
97916+
97917 /* Once vma denies write, undo our temporary denial count */
97918 if (file) {
97919 if (vm_flags & VM_SHARED)
97920@@ -1646,6 +1813,7 @@ out:
97921 perf_event_mmap(vma);
97922
97923 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97924+ track_exec_limit(mm, addr, addr + len, vm_flags);
97925 if (vm_flags & VM_LOCKED) {
97926 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97927 vma == get_gate_vma(current->mm)))
97928@@ -1683,6 +1851,12 @@ allow_write_and_free_vma:
97929 if (vm_flags & VM_DENYWRITE)
97930 allow_write_access(file);
97931 free_vma:
97932+
97933+#ifdef CONFIG_PAX_SEGMEXEC
97934+ if (vma_m)
97935+ kmem_cache_free(vm_area_cachep, vma_m);
97936+#endif
97937+
97938 kmem_cache_free(vm_area_cachep, vma);
97939 unacct_error:
97940 if (charged)
97941@@ -1690,7 +1864,63 @@ unacct_error:
97942 return error;
97943 }
97944
97945-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97946+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97947+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97948+{
97949+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97950+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97951+
97952+ return 0;
97953+}
97954+#endif
97955+
97956+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97957+{
97958+ if (!vma) {
97959+#ifdef CONFIG_STACK_GROWSUP
97960+ if (addr > sysctl_heap_stack_gap)
97961+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97962+ else
97963+ vma = find_vma(current->mm, 0);
97964+ if (vma && (vma->vm_flags & VM_GROWSUP))
97965+ return false;
97966+#endif
97967+ return true;
97968+ }
97969+
97970+ if (addr + len > vma->vm_start)
97971+ return false;
97972+
97973+ if (vma->vm_flags & VM_GROWSDOWN)
97974+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97975+#ifdef CONFIG_STACK_GROWSUP
97976+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97977+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97978+#endif
97979+ else if (offset)
97980+ return offset <= vma->vm_start - addr - len;
97981+
97982+ return true;
97983+}
97984+
97985+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97986+{
97987+ if (vma->vm_start < len)
97988+ return -ENOMEM;
97989+
97990+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97991+ if (offset <= vma->vm_start - len)
97992+ return vma->vm_start - len - offset;
97993+ else
97994+ return -ENOMEM;
97995+ }
97996+
97997+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97998+ return vma->vm_start - len - sysctl_heap_stack_gap;
97999+ return -ENOMEM;
98000+}
98001+
98002+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
98003 {
98004 /*
98005 * We implement the search by looking for an rbtree node that
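
/*
 * [Editor's sketch, not part of the patch] The core of
 * check_heap_stack_gap() above for the common VM_GROWSDOWN case, restated
 * over a toy vma: a candidate [addr, addr+len) is rejected unless it ends
 * at least `gap` bytes below a stack vma growing down toward it. The
 * threadstack offset and CONFIG_STACK_GROWSUP branches are omitted.
 */
#include <stdbool.h>

struct toy_vma { unsigned long vm_start, vm_end; bool grows_down; };

static bool gap_ok(const struct toy_vma *next, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
        if (!next)
                return true;                    /* nothing above the range */
        if (addr + len > next->vm_start)
                return false;                   /* plain overlap */
        if (next->grows_down)
                return gap <= next->vm_start - addr - len;
        return true;
}
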
98006@@ -1738,11 +1968,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
98007 }
98008 }
98009
98010- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
98011+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
98012 check_current:
98013 /* Check if current node has a suitable gap */
98014 if (gap_start > high_limit)
98015 return -ENOMEM;
98016+
98017+ if (gap_end - gap_start > info->threadstack_offset)
98018+ gap_start += info->threadstack_offset;
98019+ else
98020+ gap_start = gap_end;
98021+
98022+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98023+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98024+ gap_start += sysctl_heap_stack_gap;
98025+ else
98026+ gap_start = gap_end;
98027+ }
98028+ if (vma->vm_flags & VM_GROWSDOWN) {
98029+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98030+ gap_end -= sysctl_heap_stack_gap;
98031+ else
98032+ gap_end = gap_start;
98033+ }
98034 if (gap_end >= low_limit && gap_end - gap_start >= length)
98035 goto found;
98036
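
/*
 * [Editor's note, not part of the patch] Every gap adjustment in the hunk
 * above uses the same compare-before-subtract shape. A naive
 * `gap_end -= reserve` would wrap below gap_start in unsigned arithmetic
 * whenever the gap is smaller than the reserve; clamping to an empty gap
 * lets the later `gap_end - gap_start >= length` test fail cleanly.
 */
static unsigned long shrink_gap_top(unsigned long gap_start,
                                    unsigned long gap_end,
                                    unsigned long reserve)
{
        if (gap_end - gap_start > reserve)
                return gap_end - reserve;       /* room remains after reserve */
        return gap_start;                       /* clamp: gap becomes empty */
}
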
98037@@ -1792,7 +2040,7 @@ found:
98038 return gap_start;
98039 }
98040
98041-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
98042+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
98043 {
98044 struct mm_struct *mm = current->mm;
98045 struct vm_area_struct *vma;
98046@@ -1846,6 +2094,24 @@ check_current:
98047 gap_end = vma->vm_start;
98048 if (gap_end < low_limit)
98049 return -ENOMEM;
98050+
98051+ if (gap_end - gap_start > info->threadstack_offset)
98052+ gap_end -= info->threadstack_offset;
98053+ else
98054+ gap_end = gap_start;
98055+
98056+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98057+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98058+ gap_start += sysctl_heap_stack_gap;
98059+ else
98060+ gap_start = gap_end;
98061+ }
98062+ if (vma->vm_flags & VM_GROWSDOWN) {
98063+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98064+ gap_end -= sysctl_heap_stack_gap;
98065+ else
98066+ gap_end = gap_start;
98067+ }
98068 if (gap_start <= high_limit && gap_end - gap_start >= length)
98069 goto found;
98070
98071@@ -1909,6 +2175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98072 struct mm_struct *mm = current->mm;
98073 struct vm_area_struct *vma;
98074 struct vm_unmapped_area_info info;
98075+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98076
98077 if (len > TASK_SIZE - mmap_min_addr)
98078 return -ENOMEM;
98079@@ -1916,11 +2183,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98080 if (flags & MAP_FIXED)
98081 return addr;
98082
98083+#ifdef CONFIG_PAX_RANDMMAP
98084+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98085+#endif
98086+
98087 if (addr) {
98088 addr = PAGE_ALIGN(addr);
98089 vma = find_vma(mm, addr);
98090 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98091- (!vma || addr + len <= vma->vm_start))
98092+ check_heap_stack_gap(vma, addr, len, offset))
98093 return addr;
98094 }
98095
98096@@ -1929,6 +2200,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98097 info.low_limit = mm->mmap_base;
98098 info.high_limit = TASK_SIZE;
98099 info.align_mask = 0;
98100+ info.threadstack_offset = offset;
98101 return vm_unmapped_area(&info);
98102 }
98103 #endif
98104@@ -1947,6 +2219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98105 struct mm_struct *mm = current->mm;
98106 unsigned long addr = addr0;
98107 struct vm_unmapped_area_info info;
98108+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98109
98110 /* requested length too big for entire address space */
98111 if (len > TASK_SIZE - mmap_min_addr)
98112@@ -1955,12 +2228,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98113 if (flags & MAP_FIXED)
98114 return addr;
98115
98116+#ifdef CONFIG_PAX_RANDMMAP
98117+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98118+#endif
98119+
98120 /* requesting a specific address */
98121 if (addr) {
98122 addr = PAGE_ALIGN(addr);
98123 vma = find_vma(mm, addr);
98124 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98125- (!vma || addr + len <= vma->vm_start))
98126+ check_heap_stack_gap(vma, addr, len, offset))
98127 return addr;
98128 }
98129
98130@@ -1969,6 +2246,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98131 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
98132 info.high_limit = mm->mmap_base;
98133 info.align_mask = 0;
98134+ info.threadstack_offset = offset;
98135 addr = vm_unmapped_area(&info);
98136
98137 /*
98138@@ -1981,6 +2259,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98139 VM_BUG_ON(addr != -ENOMEM);
98140 info.flags = 0;
98141 info.low_limit = TASK_UNMAPPED_BASE;
98142+
98143+#ifdef CONFIG_PAX_RANDMMAP
98144+ if (mm->pax_flags & MF_PAX_RANDMMAP)
98145+ info.low_limit += mm->delta_mmap;
98146+#endif
98147+
98148 info.high_limit = TASK_SIZE;
98149 addr = vm_unmapped_area(&info);
98150 }
98151@@ -2081,6 +2365,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
98152 return vma;
98153 }
98154
98155+#ifdef CONFIG_PAX_SEGMEXEC
98156+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
98157+{
98158+ struct vm_area_struct *vma_m;
98159+
98160+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
98161+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
98162+ BUG_ON(vma->vm_mirror);
98163+ return NULL;
98164+ }
98165+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
98166+ vma_m = vma->vm_mirror;
98167+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
98168+ BUG_ON(vma->vm_file != vma_m->vm_file);
98169+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
98170+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
98171+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
98172+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
98173+ return vma_m;
98174+}
98175+#endif
98176+
98177 /*
98178 * Verify that the stack growth is acceptable and
98179 * update accounting. This is shared with both the
98180@@ -2098,8 +2404,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98181
98182 /* Stack limit test */
98183 actual_size = size;
98184- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
98185- actual_size -= PAGE_SIZE;
98186+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
98187 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
98188 return -ENOMEM;
98189
98190@@ -2110,6 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98191 locked = mm->locked_vm + grow;
98192 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
98193 limit >>= PAGE_SHIFT;
98194+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
98195 if (locked > limit && !capable(CAP_IPC_LOCK))
98196 return -ENOMEM;
98197 }
98198@@ -2139,37 +2445,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98199 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
98200 * vma is the last one with address > vma->vm_end. Have to extend vma.
98201 */
98202+#ifndef CONFIG_IA64
98203+static
98204+#endif
98205 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98206 {
98207 int error;
98208+ bool locknext;
98209
98210 if (!(vma->vm_flags & VM_GROWSUP))
98211 return -EFAULT;
98212
98213+ /* Also guard against wrapping around to address 0. */
98214+ if (address < PAGE_ALIGN(address+1))
98215+ address = PAGE_ALIGN(address+1);
98216+ else
98217+ return -ENOMEM;
98218+
98219 /*
98220 * We must make sure the anon_vma is allocated
98221 * so that the anon_vma locking is not a noop.
98222 */
98223 if (unlikely(anon_vma_prepare(vma)))
98224 return -ENOMEM;
98225+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
98226+ if (locknext && anon_vma_prepare(vma->vm_next))
98227+ return -ENOMEM;
98228 vma_lock_anon_vma(vma);
98229+ if (locknext)
98230+ vma_lock_anon_vma(vma->vm_next);
98231
98232 /*
98233 * vma->vm_start/vm_end cannot change under us because the caller
98234 * is required to hold the mmap_sem in read mode. We need the
98235- * anon_vma lock to serialize against concurrent expand_stacks.
98236- * Also guard against wrapping around to address 0.
98237+ * anon_vma locks to serialize against concurrent expand_stacks
98238+ * and expand_upwards.
98239 */
98240- if (address < PAGE_ALIGN(address+4))
98241- address = PAGE_ALIGN(address+4);
98242- else {
98243- vma_unlock_anon_vma(vma);
98244- return -ENOMEM;
98245- }
98246 error = 0;
98247
98248 /* Somebody else might have raced and expanded it already */
98249- if (address > vma->vm_end) {
98250+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
98251+ error = -ENOMEM;
98252+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
98253 unsigned long size, grow;
98254
98255 size = address - vma->vm_start;
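
/*
 * [Editor's sketch, not part of the patch] The wrap check hoisted to the
 * top of expand_upwards() above: if `address` sits in the last page of the
 * address space, PAGE_ALIGN(address + 1) wraps past zero and comes out
 * *smaller* than address, which the comparison turns into -ENOMEM.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_ALIGN(x) \
        (((x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
        unsigned long ok   = 0x7fff0123UL;
        unsigned long wrap = ~0UL - 100;        /* inside the last page */

        printf("%d\n", ok   < SKETCH_PAGE_ALIGN(ok + 1));    /* 1: safe */
        printf("%d\n", wrap < SKETCH_PAGE_ALIGN(wrap + 1));  /* 0: wraps */
        return 0;
}
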
98256@@ -2204,6 +2521,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98257 }
98258 }
98259 }
98260+ if (locknext)
98261+ vma_unlock_anon_vma(vma->vm_next);
98262 vma_unlock_anon_vma(vma);
98263 khugepaged_enter_vma_merge(vma, vma->vm_flags);
98264 validate_mm(vma->vm_mm);
98265@@ -2218,6 +2537,8 @@ int expand_downwards(struct vm_area_struct *vma,
98266 unsigned long address)
98267 {
98268 int error;
98269+ bool lockprev = false;
98270+ struct vm_area_struct *prev;
98271
98272 /*
98273 * We must make sure the anon_vma is allocated
98274@@ -2231,6 +2552,15 @@ int expand_downwards(struct vm_area_struct *vma,
98275 if (error)
98276 return error;
98277
98278+ prev = vma->vm_prev;
98279+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
98280+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
98281+#endif
98282+ if (lockprev && anon_vma_prepare(prev))
98283+ return -ENOMEM;
98284+ if (lockprev)
98285+ vma_lock_anon_vma(prev);
98286+
98287 vma_lock_anon_vma(vma);
98288
98289 /*
98290@@ -2240,9 +2570,17 @@ int expand_downwards(struct vm_area_struct *vma,
98291 */
98292
98293 /* Somebody else might have raced and expanded it already */
98294- if (address < vma->vm_start) {
98295+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
98296+ error = -ENOMEM;
98297+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
98298 unsigned long size, grow;
98299
98300+#ifdef CONFIG_PAX_SEGMEXEC
98301+ struct vm_area_struct *vma_m;
98302+
98303+ vma_m = pax_find_mirror_vma(vma);
98304+#endif
98305+
98306 size = vma->vm_end - address;
98307 grow = (vma->vm_start - address) >> PAGE_SHIFT;
98308
98309@@ -2267,13 +2605,27 @@ int expand_downwards(struct vm_area_struct *vma,
98310 vma->vm_pgoff -= grow;
98311 anon_vma_interval_tree_post_update_vma(vma);
98312 vma_gap_update(vma);
98313+
98314+#ifdef CONFIG_PAX_SEGMEXEC
98315+ if (vma_m) {
98316+ anon_vma_interval_tree_pre_update_vma(vma_m);
98317+ vma_m->vm_start -= grow << PAGE_SHIFT;
98318+ vma_m->vm_pgoff -= grow;
98319+ anon_vma_interval_tree_post_update_vma(vma_m);
98320+ vma_gap_update(vma_m);
98321+ }
98322+#endif
98323+
98324 spin_unlock(&vma->vm_mm->page_table_lock);
98325
98326+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
98327 perf_event_mmap(vma);
98328 }
98329 }
98330 }
98331 vma_unlock_anon_vma(vma);
98332+ if (lockprev)
98333+ vma_unlock_anon_vma(prev);
98334 khugepaged_enter_vma_merge(vma, vma->vm_flags);
98335 validate_mm(vma->vm_mm);
98336 return error;
98337@@ -2373,6 +2725,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
98338 do {
98339 long nrpages = vma_pages(vma);
98340
98341+#ifdef CONFIG_PAX_SEGMEXEC
98342+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
98343+ vma = remove_vma(vma);
98344+ continue;
98345+ }
98346+#endif
98347+
98348 if (vma->vm_flags & VM_ACCOUNT)
98349 nr_accounted += nrpages;
98350 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
98351@@ -2417,6 +2776,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
98352 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
98353 vma->vm_prev = NULL;
98354 do {
98355+
98356+#ifdef CONFIG_PAX_SEGMEXEC
98357+ if (vma->vm_mirror) {
98358+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
98359+ vma->vm_mirror->vm_mirror = NULL;
98360+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
98361+ vma->vm_mirror = NULL;
98362+ }
98363+#endif
98364+
98365 vma_rb_erase(vma, &mm->mm_rb);
98366 mm->map_count--;
98367 tail_vma = vma;
98368@@ -2444,14 +2813,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98369 struct vm_area_struct *new;
98370 int err = -ENOMEM;
98371
98372+#ifdef CONFIG_PAX_SEGMEXEC
98373+ struct vm_area_struct *vma_m, *new_m = NULL;
98374+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
98375+#endif
98376+
98377 if (is_vm_hugetlb_page(vma) && (addr &
98378 ~(huge_page_mask(hstate_vma(vma)))))
98379 return -EINVAL;
98380
98381+#ifdef CONFIG_PAX_SEGMEXEC
98382+ vma_m = pax_find_mirror_vma(vma);
98383+#endif
98384+
98385 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98386 if (!new)
98387 goto out_err;
98388
98389+#ifdef CONFIG_PAX_SEGMEXEC
98390+ if (vma_m) {
98391+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98392+ if (!new_m) {
98393+ kmem_cache_free(vm_area_cachep, new);
98394+ goto out_err;
98395+ }
98396+ }
98397+#endif
98398+
98399 /* most fields are the same, copy all, and then fixup */
98400 *new = *vma;
98401
98402@@ -2464,6 +2852,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98403 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
98404 }
98405
98406+#ifdef CONFIG_PAX_SEGMEXEC
98407+ if (vma_m) {
98408+ *new_m = *vma_m;
98409+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
98410+ new_m->vm_mirror = new;
98411+ new->vm_mirror = new_m;
98412+
98413+ if (new_below)
98414+ new_m->vm_end = addr_m;
98415+ else {
98416+ new_m->vm_start = addr_m;
98417+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
98418+ }
98419+ }
98420+#endif
98421+
98422 err = vma_dup_policy(vma, new);
98423 if (err)
98424 goto out_free_vma;
98425@@ -2484,6 +2888,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98426 else
98427 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
98428
98429+#ifdef CONFIG_PAX_SEGMEXEC
98430+ if (!err && vma_m) {
98431+ struct mempolicy *pol = vma_policy(new);
98432+
98433+ if (anon_vma_clone(new_m, vma_m))
98434+ goto out_free_mpol;
98435+
98436+ mpol_get(pol);
98437+ set_vma_policy(new_m, pol);
98438+
98439+ if (new_m->vm_file)
98440+ get_file(new_m->vm_file);
98441+
98442+ if (new_m->vm_ops && new_m->vm_ops->open)
98443+ new_m->vm_ops->open(new_m);
98444+
98445+ if (new_below)
98446+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
98447+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
98448+ else
98449+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
98450+
98451+ if (err) {
98452+ if (new_m->vm_ops && new_m->vm_ops->close)
98453+ new_m->vm_ops->close(new_m);
98454+ if (new_m->vm_file)
98455+ fput(new_m->vm_file);
98456+ mpol_put(pol);
98457+ }
98458+ }
98459+#endif
98460+
98461 /* Success. */
98462 if (!err)
98463 return 0;
98464@@ -2493,10 +2929,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98465 new->vm_ops->close(new);
98466 if (new->vm_file)
98467 fput(new->vm_file);
98468- unlink_anon_vmas(new);
98469 out_free_mpol:
98470 mpol_put(vma_policy(new));
98471 out_free_vma:
98472+
98473+#ifdef CONFIG_PAX_SEGMEXEC
98474+ if (new_m) {
98475+ unlink_anon_vmas(new_m);
98476+ kmem_cache_free(vm_area_cachep, new_m);
98477+ }
98478+#endif
98479+
98480+ unlink_anon_vmas(new);
98481 kmem_cache_free(vm_area_cachep, new);
98482 out_err:
98483 return err;
98484@@ -2509,6 +2953,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98485 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98486 unsigned long addr, int new_below)
98487 {
98488+
98489+#ifdef CONFIG_PAX_SEGMEXEC
98490+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
98491+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
98492+ if (mm->map_count >= sysctl_max_map_count-1)
98493+ return -ENOMEM;
98494+ } else
98495+#endif
98496+
98497 if (mm->map_count >= sysctl_max_map_count)
98498 return -ENOMEM;
98499
98500@@ -2520,11 +2973,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98501 * work. This now handles partial unmappings.
98502 * Jeremy Fitzhardinge <jeremy@goop.org>
98503 */
98504+#ifdef CONFIG_PAX_SEGMEXEC
98505 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98506 {
98507+ int ret = __do_munmap(mm, start, len);
98508+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
98509+ return ret;
98510+
98511+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
98512+}
98513+
98514+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98515+#else
98516+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98517+#endif
98518+{
98519 unsigned long end;
98520 struct vm_area_struct *vma, *prev, *last;
98521
98522+ /*
98523+ * mm->mmap_sem is required to protect against another thread
98524+ * changing the mappings in case we sleep.
98525+ */
98526+ verify_mm_writelocked(mm);
98527+
98528 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
98529 return -EINVAL;
98530
98531@@ -2602,6 +3074,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98532 /* Fix up all other VM information */
98533 remove_vma_list(mm, vma);
98534
98535+ track_exec_limit(mm, start, end, 0UL);
98536+
98537 return 0;
98538 }
98539
98540@@ -2610,6 +3084,13 @@ int vm_munmap(unsigned long start, size_t len)
98541 int ret;
98542 struct mm_struct *mm = current->mm;
98543
98544+
98545+#ifdef CONFIG_PAX_SEGMEXEC
98546+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
98547+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
98548+ return -EINVAL;
98549+#endif
98550+
98551 down_write(&mm->mmap_sem);
98552 ret = do_munmap(mm, start, len);
98553 up_write(&mm->mmap_sem);
98554@@ -2656,6 +3137,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
98555 down_write(&mm->mmap_sem);
98556 vma = find_vma(mm, start);
98557
98558+#ifdef CONFIG_PAX_SEGMEXEC
98559+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
98560+ goto out;
98561+#endif
98562+
98563 if (!vma || !(vma->vm_flags & VM_SHARED))
98564 goto out;
98565
98566@@ -2692,16 +3178,6 @@ out:
98567 return ret;
98568 }
98569
98570-static inline void verify_mm_writelocked(struct mm_struct *mm)
98571-{
98572-#ifdef CONFIG_DEBUG_VM
98573- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98574- WARN_ON(1);
98575- up_read(&mm->mmap_sem);
98576- }
98577-#endif
98578-}
98579-
98580 /*
98581 * this is really a simplified "do_mmap". it only handles
98582 * anonymous maps. eventually we may be able to do some
98583@@ -2715,6 +3191,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98584 struct rb_node **rb_link, *rb_parent;
98585 pgoff_t pgoff = addr >> PAGE_SHIFT;
98586 int error;
98587+ unsigned long charged;
98588
98589 len = PAGE_ALIGN(len);
98590 if (!len)
98591@@ -2722,10 +3199,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98592
98593 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
98594
98595+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
98596+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
98597+ flags &= ~VM_EXEC;
98598+
98599+#ifdef CONFIG_PAX_MPROTECT
98600+ if (mm->pax_flags & MF_PAX_MPROTECT)
98601+ flags &= ~VM_MAYEXEC;
98602+#endif
98603+
98604+ }
98605+#endif
98606+
98607 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
98608 if (error & ~PAGE_MASK)
98609 return error;
98610
98611+ charged = len >> PAGE_SHIFT;
98612+
98613 error = mlock_future_check(mm, mm->def_flags, len);
98614 if (error)
98615 return error;
98616@@ -2739,21 +3230,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98617 /*
98618 * Clear old maps. this also does some error checking for us
98619 */
98620- munmap_back:
98621 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98622 if (do_munmap(mm, addr, len))
98623 return -ENOMEM;
98624- goto munmap_back;
98625+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98626 }
98627
98628 /* Check against address space limits *after* clearing old maps... */
98629- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
98630+ if (!may_expand_vm(mm, charged))
98631 return -ENOMEM;
98632
98633 if (mm->map_count > sysctl_max_map_count)
98634 return -ENOMEM;
98635
98636- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
98637+ if (security_vm_enough_memory_mm(mm, charged))
98638 return -ENOMEM;
98639
98640 /* Can we just expand an old private anonymous mapping? */
98641@@ -2767,7 +3257,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98642 */
98643 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98644 if (!vma) {
98645- vm_unacct_memory(len >> PAGE_SHIFT);
98646+ vm_unacct_memory(charged);
98647 return -ENOMEM;
98648 }
98649
98650@@ -2781,10 +3271,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98651 vma_link(mm, vma, prev, rb_link, rb_parent);
98652 out:
98653 perf_event_mmap(vma);
98654- mm->total_vm += len >> PAGE_SHIFT;
98655+ mm->total_vm += charged;
98656 if (flags & VM_LOCKED)
98657- mm->locked_vm += (len >> PAGE_SHIFT);
98658+ mm->locked_vm += charged;
98659 vma->vm_flags |= VM_SOFTDIRTY;
98660+ track_exec_limit(mm, addr, addr + len, flags);
98661 return addr;
98662 }
98663
98664@@ -2846,6 +3337,7 @@ void exit_mmap(struct mm_struct *mm)
98665 while (vma) {
98666 if (vma->vm_flags & VM_ACCOUNT)
98667 nr_accounted += vma_pages(vma);
98668+ vma->vm_mirror = NULL;
98669 vma = remove_vma(vma);
98670 }
98671 vm_unacct_memory(nr_accounted);
98672@@ -2860,6 +3352,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98673 struct vm_area_struct *prev;
98674 struct rb_node **rb_link, *rb_parent;
98675
98676+#ifdef CONFIG_PAX_SEGMEXEC
98677+ struct vm_area_struct *vma_m = NULL;
98678+#endif
98679+
98680+ if (security_mmap_addr(vma->vm_start))
98681+ return -EPERM;
98682+
98683 /*
98684 * The vm_pgoff of a purely anonymous vma should be irrelevant
98685 * until its first write fault, when page's anon_vma and index
98686@@ -2883,7 +3382,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98687 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98688 return -ENOMEM;
98689
98690+#ifdef CONFIG_PAX_SEGMEXEC
98691+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98692+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98693+ if (!vma_m)
98694+ return -ENOMEM;
98695+ }
98696+#endif
98697+
98698 vma_link(mm, vma, prev, rb_link, rb_parent);
98699+
98700+#ifdef CONFIG_PAX_SEGMEXEC
98701+ if (vma_m)
98702+ BUG_ON(pax_mirror_vma(vma_m, vma));
98703+#endif
98704+
98705 return 0;
98706 }
98707
98708@@ -2902,6 +3415,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98709 struct rb_node **rb_link, *rb_parent;
98710 bool faulted_in_anon_vma = true;
98711
98712+ BUG_ON(vma->vm_mirror);
98713+
98714 /*
98715 * If anonymous vma has not yet been faulted, update new pgoff
98716 * to match new location, to increase its chance of merging.
98717@@ -2966,6 +3481,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98718 return NULL;
98719 }
98720
98721+#ifdef CONFIG_PAX_SEGMEXEC
98722+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98723+{
98724+ struct vm_area_struct *prev_m;
98725+ struct rb_node **rb_link_m, *rb_parent_m;
98726+ struct mempolicy *pol_m;
98727+
98728+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98729+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98730+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98731+ *vma_m = *vma;
98732+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98733+ if (anon_vma_clone(vma_m, vma))
98734+ return -ENOMEM;
98735+ pol_m = vma_policy(vma_m);
98736+ mpol_get(pol_m);
98737+ set_vma_policy(vma_m, pol_m);
98738+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98739+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98740+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98741+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98742+ if (vma_m->vm_file)
98743+ get_file(vma_m->vm_file);
98744+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98745+ vma_m->vm_ops->open(vma_m);
98746+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98747+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98748+ vma_m->vm_mirror = vma;
98749+ vma->vm_mirror = vma_m;
98750+ return 0;
98751+}
98752+#endif
98753+
98754 /*
98755 * Return true if the calling process may expand its vm space by the passed
98756 * number of pages
98757@@ -2977,6 +3525,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98758
98759 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98760
98761+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98762 if (cur + npages > lim)
98763 return 0;
98764 return 1;
98765@@ -3059,6 +3608,22 @@ static struct vm_area_struct *__install_special_mapping(
98766 vma->vm_start = addr;
98767 vma->vm_end = addr + len;
98768
98769+#ifdef CONFIG_PAX_MPROTECT
98770+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98771+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98772+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98773+ return ERR_PTR(-EPERM);
98774+ if (!(vm_flags & VM_EXEC))
98775+ vm_flags &= ~VM_MAYEXEC;
98776+#else
98777+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98778+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98779+#endif
98780+ else
98781+ vm_flags &= ~VM_MAYWRITE;
98782+ }
98783+#endif
98784+
98785 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98786 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98787
98788diff --git a/mm/mprotect.c b/mm/mprotect.c
98789index 8858483..8145fa5 100644
98790--- a/mm/mprotect.c
98791+++ b/mm/mprotect.c
98792@@ -24,10 +24,18 @@
98793 #include <linux/migrate.h>
98794 #include <linux/perf_event.h>
98795 #include <linux/ksm.h>
98796+#include <linux/sched/sysctl.h>
98797+
98798+#ifdef CONFIG_PAX_MPROTECT
98799+#include <linux/elf.h>
98800+#include <linux/binfmts.h>
98801+#endif
98802+
98803 #include <asm/uaccess.h>
98804 #include <asm/pgtable.h>
98805 #include <asm/cacheflush.h>
98806 #include <asm/tlbflush.h>
98807+#include <asm/mmu_context.h>
98808
98809 /*
98810 * For a prot_numa update we only hold mmap_sem for read so there is a
98811@@ -252,6 +260,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98812 return pages;
98813 }
98814
98815+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98816+/* called while holding the mmap semaphore for writing, except during stack expansion */
98817+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98818+{
98819+ unsigned long oldlimit, newlimit = 0UL;
98820+
98821+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98822+ return;
98823+
98824+ spin_lock(&mm->page_table_lock);
98825+ oldlimit = mm->context.user_cs_limit;
98826+ if ((prot & VM_EXEC) && oldlimit < end)
98827+ /* USER_CS limit moved up */
98828+ newlimit = end;
98829+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98830+ /* USER_CS limit moved down */
98831+ newlimit = start;
98832+
98833+ if (newlimit) {
98834+ mm->context.user_cs_limit = newlimit;
98835+
98836+#ifdef CONFIG_SMP
98837+ wmb();
98838+ cpus_clear(mm->context.cpu_user_cs_mask);
98839+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98840+#endif
98841+
98842+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98843+ }
98844+ spin_unlock(&mm->page_table_lock);
98845+ if (newlimit == end) {
98846+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98847+
98848+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98849+ if (is_vm_hugetlb_page(vma))
98850+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98851+ else
98852+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98853+ }
98854+}
98855+#endif
98856+
98857 int
98858 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98859 unsigned long start, unsigned long end, unsigned long newflags)
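
/*
 * [Editor's sketch, not part of the patch] The limit-update rule inside
 * track_exec_limit() above, pulled out as a pure function. The tracked
 * value acts as a high-water mark for executable addresses (the USER_CS
 * limit on x86-32 without NX); returning 0 means "leave it unchanged".
 */
static unsigned long next_exec_limit(unsigned long old_limit,
                                     unsigned long start, unsigned long end,
                                     int exec)
{
        if (exec && old_limit < end)
                return end;     /* exec mapping beyond the limit: raise it */
        if (!exec && start < old_limit && old_limit <= end)
                return start;   /* exec removed across the limit: lower it */
        return 0;               /* unchanged */
}
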
98860@@ -264,11 +314,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98861 int error;
98862 int dirty_accountable = 0;
98863
98864+#ifdef CONFIG_PAX_SEGMEXEC
98865+ struct vm_area_struct *vma_m = NULL;
98866+ unsigned long start_m, end_m;
98867+
98868+ start_m = start + SEGMEXEC_TASK_SIZE;
98869+ end_m = end + SEGMEXEC_TASK_SIZE;
98870+#endif
98871+
98872 if (newflags == oldflags) {
98873 *pprev = vma;
98874 return 0;
98875 }
98876
98877+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98878+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98879+
98880+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98881+ return -ENOMEM;
98882+
98883+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98884+ return -ENOMEM;
98885+ }
98886+
98887 /*
98888 * If we make a private mapping writable we increase our commit;
98889 * but (without finer accounting) cannot reduce our commit if we
98890@@ -285,6 +353,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98891 }
98892 }
98893
98894+#ifdef CONFIG_PAX_SEGMEXEC
98895+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98896+ if (start != vma->vm_start) {
98897+ error = split_vma(mm, vma, start, 1);
98898+ if (error)
98899+ goto fail;
98900+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98901+ *pprev = (*pprev)->vm_next;
98902+ }
98903+
98904+ if (end != vma->vm_end) {
98905+ error = split_vma(mm, vma, end, 0);
98906+ if (error)
98907+ goto fail;
98908+ }
98909+
98910+ if (pax_find_mirror_vma(vma)) {
98911+ error = __do_munmap(mm, start_m, end_m - start_m);
98912+ if (error)
98913+ goto fail;
98914+ } else {
98915+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98916+ if (!vma_m) {
98917+ error = -ENOMEM;
98918+ goto fail;
98919+ }
98920+ vma->vm_flags = newflags;
98921+ error = pax_mirror_vma(vma_m, vma);
98922+ if (error) {
98923+ vma->vm_flags = oldflags;
98924+ goto fail;
98925+ }
98926+ }
98927+ }
98928+#endif
98929+
98930 /*
98931 * First try to merge with previous and/or next vma.
98932 */
98933@@ -315,7 +419,19 @@ success:
98934 * vm_flags and vm_page_prot are protected by the mmap_sem
98935 * held in write mode.
98936 */
98937+
98938+#ifdef CONFIG_PAX_SEGMEXEC
98939+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98940+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98941+#endif
98942+
98943 vma->vm_flags = newflags;
98944+
98945+#ifdef CONFIG_PAX_MPROTECT
98946+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98947+ mm->binfmt->handle_mprotect(vma, newflags);
98948+#endif
98949+
98950 dirty_accountable = vma_wants_writenotify(vma);
98951 vma_set_page_prot(vma);
98952
98953@@ -351,6 +467,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98954 end = start + len;
98955 if (end <= start)
98956 return -ENOMEM;
98957+
98958+#ifdef CONFIG_PAX_SEGMEXEC
98959+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98960+ if (end > SEGMEXEC_TASK_SIZE)
98961+ return -EINVAL;
98962+ } else
98963+#endif
98964+
98965+ if (end > TASK_SIZE)
98966+ return -EINVAL;
98967+
98968 if (!arch_validate_prot(prot))
98969 return -EINVAL;
98970
98971@@ -358,7 +485,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98972 /*
98973 * Does the application expect PROT_READ to imply PROT_EXEC:
98974 */
98975- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98976+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98977 prot |= PROT_EXEC;
98978
98979 vm_flags = calc_vm_prot_bits(prot);
98980@@ -390,6 +517,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98981 if (start > vma->vm_start)
98982 prev = vma;
98983
98984+#ifdef CONFIG_PAX_MPROTECT
98985+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98986+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98987+#endif
98988+
98989 for (nstart = start ; ; ) {
98990 unsigned long newflags;
98991
98992@@ -400,6 +532,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98993
98994 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98995 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98996+ if (prot & (PROT_WRITE | PROT_EXEC))
98997+ gr_log_rwxmprotect(vma);
98998+
98999+ error = -EACCES;
99000+ goto out;
99001+ }
99002+
99003+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
99004 error = -EACCES;
99005 goto out;
99006 }
99007@@ -414,6 +554,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99008 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
99009 if (error)
99010 goto out;
99011+
99012+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
99013+
99014 nstart = tmp;
99015
99016 if (nstart < prev->vm_end)
99017diff --git a/mm/mremap.c b/mm/mremap.c
99018index 2dc44b1..caa1819 100644
99019--- a/mm/mremap.c
99020+++ b/mm/mremap.c
99021@@ -142,6 +142,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
99022 continue;
99023 pte = ptep_get_and_clear(mm, old_addr, old_pte);
99024 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
99025+
99026+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
99027+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
99028+ pte = pte_exprotect(pte);
99029+#endif
99030+
99031 pte = move_soft_dirty_pte(pte);
99032 set_pte_at(mm, new_addr, new_pte, pte);
99033 }
99034@@ -350,6 +356,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
99035 if (is_vm_hugetlb_page(vma))
99036 goto Einval;
99037
99038+#ifdef CONFIG_PAX_SEGMEXEC
99039+ if (pax_find_mirror_vma(vma))
99040+ goto Einval;
99041+#endif
99042+
99043 /* We can't remap across vm area boundaries */
99044 if (old_len > vma->vm_end - addr)
99045 goto Efault;
99046@@ -405,20 +416,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
99047 unsigned long ret = -EINVAL;
99048 unsigned long charged = 0;
99049 unsigned long map_flags;
99050+ unsigned long pax_task_size = TASK_SIZE;
99051
99052 if (new_addr & ~PAGE_MASK)
99053 goto out;
99054
99055- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
99056+#ifdef CONFIG_PAX_SEGMEXEC
99057+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99058+ pax_task_size = SEGMEXEC_TASK_SIZE;
99059+#endif
99060+
99061+ pax_task_size -= PAGE_SIZE;
99062+
99063+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
99064 goto out;
99065
99066 /* Check if the location we're moving into overlaps the
99067 * old location at all, and fail if it does.
99068 */
99069- if ((new_addr <= addr) && (new_addr+new_len) > addr)
99070- goto out;
99071-
99072- if ((addr <= new_addr) && (addr+old_len) > new_addr)
99073+ if (addr + old_len > new_addr && new_addr + new_len > addr)
99074 goto out;
99075
99076 ret = do_munmap(mm, new_addr, new_len);
99077@@ -487,6 +503,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99078 unsigned long ret = -EINVAL;
99079 unsigned long charged = 0;
99080 bool locked = false;
99081+ unsigned long pax_task_size = TASK_SIZE;
99082
99083 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
99084 return ret;
99085@@ -508,6 +525,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99086 if (!new_len)
99087 return ret;
99088
99089+#ifdef CONFIG_PAX_SEGMEXEC
99090+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99091+ pax_task_size = SEGMEXEC_TASK_SIZE;
99092+#endif
99093+
99094+ pax_task_size -= PAGE_SIZE;
99095+
99096+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
99097+ old_len > pax_task_size || addr > pax_task_size-old_len)
99098+ return ret;
99099+
99100 down_write(&current->mm->mmap_sem);
99101
99102 if (flags & MREMAP_FIXED) {
99103@@ -558,6 +586,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99104 new_addr = addr;
99105 }
99106 ret = addr;
99107+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
99108 goto out;
99109 }
99110 }
99111@@ -581,7 +610,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99112 goto out;
99113 }
99114
99115+ map_flags = vma->vm_flags;
99116 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
99117+ if (!(ret & ~PAGE_MASK)) {
99118+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
99119+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
99120+ }
99121 }
99122 out:
99123 if (ret & ~PAGE_MASK)
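
Besides the SEGMEXEC task-size clamping and the track_exec_limit() bookkeeping, the mm/mremap.c hunk above collapses two separate overlap conditionals into the canonical intersection test for half-open intervals: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when each interval starts before the other ends. A small exhaustive check (nonzero lengths assumed, as mremap guarantees) confirms the two forms agree:

#include <assert.h>
#include <stdio.h>

/* removed form: two separate conditionals */
static int overlaps_old(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    if (new_addr <= addr && new_addr + new_len > addr)
        return 1;
    if (addr <= new_addr && addr + old_len > new_addr)
        return 1;
    return 0;
}

/* replacement: each interval starts before the other ends */
static int overlaps_new(unsigned long addr, unsigned long old_len,
                        unsigned long new_addr, unsigned long new_len)
{
    return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
    for (unsigned long a = 0; a < 8; a++)
        for (unsigned long al = 1; al < 8; al++)
            for (unsigned long n = 0; n < 8; n++)
                for (unsigned long nl = 1; nl < 8; nl++)
                    assert(overlaps_old(a, al, n, nl) ==
                           overlaps_new(a, al, n, nl));
    puts("both forms agree on all small cases");
    return 0;
}
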
99124diff --git a/mm/nommu.c b/mm/nommu.c
99125index 3fba2dc..fdad748 100644
99126--- a/mm/nommu.c
99127+++ b/mm/nommu.c
99128@@ -72,7 +72,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
99129 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
99130 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
99131 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
99132-int heap_stack_gap = 0;
99133
99134 atomic_long_t mmap_pages_allocated;
99135
99136@@ -892,15 +891,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
99137 EXPORT_SYMBOL(find_vma);
99138
99139 /*
99140- * find a VMA
99141- * - we don't extend stack VMAs under NOMMU conditions
99142- */
99143-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
99144-{
99145- return find_vma(mm, addr);
99146-}
99147-
99148-/*
99149 * expand a stack to a given address
99150 * - not supported under NOMMU conditions
99151 */
99152@@ -1585,6 +1575,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99153
99154 /* most fields are the same, copy all, and then fixup */
99155 *new = *vma;
99156+ INIT_LIST_HEAD(&new->anon_vma_chain);
99157 *region = *vma->vm_region;
99158 new->vm_region = region;
99159
99160@@ -2007,8 +1998,8 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
99161 }
99162 EXPORT_SYMBOL(filemap_map_pages);
99163
99164-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99165- unsigned long addr, void *buf, int len, int write)
99166+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99167+ unsigned long addr, void *buf, size_t len, int write)
99168 {
99169 struct vm_area_struct *vma;
99170
99171@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99172 *
99173 * The caller must hold a reference on @mm.
99174 */
99175-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99176- void *buf, int len, int write)
99177+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
99178+ void *buf, size_t len, int write)
99179 {
99180 return __access_remote_vm(NULL, mm, addr, buf, len, write);
99181 }
99182@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99183 * Access another process' address space.
99184 * - source/target buffer must be kernel space
99185 */
99186-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
99187+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
99188 {
99189 struct mm_struct *mm;
99190
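
The mm/nommu.c hunk widens the access_remote_vm()/access_process_vm() length and return types from int to size_t/ssize_t, matching the MMU build elsewhere in the patch. The motivation is straightforward: an int can neither represent nor report back a copy larger than INT_MAX bytes. A trivial LP64 demonstration of the truncation:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    size_t request = (size_t)INT_MAX + 2;  /* just over 2 GiB on LP64 */
    int truncated = (int)request;          /* implementation-defined wrap */

    printf("as size_t: %zu\n", request);
    printf("as int   : %d\n", truncated);
    return 0;
}
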
99191diff --git a/mm/page-writeback.c b/mm/page-writeback.c
99192index 644bcb6..444a2c4 100644
99193--- a/mm/page-writeback.c
99194+++ b/mm/page-writeback.c
99195@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
99196 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
99197 * - the bdi dirty thresh drops quickly due to change of JBOD workload
99198 */
99199-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
99200+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
99201 unsigned long thresh,
99202 unsigned long bg_thresh,
99203 unsigned long dirty,
99204diff --git a/mm/page_alloc.c b/mm/page_alloc.c
99205index 40e2942..0eb29a2 100644
99206--- a/mm/page_alloc.c
99207+++ b/mm/page_alloc.c
99208@@ -61,6 +61,7 @@
99209 #include <linux/hugetlb.h>
99210 #include <linux/sched/rt.h>
99211 #include <linux/page_owner.h>
99212+#include <linux/random.h>
99213
99214 #include <asm/sections.h>
99215 #include <asm/tlbflush.h>
99216@@ -357,7 +358,7 @@ out:
99217 * This usage means that zero-order pages may not be compound.
99218 */
99219
99220-static void free_compound_page(struct page *page)
99221+void free_compound_page(struct page *page)
99222 {
99223 __free_pages_ok(page, compound_order(page));
99224 }
99225@@ -480,7 +481,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
99226 __mod_zone_freepage_state(zone, (1 << order), migratetype);
99227 }
99228 #else
99229-struct page_ext_operations debug_guardpage_ops = { NULL, };
99230+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
99231 static inline void set_page_guard(struct zone *zone, struct page *page,
99232 unsigned int order, int migratetype) {}
99233 static inline void clear_page_guard(struct zone *zone, struct page *page,
99234@@ -783,6 +784,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99235 bool compound = PageCompound(page);
99236 int i, bad = 0;
99237
99238+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99239+ unsigned long index = 1UL << order;
99240+#endif
99241+
99242 VM_BUG_ON_PAGE(PageTail(page), page);
99243 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
99244
99245@@ -809,6 +814,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99246 debug_check_no_obj_freed(page_address(page),
99247 PAGE_SIZE << order);
99248 }
99249+
99250+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99251+ for (; index; --index)
99252+ sanitize_highpage(page + index - 1);
99253+#endif
99254+
99255 arch_free_page(page, order);
99256 kernel_map_pages(page, 1 << order, 0);
99257
99258@@ -832,6 +843,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
99259 local_irq_restore(flags);
99260 }
99261
99262+#ifdef CONFIG_PAX_LATENT_ENTROPY
99263+bool __meminitdata extra_latent_entropy;
99264+
99265+static int __init setup_pax_extra_latent_entropy(char *str)
99266+{
99267+ extra_latent_entropy = true;
99268+ return 0;
99269+}
99270+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
99271+
99272+volatile u64 latent_entropy __latent_entropy;
99273+EXPORT_SYMBOL(latent_entropy);
99274+#endif
99275+
99276 void __init __free_pages_bootmem(struct page *page, unsigned int order)
99277 {
99278 unsigned int nr_pages = 1 << order;
99279@@ -847,6 +872,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
99280 __ClearPageReserved(p);
99281 set_page_count(p, 0);
99282
99283+#ifdef CONFIG_PAX_LATENT_ENTROPY
99284+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
99285+ u64 hash = 0;
99286+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
99287+ const u64 *data = lowmem_page_address(page);
99288+
99289+ for (index = 0; index < end; index++)
99290+ hash ^= hash + data[index];
99291+ latent_entropy ^= hash;
99292+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
99293+ }
99294+#endif
99295+
99296 page_zone(page)->managed_pages += nr_pages;
99297 set_page_refcounted(page);
99298 __free_pages(page, order);
99299@@ -974,8 +1012,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
99300 kernel_map_pages(page, 1 << order, 1);
99301 kasan_alloc_pages(page, order);
99302
99303+#ifndef CONFIG_PAX_MEMORY_SANITIZE
99304 if (gfp_flags & __GFP_ZERO)
99305 prep_zero_page(page, order, gfp_flags);
99306+#endif
99307
99308 if (order && (gfp_flags & __GFP_COMP))
99309 prep_compound_page(page, order);
99310@@ -1699,7 +1739,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
99311 }
99312
99313 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
99314- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99315+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99316 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
99317 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
99318
99319@@ -2018,7 +2058,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
99320 do {
99321 mod_zone_page_state(zone, NR_ALLOC_BATCH,
99322 high_wmark_pages(zone) - low_wmark_pages(zone) -
99323- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99324+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99325 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
99326 } while (zone++ != preferred_zone);
99327 }
99328@@ -5738,7 +5778,7 @@ static void __setup_per_zone_wmarks(void)
99329
99330 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
99331 high_wmark_pages(zone) - low_wmark_pages(zone) -
99332- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99333+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99334
99335 setup_zone_migrate_reserve(zone);
99336 spin_unlock_irqrestore(&zone->lock, flags);
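
Two independent features land in mm/page_alloc.c above: with PAX_MEMORY_SANITIZE, freed pages are overwritten via sanitize_highpage(), which also makes the __GFP_ZERO pre-zeroing redundant, and with pax_extra_latent_entropy each boot-time freed page below 4 GiB is hashed into a global entropy pool before reaching the allocator. A userspace sketch of that fold, using the same per-word mixing step as the patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t latent_entropy_pool;

/* mix every 64-bit word of a buffer into a running hash,
 * then fold that hash into the global pool */
static void fold_page(const void *page, size_t size)
{
    const uint64_t *data = page;
    uint64_t hash = 0;
    size_t end = size / sizeof(*data);

    for (size_t i = 0; i < end; i++)
        hash ^= hash + data[i];   /* same non-linear step as the hunk */
    latent_entropy_pool ^= hash;
}

int main(void)
{
    uint64_t fake_page[512];      /* 4 KiB stand-in for a real page */

    memset(fake_page, 0xa5, sizeof(fake_page));
    fold_page(fake_page, sizeof(fake_page));
    printf("pool: %016llx\n", (unsigned long long)latent_entropy_pool);
    return 0;
}

In the kernel the pool is then fed to add_device_randomness(), so uninitialized early-boot memory contributes whatever entropy it happens to carry.
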
99337diff --git a/mm/percpu.c b/mm/percpu.c
99338index 73c97a5..508ee25 100644
99339--- a/mm/percpu.c
99340+++ b/mm/percpu.c
99341@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
99342 static unsigned int pcpu_high_unit_cpu __read_mostly;
99343
99344 /* the address of the first chunk which starts with the kernel static area */
99345-void *pcpu_base_addr __read_mostly;
99346+void *pcpu_base_addr __read_only;
99347 EXPORT_SYMBOL_GPL(pcpu_base_addr);
99348
99349 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
99350diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
99351index b159769..d07037f 100644
99352--- a/mm/process_vm_access.c
99353+++ b/mm/process_vm_access.c
99354@@ -13,6 +13,7 @@
99355 #include <linux/uio.h>
99356 #include <linux/sched.h>
99357 #include <linux/highmem.h>
99358+#include <linux/security.h>
99359 #include <linux/ptrace.h>
99360 #include <linux/slab.h>
99361 #include <linux/syscalls.h>
99362@@ -154,19 +155,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99363 ssize_t iov_len;
99364 size_t total_len = iov_iter_count(iter);
99365
99366+ return -ENOSYS; // PaX: until properly audited
99367+
99368 /*
99369 * Work out how many pages of struct pages we're going to need
99370 * when eventually calling get_user_pages
99371 */
99372 for (i = 0; i < riovcnt; i++) {
99373 iov_len = rvec[i].iov_len;
99374- if (iov_len > 0) {
99375- nr_pages_iov = ((unsigned long)rvec[i].iov_base
99376- + iov_len)
99377- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
99378- / PAGE_SIZE + 1;
99379- nr_pages = max(nr_pages, nr_pages_iov);
99380- }
99381+ if (iov_len <= 0)
99382+ continue;
99383+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
99384+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
99385+ nr_pages = max(nr_pages, nr_pages_iov);
99386 }
99387
99388 if (nr_pages == 0)
99389@@ -194,6 +195,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99390 goto free_proc_pages;
99391 }
99392
99393+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
99394+ rc = -EPERM;
99395+ goto put_task_struct;
99396+ }
99397+
99398 mm = mm_access(task, PTRACE_MODE_ATTACH);
99399 if (!mm || IS_ERR(mm)) {
99400 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
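
The mm/process_vm_access.c hunk disables process_vm_readv()/process_vm_writev() outright with -ENOSYS pending an audit, gates the remaining code behind a gr_handle_ptrace() check, and restructures the page-count loop without changing its arithmetic. The retained formula counts the pages an iovec can touch; note it rounds up by one page when base + len is exactly page-aligned, which is a harmless upper bound for sizing the get_user_pages() array. A standalone version:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* upper bound on pages touched by [base, base + len) */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
    return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
    printf("%lu\n", pages_spanned(0x1000, 1));    /* 1: inside one page  */
    printf("%lu\n", pages_spanned(0x1ffc, 8));    /* 2: straddles a page */
    printf("%lu\n", pages_spanned(0x1000, 8192)); /* 3: end page-aligned, rounds up */
    return 0;
}
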
99401diff --git a/mm/rmap.c b/mm/rmap.c
99402index c161a14..8a069bb 100644
99403--- a/mm/rmap.c
99404+++ b/mm/rmap.c
99405@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99406 struct anon_vma *anon_vma = vma->anon_vma;
99407 struct anon_vma_chain *avc;
99408
99409+#ifdef CONFIG_PAX_SEGMEXEC
99410+ struct anon_vma_chain *avc_m = NULL;
99411+#endif
99412+
99413 might_sleep();
99414 if (unlikely(!anon_vma)) {
99415 struct mm_struct *mm = vma->vm_mm;
99416@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99417 if (!avc)
99418 goto out_enomem;
99419
99420+#ifdef CONFIG_PAX_SEGMEXEC
99421+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
99422+ if (!avc_m)
99423+ goto out_enomem_free_avc;
99424+#endif
99425+
99426 anon_vma = find_mergeable_anon_vma(vma);
99427 allocated = NULL;
99428 if (!anon_vma) {
99429@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99430 /* page_table_lock to protect against threads */
99431 spin_lock(&mm->page_table_lock);
99432 if (likely(!vma->anon_vma)) {
99433+
99434+#ifdef CONFIG_PAX_SEGMEXEC
99435+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
99436+
99437+ if (vma_m) {
99438+ BUG_ON(vma_m->anon_vma);
99439+ vma_m->anon_vma = anon_vma;
99440+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
99441+ anon_vma->degree++;
99442+ avc_m = NULL;
99443+ }
99444+#endif
99445+
99446 vma->anon_vma = anon_vma;
99447 anon_vma_chain_link(vma, avc, anon_vma);
99448 /* vma reference or self-parent link for new root */
99449@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99450
99451 if (unlikely(allocated))
99452 put_anon_vma(allocated);
99453+
99454+#ifdef CONFIG_PAX_SEGMEXEC
99455+ if (unlikely(avc_m))
99456+ anon_vma_chain_free(avc_m);
99457+#endif
99458+
99459 if (unlikely(avc))
99460 anon_vma_chain_free(avc);
99461 }
99462 return 0;
99463
99464 out_enomem_free_avc:
99465+
99466+#ifdef CONFIG_PAX_SEGMEXEC
99467+ if (avc_m)
99468+ anon_vma_chain_free(avc_m);
99469+#endif
99470+
99471 anon_vma_chain_free(avc);
99472 out_enomem:
99473 return -ENOMEM;
99474@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
99475 * good chance of avoiding scanning the whole hierarchy when it searches where
99476 * page is mapped.
99477 */
99478-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99479+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
99480 {
99481 struct anon_vma_chain *avc, *pavc;
99482 struct anon_vma *root = NULL;
99483@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99484 * the corresponding VMA in the parent process is attached to.
99485 * Returns 0 on success, non-zero on failure.
99486 */
99487-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
99488+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
99489 {
99490 struct anon_vma_chain *avc;
99491 struct anon_vma *anon_vma;
99492@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
99493 void __init anon_vma_init(void)
99494 {
99495 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
99496- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
99497- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
99498+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
99499+ anon_vma_ctor);
99500+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
99501+ SLAB_PANIC|SLAB_NO_SANITIZE);
99502 }
99503
99504 /*
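
Under SEGMEXEC, anon_vma_prepare() above must link the mirror vma to the same anon_vma as the primary one, and it does so with the usual kernel pattern: allocate everything that might be needed before taking page_table_lock, consume the extra node only if a mirror actually exists at link time, and free whatever was left over on the way out. A compressed userspace sketch of the pattern (structure names here are illustrative, not the kernel's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct chain { struct chain *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chain *anon_list;

static int prepare(int have_mirror)
{
    struct chain *avc = malloc(sizeof(*avc));
    struct chain *avc_m = malloc(sizeof(*avc_m));

    if (!avc || !avc_m) {
        free(avc_m);
        free(avc);
        return -1;                 /* the -ENOMEM path */
    }

    pthread_mutex_lock(&table_lock);
    avc->next = anon_list;         /* the primary link always lands   */
    anon_list = avc;
    if (have_mirror) {             /* mirror link only when needed    */
        avc_m->next = anon_list;
        anon_list = avc_m;
        avc_m = NULL;              /* consumed: don't free below      */
    }
    pthread_mutex_unlock(&table_lock);

    free(avc_m);                   /* free(NULL) is a no-op */
    return 0;
}

int main(void)
{
    printf("%d %d\n", prepare(1), prepare(0));
    return 0;
}
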
99505diff --git a/mm/shmem.c b/mm/shmem.c
99506index cf2d0ca..ec06b8b 100644
99507--- a/mm/shmem.c
99508+++ b/mm/shmem.c
99509@@ -33,7 +33,7 @@
99510 #include <linux/swap.h>
99511 #include <linux/aio.h>
99512
99513-static struct vfsmount *shm_mnt;
99514+struct vfsmount *shm_mnt;
99515
99516 #ifdef CONFIG_SHMEM
99517 /*
99518@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
99519 #define BOGO_DIRENT_SIZE 20
99520
99521 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
99522-#define SHORT_SYMLINK_LEN 128
99523+#define SHORT_SYMLINK_LEN 64
99524
99525 /*
99526 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99527@@ -2555,6 +2555,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
99528 static int shmem_xattr_validate(const char *name)
99529 {
99530 struct { const char *prefix; size_t len; } arr[] = {
99531+
99532+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99533+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
99534+#endif
99535+
99536 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
99537 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
99538 };
99539@@ -2610,6 +2615,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
99540 if (err)
99541 return err;
99542
99543+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99544+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
99545+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
99546+ return -EOPNOTSUPP;
99547+ if (size > 8)
99548+ return -EINVAL;
99549+ }
99550+#endif
99551+
99552 return simple_xattr_set(&info->xattrs, name, value, size, flags);
99553 }
99554
99555@@ -2993,8 +3007,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
99556 int err = -ENOMEM;
99557
99558 /* Round up to L1_CACHE_BYTES to resist false sharing */
99559- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
99560- L1_CACHE_BYTES), GFP_KERNEL);
99561+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
99562 if (!sbinfo)
99563 return -ENOMEM;
99564
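
The mm/shmem.c hunks open the user.* xattr namespace on tmpfs just wide enough for PaX: only the exact name user.pax.flags is accepted, with a value capped at 8 bytes, so per-file PaX flag markings can be stored on a filesystem that otherwise rejects user xattrs. A sketch of the gate, assuming XATTR_NAME_PAX_FLAGS expands to "user.pax.flags":

#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX     "user."
#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS  "user.pax.flags"

/* 0 = accepted; only user.pax.flags passes, capped at 8 bytes */
static int pax_xattr_ok(const char *name, size_t size)
{
    if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) == 0) {
        if (strcmp(name, XATTR_NAME_PAX_FLAGS) != 0)
            return -1;             /* -EOPNOTSUPP in the patch */
        if (size > 8)
            return -2;             /* -EINVAL in the patch */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", pax_xattr_ok("user.pax.flags", 4));   /* 0  */
    printf("%d\n", pax_xattr_ok("user.mime_type", 4));   /* -1 */
    printf("%d\n", pax_xattr_ok("user.pax.flags", 16));  /* -2 */
    return 0;
}
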
99565diff --git a/mm/slab.c b/mm/slab.c
99566index c4b89ea..20990be 100644
99567--- a/mm/slab.c
99568+++ b/mm/slab.c
99569@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99570 if ((x)->max_freeable < i) \
99571 (x)->max_freeable = i; \
99572 } while (0)
99573-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
99574-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
99575-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
99576-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
99577+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
99578+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
99579+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
99580+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
99581+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
99582+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
99583 #else
99584 #define STATS_INC_ACTIVE(x) do { } while (0)
99585 #define STATS_DEC_ACTIVE(x) do { } while (0)
99586@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99587 #define STATS_INC_ALLOCMISS(x) do { } while (0)
99588 #define STATS_INC_FREEHIT(x) do { } while (0)
99589 #define STATS_INC_FREEMISS(x) do { } while (0)
99590+#define STATS_INC_SANITIZED(x) do { } while (0)
99591+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
99592 #endif
99593
99594 #if DEBUG
99595@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
99596 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
99597 */
99598 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
99599- const struct page *page, void *obj)
99600+ const struct page *page, const void *obj)
99601 {
99602 u32 offset = (obj - page->s_mem);
99603 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
99604@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
99605 * structures first. Without this, further allocations will bug.
99606 */
99607 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
99608- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
99609+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99610 slab_state = PARTIAL_NODE;
99611
99612 slab_early_init = 0;
99613@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99614
99615 cachep = find_mergeable(size, align, flags, name, ctor);
99616 if (cachep) {
99617- cachep->refcount++;
99618+ atomic_inc(&cachep->refcount);
99619
99620 /*
99621 * Adjust the object sizes so that we clear
99622@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
99623 struct array_cache *ac = cpu_cache_get(cachep);
99624
99625 check_irq_off();
99626+
99627+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99628+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
99629+ STATS_INC_NOT_SANITIZED(cachep);
99630+ else {
99631+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
99632+
99633+ if (cachep->ctor)
99634+ cachep->ctor(objp);
99635+
99636+ STATS_INC_SANITIZED(cachep);
99637+ }
99638+#endif
99639+
99640 kmemleak_free_recursive(objp, cachep->flags);
99641 objp = cache_free_debugcheck(cachep, objp, caller);
99642
99643@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
99644 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
99645 }
99646
99647-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99648+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99649 {
99650 return __do_kmalloc_node(size, flags, node, _RET_IP_);
99651 }
99652@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
99653 * @flags: the type of memory to allocate (see kmalloc).
99654 * @caller: function caller for debug tracking of the caller
99655 */
99656-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
99657+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
99658 unsigned long caller)
99659 {
99660 struct kmem_cache *cachep;
99661@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
99662
99663 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99664 return;
99665+ VM_BUG_ON(!virt_addr_valid(objp));
99666 local_irq_save(flags);
99667 kfree_debugcheck(objp);
99668 c = virt_to_cache(objp);
99669@@ -3981,14 +4000,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99670 }
99671 /* cpu stats */
99672 {
99673- unsigned long allochit = atomic_read(&cachep->allochit);
99674- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99675- unsigned long freehit = atomic_read(&cachep->freehit);
99676- unsigned long freemiss = atomic_read(&cachep->freemiss);
99677+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99678+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99679+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99680+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99681
99682 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99683 allochit, allocmiss, freehit, freemiss);
99684 }
99685+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99686+ {
99687+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99688+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99689+
99690+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99691+ }
99692+#endif
99693 #endif
99694 }
99695
99696@@ -4196,13 +4223,69 @@ static const struct file_operations proc_slabstats_operations = {
99697 static int __init slab_proc_init(void)
99698 {
99699 #ifdef CONFIG_DEBUG_SLAB_LEAK
99700- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99701+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99702 #endif
99703 return 0;
99704 }
99705 module_init(slab_proc_init);
99706 #endif
99707
99708+bool is_usercopy_object(const void *ptr)
99709+{
99710+ struct page *page;
99711+ struct kmem_cache *cachep;
99712+
99713+ if (ZERO_OR_NULL_PTR(ptr))
99714+ return false;
99715+
99716+ if (!slab_is_available())
99717+ return false;
99718+
99719+ if (!virt_addr_valid(ptr))
99720+ return false;
99721+
99722+ page = virt_to_head_page(ptr);
99723+
99724+ if (!PageSlab(page))
99725+ return false;
99726+
99727+ cachep = page->slab_cache;
99728+ return cachep->flags & SLAB_USERCOPY;
99729+}
99730+
99731+#ifdef CONFIG_PAX_USERCOPY
99732+const char *check_heap_object(const void *ptr, unsigned long n)
99733+{
99734+ struct page *page;
99735+ struct kmem_cache *cachep;
99736+ unsigned int objnr;
99737+ unsigned long offset;
99738+
99739+ if (ZERO_OR_NULL_PTR(ptr))
99740+ return "<null>";
99741+
99742+ if (!virt_addr_valid(ptr))
99743+ return NULL;
99744+
99745+ page = virt_to_head_page(ptr);
99746+
99747+ if (!PageSlab(page))
99748+ return NULL;
99749+
99750+ cachep = page->slab_cache;
99751+ if (!(cachep->flags & SLAB_USERCOPY))
99752+ return cachep->name;
99753+
99754+ objnr = obj_to_index(cachep, page, ptr);
99755+ BUG_ON(objnr >= cachep->num);
99756+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99757+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99758+ return NULL;
99759+
99760+ return cachep->name;
99761+}
99762+#endif
99763+
99764 /**
99765 * ksize - get the actual amount of memory allocated for a given object
99766 * @objp: Pointer to the object
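
check_heap_object() above is the SLAB-side core of PAX_USERCOPY: it refuses copies into or out of caches not whitelisted with SLAB_USERCOPY and bounds-checks whitelisted ones. The final comparison is written so the subtraction can never underflow, because offset is validated before object_size - offset is formed. The test in isolation:

#include <stdio.h>

/* true iff a copy of n bytes starting at offset stays inside an
 * object of object_size bytes; checking offset first keeps the
 * unsigned subtraction from wrapping */
static int fits(unsigned long offset, unsigned long n,
                unsigned long object_size)
{
    return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
    printf("%d\n", fits(0, 64, 64));    /* 1: whole object       */
    printf("%d\n", fits(60, 8, 64));    /* 0: runs off the end   */
    printf("%d\n", fits(70, 1, 64));    /* 0: starts past end    */
    printf("%d\n", fits(8, -1UL, 64));  /* 0: huge n, no wrap    */
    return 0;
}
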
99767diff --git a/mm/slab.h b/mm/slab.h
99768index 4c3ac12..7b2e470 100644
99769--- a/mm/slab.h
99770+++ b/mm/slab.h
99771@@ -22,7 +22,7 @@ struct kmem_cache {
99772 unsigned int align; /* Alignment as calculated */
99773 unsigned long flags; /* Active flags on the slab */
99774 const char *name; /* Slab name for sysfs */
99775- int refcount; /* Use counter */
99776+ atomic_t refcount; /* Use counter */
99777 void (*ctor)(void *); /* Called on object slot creation */
99778 struct list_head list; /* List of all slab caches on the system */
99779 };
99780@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
99781 /* The slab cache that manages slab cache information */
99782 extern struct kmem_cache *kmem_cache;
99783
99784+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99785+#ifdef CONFIG_X86_64
99786+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99787+#else
99788+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99789+#endif
99790+enum pax_sanitize_mode {
99791+ PAX_SANITIZE_SLAB_OFF = 0,
99792+ PAX_SANITIZE_SLAB_FAST,
99793+ PAX_SANITIZE_SLAB_FULL,
99794+};
99795+extern enum pax_sanitize_mode pax_sanitize_slab;
99796+#endif
99797+
99798 unsigned long calculate_alignment(unsigned long flags,
99799 unsigned long align, unsigned long size);
99800
99801@@ -114,7 +128,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
99802
99803 /* Legal flag mask for kmem_cache_create(), for various configurations */
99804 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99805- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99806+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99807+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99808
99809 #if defined(CONFIG_DEBUG_SLAB)
99810 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99811@@ -315,6 +330,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99812 return s;
99813
99814 page = virt_to_head_page(x);
99815+
99816+ BUG_ON(!PageSlab(page));
99817+
99818 cachep = page->slab_cache;
99819 if (slab_equal_or_root(cachep, s))
99820 return cachep;
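
mm/slab.h above defines the sanitization poison: 0xfe on x86-64 and 0xff elsewhere. The 0xfe choice is presumably deliberate in the same way as the kernel's other poison values: a stale pointer loaded from sanitized memory reads as 0xfefefefefefefefe, a non-canonical x86-64 address that faults on dereference instead of aliasing live data. A two-line illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
    void *stale;

    memset(&stale, 0xfe, sizeof(stale)); /* what a use-after-free load sees */
    printf("%p\n", stale);               /* 0xfefefefefefefefe on 64-bit   */
    return 0;
}
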
99821diff --git a/mm/slab_common.c b/mm/slab_common.c
99822index 999bb34..9843aea 100644
99823--- a/mm/slab_common.c
99824+++ b/mm/slab_common.c
99825@@ -25,11 +25,35 @@
99826
99827 #include "slab.h"
99828
99829-enum slab_state slab_state;
99830+enum slab_state slab_state __read_only;
99831 LIST_HEAD(slab_caches);
99832 DEFINE_MUTEX(slab_mutex);
99833 struct kmem_cache *kmem_cache;
99834
99835+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99836+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99837+static int __init pax_sanitize_slab_setup(char *str)
99838+{
99839+ if (!str)
99840+ return 0;
99841+
99842+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99843+ pr_info("PaX slab sanitization: %s\n", "disabled");
99844+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99845+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99846+ pr_info("PaX slab sanitization: %s\n", "fast");
99847+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99848+ } else if (!strcmp(str, "full")) {
99849+ pr_info("PaX slab sanitization: %s\n", "full");
99850+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99851+ } else
99852+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99853+
99854+ return 0;
99855+}
99856+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99857+#endif
99858+
99859 /*
99860 * Set of flags that will prevent slab merging
99861 */
99862@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99863 * Merge control. If this is set then no merging of slab caches will occur.
99864 * (Could be removed. This was introduced to pacify the merge skeptics.)
99865 */
99866-static int slab_nomerge;
99867+static int slab_nomerge = 1;
99868
99869 static int __init setup_slab_nomerge(char *str)
99870 {
99871@@ -217,7 +241,7 @@ int slab_unmergeable(struct kmem_cache *s)
99872 /*
99873 * We may have set a slab to be unmergeable during bootstrap.
99874 */
99875- if (s->refcount < 0)
99876+ if (atomic_read(&s->refcount) < 0)
99877 return 1;
99878
99879 return 0;
99880@@ -321,7 +345,7 @@ do_kmem_cache_create(const char *name, size_t object_size, size_t size,
99881 if (err)
99882 goto out_free_cache;
99883
99884- s->refcount = 1;
99885+ atomic_set(&s->refcount, 1);
99886 list_add(&s->list, &slab_caches);
99887 out:
99888 if (err)
99889@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99890 */
99891 flags &= CACHE_CREATE_MASK;
99892
99893+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99894+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99895+ flags |= SLAB_NO_SANITIZE;
99896+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99897+ flags &= ~SLAB_NO_SANITIZE;
99898+#endif
99899+
99900 s = __kmem_cache_alias(name, size, align, flags, ctor);
99901 if (s)
99902 goto out_unlock;
99903@@ -456,7 +487,7 @@ static void do_kmem_cache_release(struct list_head *release,
99904 rcu_barrier();
99905
99906 list_for_each_entry_safe(s, s2, release, list) {
99907-#ifdef SLAB_SUPPORTS_SYSFS
99908+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99909 sysfs_slab_remove(s);
99910 #else
99911 slab_kmem_cache_release(s);
99912@@ -625,8 +656,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99913
99914 mutex_lock(&slab_mutex);
99915
99916- s->refcount--;
99917- if (s->refcount)
99918+ if (!atomic_dec_and_test(&s->refcount))
99919 goto out_unlock;
99920
99921 for_each_memcg_cache_safe(c, c2, s) {
99922@@ -691,7 +721,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99923 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99924 name, size, err);
99925
99926- s->refcount = -1; /* Exempt from merging for now */
99927+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99928 }
99929
99930 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99931@@ -704,7 +734,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99932
99933 create_boot_cache(s, name, size, flags);
99934 list_add(&s->list, &slab_caches);
99935- s->refcount = 1;
99936+ atomic_set(&s->refcount, 1);
99937 return s;
99938 }
99939
99940@@ -716,6 +746,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99941 EXPORT_SYMBOL(kmalloc_dma_caches);
99942 #endif
99943
99944+#ifdef CONFIG_PAX_USERCOPY_SLABS
99945+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99946+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99947+#endif
99948+
99949 /*
99950 * Conversion table for small slabs sizes / 8 to the index in the
99951 * kmalloc array. This is necessary for slabs < 192 since we have non power
99952@@ -780,6 +815,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99953 return kmalloc_dma_caches[index];
99954
99955 #endif
99956+
99957+#ifdef CONFIG_PAX_USERCOPY_SLABS
99958+ if (unlikely((flags & GFP_USERCOPY)))
99959+ return kmalloc_usercopy_caches[index];
99960+
99961+#endif
99962+
99963 return kmalloc_caches[index];
99964 }
99965
99966@@ -836,7 +878,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99967 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99968 if (!kmalloc_caches[i]) {
99969 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99970- 1 << i, flags);
99971+ 1 << i, SLAB_USERCOPY | flags);
99972 }
99973
99974 /*
99975@@ -845,10 +887,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99976 * earlier power of two caches
99977 */
99978 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99979- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99980+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99981
99982 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99983- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99984+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99985 }
99986
99987 /* Kmalloc array is now usable */
99988@@ -881,6 +923,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99989 }
99990 }
99991 #endif
99992+
99993+#ifdef CONFIG_PAX_USERCOPY_SLABS
99994+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99995+ struct kmem_cache *s = kmalloc_caches[i];
99996+
99997+ if (s) {
99998+ int size = kmalloc_size(i);
99999+ char *n = kasprintf(GFP_NOWAIT,
100000+ "usercopy-kmalloc-%d", size);
100001+
100002+ BUG_ON(!n);
100003+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
100004+ size, SLAB_USERCOPY | flags);
100005+ }
100006+ }
100007+#endif
100008+
100009 }
100010 #endif /* !CONFIG_SLOB */
100011
100012@@ -940,6 +999,9 @@ static void print_slabinfo_header(struct seq_file *m)
100013 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
100014 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
100015 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
100016+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100017+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
100018+#endif
100019 #endif
100020 seq_putc(m, '\n');
100021 }
100022@@ -1069,7 +1131,7 @@ static int __init slab_proc_init(void)
100023 module_init(slab_proc_init);
100024 #endif /* CONFIG_SLABINFO */
100025
100026-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
100027+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
100028 gfp_t flags)
100029 {
100030 void *ret;
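
mm/slab_common.c above turns cache merging off by default, converts the cache refcount to an atomic_t, adds per-size usercopy kmalloc caches, and introduces the pax_sanitize_slab= boot parameter with three modes (off, fast, full) that are applied as SLAB_NO_SANITIZE policy at cache-creation time. A userspace reduction of the parser, minus the early_param() plumbing and pr_info() logging:

#include <stdio.h>
#include <string.h>

enum pax_sanitize_mode { OFF, FAST, FULL };

static enum pax_sanitize_mode mode = FAST;   /* default, as in the patch */

static int parse(const char *str)
{
    if (!str)
        return -1;
    if (!strcmp(str, "0") || !strcmp(str, "off"))
        mode = OFF;
    else if (!strcmp(str, "1") || !strcmp(str, "fast"))
        mode = FAST;
    else if (!strcmp(str, "full"))
        mode = FULL;
    else
        return -1;   /* the patch logs the bad option and keeps the default */
    return 0;
}

int main(void)
{
    parse("full");
    printf("mode=%d\n", mode);   /* 2 */
    return 0;
}

Note how the modes compose with the cache flags: SLAB_DESTROY_BY_RCU caches are forced to SLAB_NO_SANITIZE (their memory must stay readable until the grace period ends), while "full" strips SLAB_NO_SANITIZE from everything else.
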
100031diff --git a/mm/slob.c b/mm/slob.c
100032index 94a7fed..cf3fb1a 100644
100033--- a/mm/slob.c
100034+++ b/mm/slob.c
100035@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
100036 /*
100037 * Return the size of a slob block.
100038 */
100039-static slobidx_t slob_units(slob_t *s)
100040+static slobidx_t slob_units(const slob_t *s)
100041 {
100042 if (s->units > 0)
100043 return s->units;
100044@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
100045 /*
100046 * Return the next free slob block pointer after this one.
100047 */
100048-static slob_t *slob_next(slob_t *s)
100049+static slob_t *slob_next(const slob_t *s)
100050 {
100051 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
100052 slobidx_t next;
100053@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
100054 /*
100055 * Returns true if s is the last free block in its page.
100056 */
100057-static int slob_last(slob_t *s)
100058+static int slob_last(const slob_t *s)
100059 {
100060 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
100061 }
100062
100063-static void *slob_new_pages(gfp_t gfp, int order, int node)
100064+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
100065 {
100066- void *page;
100067+ struct page *page;
100068
100069 #ifdef CONFIG_NUMA
100070 if (node != NUMA_NO_NODE)
100071@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
100072 if (!page)
100073 return NULL;
100074
100075- return page_address(page);
100076+ __SetPageSlab(page);
100077+ return page;
100078 }
100079
100080-static void slob_free_pages(void *b, int order)
100081+static void slob_free_pages(struct page *sp, int order)
100082 {
100083 if (current->reclaim_state)
100084 current->reclaim_state->reclaimed_slab += 1 << order;
100085- free_pages((unsigned long)b, order);
100086+ __ClearPageSlab(sp);
100087+ page_mapcount_reset(sp);
100088+ sp->private = 0;
100089+ __free_pages(sp, order);
100090 }
100091
100092 /*
100093@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100094
100095 /* Not enough space: must allocate a new page */
100096 if (!b) {
100097- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100098- if (!b)
100099+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100100+ if (!sp)
100101 return NULL;
100102- sp = virt_to_page(b);
100103- __SetPageSlab(sp);
100104+ b = page_address(sp);
100105
100106 spin_lock_irqsave(&slob_lock, flags);
100107 sp->units = SLOB_UNITS(PAGE_SIZE);
100108 sp->freelist = b;
100109+ sp->private = 0;
100110 INIT_LIST_HEAD(&sp->lru);
100111 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
100112 set_slob_page_free(sp, slob_list);
100113@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100114 /*
100115 * slob_free: entry point into the slob allocator.
100116 */
100117-static void slob_free(void *block, int size)
100118+static void slob_free(struct kmem_cache *c, void *block, int size)
100119 {
100120 struct page *sp;
100121 slob_t *prev, *next, *b = (slob_t *)block;
100122@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
100123 if (slob_page_free(sp))
100124 clear_slob_page_free(sp);
100125 spin_unlock_irqrestore(&slob_lock, flags);
100126- __ClearPageSlab(sp);
100127- page_mapcount_reset(sp);
100128- slob_free_pages(b, 0);
100129+ slob_free_pages(sp, 0);
100130 return;
100131 }
100132
100133+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100134+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
100135+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
100136+#endif
100137+
100138 if (!slob_page_free(sp)) {
100139 /* This slob page is about to become partially free. Easy! */
100140 sp->units = units;
100141@@ -424,11 +431,10 @@ out:
100142 */
100143
100144 static __always_inline void *
100145-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100146+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
100147 {
100148- unsigned int *m;
100149- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100150- void *ret;
100151+ slob_t *m;
100152+ void *ret = NULL;
100153
100154 gfp &= gfp_allowed_mask;
100155
100156@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100157
100158 if (!m)
100159 return NULL;
100160- *m = size;
100161+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
100162+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
100163+ m[0].units = size;
100164+ m[1].units = align;
100165 ret = (void *)m + align;
100166
100167 trace_kmalloc_node(caller, ret,
100168 size, size + align, gfp, node);
100169 } else {
100170 unsigned int order = get_order(size);
100171+ struct page *page;
100172
100173 if (likely(order))
100174 gfp |= __GFP_COMP;
100175- ret = slob_new_pages(gfp, order, node);
100176+ page = slob_new_pages(gfp, order, node);
100177+ if (page) {
100178+ ret = page_address(page);
100179+ page->private = size;
100180+ }
100181
100182 trace_kmalloc_node(caller, ret,
100183 size, PAGE_SIZE << order, gfp, node);
100184 }
100185
100186- kmemleak_alloc(ret, size, 1, gfp);
100187 return ret;
100188 }
100189
100190-void *__kmalloc(size_t size, gfp_t gfp)
100191+static __always_inline void *
100192+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100193+{
100194+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100195+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
100196+
100197+ if (!ZERO_OR_NULL_PTR(ret))
100198+ kmemleak_alloc(ret, size, 1, gfp);
100199+ return ret;
100200+}
100201+
100202+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
100203 {
100204 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
100205 }
100206@@ -491,34 +515,112 @@ void kfree(const void *block)
100207 return;
100208 kmemleak_free(block);
100209
100210+ VM_BUG_ON(!virt_addr_valid(block));
100211 sp = virt_to_page(block);
100212- if (PageSlab(sp)) {
100213+ VM_BUG_ON(!PageSlab(sp));
100214+ if (!sp->private) {
100215 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100216- unsigned int *m = (unsigned int *)(block - align);
100217- slob_free(m, *m + align);
100218- } else
100219+ slob_t *m = (slob_t *)(block - align);
100220+ slob_free(NULL, m, m[0].units + align);
100221+ } else {
100222+ __ClearPageSlab(sp);
100223+ page_mapcount_reset(sp);
100224+ sp->private = 0;
100225 __free_pages(sp, compound_order(sp));
100226+ }
100227 }
100228 EXPORT_SYMBOL(kfree);
100229
100230+bool is_usercopy_object(const void *ptr)
100231+{
100232+ if (!slab_is_available())
100233+ return false;
100234+
100235+ // PAX: TODO
100236+
100237+ return false;
100238+}
100239+
100240+#ifdef CONFIG_PAX_USERCOPY
100241+const char *check_heap_object(const void *ptr, unsigned long n)
100242+{
100243+ struct page *page;
100244+ const slob_t *free;
100245+ const void *base;
100246+ unsigned long flags;
100247+
100248+ if (ZERO_OR_NULL_PTR(ptr))
100249+ return "<null>";
100250+
100251+ if (!virt_addr_valid(ptr))
100252+ return NULL;
100253+
100254+ page = virt_to_head_page(ptr);
100255+ if (!PageSlab(page))
100256+ return NULL;
100257+
100258+ if (page->private) {
100259+ base = page;
100260+ if (base <= ptr && n <= page->private - (ptr - base))
100261+ return NULL;
100262+ return "<slob>";
100263+ }
100264+
100265+ /* some tricky double walking to find the chunk */
100266+ spin_lock_irqsave(&slob_lock, flags);
100267+ base = (void *)((unsigned long)ptr & PAGE_MASK);
100268+ free = page->freelist;
100269+
100270+ while (!slob_last(free) && (void *)free <= ptr) {
100271+ base = free + slob_units(free);
100272+ free = slob_next(free);
100273+ }
100274+
100275+ while (base < (void *)free) {
100276+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
100277+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
100278+ int offset;
100279+
100280+ if (ptr < base + align)
100281+ break;
100282+
100283+ offset = ptr - base - align;
100284+ if (offset >= m) {
100285+ base += size;
100286+ continue;
100287+ }
100288+
100289+ if (n > m - offset)
100290+ break;
100291+
100292+ spin_unlock_irqrestore(&slob_lock, flags);
100293+ return NULL;
100294+ }
100295+
100296+ spin_unlock_irqrestore(&slob_lock, flags);
100297+ return "<slob>";
100298+}
100299+#endif
100300+
100301 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
100302 size_t ksize(const void *block)
100303 {
100304 struct page *sp;
100305 int align;
100306- unsigned int *m;
100307+ slob_t *m;
100308
100309 BUG_ON(!block);
100310 if (unlikely(block == ZERO_SIZE_PTR))
100311 return 0;
100312
100313 sp = virt_to_page(block);
100314- if (unlikely(!PageSlab(sp)))
100315- return PAGE_SIZE << compound_order(sp);
100316+ VM_BUG_ON(!PageSlab(sp));
100317+ if (sp->private)
100318+ return sp->private;
100319
100320 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100321- m = (unsigned int *)(block - align);
100322- return SLOB_UNITS(*m) * SLOB_UNIT;
100323+ m = (slob_t *)(block - align);
100324+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
100325 }
100326 EXPORT_SYMBOL(ksize);
100327
100328@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
100329
100330 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
100331 {
100332- void *b;
100333+ void *b = NULL;
100334
100335 flags &= gfp_allowed_mask;
100336
100337 lockdep_trace_alloc(flags);
100338
100339+#ifdef CONFIG_PAX_USERCOPY_SLABS
100340+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
100341+#else
100342 if (c->size < PAGE_SIZE) {
100343 b = slob_alloc(c->size, flags, c->align, node);
100344 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100345 SLOB_UNITS(c->size) * SLOB_UNIT,
100346 flags, node);
100347 } else {
100348- b = slob_new_pages(flags, get_order(c->size), node);
100349+ struct page *sp;
100350+
100351+ sp = slob_new_pages(flags, get_order(c->size), node);
100352+ if (sp) {
100353+ b = page_address(sp);
100354+ sp->private = c->size;
100355+ }
100356 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100357 PAGE_SIZE << get_order(c->size),
100358 flags, node);
100359 }
100360+#endif
100361
100362 if (b && c->ctor)
100363 c->ctor(b);
100364@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
100365 EXPORT_SYMBOL(kmem_cache_alloc);
100366
100367 #ifdef CONFIG_NUMA
100368-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
100369+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
100370 {
100371 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
100372 }
100373@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
100374 EXPORT_SYMBOL(kmem_cache_alloc_node);
100375 #endif
100376
100377-static void __kmem_cache_free(void *b, int size)
100378+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
100379 {
100380- if (size < PAGE_SIZE)
100381- slob_free(b, size);
100382+ struct page *sp;
100383+
100384+ sp = virt_to_page(b);
100385+ BUG_ON(!PageSlab(sp));
100386+ if (!sp->private)
100387+ slob_free(c, b, size);
100388 else
100389- slob_free_pages(b, get_order(size));
100390+ slob_free_pages(sp, get_order(size));
100391 }
100392
100393 static void kmem_rcu_free(struct rcu_head *head)
100394@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
100395 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
100396 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
100397
100398- __kmem_cache_free(b, slob_rcu->size);
100399+ __kmem_cache_free(NULL, b, slob_rcu->size);
100400 }
100401
100402 void kmem_cache_free(struct kmem_cache *c, void *b)
100403 {
100404+ int size = c->size;
100405+
100406+#ifdef CONFIG_PAX_USERCOPY_SLABS
100407+ if (size + c->align < PAGE_SIZE) {
100408+ size += c->align;
100409+ b -= c->align;
100410+ }
100411+#endif
100412+
100413 kmemleak_free_recursive(b, c->flags);
100414 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
100415 struct slob_rcu *slob_rcu;
100416- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
100417- slob_rcu->size = c->size;
100418+ slob_rcu = b + (size - sizeof(struct slob_rcu));
100419+ slob_rcu->size = size;
100420 call_rcu(&slob_rcu->head, kmem_rcu_free);
100421 } else {
100422- __kmem_cache_free(b, c->size);
100423+ __kmem_cache_free(c, b, size);
100424 }
100425
100426+#ifdef CONFIG_PAX_USERCOPY_SLABS
100427+ trace_kfree(_RET_IP_, b);
100428+#else
100429 trace_kmem_cache_free(_RET_IP_, b);
100430+#endif
100431+
100432 }
100433 EXPORT_SYMBOL(kmem_cache_free);
100434
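
The mm/slob.c rework above replaces SLOB's single size word in front of each kmalloc block with a two-slob_t header recording size and alignment, moves the PageSlab bit handling into slob_new_pages()/slob_free_pages(), stores large-allocation sizes in page->private, and sanitizes freed chunks. The header is what lets ksize(), kfree() and the usercopy walker all recover exact block geometry. A userspace sketch of that layout (ALIGN_DEFAULT stands in for the arch minimum alignment):

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_DEFAULT 16   /* stand-in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN) */

struct header { long units[2]; };   /* units[0] = size, units[1] = align */

static void *alloc_with_header(size_t size)
{
    struct header *m = malloc(ALIGN_DEFAULT + size);

    if (!m)
        return NULL;
    m->units[0] = (long)size;
    m->units[1] = ALIGN_DEFAULT;
    return (char *)m + ALIGN_DEFAULT;
}

static void free_with_header(void *block)
{
    struct header *m = (struct header *)((char *)block - ALIGN_DEFAULT);

    free(m);   /* the patch passes m[0].units + align to slob_free() */
}

int main(void)
{
    char *p = alloc_with_header(100);
    struct header *m = (struct header *)(p - ALIGN_DEFAULT);

    printf("ksize-like recovery: size=%ld align=%ld\n",
           m->units[0], m->units[1]);
    free_with_header(p);
    return 0;
}
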
100435diff --git a/mm/slub.c b/mm/slub.c
100436index 82c4737..55c316a 100644
100437--- a/mm/slub.c
100438+++ b/mm/slub.c
100439@@ -198,7 +198,7 @@ struct track {
100440
100441 enum track_item { TRACK_ALLOC, TRACK_FREE };
100442
100443-#ifdef CONFIG_SYSFS
100444+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100445 static int sysfs_slab_add(struct kmem_cache *);
100446 static int sysfs_slab_alias(struct kmem_cache *, const char *);
100447 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
100448@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
100449 if (!t->addr)
100450 return;
100451
100452- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
100453+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
100454 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
100455 #ifdef CONFIG_STACKTRACE
100456 {
100457@@ -2709,6 +2709,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
100458
100459 slab_free_hook(s, x);
100460
100461+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100462+ if (!(s->flags & SLAB_NO_SANITIZE)) {
100463+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
100464+ if (s->ctor)
100465+ s->ctor(x);
100466+ }
100467+#endif
100468+
100469 redo:
100470 /*
100471 * Determine the currently cpus per cpu slab.
100472@@ -3050,6 +3058,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
100473 s->inuse = size;
100474
100475 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
100476+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100477+ (!(flags & SLAB_NO_SANITIZE)) ||
100478+#endif
100479 s->ctor)) {
100480 /*
100481 * Relocate free pointer after the object if it is not
100482@@ -3304,7 +3315,7 @@ static int __init setup_slub_min_objects(char *str)
100483
100484 __setup("slub_min_objects=", setup_slub_min_objects);
100485
100486-void *__kmalloc(size_t size, gfp_t flags)
100487+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
100488 {
100489 struct kmem_cache *s;
100490 void *ret;
100491@@ -3342,7 +3353,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
100492 return ptr;
100493 }
100494
100495-void *__kmalloc_node(size_t size, gfp_t flags, int node)
100496+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
100497 {
100498 struct kmem_cache *s;
100499 void *ret;
100500@@ -3390,6 +3401,59 @@ static size_t __ksize(const void *object)
100501 return slab_ksize(page->slab_cache);
100502 }
100503
100504+bool is_usercopy_object(const void *ptr)
100505+{
100506+ struct page *page;
100507+ struct kmem_cache *s;
100508+
100509+ if (ZERO_OR_NULL_PTR(ptr))
100510+ return false;
100511+
100512+ if (!slab_is_available())
100513+ return false;
100514+
100515+ if (!virt_addr_valid(ptr))
100516+ return false;
100517+
100518+ page = virt_to_head_page(ptr);
100519+
100520+ if (!PageSlab(page))
100521+ return false;
100522+
100523+ s = page->slab_cache;
100524+ return s->flags & SLAB_USERCOPY;
100525+}
100526+
100527+#ifdef CONFIG_PAX_USERCOPY
100528+const char *check_heap_object(const void *ptr, unsigned long n)
100529+{
100530+ struct page *page;
100531+ struct kmem_cache *s;
100532+ unsigned long offset;
100533+
100534+ if (ZERO_OR_NULL_PTR(ptr))
100535+ return "<null>";
100536+
100537+ if (!virt_addr_valid(ptr))
100538+ return NULL;
100539+
100540+ page = virt_to_head_page(ptr);
100541+
100542+ if (!PageSlab(page))
100543+ return NULL;
100544+
100545+ s = page->slab_cache;
100546+ if (!(s->flags & SLAB_USERCOPY))
100547+ return s->name;
100548+
100549+ offset = (ptr - page_address(page)) % s->size;
100550+ if (offset <= s->object_size && n <= s->object_size - offset)
100551+ return NULL;
100552+
100553+ return s->name;
100554+}
100555+#endif
100556+
100557 size_t ksize(const void *object)
100558 {
100559 size_t size = __ksize(object);
100560@@ -3410,6 +3474,7 @@ void kfree(const void *x)
100561 if (unlikely(ZERO_OR_NULL_PTR(x)))
100562 return;
100563
100564+ VM_BUG_ON(!virt_addr_valid(x));
100565 page = virt_to_head_page(x);
100566 if (unlikely(!PageSlab(page))) {
100567 BUG_ON(!PageCompound(page));
100568@@ -3726,7 +3791,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100569
100570 s = find_mergeable(size, align, flags, name, ctor);
100571 if (s) {
100572- s->refcount++;
100573+ atomic_inc(&s->refcount);
100574
100575 /*
100576 * Adjust the object sizes so that we clear
100577@@ -3742,7 +3807,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100578 }
100579
100580 if (sysfs_slab_alias(s, name)) {
100581- s->refcount--;
100582+ atomic_dec(&s->refcount);
100583 s = NULL;
100584 }
100585 }
100586@@ -3859,7 +3924,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
100587 }
100588 #endif
100589
100590-#ifdef CONFIG_SYSFS
100591+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100592 static int count_inuse(struct page *page)
100593 {
100594 return page->inuse;
100595@@ -4140,7 +4205,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
100596 len += sprintf(buf + len, "%7ld ", l->count);
100597
100598 if (l->addr)
100599+#ifdef CONFIG_GRKERNSEC_HIDESYM
100600+ len += sprintf(buf + len, "%pS", NULL);
100601+#else
100602 len += sprintf(buf + len, "%pS", (void *)l->addr);
100603+#endif
100604 else
100605 len += sprintf(buf + len, "<not-available>");
100606
100607@@ -4238,12 +4307,12 @@ static void __init resiliency_test(void)
100608 validate_slab_cache(kmalloc_caches[9]);
100609 }
100610 #else
100611-#ifdef CONFIG_SYSFS
100612+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100613 static void resiliency_test(void) {};
100614 #endif
100615 #endif
100616
100617-#ifdef CONFIG_SYSFS
100618+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100619 enum slab_stat_type {
100620 SL_ALL, /* All slabs */
100621 SL_PARTIAL, /* Only partially allocated slabs */
100622@@ -4480,13 +4549,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
100623 {
100624 if (!s->ctor)
100625 return 0;
100626+#ifdef CONFIG_GRKERNSEC_HIDESYM
100627+ return sprintf(buf, "%pS\n", NULL);
100628+#else
100629 return sprintf(buf, "%pS\n", s->ctor);
100630+#endif
100631 }
100632 SLAB_ATTR_RO(ctor);
100633
100634 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
100635 {
100636- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
100637+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
100638 }
100639 SLAB_ATTR_RO(aliases);
100640
100641@@ -4574,6 +4647,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
100642 SLAB_ATTR_RO(cache_dma);
100643 #endif
100644
100645+#ifdef CONFIG_PAX_USERCOPY_SLABS
100646+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
100647+{
100648+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
100649+}
100650+SLAB_ATTR_RO(usercopy);
100651+#endif
100652+
100653+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100654+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
100655+{
100656+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
100657+}
100658+SLAB_ATTR_RO(sanitize);
100659+#endif
100660+
100661 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
100662 {
100663 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
100664@@ -4629,7 +4718,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
100665 * as well as cause other issues like converting a mergeable
100666 * cache into an umergeable one.
100667 */
100668- if (s->refcount > 1)
100669+ if (atomic_read(&s->refcount) > 1)
100670 return -EINVAL;
100671
100672 s->flags &= ~SLAB_TRACE;
100673@@ -4749,7 +4838,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
100674 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
100675 size_t length)
100676 {
100677- if (s->refcount > 1)
100678+ if (atomic_read(&s->refcount) > 1)
100679 return -EINVAL;
100680
100681 s->flags &= ~SLAB_FAILSLAB;
100682@@ -4916,6 +5005,12 @@ static struct attribute *slab_attrs[] = {
100683 #ifdef CONFIG_ZONE_DMA
100684 &cache_dma_attr.attr,
100685 #endif
100686+#ifdef CONFIG_PAX_USERCOPY_SLABS
100687+ &usercopy_attr.attr,
100688+#endif
100689+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100690+ &sanitize_attr.attr,
100691+#endif
100692 #ifdef CONFIG_NUMA
100693 &remote_node_defrag_ratio_attr.attr,
100694 #endif
100695@@ -5157,6 +5252,7 @@ static char *create_unique_id(struct kmem_cache *s)
100696 return name;
100697 }
100698
100699+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100700 static int sysfs_slab_add(struct kmem_cache *s)
100701 {
100702 int err;
100703@@ -5230,6 +5326,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100704 kobject_del(&s->kobj);
100705 kobject_put(&s->kobj);
100706 }
100707+#endif
100708
100709 /*
100710 * Need to buffer aliases during bootup until sysfs becomes
100711@@ -5243,6 +5340,7 @@ struct saved_alias {
100712
100713 static struct saved_alias *alias_list;
100714
100715+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100716 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100717 {
100718 struct saved_alias *al;
100719@@ -5265,6 +5363,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100720 alias_list = al;
100721 return 0;
100722 }
100723+#endif
100724
100725 static int __init slab_sysfs_init(void)
100726 {
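
SLUB's check_heap_object() above exploits the allocator's fixed layout: objects sit at stride s->size from the start of the slab page, so a single modulo yields the pointer's offset within its object, and the same underflow-safe comparison as the SLAB version bounds the copy to s->object_size. Worked through with made-up numbers:

#include <stdio.h>

int main(void)
{
    unsigned long page_base = 0x10000;  /* hypothetical slab page address */
    unsigned long stride    = 256;      /* s->size: object + metadata     */
    unsigned long obj_size  = 192;      /* s->object_size: usable bytes   */
    unsigned long ptr       = page_base + 3 * stride + 40;

    unsigned long offset = (ptr - page_base) % stride;

    printf("offset in object: %lu\n", offset);                  /* 40 */
    printf("copy of 100 ok : %d\n", offset <= obj_size &&
                                    100 <= obj_size - offset);  /* 1  */
    printf("copy of 180 ok : %d\n", offset <= obj_size &&
                                    180 <= obj_size - offset);  /* 0  */
    return 0;
}
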
100727diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100728index 4cba9c2..b4f9fcc 100644
100729--- a/mm/sparse-vmemmap.c
100730+++ b/mm/sparse-vmemmap.c
100731@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100732 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100733 if (!p)
100734 return NULL;
100735- pud_populate(&init_mm, pud, p);
100736+ pud_populate_kernel(&init_mm, pud, p);
100737 }
100738 return pud;
100739 }
100740@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100741 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100742 if (!p)
100743 return NULL;
100744- pgd_populate(&init_mm, pgd, p);
100745+ pgd_populate_kernel(&init_mm, pgd, p);
100746 }
100747 return pgd;
100748 }
100749diff --git a/mm/sparse.c b/mm/sparse.c
100750index d1b48b6..6e8590e 100644
100751--- a/mm/sparse.c
100752+++ b/mm/sparse.c
100753@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100754
100755 for (i = 0; i < PAGES_PER_SECTION; i++) {
100756 if (PageHWPoison(&memmap[i])) {
100757- atomic_long_sub(1, &num_poisoned_pages);
100758+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100759 ClearPageHWPoison(&memmap[i]);
100760 }
100761 }
100762diff --git a/mm/swap.c b/mm/swap.c
100763index cd3a5e6..40c0c8f 100644
100764--- a/mm/swap.c
100765+++ b/mm/swap.c
100766@@ -31,6 +31,7 @@
100767 #include <linux/memcontrol.h>
100768 #include <linux/gfp.h>
100769 #include <linux/uio.h>
100770+#include <linux/hugetlb.h>
100771
100772 #include "internal.h"
100773
100774@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100775
100776 __page_cache_release(page);
100777 dtor = get_compound_page_dtor(page);
100778+ if (!PageHuge(page))
100779+ BUG_ON(dtor != free_compound_page);
100780 (*dtor)(page);
100781 }
100782
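The swap.c hunk pins down an indirect call: for non-huge compound pages the stored destructor must be free_compound_page, and anything else trips BUG_ON before the pointer is ever jumped through. A hedged userspace sketch of the same validate-before-call shape (names invented here):

    #include <stdlib.h>

    typedef void (*dtor_t)(void *);

    static void expected_dtor(void *p) { free(p); }

    /* Before the indirect call, verify the stored pointer is the one
     * destructor ordinary objects are allowed to have; anything else
     * means the struct was corrupted, so die rather than jump there. */
    static void call_checked(dtor_t dtor, void *obj, int is_special)
    {
        if (!is_special && dtor != expected_dtor)
            abort();
        dtor(obj);
    }

    int main(void)
    {
        call_checked(expected_dtor, malloc(16), 0);
        return 0;
    }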
100783diff --git a/mm/swapfile.c b/mm/swapfile.c
100784index 63f55cc..31874e6 100644
100785--- a/mm/swapfile.c
100786+++ b/mm/swapfile.c
100787@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100788
100789 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100790 /* Activity counter to indicate that a swapon or swapoff has occurred */
100791-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100792+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100793
100794 static inline unsigned char swap_count(unsigned char ent)
100795 {
100796@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100797 spin_unlock(&swap_lock);
100798
100799 err = 0;
100800- atomic_inc(&proc_poll_event);
100801+ atomic_inc_unchecked(&proc_poll_event);
100802 wake_up_interruptible(&proc_poll_wait);
100803
100804 out_dput:
100805@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100806
100807 poll_wait(file, &proc_poll_wait, wait);
100808
100809- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100810- seq->poll_event = atomic_read(&proc_poll_event);
100811+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100812+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100813 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100814 }
100815
100816@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100817 return ret;
100818
100819 seq = file->private_data;
100820- seq->poll_event = atomic_read(&proc_poll_event);
100821+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100822 return 0;
100823 }
100824
100825@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100826 (frontswap_map) ? "FS" : "");
100827
100828 mutex_unlock(&swapon_mutex);
100829- atomic_inc(&proc_poll_event);
100830+ atomic_inc_unchecked(&proc_poll_event);
100831 wake_up_interruptible(&proc_poll_wait);
100832
100833 if (S_ISREG(inode->i_mode))
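proc_poll_event above is a plain event counter, so it moves to atomic_unchecked_t: under PAX_REFCOUNT the ordinary atomic_t operations trap on overflow to stop reference-count wraps, and counters whose wraparound is harmless must opt out explicitly. A userspace sketch of the two behaviours, using a compiler builtin as a stand-in for the kernel's overflow plumbing:

    #include <stdio.h>
    #include <stdlib.h>

    /* Checked increment: a reference count must never wrap, so a
     * detected overflow is treated as a fatal bug. */
    static int inc_checked(int *v)
    {
        int next;
        if (__builtin_add_overflow(*v, 1, &next))
            abort();    /* the kernel would saturate and report */
        *v = next;
        return next;
    }

    /* Unchecked increment: a statistics/event counter may wrap. */
    static int inc_unchecked(int *v)
    {
        *v = (int)((unsigned int)*v + 1u);  /* defined wraparound */
        return *v;
    }

    int main(void)
    {
        int events = 0, refs = 1;
        inc_unchecked(&events);
        inc_checked(&refs);
        printf("events=%d refs=%d\n", events, refs);
        return 0;
    }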
100834diff --git a/mm/util.c b/mm/util.c
100835index 3981ae9..28b585b 100644
100836--- a/mm/util.c
100837+++ b/mm/util.c
100838@@ -233,6 +233,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100839 void arch_pick_mmap_layout(struct mm_struct *mm)
100840 {
100841 mm->mmap_base = TASK_UNMAPPED_BASE;
100842+
100843+#ifdef CONFIG_PAX_RANDMMAP
100844+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100845+ mm->mmap_base += mm->delta_mmap;
100846+#endif
100847+
100848 mm->get_unmapped_area = arch_get_unmapped_area;
100849 }
100850 #endif
100851@@ -403,6 +409,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100852 if (!mm->arg_end)
100853 goto out_mm; /* Shh! No looking before we're done */
100854
100855+ if (gr_acl_handle_procpidmem(task))
100856+ goto out_mm;
100857+
100858 len = mm->arg_end - mm->arg_start;
100859
100860 if (len > buflen)
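The util.c hunk shifts mmap_base by a per-process delta_mmap when PAX_RANDMMAP is active, randomizing where arch_get_unmapped_area starts handing out mappings. A toy sketch of the idea; the base constant and entropy width below are illustrative, the real values are architecture-specific:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define TASK_UNMAPPED_BASE 0x0000555500000000ULL  /* illustrative */
    #define PAGE_SHIFT 12

    int main(void)
    {
        srandom((unsigned int)time(NULL));
        /* A page-aligned random delta standing in for delta_mmap. */
        uint64_t delta =
            ((uint64_t)random() & 0x0fffffffULL) << PAGE_SHIFT;
        printf("mmap_base = 0x%llx\n",
               (unsigned long long)(TASK_UNMAPPED_BASE + delta));
        return 0;
    }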
100861diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100862index 49abccf..7bd1931 100644
100863--- a/mm/vmalloc.c
100864+++ b/mm/vmalloc.c
100865@@ -39,20 +39,65 @@ struct vfree_deferred {
100866 struct work_struct wq;
100867 };
100868 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100869+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100870+
100871+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100872+struct stack_deferred_llist {
100873+ struct llist_head list;
100874+ void *stack;
100875+ void *lowmem_stack;
100876+};
100877+
100878+struct stack_deferred {
100879+ struct stack_deferred_llist list;
100880+ struct work_struct wq;
100881+};
100882+
100883+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100884+#endif
100885
100886 static void __vunmap(const void *, int);
100887
100888-static void free_work(struct work_struct *w)
100889+static void vfree_work(struct work_struct *w)
100890 {
100891 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100892 struct llist_node *llnode = llist_del_all(&p->list);
100893 while (llnode) {
100894- void *p = llnode;
100895+ void *x = llnode;
100896 llnode = llist_next(llnode);
100897- __vunmap(p, 1);
100898+ __vunmap(x, 1);
100899 }
100900 }
100901
100902+static void vunmap_work(struct work_struct *w)
100903+{
100904+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100905+ struct llist_node *llnode = llist_del_all(&p->list);
100906+ while (llnode) {
100907+ void *x = llnode;
100908+ llnode = llist_next(llnode);
100909+ __vunmap(x, 0);
100910+ }
100911+}
100912+
100913+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100914+static void unmap_work(struct work_struct *w)
100915+{
100916+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100917+ struct llist_node *llnode = llist_del_all(&p->list.list);
100918+ while (llnode) {
100919+ struct stack_deferred_llist *x =
100920+ llist_entry((struct llist_head *)llnode,
100921+ struct stack_deferred_llist, list);
100922+ void *stack = ACCESS_ONCE(x->stack);
100923+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100924+ llnode = llist_next(llnode);
100925+ __vunmap(stack, 0);
100926+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100927+ }
100928+}
100929+#endif
100930+
100931 /*** Page table manipulation functions ***/
100932
100933 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100934@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100935
100936 pte = pte_offset_kernel(pmd, addr);
100937 do {
100938- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100939- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100940+
100941+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100942+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100943+ BUG_ON(!pte_exec(*pte));
100944+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100945+ continue;
100946+ }
100947+#endif
100948+
100949+ {
100950+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100951+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100952+ }
100953 } while (pte++, addr += PAGE_SIZE, addr != end);
100954 }
100955
100956@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100957 pte = pte_alloc_kernel(pmd, addr);
100958 if (!pte)
100959 return -ENOMEM;
100960+
100961+ pax_open_kernel();
100962 do {
100963 struct page *page = pages[*nr];
100964
100965- if (WARN_ON(!pte_none(*pte)))
100966+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100967+ if (pgprot_val(prot) & _PAGE_NX)
100968+#endif
100969+
100970+ if (!pte_none(*pte)) {
100971+ pax_close_kernel();
100972+ WARN_ON(1);
100973 return -EBUSY;
100974- if (WARN_ON(!page))
100975+ }
100976+ if (!page) {
100977+ pax_close_kernel();
100978+ WARN_ON(1);
100979 return -ENOMEM;
100980+ }
100981 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100982 (*nr)++;
100983 } while (pte++, addr += PAGE_SIZE, addr != end);
100984+ pax_close_kernel();
100985 return 0;
100986 }
100987
100988@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100989 pmd_t *pmd;
100990 unsigned long next;
100991
100992- pmd = pmd_alloc(&init_mm, pud, addr);
100993+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100994 if (!pmd)
100995 return -ENOMEM;
100996 do {
100997@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100998 pud_t *pud;
100999 unsigned long next;
101000
101001- pud = pud_alloc(&init_mm, pgd, addr);
101002+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
101003 if (!pud)
101004 return -ENOMEM;
101005 do {
101006@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
101007 if (addr >= MODULES_VADDR && addr < MODULES_END)
101008 return 1;
101009 #endif
101010+
101011+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
101012+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
101013+ return 1;
101014+#endif
101015+
101016 return is_vmalloc_addr(x);
101017 }
101018
101019@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
101020
101021 if (!pgd_none(*pgd)) {
101022 pud_t *pud = pud_offset(pgd, addr);
101023+#ifdef CONFIG_X86
101024+ if (!pud_large(*pud))
101025+#endif
101026 if (!pud_none(*pud)) {
101027 pmd_t *pmd = pmd_offset(pud, addr);
101028+#ifdef CONFIG_X86
101029+ if (!pmd_large(*pmd))
101030+#endif
101031 if (!pmd_none(*pmd)) {
101032 pte_t *ptep, pte;
101033
101034@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
101035 * Allocate a region of KVA of the specified size and alignment, within the
101036 * vstart and vend.
101037 */
101038-static struct vmap_area *alloc_vmap_area(unsigned long size,
101039+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
101040 unsigned long align,
101041 unsigned long vstart, unsigned long vend,
101042 int node, gfp_t gfp_mask)
101043@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
101044 for_each_possible_cpu(i) {
101045 struct vmap_block_queue *vbq;
101046 struct vfree_deferred *p;
101047+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101048+ struct stack_deferred *p2;
101049+#endif
101050
101051 vbq = &per_cpu(vmap_block_queue, i);
101052 spin_lock_init(&vbq->lock);
101053 INIT_LIST_HEAD(&vbq->free);
101054+
101055 p = &per_cpu(vfree_deferred, i);
101056 init_llist_head(&p->list);
101057- INIT_WORK(&p->wq, free_work);
101058+ INIT_WORK(&p->wq, vfree_work);
101059+
101060+ p = &per_cpu(vunmap_deferred, i);
101061+ init_llist_head(&p->list);
101062+ INIT_WORK(&p->wq, vunmap_work);
101063+
101064+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101065+ p2 = &per_cpu(stack_deferred, i);
101066+ init_llist_head(&p2->list.list);
101067+ INIT_WORK(&p2->wq, unmap_work);
101068+#endif
101069 }
101070
101071 /* Import existing vmlist entries. */
101072@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
101073 struct vm_struct *area;
101074
101075 BUG_ON(in_interrupt());
101076+
101077+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101078+ if (flags & VM_KERNEXEC) {
101079+ if (start != VMALLOC_START || end != VMALLOC_END)
101080+ return NULL;
101081+ start = (unsigned long)MODULES_EXEC_VADDR;
101082+ end = (unsigned long)MODULES_EXEC_END;
101083+ }
101084+#endif
101085+
101086 if (flags & VM_IOREMAP)
101087 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
101088
101089@@ -1510,13 +1615,36 @@ EXPORT_SYMBOL(vfree);
101090 */
101091 void vunmap(const void *addr)
101092 {
101093- BUG_ON(in_interrupt());
101094- might_sleep();
101095- if (addr)
101096+ if (!addr)
101097+ return;
101098+ if (unlikely(in_interrupt())) {
101099+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
101100+ if (llist_add((struct llist_node *)addr, &p->list))
101101+ schedule_work(&p->wq);
101102+ } else {
101103+ might_sleep();
101104 __vunmap(addr, 0);
101105+ }
101106 }
101107 EXPORT_SYMBOL(vunmap);
101108
101109+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101110+void unmap_process_stacks(struct task_struct *task)
101111+{
101112+ if (unlikely(in_interrupt())) {
101113+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
101114+ struct stack_deferred_llist *list = task->stack;
101115+ list->stack = task->stack;
101116+ list->lowmem_stack = task->lowmem_stack;
101117+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
101118+ schedule_work(&p->wq);
101119+ } else {
101120+ __vunmap(task->stack, 0);
101121+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
101122+ }
101123+}
101124+#endif
101125+
101126 /**
101127 * vmap - map an array of pages into virtually contiguous space
101128 * @pages: array of page pointers
101129@@ -1537,6 +1665,11 @@ void *vmap(struct page **pages, unsigned int count,
101130 if (count > totalram_pages)
101131 return NULL;
101132
101133+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101134+ if (!(pgprot_val(prot) & _PAGE_NX))
101135+ flags |= VM_KERNEXEC;
101136+#endif
101137+
101138 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
101139 __builtin_return_address(0));
101140 if (!area)
101141@@ -1641,6 +1774,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
101142 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
101143 goto fail;
101144
101145+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101146+ if (!(pgprot_val(prot) & _PAGE_NX)) {
101147+ vm_flags |= VM_KERNEXEC;
101148+ start = VMALLOC_START;
101149+ end = VMALLOC_END;
101150+ }
101151+#endif
101152+
101153 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
101154 vm_flags, start, end, node, gfp_mask, caller);
101155 if (!area)
101156@@ -1817,10 +1958,9 @@ EXPORT_SYMBOL(vzalloc_node);
101157 * For tight control over page level allocator and protection flags
101158 * use __vmalloc() instead.
101159 */
101160-
101161 void *vmalloc_exec(unsigned long size)
101162 {
101163- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
101164+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
101165 NUMA_NO_NODE, __builtin_return_address(0));
101166 }
101167
101168@@ -2127,6 +2267,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
101169 {
101170 struct vm_struct *area;
101171
101172+ BUG_ON(vma->vm_mirror);
101173+
101174 size = PAGE_ALIGN(size);
101175
101176 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
101177@@ -2609,7 +2751,11 @@ static int s_show(struct seq_file *m, void *p)
101178 v->addr, v->addr + v->size, v->size);
101179
101180 if (v->caller)
101181+#ifdef CONFIG_GRKERNSEC_HIDESYM
101182+ seq_printf(m, " %pK", v->caller);
101183+#else
101184 seq_printf(m, " %pS", v->caller);
101185+#endif
101186
101187 if (v->nr_pages)
101188 seq_printf(m, " pages=%d", v->nr_pages);
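Several of the vmalloc.c additions repeat one pattern: work that may sleep (vunmap, kernel-stack teardown) arriving in interrupt context is pushed onto a lock-free llist and drained later by a workqueue, and the llist node is carved out of the very memory being freed, so no allocation is needed. A single-threaded userspace sketch of that shape (invented names, a plain list standing in for llist):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static struct node *deferred;   /* per-CPU llist stand-in */

    static void do_unmap(void *p)   /* the work that may sleep */
    {
        printf("unmapping %p\n", p);
        free(p);
    }

    static void vunmap_like(void *addr, int in_interrupt)
    {
        if (in_interrupt) {
            /* Cannot sleep here: reuse the region being freed as
             * the list node and let the worker handle it later. */
            struct node *n = addr;
            n->next = deferred;
            deferred = n;
        } else {
            do_unmap(addr);         /* process context: do it now */
        }
    }

    static void worker(void)        /* scheduled work, may sleep */
    {
        while (deferred) {
            struct node *n = deferred;
            deferred = n->next;
            do_unmap(n);
        }
    }

    int main(void)
    {
        vunmap_like(malloc(64), 1); /* deferred */
        vunmap_like(malloc(64), 0); /* immediate */
        worker();                   /* drains the deferred entry */
        return 0;
    }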
101189diff --git a/mm/vmstat.c b/mm/vmstat.c
101190index 4f5cd97..9fb715a 100644
101191--- a/mm/vmstat.c
101192+++ b/mm/vmstat.c
101193@@ -27,6 +27,7 @@
101194 #include <linux/mm_inline.h>
101195 #include <linux/page_ext.h>
101196 #include <linux/page_owner.h>
101197+#include <linux/grsecurity.h>
101198
101199 #include "internal.h"
101200
101201@@ -86,7 +87,7 @@ void vm_events_fold_cpu(int cpu)
101202 *
101203 * vm_stat contains the global counters
101204 */
101205-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101206+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101207 EXPORT_SYMBOL(vm_stat);
101208
101209 #ifdef CONFIG_SMP
101210@@ -438,7 +439,7 @@ static int fold_diff(int *diff)
101211
101212 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101213 if (diff[i]) {
101214- atomic_long_add(diff[i], &vm_stat[i]);
101215+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
101216 changes++;
101217 }
101218 return changes;
101219@@ -476,7 +477,7 @@ static int refresh_cpu_vm_stats(void)
101220 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
101221 if (v) {
101222
101223- atomic_long_add(v, &zone->vm_stat[i]);
101224+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101225 global_diff[i] += v;
101226 #ifdef CONFIG_NUMA
101227 /* 3 seconds idle till flush */
101228@@ -540,7 +541,7 @@ void cpu_vm_stats_fold(int cpu)
101229
101230 v = p->vm_stat_diff[i];
101231 p->vm_stat_diff[i] = 0;
101232- atomic_long_add(v, &zone->vm_stat[i]);
101233+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101234 global_diff[i] += v;
101235 }
101236 }
101237@@ -560,8 +561,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
101238 if (pset->vm_stat_diff[i]) {
101239 int v = pset->vm_stat_diff[i];
101240 pset->vm_stat_diff[i] = 0;
101241- atomic_long_add(v, &zone->vm_stat[i]);
101242- atomic_long_add(v, &vm_stat[i]);
101243+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101244+ atomic_long_add_unchecked(v, &vm_stat[i]);
101245 }
101246 }
101247 #endif
101248@@ -1293,10 +1294,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
101249 stat_items_size += sizeof(struct vm_event_state);
101250 #endif
101251
101252- v = kmalloc(stat_items_size, GFP_KERNEL);
101253+ v = kzalloc(stat_items_size, GFP_KERNEL);
101254 m->private = v;
101255 if (!v)
101256 return ERR_PTR(-ENOMEM);
101257+
101258+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101259+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
101260+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
101261+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
101262+ && !in_group_p(grsec_proc_gid)
101263+#endif
101264+ )
101265+ return (unsigned long *)m->private + *pos;
101266+#endif
101267+#endif
101268+
101269 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101270 v[i] = global_page_state(i);
101271 v += NR_VM_ZONE_STAT_ITEMS;
101272@@ -1528,10 +1541,16 @@ static int __init setup_vmstat(void)
101273 cpu_notifier_register_done();
101274 #endif
101275 #ifdef CONFIG_PROC_FS
101276- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
101277- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
101278- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101279- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
101280+ {
101281+ mode_t gr_mode = S_IRUGO;
101282+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101283+ gr_mode = S_IRUSR;
101284+#endif
101285+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
101286+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
101287+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101288+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
101289+ }
101290 #endif
101291 return 0;
101292 }
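The vmstat_start change pairs kzalloc with an early return: when GRKERNSEC_PROC_ADD is active and the reader is neither root nor in the configured group, the zero-filled buffer is handed back untouched, so unprivileged readers see well-formed but all-zero statistics. A sketch of the gate, with getuid() standing in for the kernel's credential check:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define NSTATS 4

    static long *start_stats(void)
    {
        long *v = calloc(NSTATS, sizeof(*v));  /* kzalloc analogue */
        if (!v)
            return NULL;
        if (getuid() != 0)          /* unprivileged: leave zeros */
            return v;
        for (int i = 0; i < NSTATS; i++)
            v[i] = 42 + i;          /* privileged: real counters */
        return v;
    }

    int main(void)
    {
        long *v = start_stats();
        for (int i = 0; v && i < NSTATS; i++)
            printf("stat %d = %ld\n", i, v[i]);
        free(v);
        return 0;
    }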
101293diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
101294index 64c6bed..b79a5de 100644
101295--- a/net/8021q/vlan.c
101296+++ b/net/8021q/vlan.c
101297@@ -481,7 +481,7 @@ out:
101298 return NOTIFY_DONE;
101299 }
101300
101301-static struct notifier_block vlan_notifier_block __read_mostly = {
101302+static struct notifier_block vlan_notifier_block = {
101303 .notifier_call = vlan_device_event,
101304 };
101305
101306@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
101307 err = -EPERM;
101308 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
101309 break;
101310- if ((args.u.name_type >= 0) &&
101311- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
101312+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
101313 struct vlan_net *vn;
101314
101315 vn = net_generic(net, vlan_net_id);
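The vlan.c ioctl hunk deletes a tautology: name_type is an unsigned field of the ioctl argument union, so "name_type >= 0" can never be false and only the upper bound does any work. A minimal illustration:

    #include <stdio.h>

    #define VLAN_NAME_TYPE_HIGHEST 4

    static int valid_name_type(unsigned int t)
    {
        /* "t >= 0" would always be true for an unsigned t; the
         * upper bound is the only check that can ever fail. */
        return t < VLAN_NAME_TYPE_HIGHEST;
    }

    int main(void)
    {
        printf("%d %d\n", valid_name_type(3), valid_name_type(7));
        return 0;
    }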
101316diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
101317index c92b52f..006c052 100644
101318--- a/net/8021q/vlan_netlink.c
101319+++ b/net/8021q/vlan_netlink.c
101320@@ -245,7 +245,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
101321 return dev_net(real_dev);
101322 }
101323
101324-struct rtnl_link_ops vlan_link_ops __read_mostly = {
101325+struct rtnl_link_ops vlan_link_ops = {
101326 .kind = "vlan",
101327 .maxtype = IFLA_VLAN_MAX,
101328 .policy = vlan_policy,
101329diff --git a/net/9p/client.c b/net/9p/client.c
101330index e86a9bea..e91f70e 100644
101331--- a/net/9p/client.c
101332+++ b/net/9p/client.c
101333@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
101334 len - inline_len);
101335 } else {
101336 err = copy_from_user(ename + inline_len,
101337- uidata, len - inline_len);
101338+ (char __force_user *)uidata, len - inline_len);
101339 if (err) {
101340 err = -EFAULT;
101341 goto out_err;
101342@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
101343 kernel_buf = 1;
101344 indata = data;
101345 } else
101346- indata = (__force char *)udata;
101347+ indata = (__force_kernel char *)udata;
101348 /*
101349 * response header len is 11
101350 * PDU Header(7) + IO Size (4)
101351@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
101352 kernel_buf = 1;
101353 odata = data;
101354 } else
101355- odata = (char *)udata;
101356+ odata = (char __force_kernel *)udata;
101357 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
101358 P9_ZC_HDR_SZ, kernel_buf, "dqd",
101359 fid->fid, offset, rsize);
101360diff --git a/net/9p/mod.c b/net/9p/mod.c
101361index 6ab36ae..6f1841b 100644
101362--- a/net/9p/mod.c
101363+++ b/net/9p/mod.c
101364@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
101365 void v9fs_register_trans(struct p9_trans_module *m)
101366 {
101367 spin_lock(&v9fs_trans_lock);
101368- list_add_tail(&m->list, &v9fs_trans_list);
101369+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
101370 spin_unlock(&v9fs_trans_lock);
101371 }
101372 EXPORT_SYMBOL(v9fs_register_trans);
101373@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
101374 void v9fs_unregister_trans(struct p9_trans_module *m)
101375 {
101376 spin_lock(&v9fs_trans_lock);
101377- list_del_init(&m->list);
101378+ pax_list_del_init((struct list_head *)&m->list);
101379 spin_unlock(&v9fs_trans_lock);
101380 }
101381 EXPORT_SYMBOL(v9fs_unregister_trans);
101382diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
101383index 80d08f6..de63fd1 100644
101384--- a/net/9p/trans_fd.c
101385+++ b/net/9p/trans_fd.c
101386@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
101387 oldfs = get_fs();
101388 set_fs(get_ds());
101389 /* The cast to a user pointer is valid due to the set_fs() */
101390- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
101391+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
101392 set_fs(oldfs);
101393
101394 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
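The 9p hunks are sparse-annotation churn rather than behaviour changes: __force_user and __force_kernel mark the places where a pointer deliberately crosses between the kernel and user address spaces (here because set_fs(get_ds())-style code passes kernel buffers to user-pointer APIs). A sketch of how such annotations work, assuming the usual empty-macro fallback for non-sparse builds:

    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__              /* sparse defines this */
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* A user pointer may only be handed to accessors, never
     * dereferenced directly; the __force cast marks the one
     * intentional crossing so sparse stays quiet about it. */
    static long copy_from_user_like(void *dst, const void __user *src,
                                    unsigned long n)
    {
        memcpy(dst, (const void __force *)src, n);
        return 0;
    }

    int main(void)
    {
        char kbuf[8];
        const char __user *ubuf = (const char __user *)"hi";
        copy_from_user_like(kbuf, ubuf, 3);
        puts(kbuf);
        return 0;
    }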
101395diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
101396index af46bc4..f9adfcd 100644
101397--- a/net/appletalk/atalk_proc.c
101398+++ b/net/appletalk/atalk_proc.c
101399@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
101400 struct proc_dir_entry *p;
101401 int rc = -ENOMEM;
101402
101403- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
101404+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
101405 if (!atalk_proc_dir)
101406 goto out;
101407
101408diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
101409index 876fbe8..8bbea9f 100644
101410--- a/net/atm/atm_misc.c
101411+++ b/net/atm/atm_misc.c
101412@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
101413 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
101414 return 1;
101415 atm_return(vcc, truesize);
101416- atomic_inc(&vcc->stats->rx_drop);
101417+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101418 return 0;
101419 }
101420 EXPORT_SYMBOL(atm_charge);
101421@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
101422 }
101423 }
101424 atm_return(vcc, guess);
101425- atomic_inc(&vcc->stats->rx_drop);
101426+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101427 return NULL;
101428 }
101429 EXPORT_SYMBOL(atm_alloc_charge);
101430@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
101431
101432 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101433 {
101434-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101435+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101436 __SONET_ITEMS
101437 #undef __HANDLE_ITEM
101438 }
101439@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
101440
101441 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101442 {
101443-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101444+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
101445 __SONET_ITEMS
101446 #undef __HANDLE_ITEM
101447 }
101448diff --git a/net/atm/lec.c b/net/atm/lec.c
101449index 4b98f89..5a2f6cb 100644
101450--- a/net/atm/lec.c
101451+++ b/net/atm/lec.c
101452@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
101453 }
101454
101455 static struct lane2_ops lane2_ops = {
101456- lane2_resolve, /* resolve, spec 3.1.3 */
101457- lane2_associate_req, /* associate_req, spec 3.1.4 */
101458- NULL /* associate indicator, spec 3.1.5 */
101459+ .resolve = lane2_resolve,
101460+ .associate_req = lane2_associate_req,
101461+ .associate_indicator = NULL
101462 };
101463
101464 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
101465diff --git a/net/atm/lec.h b/net/atm/lec.h
101466index 4149db1..f2ab682 100644
101467--- a/net/atm/lec.h
101468+++ b/net/atm/lec.h
101469@@ -48,7 +48,7 @@ struct lane2_ops {
101470 const u8 *tlvs, u32 sizeoftlvs);
101471 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
101472 const u8 *tlvs, u32 sizeoftlvs);
101473-};
101474+} __no_const;
101475
101476 /*
101477 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
101478diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
101479index d1b2d9a..d549f7f 100644
101480--- a/net/atm/mpoa_caches.c
101481+++ b/net/atm/mpoa_caches.c
101482@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
101483
101484
101485 static struct in_cache_ops ingress_ops = {
101486- in_cache_add_entry, /* add_entry */
101487- in_cache_get, /* get */
101488- in_cache_get_with_mask, /* get_with_mask */
101489- in_cache_get_by_vcc, /* get_by_vcc */
101490- in_cache_put, /* put */
101491- in_cache_remove_entry, /* remove_entry */
101492- cache_hit, /* cache_hit */
101493- clear_count_and_expired, /* clear_count */
101494- check_resolving_entries, /* check_resolving */
101495- refresh_entries, /* refresh */
101496- in_destroy_cache /* destroy_cache */
101497+ .add_entry = in_cache_add_entry,
101498+ .get = in_cache_get,
101499+ .get_with_mask = in_cache_get_with_mask,
101500+ .get_by_vcc = in_cache_get_by_vcc,
101501+ .put = in_cache_put,
101502+ .remove_entry = in_cache_remove_entry,
101503+ .cache_hit = cache_hit,
101504+ .clear_count = clear_count_and_expired,
101505+ .check_resolving = check_resolving_entries,
101506+ .refresh = refresh_entries,
101507+ .destroy_cache = in_destroy_cache
101508 };
101509
101510 static struct eg_cache_ops egress_ops = {
101511- eg_cache_add_entry, /* add_entry */
101512- eg_cache_get_by_cache_id, /* get_by_cache_id */
101513- eg_cache_get_by_tag, /* get_by_tag */
101514- eg_cache_get_by_vcc, /* get_by_vcc */
101515- eg_cache_get_by_src_ip, /* get_by_src_ip */
101516- eg_cache_put, /* put */
101517- eg_cache_remove_entry, /* remove_entry */
101518- update_eg_cache_entry, /* update */
101519- clear_expired, /* clear_expired */
101520- eg_destroy_cache /* destroy_cache */
101521+ .add_entry = eg_cache_add_entry,
101522+ .get_by_cache_id = eg_cache_get_by_cache_id,
101523+ .get_by_tag = eg_cache_get_by_tag,
101524+ .get_by_vcc = eg_cache_get_by_vcc,
101525+ .get_by_src_ip = eg_cache_get_by_src_ip,
101526+ .put = eg_cache_put,
101527+ .remove_entry = eg_cache_remove_entry,
101528+ .update = update_eg_cache_entry,
101529+ .clear_expired = clear_expired,
101530+ .destroy_cache = eg_destroy_cache
101531 };
101532
101533
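The lec.c and mpoa_caches.c hunks convert positional ops-table initializers to designated ones; the result survives field reordering, makes omitted callbacks an explicit NULL, and is the form the constification plugins expect, while lec.h's __no_const marks an ops struct whose members must stay writable. A small illustration of the initializer style:

    #include <stdio.h>

    struct cache_ops {
        int  (*add_entry)(int key);
        int  (*get)(int key);
        void (*destroy)(void);
    };

    static int my_add(int key) { return key + 1; }
    static int my_get(int key) { return key; }

    /* Order-independent, self-documenting, explicit about gaps: */
    static struct cache_ops ops = {
        .add_entry = my_add,
        .get       = my_get,
        .destroy   = NULL,
    };

    int main(void)
    {
        printf("%d\n", ops.get(ops.add_entry(41)));  /* 42 */
        return 0;
    }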
101534diff --git a/net/atm/proc.c b/net/atm/proc.c
101535index bbb6461..cf04016 100644
101536--- a/net/atm/proc.c
101537+++ b/net/atm/proc.c
101538@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
101539 const struct k_atm_aal_stats *stats)
101540 {
101541 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
101542- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
101543- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
101544- atomic_read(&stats->rx_drop));
101545+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
101546+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
101547+ atomic_read_unchecked(&stats->rx_drop));
101548 }
101549
101550 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
101551diff --git a/net/atm/resources.c b/net/atm/resources.c
101552index 0447d5d..3cf4728 100644
101553--- a/net/atm/resources.c
101554+++ b/net/atm/resources.c
101555@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
101556 static void copy_aal_stats(struct k_atm_aal_stats *from,
101557 struct atm_aal_stats *to)
101558 {
101559-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101560+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101561 __AAL_STAT_ITEMS
101562 #undef __HANDLE_ITEM
101563 }
101564@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
101565 static void subtract_aal_stats(struct k_atm_aal_stats *from,
101566 struct atm_aal_stats *to)
101567 {
101568-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101569+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
101570 __AAL_STAT_ITEMS
101571 #undef __HANDLE_ITEM
101572 }
101573diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
101574index 919a5ce..cc6b444 100644
101575--- a/net/ax25/sysctl_net_ax25.c
101576+++ b/net/ax25/sysctl_net_ax25.c
101577@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
101578 {
101579 char path[sizeof("net/ax25/") + IFNAMSIZ];
101580 int k;
101581- struct ctl_table *table;
101582+ ctl_table_no_const *table;
101583
101584 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
101585 if (!table)
101586diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
101587index 00e00e0..710fcd2 100644
101588--- a/net/batman-adv/bat_iv_ogm.c
101589+++ b/net/batman-adv/bat_iv_ogm.c
101590@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
101591
101592 /* randomize initial seqno to avoid collision */
101593 get_random_bytes(&random_seqno, sizeof(random_seqno));
101594- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101595+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101596
101597 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
101598 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
101599@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
101600 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
101601
101602 /* change sequence number to network order */
101603- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
101604+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
101605 batadv_ogm_packet->seqno = htonl(seqno);
101606- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
101607+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
101608
101609 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
101610
101611@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
101612 return;
101613
101614 /* could be changed by schedule_own_packet() */
101615- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
101616+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
101617
101618 if (ogm_packet->flags & BATADV_DIRECTLINK)
101619 has_directlink_flag = true;
101620diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
101621index 3d1dcaa..4699f4e 100644
101622--- a/net/batman-adv/fragmentation.c
101623+++ b/net/batman-adv/fragmentation.c
101624@@ -449,7 +449,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
101625 frag_header.packet_type = BATADV_UNICAST_FRAG;
101626 frag_header.version = BATADV_COMPAT_VERSION;
101627 frag_header.ttl = BATADV_TTL;
101628- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
101629+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
101630 frag_header.reserved = 0;
101631 frag_header.no = 0;
101632 frag_header.total_size = htons(skb->len);
101633diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
101634index 5ec31d7..e371631 100644
101635--- a/net/batman-adv/soft-interface.c
101636+++ b/net/batman-adv/soft-interface.c
101637@@ -295,7 +295,7 @@ send:
101638 primary_if->net_dev->dev_addr);
101639
101640 /* set broadcast sequence number */
101641- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
101642+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
101643 bcast_packet->seqno = htonl(seqno);
101644
101645 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
101646@@ -760,7 +760,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101647 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
101648
101649 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
101650- atomic_set(&bat_priv->bcast_seqno, 1);
101651+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
101652 atomic_set(&bat_priv->tt.vn, 0);
101653 atomic_set(&bat_priv->tt.local_changes, 0);
101654 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
101655@@ -774,7 +774,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101656
101657 /* randomize initial seqno to avoid collision */
101658 get_random_bytes(&random_seqno, sizeof(random_seqno));
101659- atomic_set(&bat_priv->frag_seqno, random_seqno);
101660+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
101661
101662 bat_priv->primary_if = NULL;
101663 bat_priv->num_ifaces = 0;
101664@@ -982,7 +982,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
101665 return 0;
101666 }
101667
101668-struct rtnl_link_ops batadv_link_ops __read_mostly = {
101669+struct rtnl_link_ops batadv_link_ops = {
101670 .kind = "batadv",
101671 .priv_size = sizeof(struct batadv_priv),
101672 .setup = batadv_softif_init_early,
101673diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
101674index 9398c3f..0e79657 100644
101675--- a/net/batman-adv/types.h
101676+++ b/net/batman-adv/types.h
101677@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
101678 struct batadv_hard_iface_bat_iv {
101679 unsigned char *ogm_buff;
101680 int ogm_buff_len;
101681- atomic_t ogm_seqno;
101682+ atomic_unchecked_t ogm_seqno;
101683 };
101684
101685 /**
101686@@ -766,7 +766,7 @@ struct batadv_priv {
101687 atomic_t bonding;
101688 atomic_t fragmentation;
101689 atomic_t packet_size_max;
101690- atomic_t frag_seqno;
101691+ atomic_unchecked_t frag_seqno;
101692 #ifdef CONFIG_BATMAN_ADV_BLA
101693 atomic_t bridge_loop_avoidance;
101694 #endif
101695@@ -785,7 +785,7 @@ struct batadv_priv {
101696 #endif
101697 uint32_t isolation_mark;
101698 uint32_t isolation_mark_mask;
101699- atomic_t bcast_seqno;
101700+ atomic_unchecked_t bcast_seqno;
101701 atomic_t bcast_queue_left;
101702 atomic_t batman_queue_left;
101703 char num_ifaces;
101704diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101705index 1d65c5b..43e55fd 100644
101706--- a/net/bluetooth/hci_sock.c
101707+++ b/net/bluetooth/hci_sock.c
101708@@ -1042,7 +1042,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101709 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101710 }
101711
101712- len = min_t(unsigned int, len, sizeof(uf));
101713+ len = min((size_t)len, sizeof(uf));
101714 if (copy_from_user(&uf, optval, len)) {
101715 err = -EFAULT;
101716 break;
101717diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101718index 6ba33f9..4afc26f 100644
101719--- a/net/bluetooth/l2cap_core.c
101720+++ b/net/bluetooth/l2cap_core.c
101721@@ -3534,8 +3534,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101722 break;
101723
101724 case L2CAP_CONF_RFC:
101725- if (olen == sizeof(rfc))
101726- memcpy(&rfc, (void *)val, olen);
101727+ if (olen != sizeof(rfc))
101728+ break;
101729+
101730+ memcpy(&rfc, (void *)val, olen);
101731
101732 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101733 rfc.mode != chan->mode)
101734diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101735index 60694f0..32623ed 100644
101736--- a/net/bluetooth/l2cap_sock.c
101737+++ b/net/bluetooth/l2cap_sock.c
101738@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101739 struct sock *sk = sock->sk;
101740 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101741 struct l2cap_options opts;
101742- int len, err = 0;
101743+ int err = 0;
101744+ size_t len = optlen;
101745 u32 opt;
101746
101747 BT_DBG("sk %p", sk);
101748@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101749 opts.max_tx = chan->max_tx;
101750 opts.txwin_size = chan->tx_win;
101751
101752- len = min_t(unsigned int, sizeof(opts), optlen);
101753+ len = min(sizeof(opts), len);
101754 if (copy_from_user((char *) &opts, optval, len)) {
101755 err = -EFAULT;
101756 break;
101757@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101758 struct bt_security sec;
101759 struct bt_power pwr;
101760 struct l2cap_conn *conn;
101761- int len, err = 0;
101762+ int err = 0;
101763+ size_t len = optlen;
101764 u32 opt;
101765
101766 BT_DBG("sk %p", sk);
101767@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101768
101769 sec.level = BT_SECURITY_LOW;
101770
101771- len = min_t(unsigned int, sizeof(sec), optlen);
101772+ len = min(sizeof(sec), len);
101773 if (copy_from_user((char *) &sec, optval, len)) {
101774 err = -EFAULT;
101775 break;
101776@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101777
101778 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101779
101780- len = min_t(unsigned int, sizeof(pwr), optlen);
101781+ len = min(sizeof(pwr), len);
101782 if (copy_from_user((char *) &pwr, optval, len)) {
101783 err = -EFAULT;
101784 break;
101785diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101786index 3c6d2c8..6afc970 100644
101787--- a/net/bluetooth/rfcomm/sock.c
101788+++ b/net/bluetooth/rfcomm/sock.c
101789@@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101790 struct sock *sk = sock->sk;
101791 struct bt_security sec;
101792 int err = 0;
101793- size_t len;
101794+ size_t len = optlen;
101795 u32 opt;
101796
101797 BT_DBG("sk %p", sk);
101798@@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101799
101800 sec.level = BT_SECURITY_LOW;
101801
101802- len = min_t(unsigned int, sizeof(sec), optlen);
101803+ len = min(sizeof(sec), len);
101804 if (copy_from_user((char *) &sec, optval, len)) {
101805 err = -EFAULT;
101806 break;
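The bluetooth setsockopt hunks above move option lengths from int plus min_t(unsigned int, ...) to size_t throughout, so the clamp and the later copy_from_user length live in one unsigned type. One pitfall this style avoids, sketched in userspace (a hedged illustration, not the kernel's exact failure mode):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        char opts[16];
        int user_len = -1;          /* hostile caller-supplied length */

        /* Clamping in the signed domain keeps the bogus value, and
         * a later unsigned copy would treat it as a huge size: */
        int bad = MIN((int)sizeof(opts), user_len);

        /* Clamping in size_t: -1 converts to SIZE_MAX and the min
         * collapses to the buffer size, as intended: */
        size_t good = MIN(sizeof(opts), (size_t)user_len);

        printf("bad=%d good=%zu\n", bad, good); /* bad=-1 good=16 */
        return 0;
    }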
101807diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101808index 8e385a0..a5bdd8e 100644
101809--- a/net/bluetooth/rfcomm/tty.c
101810+++ b/net/bluetooth/rfcomm/tty.c
101811@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101812 BT_DBG("tty %p id %d", tty, tty->index);
101813
101814 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101815- dev->channel, dev->port.count);
101816+ dev->channel, atomic_read(&dev->port.count));
101817
101818 err = tty_port_open(&dev->port, tty, filp);
101819 if (err)
101820@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101821 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101822
101823 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101824- dev->port.count);
101825+ atomic_read(&dev->port.count));
101826
101827 tty_port_close(&dev->port, tty, filp);
101828 }
101829diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101830index 4fbcea0..69a6786 100644
101831--- a/net/bridge/br_netlink.c
101832+++ b/net/bridge/br_netlink.c
101833@@ -726,7 +726,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
101834 .get_link_af_size = br_get_link_af_size,
101835 };
101836
101837-struct rtnl_link_ops br_link_ops __read_mostly = {
101838+struct rtnl_link_ops br_link_ops = {
101839 .kind = "bridge",
101840 .priv_size = sizeof(struct net_bridge),
101841 .setup = br_dev_setup,
101842diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101843index 91180a7..1301daa 100644
101844--- a/net/bridge/netfilter/ebtables.c
101845+++ b/net/bridge/netfilter/ebtables.c
101846@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101847 tmp.valid_hooks = t->table->valid_hooks;
101848 }
101849 mutex_unlock(&ebt_mutex);
101850- if (copy_to_user(user, &tmp, *len) != 0) {
101851+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101852 BUGPRINT("c2u Didn't work\n");
101853 ret = -EFAULT;
101854 break;
101855@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101856 goto out;
101857 tmp.valid_hooks = t->valid_hooks;
101858
101859- if (copy_to_user(user, &tmp, *len) != 0) {
101860+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101861 ret = -EFAULT;
101862 break;
101863 }
101864@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101865 tmp.entries_size = t->table->entries_size;
101866 tmp.valid_hooks = t->table->valid_hooks;
101867
101868- if (copy_to_user(user, &tmp, *len) != 0) {
101869+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101870 ret = -EFAULT;
101871 break;
101872 }
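The ebtables hunks bound a user-controlled *len against sizeof(tmp) before copy_to_user; without the check, a larger *len would copy whatever follows the fixed-size reply struct on the kernel stack out to userspace. The guard, restated as a userspace sketch:

    #include <stdio.h>
    #include <string.h>

    struct reply { int a, b; };

    /* Copying user_len bytes out of a fixed-size object is only safe
     * once user_len has been bounded; -1 plays the role of -EFAULT. */
    static int copy_reply(void *user_buf, size_t user_len,
                          const struct reply *tmp)
    {
        if (user_len > sizeof(*tmp))
            return -1;
        memcpy(user_buf, tmp, user_len);
        return 0;
    }

    int main(void)
    {
        struct reply tmp = { 1, 2 };
        char buf[64];
        printf("%d\n", copy_reply(buf, sizeof(tmp), &tmp)); /* 0 */
        printf("%d\n", copy_reply(buf, sizeof(buf), &tmp)); /* -1 */
        return 0;
    }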
101873diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101874index f5afda1..dcf770a 100644
101875--- a/net/caif/cfctrl.c
101876+++ b/net/caif/cfctrl.c
101877@@ -10,6 +10,7 @@
101878 #include <linux/spinlock.h>
101879 #include <linux/slab.h>
101880 #include <linux/pkt_sched.h>
101881+#include <linux/sched.h>
101882 #include <net/caif/caif_layer.h>
101883 #include <net/caif/cfpkt.h>
101884 #include <net/caif/cfctrl.h>
101885@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101886 memset(&dev_info, 0, sizeof(dev_info));
101887 dev_info.id = 0xff;
101888 cfsrvl_init(&this->serv, 0, &dev_info, false);
101889- atomic_set(&this->req_seq_no, 1);
101890- atomic_set(&this->rsp_seq_no, 1);
101891+ atomic_set_unchecked(&this->req_seq_no, 1);
101892+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101893 this->serv.layer.receive = cfctrl_recv;
101894 sprintf(this->serv.layer.name, "ctrl");
101895 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101896@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101897 struct cfctrl_request_info *req)
101898 {
101899 spin_lock_bh(&ctrl->info_list_lock);
101900- atomic_inc(&ctrl->req_seq_no);
101901- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101902+ atomic_inc_unchecked(&ctrl->req_seq_no);
101903+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101904 list_add_tail(&req->list, &ctrl->list);
101905 spin_unlock_bh(&ctrl->info_list_lock);
101906 }
101907@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101908 if (p != first)
101909 pr_warn("Requests are not received in order\n");
101910
101911- atomic_set(&ctrl->rsp_seq_no,
101912+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101913 p->sequence_no);
101914 list_del(&p->list);
101915 goto out;
101916diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101917index 67a4a36..8d28068 100644
101918--- a/net/caif/chnl_net.c
101919+++ b/net/caif/chnl_net.c
101920@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101921 };
101922
101923
101924-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101925+static struct rtnl_link_ops ipcaif_link_ops = {
101926 .kind = "caif",
101927 .priv_size = sizeof(struct chnl_net),
101928 .setup = ipcaif_net_setup,
101929diff --git a/net/can/af_can.c b/net/can/af_can.c
101930index 32d710e..93bcf05 100644
101931--- a/net/can/af_can.c
101932+++ b/net/can/af_can.c
101933@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101934 };
101935
101936 /* notifier block for netdevice event */
101937-static struct notifier_block can_netdev_notifier __read_mostly = {
101938+static struct notifier_block can_netdev_notifier = {
101939 .notifier_call = can_notifier,
101940 };
101941
101942diff --git a/net/can/bcm.c b/net/can/bcm.c
101943index ee9ffd9..dfdf3d4 100644
101944--- a/net/can/bcm.c
101945+++ b/net/can/bcm.c
101946@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101947 }
101948
101949 /* create /proc/net/can-bcm directory */
101950- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101951+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101952 return 0;
101953 }
101954
101955diff --git a/net/can/gw.c b/net/can/gw.c
101956index a6f448e..5902171 100644
101957--- a/net/can/gw.c
101958+++ b/net/can/gw.c
101959@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101960 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101961
101962 static HLIST_HEAD(cgw_list);
101963-static struct notifier_block notifier;
101964
101965 static struct kmem_cache *cgw_cache __read_mostly;
101966
101967@@ -948,6 +947,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101968 return err;
101969 }
101970
101971+static struct notifier_block notifier = {
101972+ .notifier_call = cgw_notifier
101973+};
101974+
101975 static __init int cgw_module_init(void)
101976 {
101977 /* sanitize given module parameter */
101978@@ -963,7 +966,6 @@ static __init int cgw_module_init(void)
101979 return -ENOMEM;
101980
101981 /* set notifier */
101982- notifier.notifier_call = cgw_notifier;
101983 register_netdevice_notifier(&notifier);
101984
101985 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
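The can/gw.c hunk folds the runtime "notifier.notifier_call = cgw_notifier" assignment into a static initializer, so the notifier_block is fully built at compile time and a constification pass could place it in read-only memory. In miniature:

    #include <stdio.h>

    struct notifier_block_like {
        int (*notifier_call)(unsigned long event);
    };

    static int my_notifier(unsigned long event)
    {
        return (int)event;
    }

    /* Fully initialized at compile time, so it can even be const
     * and live in a read-only section: */
    static const struct notifier_block_like notifier = {
        .notifier_call = my_notifier,
    };

    int main(void)
    {
        printf("%d\n", notifier.notifier_call(7));
        return 0;
    }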
101986diff --git a/net/can/proc.c b/net/can/proc.c
101987index 1a19b98..df2b4ec 100644
101988--- a/net/can/proc.c
101989+++ b/net/can/proc.c
101990@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101991 void can_init_proc(void)
101992 {
101993 /* create /proc/net/can directory */
101994- can_dir = proc_mkdir("can", init_net.proc_net);
101995+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101996
101997 if (!can_dir) {
101998 printk(KERN_INFO "can: failed to create /proc/net/can . "
101999diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
102000index a9f4ae4..ee19b92 100644
102001--- a/net/ceph/messenger.c
102002+++ b/net/ceph/messenger.c
102003@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
102004 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
102005
102006 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
102007-static atomic_t addr_str_seq = ATOMIC_INIT(0);
102008+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
102009
102010 static struct page *zero_page; /* used in certain error cases */
102011
102012@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
102013 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
102014 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
102015
102016- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
102017+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
102018 s = addr_str[i];
102019
102020 switch (ss->ss_family) {
102021diff --git a/net/compat.c b/net/compat.c
102022index f7bd286..76ea56a 100644
102023--- a/net/compat.c
102024+++ b/net/compat.c
102025@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
102026
102027 #define CMSG_COMPAT_FIRSTHDR(msg) \
102028 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
102029- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
102030+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
102031 (struct compat_cmsghdr __user *)NULL)
102032
102033 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
102034 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
102035 (ucmlen) <= (unsigned long) \
102036 ((mhdr)->msg_controllen - \
102037- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
102038+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
102039
102040 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
102041 struct compat_cmsghdr __user *cmsg, int cmsg_len)
102042 {
102043 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
102044- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
102045+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
102046 msg->msg_controllen)
102047 return NULL;
102048 return (struct compat_cmsghdr __user *)ptr;
102049@@ -203,7 +203,7 @@ Efault:
102050
102051 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
102052 {
102053- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102054+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102055 struct compat_cmsghdr cmhdr;
102056 struct compat_timeval ctv;
102057 struct compat_timespec cts[3];
102058@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
102059
102060 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
102061 {
102062- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102063+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102064 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
102065 int fdnum = scm->fp->count;
102066 struct file **fp = scm->fp->fp;
102067@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
102068 return -EFAULT;
102069 old_fs = get_fs();
102070 set_fs(KERNEL_DS);
102071- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
102072+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
102073 set_fs(old_fs);
102074
102075 return err;
102076@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
102077 len = sizeof(ktime);
102078 old_fs = get_fs();
102079 set_fs(KERNEL_DS);
102080- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
102081+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
102082 set_fs(old_fs);
102083
102084 if (!err) {
102085@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102086 case MCAST_JOIN_GROUP:
102087 case MCAST_LEAVE_GROUP:
102088 {
102089- struct compat_group_req __user *gr32 = (void *)optval;
102090+ struct compat_group_req __user *gr32 = (void __user *)optval;
102091 struct group_req __user *kgr =
102092 compat_alloc_user_space(sizeof(struct group_req));
102093 u32 interface;
102094@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102095 case MCAST_BLOCK_SOURCE:
102096 case MCAST_UNBLOCK_SOURCE:
102097 {
102098- struct compat_group_source_req __user *gsr32 = (void *)optval;
102099+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
102100 struct group_source_req __user *kgsr = compat_alloc_user_space(
102101 sizeof(struct group_source_req));
102102 u32 interface;
102103@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102104 }
102105 case MCAST_MSFILTER:
102106 {
102107- struct compat_group_filter __user *gf32 = (void *)optval;
102108+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102109 struct group_filter __user *kgf;
102110 u32 interface, fmode, numsrc;
102111
102112@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
102113 char __user *optval, int __user *optlen,
102114 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
102115 {
102116- struct compat_group_filter __user *gf32 = (void *)optval;
102117+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102118 struct group_filter __user *kgf;
102119 int __user *koptlen;
102120 u32 interface, fmode, numsrc;
102121@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
102122
102123 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
102124 return -EINVAL;
102125- if (copy_from_user(a, args, nas[call]))
102126+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
102127 return -EFAULT;
102128 a0 = a[0];
102129 a1 = a[1];
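The compat socketcall hunk rejects nas[call] values larger than the on-stack argument array before copy_from_user, the inbound twin of the ebtables fix above. A sketch with an invented length table:

    #include <stdio.h>
    #include <string.h>

    /* Invented per-call argument-size table, socketcall style. */
    static const size_t nas[] = { 8, 16, 24, 48 };

    static int fetch_args(long *a, size_t a_size,
                          const void *user, unsigned int call)
    {
        if (nas[call] > a_size)     /* defend the on-stack array */
            return -1;              /* -EFAULT in the patch */
        memcpy(a, user, nas[call]); /* copy_from_user analogue */
        return 0;
    }

    int main(void)
    {
        long a[6];
        long user[6] = { 1, 2, 3, 4, 5, 6 };
        printf("%d\n", fetch_args(a, sizeof(a), user, 2));
        return 0;
    }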
102130diff --git a/net/core/datagram.c b/net/core/datagram.c
102131index df493d6..1145766 100644
102132--- a/net/core/datagram.c
102133+++ b/net/core/datagram.c
102134@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
102135 }
102136
102137 kfree_skb(skb);
102138- atomic_inc(&sk->sk_drops);
102139+ atomic_inc_unchecked(&sk->sk_drops);
102140 sk_mem_reclaim_partial(sk);
102141
102142 return err;
102143diff --git a/net/core/dev.c b/net/core/dev.c
102144index 45109b7..6b58f14a 100644
102145--- a/net/core/dev.c
102146+++ b/net/core/dev.c
102147@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
102148 {
102149 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
102150 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
102151- atomic_long_inc(&dev->rx_dropped);
102152+ atomic_long_inc_unchecked(&dev->rx_dropped);
102153 kfree_skb(skb);
102154 return NET_RX_DROP;
102155 }
102156 }
102157
102158 if (unlikely(!is_skb_forwardable(dev, skb))) {
102159- atomic_long_inc(&dev->rx_dropped);
102160+ atomic_long_inc_unchecked(&dev->rx_dropped);
102161 kfree_skb(skb);
102162 return NET_RX_DROP;
102163 }
102164@@ -2987,7 +2987,7 @@ recursion_alert:
102165 drop:
102166 rcu_read_unlock_bh();
102167
102168- atomic_long_inc(&dev->tx_dropped);
102169+ atomic_long_inc_unchecked(&dev->tx_dropped);
102170 kfree_skb_list(skb);
102171 return rc;
102172 out:
102173@@ -3336,7 +3336,7 @@ enqueue:
102174
102175 local_irq_restore(flags);
102176
102177- atomic_long_inc(&skb->dev->rx_dropped);
102178+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102179 kfree_skb(skb);
102180 return NET_RX_DROP;
102181 }
102182@@ -3413,7 +3413,7 @@ int netif_rx_ni(struct sk_buff *skb)
102183 }
102184 EXPORT_SYMBOL(netif_rx_ni);
102185
102186-static void net_tx_action(struct softirq_action *h)
102187+static __latent_entropy void net_tx_action(void)
102188 {
102189 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
102190
102191@@ -3751,7 +3751,7 @@ ncls:
102192 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
102193 } else {
102194 drop:
102195- atomic_long_inc(&skb->dev->rx_dropped);
102196+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102197 kfree_skb(skb);
102198 /* Jamal, now you will not be able to escape explaining
102199 * to me how you were going to use this. :-)
102200@@ -4640,7 +4640,7 @@ out_unlock:
102201 return work;
102202 }
102203
102204-static void net_rx_action(struct softirq_action *h)
102205+static __latent_entropy void net_rx_action(void)
102206 {
102207 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
102208 unsigned long time_limit = jiffies + 2;
102209@@ -6676,8 +6676,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
102210 } else {
102211 netdev_stats_to_stats64(storage, &dev->stats);
102212 }
102213- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
102214- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
102215+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
102216+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
102217 return storage;
102218 }
102219 EXPORT_SYMBOL(dev_get_stats);
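Two independent changes meet in the dev.c hunks above: the rx/tx drop counters move to the unchecked type as described earlier, and the softirq handlers gain __latent_entropy while dropping their unused struct softirq_action argument. The marker drives a GCC plugin (shipped elsewhere in this patch) that mixes a build-time random constant through each instrumented function, cheaply stirring the kernel's entropy estimate from hot paths. A rough model of the idea, with an invented constant:

    /* Toy model only: the real plugin injects per-function constants and
     * accumulates them into a kernel-wide latent_entropy variable. */
    static unsigned long latent_pool;

    #define FN_RANDOM 0x9e3779b97f4a7c15UL   /* stand-in for a plugin-chosen value */

    static void instrumented_handler(void)
    {
        latent_pool ^= FN_RANDOM;
        latent_pool = (latent_pool << 7) |
                      (latent_pool >> (sizeof(latent_pool) * 8 - 7));
        /* ... normal handler body ... */
    }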
102220diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
102221index b94b1d2..da3ed7c 100644
102222--- a/net/core/dev_ioctl.c
102223+++ b/net/core/dev_ioctl.c
102224@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
102225 no_module = !dev;
102226 if (no_module && capable(CAP_NET_ADMIN))
102227 no_module = request_module("netdev-%s", name);
102228- if (no_module && capable(CAP_SYS_MODULE))
102229+ if (no_module && capable(CAP_SYS_MODULE)) {
102230+#ifdef CONFIG_GRKERNSEC_MODHARDEN
102231+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
102232+#else
102233 request_module("%s", name);
102234+#endif
102235+ }
102236 }
102237 EXPORT_SYMBOL(dev_load);
102238
102239diff --git a/net/core/filter.c b/net/core/filter.c
102240index f6bdc2b..76eba8e 100644
102241--- a/net/core/filter.c
102242+++ b/net/core/filter.c
102243@@ -533,7 +533,11 @@ do_pass:
102244
102245 /* Unknown instruction. */
102246 default:
102247- goto err;
102248+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
102249+ fp->code, fp->jt, fp->jf, fp->k);
102250+ kfree(addrs);
102251+ BUG();
102252+ return -EINVAL;
102253 }
102254
102255 insn++;
102256@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
102257 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
102258 int pc, ret = 0;
102259
102260- BUILD_BUG_ON(BPF_MEMWORDS > 16);
102261+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
102262
102263 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
102264 if (!masks)
102265@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
102266 if (!fp)
102267 return -ENOMEM;
102268
102269- memcpy(fp->insns, fprog->filter, fsize);
102270+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
102271
102272 fp->len = fprog->len;
102273 /* Since unattached filters are not copied back to user
102274diff --git a/net/core/flow.c b/net/core/flow.c
102275index 1033725..340f65d 100644
102276--- a/net/core/flow.c
102277+++ b/net/core/flow.c
102278@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
102279 static int flow_entry_valid(struct flow_cache_entry *fle,
102280 struct netns_xfrm *xfrm)
102281 {
102282- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
102283+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
102284 return 0;
102285 if (fle->object && !fle->object->ops->check(fle->object))
102286 return 0;
102287@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
102288 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
102289 fcp->hash_count++;
102290 }
102291- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
102292+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
102293 flo = fle->object;
102294 if (!flo)
102295 goto ret_object;
102296@@ -263,7 +263,7 @@ nocache:
102297 }
102298 flo = resolver(net, key, family, dir, flo, ctx);
102299 if (fle) {
102300- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
102301+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
102302 if (!IS_ERR(flo))
102303 fle->object = flo;
102304 else
102305diff --git a/net/core/neighbour.c b/net/core/neighbour.c
102306index 70fe9e1..926784c 100644
102307--- a/net/core/neighbour.c
102308+++ b/net/core/neighbour.c
102309@@ -2806,7 +2806,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
102310 void __user *buffer, size_t *lenp, loff_t *ppos)
102311 {
102312 int size, ret;
102313- struct ctl_table tmp = *ctl;
102314+ ctl_table_no_const tmp = *ctl;
102315
102316 tmp.extra1 = &zero;
102317 tmp.extra2 = &unres_qlen_max;
102318@@ -2868,7 +2868,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
102319 void __user *buffer,
102320 size_t *lenp, loff_t *ppos)
102321 {
102322- struct ctl_table tmp = *ctl;
102323+ ctl_table_no_const tmp = *ctl;
102324 int ret;
102325
102326 tmp.extra1 = &zero;
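ctl_table_no_const, used in both hunks above and in many sysctl hunks below, exists because the patch's constification places struct ctl_table instances in read-only memory; a handler that legitimately needs to adjust a field or two therefore works on a stack copy declared with the non-const twin type. The shape of the pattern, with invented types standing in for the real ones:

    struct knob {                        /* stands in for struct ctl_table */
        const char *name;
        void *data;
        void *extra1, *extra2;
    };
    typedef struct knob knob_no_const;   /* the patch generates the real twin */

    static int zero, max_val = 100;

    static int clamped_handler(const struct knob *ro_knob)
    {
        knob_no_const tmp = *ro_knob;    /* the read-only original is never written */
        tmp.extra1 = &zero;
        tmp.extra2 = &max_val;
        /* ... hand &tmp to the generic proc handler ... */
        return 0;
    }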
102327diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
102328index 2bf8329..2eb1423 100644
102329--- a/net/core/net-procfs.c
102330+++ b/net/core/net-procfs.c
102331@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
102332 struct rtnl_link_stats64 temp;
102333 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
102334
102335- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102336+ if (gr_proc_is_restricted())
102337+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102338+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102339+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
102340+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
102341+ else
102342+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102343 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102344 dev->name, stats->rx_bytes, stats->rx_packets,
102345 stats->rx_errors,
102346@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
102347 return 0;
102348 }
102349
102350-static const struct seq_operations dev_seq_ops = {
102351+const struct seq_operations dev_seq_ops = {
102352 .start = dev_seq_start,
102353 .next = dev_seq_next,
102354 .stop = dev_seq_stop,
102355@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
102356
102357 static int softnet_seq_open(struct inode *inode, struct file *file)
102358 {
102359- return seq_open(file, &softnet_seq_ops);
102360+ return seq_open_restrict(file, &softnet_seq_ops);
102361 }
102362
102363 static const struct file_operations softnet_seq_fops = {
102364@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
102365 else
102366 seq_printf(seq, "%04x", ntohs(pt->type));
102367
102368+#ifdef CONFIG_GRKERNSEC_HIDESYM
102369+ seq_printf(seq, " %-8s %pf\n",
102370+ pt->dev ? pt->dev->name : "", NULL);
102371+#else
102372 seq_printf(seq, " %-8s %pf\n",
102373 pt->dev ? pt->dev->name : "", pt->func);
102374+#endif
102375 }
102376
102377 return 0;
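The net-procfs hunks show two faces of information hiding: gr_proc_is_restricted(), a grsecurity predicate defined elsewhere in this patch, blanks per-device statistics for unprivileged readers while keeping the column layout intact, and under CONFIG_GRKERNSEC_HIDESYM the %pf conversion is handed NULL so kernel function addresses, which would reveal text-section layout, never reach /proc. Condensed to a sketch:

    /* Keep the output format stable, censor only the values.
     * gr_proc_is_restricted() is assumed from the rest of this patch. */
    static void show_dev_line(struct seq_file *m, const char *name,
                              unsigned long long rx, unsigned long long tx)
    {
        if (gr_proc_is_restricted())
            seq_printf(m, "%6s: %7llu %7llu\n", name, 0ULL, 0ULL);
        else
            seq_printf(m, "%6s: %7llu %7llu\n", name, rx, tx);
    }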
102378diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
102379index f2aa73b..0d1a1ea 100644
102380--- a/net/core/net-sysfs.c
102381+++ b/net/core/net-sysfs.c
102382@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
102383 {
102384 struct net_device *netdev = to_net_dev(dev);
102385 return sprintf(buf, fmt_dec,
102386- atomic_read(&netdev->carrier_changes));
102387+ atomic_read_unchecked(&netdev->carrier_changes));
102388 }
102389 static DEVICE_ATTR_RO(carrier_changes);
102390
102391diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
102392index 70d3450..eb7c528 100644
102393--- a/net/core/net_namespace.c
102394+++ b/net/core/net_namespace.c
102395@@ -663,7 +663,7 @@ static int __register_pernet_operations(struct list_head *list,
102396 int error;
102397 LIST_HEAD(net_exit_list);
102398
102399- list_add_tail(&ops->list, list);
102400+ pax_list_add_tail((struct list_head *)&ops->list, list);
102401 if (ops->init || (ops->id && ops->size)) {
102402 for_each_net(net) {
102403 error = ops_init(ops, net);
102404@@ -676,7 +676,7 @@ static int __register_pernet_operations(struct list_head *list,
102405
102406 out_undo:
102407 /* If I have an error cleanup all namespaces I initialized */
102408- list_del(&ops->list);
102409+ pax_list_del((struct list_head *)&ops->list);
102410 ops_exit_list(ops, &net_exit_list);
102411 ops_free_list(ops, &net_exit_list);
102412 return error;
102413@@ -687,7 +687,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
102414 struct net *net;
102415 LIST_HEAD(net_exit_list);
102416
102417- list_del(&ops->list);
102418+ pax_list_del((struct list_head *)&ops->list);
102419 for_each_net(net)
102420 list_add_tail(&net->exit_list, &net_exit_list);
102421 ops_exit_list(ops, &net_exit_list);
102422@@ -821,7 +821,7 @@ int register_pernet_device(struct pernet_operations *ops)
102423 mutex_lock(&net_mutex);
102424 error = register_pernet_operations(&pernet_list, ops);
102425 if (!error && (first_device == &pernet_list))
102426- first_device = &ops->list;
102427+ first_device = (struct list_head *)&ops->list;
102428 mutex_unlock(&net_mutex);
102429 return error;
102430 }
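pernet_operations structures become read-only under the patch's constification, yet their list_head linkage must still change at (un)registration time. pax_list_add_tail() and pax_list_del(), defined elsewhere in this patch, wrap the ordinary list primitives in a temporary kernel write window, and the (struct list_head *) casts merely strip const from the embedded node. The same discipline covers single-slot writes, as in rtnetlink.c below; in sketch form:

    /* Sketch, assuming the patch's pax_open_kernel()/pax_close_kernel():
     * open the write window, touch exactly the field that must change,
     * close the window again. */
    struct ops {
        void (*hook)(void);
        /* ... */
    };

    static void install_hook(struct ops *ops, void (*fn)(void))
    {
        pax_open_kernel();
        *(void **)&ops->hook = fn;   /* defeats const on this slot only */
        pax_close_kernel();
    }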
102431diff --git a/net/core/netpoll.c b/net/core/netpoll.c
102432index c126a87..10ad89d 100644
102433--- a/net/core/netpoll.c
102434+++ b/net/core/netpoll.c
102435@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102436 struct udphdr *udph;
102437 struct iphdr *iph;
102438 struct ethhdr *eth;
102439- static atomic_t ip_ident;
102440+ static atomic_unchecked_t ip_ident;
102441 struct ipv6hdr *ip6h;
102442
102443 udp_len = len + sizeof(*udph);
102444@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102445 put_unaligned(0x45, (unsigned char *)iph);
102446 iph->tos = 0;
102447 put_unaligned(htons(ip_len), &(iph->tot_len));
102448- iph->id = htons(atomic_inc_return(&ip_ident));
102449+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
102450 iph->frag_off = 0;
102451 iph->ttl = 64;
102452 iph->protocol = IPPROTO_UDP;
102453diff --git a/net/core/pktgen.c b/net/core/pktgen.c
102454index 508155b..fad080f 100644
102455--- a/net/core/pktgen.c
102456+++ b/net/core/pktgen.c
102457@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
102458 pn->net = net;
102459 INIT_LIST_HEAD(&pn->pktgen_threads);
102460 pn->pktgen_exiting = false;
102461- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
102462+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
102463 if (!pn->proc_dir) {
102464 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
102465 return -ENODEV;
102466diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
102467index 7ebed55..378bf34 100644
102468--- a/net/core/rtnetlink.c
102469+++ b/net/core/rtnetlink.c
102470@@ -61,7 +61,7 @@ struct rtnl_link {
102471 rtnl_doit_func doit;
102472 rtnl_dumpit_func dumpit;
102473 rtnl_calcit_func calcit;
102474-};
102475+} __no_const;
102476
102477 static DEFINE_MUTEX(rtnl_mutex);
102478
102479@@ -307,10 +307,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
102480 * to use the ops for creating device. So do not
102481 * fill up dellink as well. That disables rtnl_dellink.
102482 */
102483- if (ops->setup && !ops->dellink)
102484- ops->dellink = unregister_netdevice_queue;
102485+ if (ops->setup && !ops->dellink) {
102486+ pax_open_kernel();
102487+ *(void **)&ops->dellink = unregister_netdevice_queue;
102488+ pax_close_kernel();
102489+ }
102490
102491- list_add_tail(&ops->list, &link_ops);
102492+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
102493 return 0;
102494 }
102495 EXPORT_SYMBOL_GPL(__rtnl_link_register);
102496@@ -357,7 +360,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
102497 for_each_net(net) {
102498 __rtnl_kill_links(net, ops);
102499 }
102500- list_del(&ops->list);
102501+ pax_list_del((struct list_head *)&ops->list);
102502 }
102503 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
102504
102505@@ -1047,7 +1050,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
102506 (dev->ifalias &&
102507 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
102508 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
102509- atomic_read(&dev->carrier_changes)))
102510+ atomic_read_unchecked(&dev->carrier_changes)))
102511 goto nla_put_failure;
102512
102513 if (1) {
102514diff --git a/net/core/scm.c b/net/core/scm.c
102515index 3b6899b..cf36238 100644
102516--- a/net/core/scm.c
102517+++ b/net/core/scm.c
102518@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
102519 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102520 {
102521 struct cmsghdr __user *cm
102522- = (__force struct cmsghdr __user *)msg->msg_control;
102523+ = (struct cmsghdr __force_user *)msg->msg_control;
102524 struct cmsghdr cmhdr;
102525 int cmlen = CMSG_LEN(len);
102526 int err;
102527@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102528 err = -EFAULT;
102529 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
102530 goto out;
102531- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
102532+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
102533 goto out;
102534 cmlen = CMSG_SPACE(len);
102535 if (msg->msg_controllen < cmlen)
102536@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
102537 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102538 {
102539 struct cmsghdr __user *cm
102540- = (__force struct cmsghdr __user*)msg->msg_control;
102541+ = (struct cmsghdr __force_user *)msg->msg_control;
102542
102543 int fdmax = 0;
102544 int fdnum = scm->fp->count;
102545@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102546 if (fdnum < fdmax)
102547 fdmax = fdnum;
102548
102549- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
102550+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
102551 i++, cmfptr++)
102552 {
102553 struct socket *sock;
102554diff --git a/net/core/skbuff.c b/net/core/skbuff.c
102555index 98d45fe..4f9608f 100644
102556--- a/net/core/skbuff.c
102557+++ b/net/core/skbuff.c
102558@@ -2121,7 +2121,7 @@ EXPORT_SYMBOL(__skb_checksum);
102559 __wsum skb_checksum(const struct sk_buff *skb, int offset,
102560 int len, __wsum csum)
102561 {
102562- const struct skb_checksum_ops ops = {
102563+ static const struct skb_checksum_ops ops = {
102564 .update = csum_partial_ext,
102565 .combine = csum_block_add_ext,
102566 };
102567@@ -3361,12 +3361,14 @@ void __init skb_init(void)
102568 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
102569 sizeof(struct sk_buff),
102570 0,
102571- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102572+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102573+ SLAB_NO_SANITIZE,
102574 NULL);
102575 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
102576 sizeof(struct sk_buff_fclones),
102577 0,
102578- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102579+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102580+ SLAB_NO_SANITIZE,
102581 NULL);
102582 }
102583
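SLAB_NO_SANITIZE is the opt-out flag for the patch's memory sanitization feature, which wipes slab objects on free so stale data cannot leak into later allocations; skbs are allocated and freed at such a rate that wiping them would be costly, so the two skb caches are exempted. An exempted cache is created the same way anywhere the flag is used:

    /* Sketch: a hot cache opting out of free-time wiping. SLAB_NO_SANITIZE
     * is introduced by this patch; a stock kernel does not define it. */
    struct example_obj { int payload; };
    static struct kmem_cache *example_cache;

    static void example_caches_init(void)
    {
        example_cache = kmem_cache_create("example_cache",
                                          sizeof(struct example_obj), 0,
                                          SLAB_HWCACHE_ALIGN | SLAB_PANIC |
                                          SLAB_NO_SANITIZE, NULL);
    }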
102584diff --git a/net/core/sock.c b/net/core/sock.c
102585index 71e3e5f..ab90920 100644
102586--- a/net/core/sock.c
102587+++ b/net/core/sock.c
102588@@ -443,7 +443,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102589 struct sk_buff_head *list = &sk->sk_receive_queue;
102590
102591 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
102592- atomic_inc(&sk->sk_drops);
102593+ atomic_inc_unchecked(&sk->sk_drops);
102594 trace_sock_rcvqueue_full(sk, skb);
102595 return -ENOMEM;
102596 }
102597@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102598 return err;
102599
102600 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
102601- atomic_inc(&sk->sk_drops);
102602+ atomic_inc_unchecked(&sk->sk_drops);
102603 return -ENOBUFS;
102604 }
102605
102606@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102607 skb_dst_force(skb);
102608
102609 spin_lock_irqsave(&list->lock, flags);
102610- skb->dropcount = atomic_read(&sk->sk_drops);
102611+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
102612 __skb_queue_tail(list, skb);
102613 spin_unlock_irqrestore(&list->lock, flags);
102614
102615@@ -486,7 +486,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102616 skb->dev = NULL;
102617
102618 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
102619- atomic_inc(&sk->sk_drops);
102620+ atomic_inc_unchecked(&sk->sk_drops);
102621 goto discard_and_relse;
102622 }
102623 if (nested)
102624@@ -504,7 +504,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102625 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
102626 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
102627 bh_unlock_sock(sk);
102628- atomic_inc(&sk->sk_drops);
102629+ atomic_inc_unchecked(&sk->sk_drops);
102630 goto discard_and_relse;
102631 }
102632
102633@@ -910,6 +910,7 @@ set_rcvbuf:
102634 }
102635 break;
102636
102637+#ifndef GRKERNSEC_BPF_HARDEN
102638 case SO_ATTACH_BPF:
102639 ret = -EINVAL;
102640 if (optlen == sizeof(u32)) {
102641@@ -922,7 +923,7 @@ set_rcvbuf:
102642 ret = sk_attach_bpf(ufd, sk);
102643 }
102644 break;
102645-
102646+#endif
102647 case SO_DETACH_FILTER:
102648 ret = sk_detach_filter(sk);
102649 break;
102650@@ -1026,12 +1027,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102651 struct timeval tm;
102652 } v;
102653
102654- int lv = sizeof(int);
102655- int len;
102656+ unsigned int lv = sizeof(int);
102657+ unsigned int len;
102658
102659 if (get_user(len, optlen))
102660 return -EFAULT;
102661- if (len < 0)
102662+ if (len > INT_MAX)
102663 return -EINVAL;
102664
102665 memset(&v, 0, sizeof(v));
102666@@ -1169,11 +1170,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102667
102668 case SO_PEERNAME:
102669 {
102670- char address[128];
102671+ char address[_K_SS_MAXSIZE];
102672
102673 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102674 return -ENOTCONN;
102675- if (lv < len)
102676+ if (lv < len || sizeof address < len)
102677 return -EINVAL;
102678 if (copy_to_user(optval, address, len))
102679 return -EFAULT;
102680@@ -1258,7 +1259,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102681
102682 if (len > lv)
102683 len = lv;
102684- if (copy_to_user(optval, &v, len))
102685+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102686 return -EFAULT;
102687 lenout:
102688 if (put_user(len, optlen))
102689@@ -2375,7 +2376,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102690 */
102691 smp_wmb();
102692 atomic_set(&sk->sk_refcnt, 1);
102693- atomic_set(&sk->sk_drops, 0);
102694+ atomic_set_unchecked(&sk->sk_drops, 0);
102695 }
102696 EXPORT_SYMBOL(sock_init_data);
102697
102698@@ -2503,6 +2504,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102699 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102700 int level, int type)
102701 {
102702+ struct sock_extended_err ee;
102703 struct sock_exterr_skb *serr;
102704 struct sk_buff *skb;
102705 int copied, err;
102706@@ -2524,7 +2526,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102707 sock_recv_timestamp(msg, sk, skb);
102708
102709 serr = SKB_EXT_ERR(skb);
102710- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102711+ ee = serr->ee;
102712+ put_cmsg(msg, level, type, sizeof ee, &ee);
102713
102714 msg->msg_flags |= MSG_ERRQUEUE;
102715 err = copied;
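The sock.c hunks tighten sock_getsockopt in three moves: the lengths become unsigned so a negative value cannot slip past the comparisons, each copy_to_user is bounded by the size of the kernel-side object as well as by the caller's length, and sock_recv_errqueue duplicates the extended error onto the stack before put_cmsg instead of exporting straight from the live skb. The combined copy-out discipline, reduced to one helper (illustrative, not a kernel API):

    /* Bound by both the source object and the caller's length, and copy
     * out of a stack duplicate of the long-lived kernel object. */
    static int bounded_copy_out(void __user *dst, unsigned int len,
                                const void *obj, unsigned int objsz)
    {
        unsigned char tmp[128];

        if (len > objsz || objsz > sizeof(tmp))
            return -EINVAL;
        memcpy(tmp, obj, len);
        return copy_to_user(dst, tmp, len) ? -EFAULT : 0;
    }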
102716diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102717index ad704c7..ca48aff 100644
102718--- a/net/core/sock_diag.c
102719+++ b/net/core/sock_diag.c
102720@@ -9,26 +9,33 @@
102721 #include <linux/inet_diag.h>
102722 #include <linux/sock_diag.h>
102723
102724-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102725+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102726 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102727 static DEFINE_MUTEX(sock_diag_table_mutex);
102728
102729 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102730 {
102731+#ifndef CONFIG_GRKERNSEC_HIDESYM
102732 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102733 cookie[1] != INET_DIAG_NOCOOKIE) &&
102734 ((u32)(unsigned long)sk != cookie[0] ||
102735 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102736 return -ESTALE;
102737 else
102738+#endif
102739 return 0;
102740 }
102741 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102742
102743 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102744 {
102745+#ifdef CONFIG_GRKERNSEC_HIDESYM
102746+ cookie[0] = 0;
102747+ cookie[1] = 0;
102748+#else
102749 cookie[0] = (u32)(unsigned long)sk;
102750 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102751+#endif
102752 }
102753 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102754
102755@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102756 mutex_lock(&sock_diag_table_mutex);
102757 if (sock_diag_handlers[hndl->family])
102758 err = -EBUSY;
102759- else
102760+ else {
102761+ pax_open_kernel();
102762 sock_diag_handlers[hndl->family] = hndl;
102763+ pax_close_kernel();
102764+ }
102765 mutex_unlock(&sock_diag_table_mutex);
102766
102767 return err;
102768@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102769
102770 mutex_lock(&sock_diag_table_mutex);
102771 BUG_ON(sock_diag_handlers[family] != hnld);
102772+ pax_open_kernel();
102773 sock_diag_handlers[family] = NULL;
102774+ pax_close_kernel();
102775 mutex_unlock(&sock_diag_table_mutex);
102776 }
102777 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102778diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102779index 8ce351f..2c388f7 100644
102780--- a/net/core/sysctl_net_core.c
102781+++ b/net/core/sysctl_net_core.c
102782@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102783 {
102784 unsigned int orig_size, size;
102785 int ret, i;
102786- struct ctl_table tmp = {
102787+ ctl_table_no_const tmp = {
102788 .data = &size,
102789 .maxlen = sizeof(size),
102790 .mode = table->mode
102791@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102792 void __user *buffer, size_t *lenp, loff_t *ppos)
102793 {
102794 char id[IFNAMSIZ];
102795- struct ctl_table tbl = {
102796+ ctl_table_no_const tbl = {
102797 .data = id,
102798 .maxlen = IFNAMSIZ,
102799 };
102800@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102801 static int proc_do_rss_key(struct ctl_table *table, int write,
102802 void __user *buffer, size_t *lenp, loff_t *ppos)
102803 {
102804- struct ctl_table fake_table;
102805+ ctl_table_no_const fake_table;
102806 char buf[NETDEV_RSS_KEY_LEN * 3];
102807
102808 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102809@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
102810 .mode = 0444,
102811 .proc_handler = proc_do_rss_key,
102812 },
102813-#ifdef CONFIG_BPF_JIT
102814+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102815 {
102816 .procname = "bpf_jit_enable",
102817 .data = &bpf_jit_enable,
102818@@ -411,13 +411,12 @@ static struct ctl_table netns_core_table[] = {
102819
102820 static __net_init int sysctl_core_net_init(struct net *net)
102821 {
102822- struct ctl_table *tbl;
102823+ ctl_table_no_const *tbl = NULL;
102824
102825 net->core.sysctl_somaxconn = SOMAXCONN;
102826
102827- tbl = netns_core_table;
102828 if (!net_eq(net, &init_net)) {
102829- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102830+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102831 if (tbl == NULL)
102832 goto err_dup;
102833
102834@@ -427,17 +426,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102835 if (net->user_ns != &init_user_ns) {
102836 tbl[0].procname = NULL;
102837 }
102838- }
102839-
102840- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102841+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102842+ } else
102843+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102844 if (net->core.sysctl_hdr == NULL)
102845 goto err_reg;
102846
102847 return 0;
102848
102849 err_reg:
102850- if (tbl != netns_core_table)
102851- kfree(tbl);
102852+ kfree(tbl);
102853 err_dup:
102854 return -ENOMEM;
102855 }
102856@@ -452,7 +450,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102857 kfree(tbl);
102858 }
102859
102860-static __net_initdata struct pernet_operations sysctl_core_ops = {
102861+static __net_initconst struct pernet_operations sysctl_core_ops = {
102862 .init = sysctl_core_net_init,
102863 .exit = sysctl_core_net_exit,
102864 };
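This sysctl_core_net_init rewrite is the template for a transformation repeated below (6lowpan reassembly, devinet, ip_fragment, route, and others): the static table stays const and is registered directly for init_net, a mutable kmemdup copy is made only for other namespaces, and the error path collapses to an unconditional kfree because kfree(NULL) is a no-op. Boiled down, with placeholder names for each file's own table and header field:

    static int register_ns_sysctls(struct net *net)
    {
        ctl_table_no_const *tbl = NULL;
        struct ctl_table_header *hdr;

        if (!net_eq(net, &init_net)) {
            tbl = kmemdup(template_table, sizeof(template_table), GFP_KERNEL);
            if (!tbl)
                return -ENOMEM;
            /* per-namespace tweaks go here, on the writable copy */
            hdr = register_net_sysctl(net, "net/example", tbl);
        } else {
            hdr = register_net_sysctl(net, "net/example", template_table);
        }
        if (!hdr) {
            kfree(tbl);              /* NULL for init_net: harmless */
            return -ENOMEM;
        }
        return 0;
    }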
102865diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102866index 8102286..a0c2755 100644
102867--- a/net/decnet/af_decnet.c
102868+++ b/net/decnet/af_decnet.c
102869@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102870 .sysctl_rmem = sysctl_decnet_rmem,
102871 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102872 .obj_size = sizeof(struct dn_sock),
102873+ .slab_flags = SLAB_USERCOPY,
102874 };
102875
102876 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102877diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102878index b2c26b0..41f803e 100644
102879--- a/net/decnet/dn_dev.c
102880+++ b/net/decnet/dn_dev.c
102881@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102882 .extra1 = &min_t3,
102883 .extra2 = &max_t3
102884 },
102885- {0}
102886+ { }
102887 },
102888 };
102889
102890diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102891index 5325b54..a0d4d69 100644
102892--- a/net/decnet/sysctl_net_decnet.c
102893+++ b/net/decnet/sysctl_net_decnet.c
102894@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102895
102896 if (len > *lenp) len = *lenp;
102897
102898- if (copy_to_user(buffer, addr, len))
102899+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102900 return -EFAULT;
102901
102902 *lenp = len;
102903@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102904
102905 if (len > *lenp) len = *lenp;
102906
102907- if (copy_to_user(buffer, devname, len))
102908+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102909 return -EFAULT;
102910
102911 *lenp = len;
102912diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102913index a2c7e4c..3dc9f67 100644
102914--- a/net/hsr/hsr_netlink.c
102915+++ b/net/hsr/hsr_netlink.c
102916@@ -102,7 +102,7 @@ nla_put_failure:
102917 return -EMSGSIZE;
102918 }
102919
102920-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102921+static struct rtnl_link_ops hsr_link_ops = {
102922 .kind = "hsr",
102923 .maxtype = IFLA_HSR_MAX,
102924 .policy = hsr_policy,
102925diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
102926index 055fbb7..c0dbe60 100644
102927--- a/net/ieee802154/6lowpan/core.c
102928+++ b/net/ieee802154/6lowpan/core.c
102929@@ -217,7 +217,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102930 dev_put(real_dev);
102931 }
102932
102933-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102934+static struct rtnl_link_ops lowpan_link_ops = {
102935 .kind = "lowpan",
102936 .priv_size = sizeof(struct lowpan_dev_info),
102937 .setup = lowpan_setup,
102938diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
102939index f46e4d1..30231f1 100644
102940--- a/net/ieee802154/6lowpan/reassembly.c
102941+++ b/net/ieee802154/6lowpan/reassembly.c
102942@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102943
102944 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102945 {
102946- struct ctl_table *table;
102947+ ctl_table_no_const *table = NULL;
102948 struct ctl_table_header *hdr;
102949 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102950 net_ieee802154_lowpan(net);
102951
102952- table = lowpan_frags_ns_ctl_table;
102953 if (!net_eq(net, &init_net)) {
102954- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102955+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102956 GFP_KERNEL);
102957 if (table == NULL)
102958 goto err_alloc;
102959@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102960 /* Don't export sysctls to unprivileged users */
102961 if (net->user_ns != &init_user_ns)
102962 table[0].procname = NULL;
102963- }
102964-
102965- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102966+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102967+ } else
102968+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102969 if (hdr == NULL)
102970 goto err_reg;
102971
102972@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102973 return 0;
102974
102975 err_reg:
102976- if (!net_eq(net, &init_net))
102977- kfree(table);
102978+ kfree(table);
102979 err_alloc:
102980 return -ENOMEM;
102981 }
102982diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102983index d2e49ba..f78e8aa 100644
102984--- a/net/ipv4/af_inet.c
102985+++ b/net/ipv4/af_inet.c
102986@@ -1390,7 +1390,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102987 return ip_recv_error(sk, msg, len, addr_len);
102988 #if IS_ENABLED(CONFIG_IPV6)
102989 if (sk->sk_family == AF_INET6)
102990- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102991+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102992 #endif
102993 return -EINVAL;
102994 }
102995diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102996index 3a8985c..9d2a870 100644
102997--- a/net/ipv4/devinet.c
102998+++ b/net/ipv4/devinet.c
102999@@ -69,7 +69,8 @@
103000
103001 static struct ipv4_devconf ipv4_devconf = {
103002 .data = {
103003- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
103004+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
103005+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
103006 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
103007 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
103008 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
103009@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
103010
103011 static struct ipv4_devconf ipv4_devconf_dflt = {
103012 .data = {
103013- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
103014+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
103015+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
103016 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
103017 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
103018 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
103019@@ -1549,7 +1551,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
103020 idx = 0;
103021 head = &net->dev_index_head[h];
103022 rcu_read_lock();
103023- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103024+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103025 net->dev_base_seq;
103026 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103027 if (idx < s_idx)
103028@@ -1868,7 +1870,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
103029 idx = 0;
103030 head = &net->dev_index_head[h];
103031 rcu_read_lock();
103032- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103033+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103034 net->dev_base_seq;
103035 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103036 if (idx < s_idx)
103037@@ -2103,7 +2105,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
103038 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
103039 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
103040
103041-static struct devinet_sysctl_table {
103042+static const struct devinet_sysctl_table {
103043 struct ctl_table_header *sysctl_header;
103044 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
103045 } devinet_sysctl = {
103046@@ -2235,7 +2237,7 @@ static __net_init int devinet_init_net(struct net *net)
103047 int err;
103048 struct ipv4_devconf *all, *dflt;
103049 #ifdef CONFIG_SYSCTL
103050- struct ctl_table *tbl = ctl_forward_entry;
103051+ ctl_table_no_const *tbl = NULL;
103052 struct ctl_table_header *forw_hdr;
103053 #endif
103054
103055@@ -2253,7 +2255,7 @@ static __net_init int devinet_init_net(struct net *net)
103056 goto err_alloc_dflt;
103057
103058 #ifdef CONFIG_SYSCTL
103059- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
103060+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
103061 if (tbl == NULL)
103062 goto err_alloc_ctl;
103063
103064@@ -2273,7 +2275,10 @@ static __net_init int devinet_init_net(struct net *net)
103065 goto err_reg_dflt;
103066
103067 err = -ENOMEM;
103068- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103069+ if (!net_eq(net, &init_net))
103070+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103071+ else
103072+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
103073 if (forw_hdr == NULL)
103074 goto err_reg_ctl;
103075 net->ipv4.forw_hdr = forw_hdr;
103076@@ -2289,8 +2294,7 @@ err_reg_ctl:
103077 err_reg_dflt:
103078 __devinet_sysctl_unregister(all);
103079 err_reg_all:
103080- if (tbl != ctl_forward_entry)
103081- kfree(tbl);
103082+ kfree(tbl);
103083 err_alloc_ctl:
103084 #endif
103085 if (dflt != &ipv4_devconf_dflt)
103086diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
103087index 23b9b3e..60cf0c4 100644
103088--- a/net/ipv4/fib_frontend.c
103089+++ b/net/ipv4/fib_frontend.c
103090@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
103091 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103092 fib_sync_up(dev);
103093 #endif
103094- atomic_inc(&net->ipv4.dev_addr_genid);
103095+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103096 rt_cache_flush(dev_net(dev));
103097 break;
103098 case NETDEV_DOWN:
103099 fib_del_ifaddr(ifa, NULL);
103100- atomic_inc(&net->ipv4.dev_addr_genid);
103101+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103102 if (ifa->ifa_dev->ifa_list == NULL) {
103103 /* Last address was deleted from this interface.
103104 * Disable IP.
103105@@ -1063,7 +1063,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
103106 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103107 fib_sync_up(dev);
103108 #endif
103109- atomic_inc(&net->ipv4.dev_addr_genid);
103110+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103111 rt_cache_flush(net);
103112 break;
103113 case NETDEV_DOWN:
103114diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
103115index 1e2090e..351a724 100644
103116--- a/net/ipv4/fib_semantics.c
103117+++ b/net/ipv4/fib_semantics.c
103118@@ -753,7 +753,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
103119 nh->nh_saddr = inet_select_addr(nh->nh_dev,
103120 nh->nh_gw,
103121 nh->nh_parent->fib_scope);
103122- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
103123+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
103124
103125 return nh->nh_saddr;
103126 }
103127diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
103128index ff069f6..335e752 100644
103129--- a/net/ipv4/fou.c
103130+++ b/net/ipv4/fou.c
103131@@ -771,12 +771,12 @@ EXPORT_SYMBOL(gue_build_header);
103132
103133 #ifdef CONFIG_NET_FOU_IP_TUNNELS
103134
103135-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
103136+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
103137 .encap_hlen = fou_encap_hlen,
103138 .build_header = fou_build_header,
103139 };
103140
103141-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
103142+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
103143 .encap_hlen = gue_encap_hlen,
103144 .build_header = gue_build_header,
103145 };
103146diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
103147index 9111a4e..3576905 100644
103148--- a/net/ipv4/inet_hashtables.c
103149+++ b/net/ipv4/inet_hashtables.c
103150@@ -18,6 +18,7 @@
103151 #include <linux/sched.h>
103152 #include <linux/slab.h>
103153 #include <linux/wait.h>
103154+#include <linux/security.h>
103155
103156 #include <net/inet_connection_sock.h>
103157 #include <net/inet_hashtables.h>
103158@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
103159 return inet_ehashfn(net, laddr, lport, faddr, fport);
103160 }
103161
103162+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
103163+
103164 /*
103165 * Allocate and initialize a new local port bind bucket.
103166 * The bindhash mutex for snum's hash chain must be held here.
103167@@ -554,6 +557,8 @@ ok:
103168 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
103169 spin_unlock(&head->lock);
103170
103171+ gr_update_task_in_ip_table(inet_sk(sk));
103172+
103173 if (tw) {
103174 inet_twsk_deschedule(tw, death_row);
103175 while (twrefcnt) {
103176diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
103177index 241afd7..31b95d5 100644
103178--- a/net/ipv4/inetpeer.c
103179+++ b/net/ipv4/inetpeer.c
103180@@ -461,7 +461,7 @@ relookup:
103181 if (p) {
103182 p->daddr = *daddr;
103183 atomic_set(&p->refcnt, 1);
103184- atomic_set(&p->rid, 0);
103185+ atomic_set_unchecked(&p->rid, 0);
103186 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
103187 p->rate_tokens = 0;
103188 /* 60*HZ is arbitrary, but chosen enough high so that the first
103189diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
103190index 145a50c..5dd8cc5 100644
103191--- a/net/ipv4/ip_fragment.c
103192+++ b/net/ipv4/ip_fragment.c
103193@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
103194 return 0;
103195
103196 start = qp->rid;
103197- end = atomic_inc_return(&peer->rid);
103198+ end = atomic_inc_return_unchecked(&peer->rid);
103199 qp->rid = end;
103200
103201 rc = qp->q.fragments && (end - start) > max;
103202@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
103203
103204 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103205 {
103206- struct ctl_table *table;
103207+ ctl_table_no_const *table = NULL;
103208 struct ctl_table_header *hdr;
103209
103210- table = ip4_frags_ns_ctl_table;
103211 if (!net_eq(net, &init_net)) {
103212- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103213+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103214 if (table == NULL)
103215 goto err_alloc;
103216
103217@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103218 /* Don't export sysctls to unprivileged users */
103219 if (net->user_ns != &init_user_ns)
103220 table[0].procname = NULL;
103221- }
103222+ hdr = register_net_sysctl(net, "net/ipv4", table);
103223+ } else
103224+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
103225
103226- hdr = register_net_sysctl(net, "net/ipv4", table);
103227 if (hdr == NULL)
103228 goto err_reg;
103229
103230@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103231 return 0;
103232
103233 err_reg:
103234- if (!net_eq(net, &init_net))
103235- kfree(table);
103236+ kfree(table);
103237 err_alloc:
103238 return -ENOMEM;
103239 }
103240diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
103241index 6207275f..00323a2 100644
103242--- a/net/ipv4/ip_gre.c
103243+++ b/net/ipv4/ip_gre.c
103244@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
103245 module_param(log_ecn_error, bool, 0644);
103246 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103247
103248-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
103249+static struct rtnl_link_ops ipgre_link_ops;
103250 static int ipgre_tunnel_init(struct net_device *dev);
103251
103252 static int ipgre_net_id __read_mostly;
103253@@ -817,7 +817,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
103254 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
103255 };
103256
103257-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103258+static struct rtnl_link_ops ipgre_link_ops = {
103259 .kind = "gre",
103260 .maxtype = IFLA_GRE_MAX,
103261 .policy = ipgre_policy,
103262@@ -832,7 +832,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103263 .get_link_net = ip_tunnel_get_link_net,
103264 };
103265
103266-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
103267+static struct rtnl_link_ops ipgre_tap_ops = {
103268 .kind = "gretap",
103269 .maxtype = IFLA_GRE_MAX,
103270 .policy = ipgre_policy,
103271diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
103272index 3d4da2c..40f9c29 100644
103273--- a/net/ipv4/ip_input.c
103274+++ b/net/ipv4/ip_input.c
103275@@ -147,6 +147,10 @@
103276 #include <linux/mroute.h>
103277 #include <linux/netlink.h>
103278
103279+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103280+extern int grsec_enable_blackhole;
103281+#endif
103282+
103283 /*
103284 * Process Router Attention IP option (RFC 2113)
103285 */
103286@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
103287 if (!raw) {
103288 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
103289 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
103290+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103291+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103292+#endif
103293 icmp_send(skb, ICMP_DEST_UNREACH,
103294 ICMP_PROT_UNREACH, 0);
103295 }
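CONFIG_GRKERNSEC_BLACKHOLE suppresses the ICMP errors and TCP resets that make port and protocol scans informative. The gate above only answers with ICMP_PROT_UNREACH when the feature is disabled at runtime or the packet arrived over loopback, where silence would confuse local tooling. The gate in isolation:

    /* grsec_enable_blackhole is the patch's runtime sysctl flag. */
    static void proto_unreach_reply(struct sk_buff *skb)
    {
    #ifdef CONFIG_GRKERNSEC_BLACKHOLE
        if (grsec_enable_blackhole && !(skb->dev->flags & IFF_LOOPBACK))
            return;              /* stay silent: deny the scanner its oracle */
    #endif
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
    }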
103296diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
103297index 5cd9927..8610b9f 100644
103298--- a/net/ipv4/ip_sockglue.c
103299+++ b/net/ipv4/ip_sockglue.c
103300@@ -1254,7 +1254,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103301 len = min_t(unsigned int, len, opt->optlen);
103302 if (put_user(len, optlen))
103303 return -EFAULT;
103304- if (copy_to_user(optval, opt->__data, len))
103305+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
103306+ copy_to_user(optval, opt->__data, len))
103307 return -EFAULT;
103308 return 0;
103309 }
103310@@ -1388,7 +1389,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103311 if (sk->sk_type != SOCK_STREAM)
103312 return -ENOPROTOOPT;
103313
103314- msg.msg_control = (__force void *) optval;
103315+ msg.msg_control = (__force_kernel void *) optval;
103316 msg.msg_controllen = len;
103317 msg.msg_flags = flags;
103318
103319diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
103320index 94efe14..1453fcc 100644
103321--- a/net/ipv4/ip_vti.c
103322+++ b/net/ipv4/ip_vti.c
103323@@ -45,7 +45,7 @@
103324 #include <net/net_namespace.h>
103325 #include <net/netns/generic.h>
103326
103327-static struct rtnl_link_ops vti_link_ops __read_mostly;
103328+static struct rtnl_link_ops vti_link_ops;
103329
103330 static int vti_net_id __read_mostly;
103331 static int vti_tunnel_init(struct net_device *dev);
103332@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
103333 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
103334 };
103335
103336-static struct rtnl_link_ops vti_link_ops __read_mostly = {
103337+static struct rtnl_link_ops vti_link_ops = {
103338 .kind = "vti",
103339 .maxtype = IFLA_VTI_MAX,
103340 .policy = vti_policy,
103341diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
103342index b26376e..fc3d733 100644
103343--- a/net/ipv4/ipconfig.c
103344+++ b/net/ipv4/ipconfig.c
103345@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
103346
103347 mm_segment_t oldfs = get_fs();
103348 set_fs(get_ds());
103349- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103350+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103351 set_fs(oldfs);
103352 return res;
103353 }
103354@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
103355
103356 mm_segment_t oldfs = get_fs();
103357 set_fs(get_ds());
103358- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103359+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103360 set_fs(oldfs);
103361 return res;
103362 }
103363@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
103364
103365 mm_segment_t oldfs = get_fs();
103366 set_fs(get_ds());
103367- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
103368+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
103369 set_fs(oldfs);
103370 return res;
103371 }
103372diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
103373index 915d215..48d1db7 100644
103374--- a/net/ipv4/ipip.c
103375+++ b/net/ipv4/ipip.c
103376@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103377 static int ipip_net_id __read_mostly;
103378
103379 static int ipip_tunnel_init(struct net_device *dev);
103380-static struct rtnl_link_ops ipip_link_ops __read_mostly;
103381+static struct rtnl_link_ops ipip_link_ops;
103382
103383 static int ipip_err(struct sk_buff *skb, u32 info)
103384 {
103385@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
103386 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
103387 };
103388
103389-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
103390+static struct rtnl_link_ops ipip_link_ops = {
103391 .kind = "ipip",
103392 .maxtype = IFLA_IPTUN_MAX,
103393 .policy = ipip_policy,
103394diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
103395index f95b6f9..2ee2097 100644
103396--- a/net/ipv4/netfilter/arp_tables.c
103397+++ b/net/ipv4/netfilter/arp_tables.c
103398@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
103399 #endif
103400
103401 static int get_info(struct net *net, void __user *user,
103402- const int *len, int compat)
103403+ int len, int compat)
103404 {
103405 char name[XT_TABLE_MAXNAMELEN];
103406 struct xt_table *t;
103407 int ret;
103408
103409- if (*len != sizeof(struct arpt_getinfo)) {
103410- duprintf("length %u != %Zu\n", *len,
103411+ if (len != sizeof(struct arpt_getinfo)) {
103412+ duprintf("length %u != %Zu\n", len,
103413 sizeof(struct arpt_getinfo));
103414 return -EINVAL;
103415 }
103416@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
103417 info.size = private->size;
103418 strcpy(info.name, name);
103419
103420- if (copy_to_user(user, &info, *len) != 0)
103421+ if (copy_to_user(user, &info, len) != 0)
103422 ret = -EFAULT;
103423 else
103424 ret = 0;
103425@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
103426
103427 switch (cmd) {
103428 case ARPT_SO_GET_INFO:
103429- ret = get_info(sock_net(sk), user, len, 1);
103430+ ret = get_info(sock_net(sk), user, *len, 1);
103431 break;
103432 case ARPT_SO_GET_ENTRIES:
103433 ret = compat_get_entries(sock_net(sk), user, len);
103434@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
103435
103436 switch (cmd) {
103437 case ARPT_SO_GET_INFO:
103438- ret = get_info(sock_net(sk), user, len, 0);
103439+ ret = get_info(sock_net(sk), user, *len, 0);
103440 break;
103441
103442 case ARPT_SO_GET_ENTRIES:
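Passing the length to get_info by value instead of through const int *len means the function validates and later uses a single copy of the number; with a pointer, every dereference is a fresh read, which is the shape double-fetch bugs take. The pointer here references kernel memory, so the by-value form is best read as making single-fetch a structural property rather than fixing a live race. The hazard and the fix side by side (illustrative, not the xtables functions themselves):

    /* Hazardous shape: the two reads through len may disagree. */
    static int info_out_racy(const int *len, void __user *dst,
                             const void *info, int infosz)
    {
        if (*len != infosz)                                  /* fetch #1 */
            return -EINVAL;
        return copy_to_user(dst, info, *len) ? -EFAULT : 0;  /* fetch #2 */
    }

    /* Fixed shape: one value, validated and used identically. */
    static int info_out(int len, void __user *dst,
                        const void *info, int infosz)
    {
        if (len != infosz)
            return -EINVAL;
        return copy_to_user(dst, info, len) ? -EFAULT : 0;
    }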
103443diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
103444index cf5e82f..75a20f5 100644
103445--- a/net/ipv4/netfilter/ip_tables.c
103446+++ b/net/ipv4/netfilter/ip_tables.c
103447@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
103448 #endif
103449
103450 static int get_info(struct net *net, void __user *user,
103451- const int *len, int compat)
103452+ int len, int compat)
103453 {
103454 char name[XT_TABLE_MAXNAMELEN];
103455 struct xt_table *t;
103456 int ret;
103457
103458- if (*len != sizeof(struct ipt_getinfo)) {
103459- duprintf("length %u != %zu\n", *len,
103460+ if (len != sizeof(struct ipt_getinfo)) {
103461+ duprintf("length %u != %zu\n", len,
103462 sizeof(struct ipt_getinfo));
103463 return -EINVAL;
103464 }
103465@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
103466 info.size = private->size;
103467 strcpy(info.name, name);
103468
103469- if (copy_to_user(user, &info, *len) != 0)
103470+ if (copy_to_user(user, &info, len) != 0)
103471 ret = -EFAULT;
103472 else
103473 ret = 0;
103474@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103475
103476 switch (cmd) {
103477 case IPT_SO_GET_INFO:
103478- ret = get_info(sock_net(sk), user, len, 1);
103479+ ret = get_info(sock_net(sk), user, *len, 1);
103480 break;
103481 case IPT_SO_GET_ENTRIES:
103482 ret = compat_get_entries(sock_net(sk), user, len);
103483@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103484
103485 switch (cmd) {
103486 case IPT_SO_GET_INFO:
103487- ret = get_info(sock_net(sk), user, len, 0);
103488+ ret = get_info(sock_net(sk), user, *len, 0);
103489 break;
103490
103491 case IPT_SO_GET_ENTRIES:
103492diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103493index e90f83a..3e6acca 100644
103494--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
103495+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103496@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
103497 spin_lock_init(&cn->lock);
103498
103499 #ifdef CONFIG_PROC_FS
103500- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
103501+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
103502 if (!cn->procdir) {
103503 pr_err("Unable to proc dir entry\n");
103504 return -ENOMEM;
103505diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
103506index 208d543..ab6c0ba 100644
103507--- a/net/ipv4/ping.c
103508+++ b/net/ipv4/ping.c
103509@@ -59,7 +59,7 @@ struct ping_table {
103510 };
103511
103512 static struct ping_table ping_table;
103513-struct pingv6_ops pingv6_ops;
103514+struct pingv6_ops *pingv6_ops;
103515 EXPORT_SYMBOL_GPL(pingv6_ops);
103516
103517 static u16 ping_port_rover;
103518@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
103519 if (sk_hashed(sk)) {
103520 write_lock_bh(&ping_table.lock);
103521 hlist_nulls_del(&sk->sk_nulls_node);
103522+ sk_nulls_node_init(&sk->sk_nulls_node);
103523 sock_put(sk);
103524 isk->inet_num = 0;
103525 isk->inet_sport = 0;
103526@@ -358,7 +359,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
103527 return -ENODEV;
103528 }
103529 }
103530- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
103531+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
103532 scoped);
103533 rcu_read_unlock();
103534
103535@@ -566,7 +567,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103536 }
103537 #if IS_ENABLED(CONFIG_IPV6)
103538 } else if (skb->protocol == htons(ETH_P_IPV6)) {
103539- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
103540+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
103541 #endif
103542 }
103543
103544@@ -584,7 +585,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103545 info, (u8 *)icmph);
103546 #if IS_ENABLED(CONFIG_IPV6)
103547 } else if (family == AF_INET6) {
103548- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
103549+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
103550 info, (u8 *)icmph);
103551 #endif
103552 }
103553@@ -918,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103554 }
103555
103556 if (inet6_sk(sk)->rxopt.all)
103557- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
103558+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
103559 if (skb->protocol == htons(ETH_P_IPV6) &&
103560 inet6_sk(sk)->rxopt.all)
103561- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
103562+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
103563 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
103564 ip_cmsg_recv(msg, skb);
103565 #endif
103566@@ -1116,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
103567 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103568 0, sock_i_ino(sp),
103569 atomic_read(&sp->sk_refcnt), sp,
103570- atomic_read(&sp->sk_drops));
103571+ atomic_read_unchecked(&sp->sk_drops));
103572 }
103573
103574 static int ping_v4_seq_show(struct seq_file *seq, void *v)
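The one-line sk_nulls_node_init() addition in ping_unhash matches the upstream fix for CVE-2015-3636: hlist_nulls_del() leaves the node's pprev pointing at list poison, so a socket unhashed twice dereferences it, the use-after-free exploited on Android as "pingpong root". Re-initializing the node makes repeated unhashing idempotent:

    /* Shape of the fix: delete, then reset the node so a second unhash
     * sees a clean, unhashed node instead of poison. */
    static void unhash_sock_node(struct sock *sk)
    {
        hlist_nulls_del(&sk->sk_nulls_node);
        sk_nulls_node_init(&sk->sk_nulls_node);   /* pprev back to NULL */
    }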
103575diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
103576index f027a70..2e64edc 100644
103577--- a/net/ipv4/raw.c
103578+++ b/net/ipv4/raw.c
103579@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
103580 int raw_rcv(struct sock *sk, struct sk_buff *skb)
103581 {
103582 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
103583- atomic_inc(&sk->sk_drops);
103584+ atomic_inc_unchecked(&sk->sk_drops);
103585 kfree_skb(skb);
103586 return NET_RX_DROP;
103587 }
103588@@ -773,16 +773,20 @@ static int raw_init(struct sock *sk)
103589
103590 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
103591 {
103592+ struct icmp_filter filter;
103593+
103594 if (optlen > sizeof(struct icmp_filter))
103595 optlen = sizeof(struct icmp_filter);
103596- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
103597+ if (copy_from_user(&filter, optval, optlen))
103598 return -EFAULT;
103599+ raw_sk(sk)->filter = filter;
103600 return 0;
103601 }
103602
103603 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
103604 {
103605 int len, ret = -EFAULT;
103606+ struct icmp_filter filter;
103607
103608 if (get_user(len, optlen))
103609 goto out;
103610@@ -792,8 +796,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
103611 if (len > sizeof(struct icmp_filter))
103612 len = sizeof(struct icmp_filter);
103613 ret = -EFAULT;
103614- if (put_user(len, optlen) ||
103615- copy_to_user(optval, &raw_sk(sk)->filter, len))
103616+ filter = raw_sk(sk)->filter;
103617+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
103618 goto out;
103619 ret = 0;
103620 out: return ret;
103621@@ -1022,7 +1026,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
103622 0, 0L, 0,
103623 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
103624 0, sock_i_ino(sp),
103625- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
103626+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
103627 }
103628
103629 static int raw_seq_show(struct seq_file *seq, void *v)
103630diff --git a/net/ipv4/route.c b/net/ipv4/route.c
103631index ad50643..53b7b44 100644
103632--- a/net/ipv4/route.c
103633+++ b/net/ipv4/route.c
103634@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
103635
103636 static int rt_cache_seq_open(struct inode *inode, struct file *file)
103637 {
103638- return seq_open(file, &rt_cache_seq_ops);
103639+ return seq_open_restrict(file, &rt_cache_seq_ops);
103640 }
103641
103642 static const struct file_operations rt_cache_seq_fops = {
103643@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
103644
103645 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
103646 {
103647- return seq_open(file, &rt_cpu_seq_ops);
103648+ return seq_open_restrict(file, &rt_cpu_seq_ops);
103649 }
103650
103651 static const struct file_operations rt_cpu_seq_fops = {
103652@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
103653
103654 static int rt_acct_proc_open(struct inode *inode, struct file *file)
103655 {
103656- return single_open(file, rt_acct_proc_show, NULL);
103657+ return single_open_restrict(file, rt_acct_proc_show, NULL);
103658 }
103659
103660 static const struct file_operations rt_acct_proc_fops = {
103661@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
103662
103663 #define IP_IDENTS_SZ 2048u
103664 struct ip_ident_bucket {
103665- atomic_t id;
103666+ atomic_unchecked_t id;
103667 u32 stamp32;
103668 };
103669
103670-static struct ip_ident_bucket *ip_idents __read_mostly;
103671+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103672
103673 /* In order to protect privacy, we add a perturbation to identifiers
103674 * if one generator is seldom used. This makes hard for an attacker
103675@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103676 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103677 delta = prandom_u32_max(now - old);
103678
103679- return atomic_add_return(segs + delta, &bucket->id) - segs;
103680+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103681 }
103682 EXPORT_SYMBOL(ip_idents_reserve);
103683
103684@@ -2642,34 +2642,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103685 .maxlen = sizeof(int),
103686 .mode = 0200,
103687 .proc_handler = ipv4_sysctl_rtcache_flush,
103688+ .extra1 = &init_net,
103689 },
103690 { },
103691 };
103692
103693 static __net_init int sysctl_route_net_init(struct net *net)
103694 {
103695- struct ctl_table *tbl;
103696+ ctl_table_no_const *tbl = NULL;
103697
103698- tbl = ipv4_route_flush_table;
103699 if (!net_eq(net, &init_net)) {
103700- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103701+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103702 if (tbl == NULL)
103703 goto err_dup;
103704
103705 /* Don't export sysctls to unprivileged users */
103706 if (net->user_ns != &init_user_ns)
103707 tbl[0].procname = NULL;
103708- }
103709- tbl[0].extra1 = net;
103710+ tbl[0].extra1 = net;
103711+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103712+ } else
103713+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103714
103715- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103716 if (net->ipv4.route_hdr == NULL)
103717 goto err_reg;
103718 return 0;
103719
103720 err_reg:
103721- if (tbl != ipv4_route_flush_table)
103722- kfree(tbl);
103723+ kfree(tbl);
103724 err_dup:
103725 return -ENOMEM;
103726 }
103727@@ -2692,8 +2692,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103728
103729 static __net_init int rt_genid_init(struct net *net)
103730 {
103731- atomic_set(&net->ipv4.rt_genid, 0);
103732- atomic_set(&net->fnhe_genid, 0);
103733+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103734+ atomic_set_unchecked(&net->fnhe_genid, 0);
103735 get_random_bytes(&net->ipv4.dev_addr_genid,
103736 sizeof(net->ipv4.dev_addr_genid));
103737 return 0;
103738@@ -2737,11 +2737,7 @@ int __init ip_rt_init(void)
103739 int rc = 0;
103740 int cpu;
103741
103742- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103743- if (!ip_idents)
103744- panic("IP: failed to allocate ip_idents\n");
103745-
103746- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103747+ prandom_bytes(ip_idents, sizeof(ip_idents));
103748
103749 for_each_possible_cpu(cpu) {
103750 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
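
Two things happen in the route.c hunks above. First, ip_idents becomes a fixed static array instead of a boot-time kmalloc(), removing the panic path and letting sizeof() cover the whole table; the prandom_bytes() call shrinks accordingly. Second, per-netns sysctl registration is restructured so the kmemdup()'d copy exists only for non-init namespaces and the error path can kfree(tbl) unconditionally, since kfree(NULL) is a no-op. A tiny standalone illustration of the sizeof() point, with hypothetical names:

#include <stdio.h>

struct bucket { int id; unsigned int stamp32; };

#define IDENTS_SZ 2048u
static struct bucket idents[IDENTS_SZ];

int main(void)
{
	struct bucket *ptr = idents;

	/* With a pointer, sizeof() is the pointer; with the array it is
	 * the whole table -- which is what the rewritten prandom_bytes()
	 * call above now relies on. */
	printf("%zu vs %zu\n", sizeof(ptr), sizeof(idents));
	return 0;
}
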
103751diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103752index d151539..5f5e247 100644
103753--- a/net/ipv4/sysctl_net_ipv4.c
103754+++ b/net/ipv4/sysctl_net_ipv4.c
103755@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103756 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103757 int ret;
103758 int range[2];
103759- struct ctl_table tmp = {
103760+ ctl_table_no_const tmp = {
103761 .data = &range,
103762 .maxlen = sizeof(range),
103763 .mode = table->mode,
103764@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103765 int ret;
103766 gid_t urange[2];
103767 kgid_t low, high;
103768- struct ctl_table tmp = {
103769+ ctl_table_no_const tmp = {
103770 .data = &urange,
103771 .maxlen = sizeof(urange),
103772 .mode = table->mode,
103773@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103774 void __user *buffer, size_t *lenp, loff_t *ppos)
103775 {
103776 char val[TCP_CA_NAME_MAX];
103777- struct ctl_table tbl = {
103778+ ctl_table_no_const tbl = {
103779 .data = val,
103780 .maxlen = TCP_CA_NAME_MAX,
103781 };
103782@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103783 void __user *buffer, size_t *lenp,
103784 loff_t *ppos)
103785 {
103786- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103787+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103788 int ret;
103789
103790 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103791@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103792 void __user *buffer, size_t *lenp,
103793 loff_t *ppos)
103794 {
103795- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103796+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103797 int ret;
103798
103799 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103800@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103801 void __user *buffer, size_t *lenp,
103802 loff_t *ppos)
103803 {
103804- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103805+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103806 struct tcp_fastopen_context *ctxt;
103807 int ret;
103808 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103809@@ -888,13 +888,12 @@ static struct ctl_table ipv4_net_table[] = {
103810
103811 static __net_init int ipv4_sysctl_init_net(struct net *net)
103812 {
103813- struct ctl_table *table;
103814+ ctl_table_no_const *table = NULL;
103815
103816- table = ipv4_net_table;
103817 if (!net_eq(net, &init_net)) {
103818 int i;
103819
103820- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103821+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103822 if (table == NULL)
103823 goto err_alloc;
103824
103825@@ -903,7 +902,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103826 table[i].data += (void *)net - (void *)&init_net;
103827 }
103828
103829- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103830+ if (!net_eq(net, &init_net))
103831+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103832+ else
103833+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103834 if (net->ipv4.ipv4_hdr == NULL)
103835 goto err_reg;
103836
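
ctl_table_no_const, used above for both the kmemdup() copies and the on-stack temporaries, is grsecurity's writable alias for struct ctl_table: the constify plugin makes the canonical type read-only after init, so tables built at runtime need the unconstified spelling. A toy model of the alias, purely illustrative:

/* Same layout twice: the canonical type gets constified wholesale by
 * the plugin; the alias stays writable for runtime-built instances. */
struct ctl_table_model {
	const char *procname;
	void       *data;
	int         maxlen;
};

typedef struct ctl_table_model ctl_table_no_const_model;

static const struct ctl_table_model template = { "flush", 0, 4 };

static void build_runtime_copy(ctl_table_no_const_model *dst, void *netdata)
{
	*dst = template;       /* writable copy, like the kmemdup() above */
	dst->data = netdata;   /* per-namespace fixup */
}
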
103837diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
103838index 995a225..e1e9183 100644
103839--- a/net/ipv4/tcp.c
103840+++ b/net/ipv4/tcp.c
103841@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
103842
103843 /* Race breaker. If space is freed after
103844 * wspace test but before the flags are set,
103845- * IO signal will be lost.
103846+ * IO signal will be lost. Memory barrier
103847+ * pairs with the input side.
103848 */
103849+ smp_mb__after_atomic();
103850 if (sk_stream_is_writeable(sk))
103851 mask |= POLLOUT | POLLWRNORM;
103852 }
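
The barrier added above pairs with the one added to tcp_check_space() below: the poller sets SOCK_NOSPACE and then re-checks writability, while the input path frees space and then re-checks SOCK_NOSPACE. Without a full barrier on each side, both CPUs can act on stale values and the POLLOUT wakeup is lost. A userspace model of the pairing, assuming C11 fences stand in for smp_mb__after_atomic():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool nospace;
static atomic_int  free_space;

static bool poller_side(void)
{
	atomic_store_explicit(&nospace, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* smp_mb__after_atomic() */
	return atomic_load_explicit(&free_space, memory_order_relaxed) > 0;
}

static bool input_side(void)
{
	atomic_fetch_add_explicit(&free_space, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* pairs with the poller */
	return atomic_load_explicit(&nospace, memory_order_relaxed);
}

/* The fences guarantee at least one side observes the other's write,
 * so either the poller sees the freed space or the input side sees
 * NOSPACE and issues the wakeup. */
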
103853diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103854index f501ac04..0c5a1b2 100644
103855--- a/net/ipv4/tcp_input.c
103856+++ b/net/ipv4/tcp_input.c
103857@@ -767,7 +767,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103858 * without any lock. We want to make sure compiler wont store
103859 * intermediate values in this location.
103860 */
103861- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103862+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103863 sk->sk_max_pacing_rate);
103864 }
103865
103866@@ -4541,7 +4541,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103867 * simplifies code)
103868 */
103869 static void
103870-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103871+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103872 struct sk_buff *head, struct sk_buff *tail,
103873 u32 start, u32 end)
103874 {
103875@@ -4799,6 +4799,8 @@ static void tcp_check_space(struct sock *sk)
103876 {
103877 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
103878 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
103879+ /* pairs with tcp_poll() */
103880+ smp_mb__after_atomic();
103881 if (sk->sk_socket &&
103882 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
103883 tcp_new_space(sk);
103884@@ -5525,6 +5527,7 @@ discard:
103885 tcp_paws_reject(&tp->rx_opt, 0))
103886 goto discard_and_undo;
103887
103888+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103889 if (th->syn) {
103890 /* We see SYN without ACK. It is attempt of
103891 * simultaneous connect with crossed SYNs.
103892@@ -5575,6 +5578,7 @@ discard:
103893 goto discard;
103894 #endif
103895 }
103896+#endif
103897 /* "fifth, if neither of the SYN or RST bits is set then
103898 * drop the segment and return."
103899 */
103900@@ -5621,7 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103901 goto discard;
103902
103903 if (th->syn) {
103904- if (th->fin)
103905+ if (th->fin || th->urg || th->psh)
103906 goto discard;
103907 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103908 return 1;
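
Besides the ACCESS_ONCE_RW and __intentional_overflow plugin annotations, tcp_input.c carries two behavioral changes: GRKERNSEC_NO_SIMULT_CONNECT compiles out the SYN-in-SYN_SENT branch (disabling TCP simultaneous open), and the LISTEN-state check now drops SYNs that also carry FIN, URG or PSH. The latter, distilled into a standalone predicate; the helper name is hypothetical:

#include <stdbool.h>

struct tcpflags { bool syn, fin, urg, psh; };

static bool listen_syn_acceptable(const struct tcpflags *th)
{
	/* A SYN that also carries FIN, URG or PSH is not a normal open. */
	return th->syn && !(th->fin || th->urg || th->psh);
}
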
103909diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103910index f1756ee..8908cb0 100644
103911--- a/net/ipv4/tcp_ipv4.c
103912+++ b/net/ipv4/tcp_ipv4.c
103913@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103914 int sysctl_tcp_low_latency __read_mostly;
103915 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103916
103917+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103918+extern int grsec_enable_blackhole;
103919+#endif
103920+
103921 #ifdef CONFIG_TCP_MD5SIG
103922 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103923 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103924@@ -1475,6 +1479,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103925 return 0;
103926
103927 reset:
103928+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103929+ if (!grsec_enable_blackhole)
103930+#endif
103931 tcp_v4_send_reset(rsk, skb);
103932 discard:
103933 kfree_skb(skb);
103934@@ -1639,12 +1646,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103935 TCP_SKB_CB(skb)->sacked = 0;
103936
103937 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103938- if (!sk)
103939+ if (!sk) {
103940+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103941+ ret = 1;
103942+#endif
103943 goto no_tcp_socket;
103944-
103945+ }
103946 process:
103947- if (sk->sk_state == TCP_TIME_WAIT)
103948+ if (sk->sk_state == TCP_TIME_WAIT) {
103949+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103950+ ret = 2;
103951+#endif
103952 goto do_time_wait;
103953+ }
103954
103955 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103956 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103957@@ -1700,6 +1714,10 @@ csum_error:
103958 bad_packet:
103959 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103960 } else {
103961+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103962+ if (!grsec_enable_blackhole || (ret == 1 &&
103963+ (skb->dev->flags & IFF_LOOPBACK)))
103964+#endif
103965 tcp_v4_send_reset(NULL, skb);
103966 }
103967
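
The tcp_ipv4.c changes implement GRKERNSEC_BLACKHOLE for TCP: when enabled, the kernel stays silent instead of answering probes with RSTs. The ret = 1 / ret = 2 assignments record why the lookup bailed out (no socket vs. time-wait) so the error path can still reset loopback traffic aimed at closed ports, keeping local tools working. The decision, isolated for illustration:

#include <stdbool.h>

#define RET_NO_SOCKET  1
#define RET_TIME_WAIT  2

static bool should_send_reset(bool blackhole_on, int ret, bool loopback)
{
	if (!blackhole_on)
		return true;                    /* stock behavior */
	/* Blackholing: only closed ports on loopback still get a RST. */
	return ret == RET_NO_SOCKET && loopback;
}
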
103968diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103969index dd11ac7..c0872da 100644
103970--- a/net/ipv4/tcp_minisocks.c
103971+++ b/net/ipv4/tcp_minisocks.c
103972@@ -27,6 +27,10 @@
103973 #include <net/inet_common.h>
103974 #include <net/xfrm.h>
103975
103976+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103977+extern int grsec_enable_blackhole;
103978+#endif
103979+
103980 int sysctl_tcp_syncookies __read_mostly = 1;
103981 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103982
103983@@ -785,7 +789,10 @@ embryonic_reset:
103984 * avoid becoming vulnerable to outside attack aiming at
103985 * resetting legit local connections.
103986 */
103987- req->rsk_ops->send_reset(sk, skb);
103988+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103989+ if (!grsec_enable_blackhole)
103990+#endif
103991+ req->rsk_ops->send_reset(sk, skb);
103992 } else if (fastopen) { /* received a valid RST pkt */
103993 reqsk_fastopen_remove(sk, req, true);
103994 tcp_reset(sk);
103995diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103996index ebf5ff5..4d1ff32 100644
103997--- a/net/ipv4/tcp_probe.c
103998+++ b/net/ipv4/tcp_probe.c
103999@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
104000 if (cnt + width >= len)
104001 break;
104002
104003- if (copy_to_user(buf + cnt, tbuf, width))
104004+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
104005 return -EFAULT;
104006 cnt += width;
104007 }
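
The tcp_probe change is defense in depth: width comes from a local snprintf() and should already fit, but capping the copy at sizeof tbuf guarantees a miscomputed length can never leak adjacent stack memory to userspace. The idiom in standalone form, with a hypothetical helper name:

#include <string.h>
#include <stddef.h>

static int copy_bounded(void *dst, const void *src,
			size_t width, size_t bufsize)
{
	if (width > bufsize)
		return -1;          /* refuse rather than over-read */
	memcpy(dst, src, width);    /* copy_to_user() in the patch */
	return 0;
}
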
104008diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
104009index 0732b78..a82bdc6 100644
104010--- a/net/ipv4/tcp_timer.c
104011+++ b/net/ipv4/tcp_timer.c
104012@@ -22,6 +22,10 @@
104013 #include <linux/gfp.h>
104014 #include <net/tcp.h>
104015
104016+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104017+extern int grsec_lastack_retries;
104018+#endif
104019+
104020 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
104021 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
104022 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
104023@@ -194,6 +198,13 @@ static int tcp_write_timeout(struct sock *sk)
104024 }
104025 }
104026
104027+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104028+ if ((sk->sk_state == TCP_LAST_ACK) &&
104029+ (grsec_lastack_retries > 0) &&
104030+ (grsec_lastack_retries < retry_until))
104031+ retry_until = grsec_lastack_retries;
104032+#endif
104033+
104034 if (retransmits_timed_out(sk, retry_until,
104035 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
104036 /* Has it gone just too far? */
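
Sockets parked in LAST_ACK tie up kernel memory, and with blackholing enabled the peer's final ACK may never arrive, so grsec_lastack_retries (a grsecurity sysctl, as I read the patch) clamps the retransmit budget in that state. The clamp, isolated:

static int effective_retries(int in_last_ack, int lastack_limit,
			     int retry_until)
{
	if (in_last_ack && lastack_limit > 0 && lastack_limit < retry_until)
		return lastack_limit;   /* administrative clamp */
	return retry_until;
}
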
104037diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
104038index 97ef1f8b..e446c33 100644
104039--- a/net/ipv4/udp.c
104040+++ b/net/ipv4/udp.c
104041@@ -87,6 +87,7 @@
104042 #include <linux/types.h>
104043 #include <linux/fcntl.h>
104044 #include <linux/module.h>
104045+#include <linux/security.h>
104046 #include <linux/socket.h>
104047 #include <linux/sockios.h>
104048 #include <linux/igmp.h>
104049@@ -114,6 +115,10 @@
104050 #include <net/busy_poll.h>
104051 #include "udp_impl.h"
104052
104053+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104054+extern int grsec_enable_blackhole;
104055+#endif
104056+
104057 struct udp_table udp_table __read_mostly;
104058 EXPORT_SYMBOL(udp_table);
104059
104060@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
104061 return true;
104062 }
104063
104064+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
104065+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
104066+
104067 /*
104068 * This routine is called by the ICMP module when it gets some
104069 * sort of error condition. If err < 0 then the socket should
104070@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
104071 dport = usin->sin_port;
104072 if (dport == 0)
104073 return -EINVAL;
104074+
104075+ err = gr_search_udp_sendmsg(sk, usin);
104076+ if (err)
104077+ return err;
104078 } else {
104079 if (sk->sk_state != TCP_ESTABLISHED)
104080 return -EDESTADDRREQ;
104081+
104082+ err = gr_search_udp_sendmsg(sk, NULL);
104083+ if (err)
104084+ return err;
104085+
104086 daddr = inet->inet_daddr;
104087 dport = inet->inet_dport;
104088 /* Open fast path for connected socket.
104089@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
104090 IS_UDPLITE(sk));
104091 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104092 IS_UDPLITE(sk));
104093- atomic_inc(&sk->sk_drops);
104094+ atomic_inc_unchecked(&sk->sk_drops);
104095 __skb_unlink(skb, rcvq);
104096 __skb_queue_tail(&list_kill, skb);
104097 }
104098@@ -1275,6 +1292,10 @@ try_again:
104099 if (!skb)
104100 goto out;
104101
104102+ err = gr_search_udp_recvmsg(sk, skb);
104103+ if (err)
104104+ goto out_free;
104105+
104106 ulen = skb->len - sizeof(struct udphdr);
104107 copied = len;
104108 if (copied > ulen)
104109@@ -1307,7 +1328,7 @@ try_again:
104110 if (unlikely(err)) {
104111 trace_kfree_skb(skb, udp_recvmsg);
104112 if (!peeked) {
104113- atomic_inc(&sk->sk_drops);
104114+ atomic_inc_unchecked(&sk->sk_drops);
104115 UDP_INC_STATS_USER(sock_net(sk),
104116 UDP_MIB_INERRORS, is_udplite);
104117 }
104118@@ -1605,7 +1626,7 @@ csum_error:
104119 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104120 drop:
104121 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104122- atomic_inc(&sk->sk_drops);
104123+ atomic_inc_unchecked(&sk->sk_drops);
104124 kfree_skb(skb);
104125 return -1;
104126 }
104127@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104128 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104129
104130 if (!skb1) {
104131- atomic_inc(&sk->sk_drops);
104132+ atomic_inc_unchecked(&sk->sk_drops);
104133 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104134 IS_UDPLITE(sk));
104135 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104136@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104137 goto csum_error;
104138
104139 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104140+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104141+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104142+#endif
104143 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
104144
104145 /*
104146@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
104147 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
104148 0, sock_i_ino(sp),
104149 atomic_read(&sp->sk_refcnt), sp,
104150- atomic_read(&sp->sk_drops));
104151+ atomic_read_unchecked(&sp->sk_drops));
104152 }
104153
104154 int udp4_seq_show(struct seq_file *seq, void *v)
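
The udp.c hunks wire grsecurity's RBAC mediation into the datapath: gr_search_udp_sendmsg() vets the destination (called with NULL for connected sockets, which use their established peer) and gr_search_udp_recvmsg() can veto each received skb; a nonzero return becomes the syscall's error. The real implementations live in grsecurity's RBAC code; the stubs below only document the contract as I read it:

struct sock;
struct sk_buff;
struct sockaddr_in;

/* Return 0 to allow, or a negative errno to veto the operation. */
int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
{
	(void)sk;
	(void)addr;    /* NULL: connected socket, use its peer */
	return 0;      /* allow-all stub */
}

int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
{
	(void)sk;
	(void)skb;
	return 0;
}
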
104155diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
104156index 6156f68..d6ab46d 100644
104157--- a/net/ipv4/xfrm4_policy.c
104158+++ b/net/ipv4/xfrm4_policy.c
104159@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104160 fl4->flowi4_tos = iph->tos;
104161 }
104162
104163-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
104164+static int xfrm4_garbage_collect(struct dst_ops *ops)
104165 {
104166 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
104167
104168- xfrm4_policy_afinfo.garbage_collect(net);
104169+ xfrm_garbage_collect_deferred(net);
104170 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
104171 }
104172
104173@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
104174
104175 static int __net_init xfrm4_net_init(struct net *net)
104176 {
104177- struct ctl_table *table;
104178+ ctl_table_no_const *table = NULL;
104179 struct ctl_table_header *hdr;
104180
104181- table = xfrm4_policy_table;
104182 if (!net_eq(net, &init_net)) {
104183- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104184+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104185 if (!table)
104186 goto err_alloc;
104187
104188 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
104189- }
104190-
104191- hdr = register_net_sysctl(net, "net/ipv4", table);
104192+ hdr = register_net_sysctl(net, "net/ipv4", table);
104193+ } else
104194+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
104195 if (!hdr)
104196 goto err_reg;
104197
104198@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
104199 return 0;
104200
104201 err_reg:
104202- if (!net_eq(net, &init_net))
104203- kfree(table);
104204+ kfree(table);
104205 err_alloc:
104206 return -ENOMEM;
104207 }
104208diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
104209index b603002..0de5c88 100644
104210--- a/net/ipv6/addrconf.c
104211+++ b/net/ipv6/addrconf.c
104212@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
104213 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
104214 .mtu6 = IPV6_MIN_MTU,
104215 .accept_ra = 1,
104216- .accept_redirects = 1,
104217+ .accept_redirects = 0,
104218 .autoconf = 1,
104219 .force_mld_version = 0,
104220 .mldv1_unsolicited_report_interval = 10 * HZ,
104221@@ -209,7 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
104222 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
104223 .mtu6 = IPV6_MIN_MTU,
104224 .accept_ra = 1,
104225- .accept_redirects = 1,
104226+ .accept_redirects = 0,
104227 .autoconf = 1,
104228 .force_mld_version = 0,
104229 .mldv1_unsolicited_report_interval = 10 * HZ,
104230@@ -607,7 +607,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
104231 idx = 0;
104232 head = &net->dev_index_head[h];
104233 rcu_read_lock();
104234- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
104235+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
104236 net->dev_base_seq;
104237 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104238 if (idx < s_idx)
104239@@ -2438,7 +2438,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
104240 p.iph.ihl = 5;
104241 p.iph.protocol = IPPROTO_IPV6;
104242 p.iph.ttl = 64;
104243- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
104244+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
104245
104246 if (ops->ndo_do_ioctl) {
104247 mm_segment_t oldfs = get_fs();
104248@@ -3587,16 +3587,23 @@ static const struct file_operations if6_fops = {
104249 .release = seq_release_net,
104250 };
104251
104252+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
104253+extern void unregister_ipv6_seq_ops_addr(void);
104254+
104255 static int __net_init if6_proc_net_init(struct net *net)
104256 {
104257- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
104258+ register_ipv6_seq_ops_addr(&if6_seq_ops);
104259+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
104260+ unregister_ipv6_seq_ops_addr();
104261 return -ENOMEM;
104262+ }
104263 return 0;
104264 }
104265
104266 static void __net_exit if6_proc_net_exit(struct net *net)
104267 {
104268 remove_proc_entry("if_inet6", net->proc_net);
104269+ unregister_ipv6_seq_ops_addr();
104270 }
104271
104272 static struct pernet_operations if6_proc_net_ops = {
104273@@ -4215,7 +4222,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
104274 s_ip_idx = ip_idx = cb->args[2];
104275
104276 rcu_read_lock();
104277- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104278+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104279 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
104280 idx = 0;
104281 head = &net->dev_index_head[h];
104282@@ -4864,7 +4871,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104283 rt_genid_bump_ipv6(net);
104284 break;
104285 }
104286- atomic_inc(&net->ipv6.dev_addr_genid);
104287+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
104288 }
104289
104290 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104291@@ -4884,7 +4891,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
104292 int *valp = ctl->data;
104293 int val = *valp;
104294 loff_t pos = *ppos;
104295- struct ctl_table lctl;
104296+ ctl_table_no_const lctl;
104297 int ret;
104298
104299 /*
104300@@ -4909,7 +4916,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
104301 {
104302 struct inet6_dev *idev = ctl->extra1;
104303 int min_mtu = IPV6_MIN_MTU;
104304- struct ctl_table lctl;
104305+ ctl_table_no_const lctl;
104306
104307 lctl = *ctl;
104308 lctl.extra1 = &min_mtu;
104309@@ -4984,7 +4991,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
104310 int *valp = ctl->data;
104311 int val = *valp;
104312 loff_t pos = *ppos;
104313- struct ctl_table lctl;
104314+ ctl_table_no_const lctl;
104315 int ret;
104316
104317 /*
104318diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
104319index e8c4400..a4cd5da 100644
104320--- a/net/ipv6/af_inet6.c
104321+++ b/net/ipv6/af_inet6.c
104322@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
104323 net->ipv6.sysctl.icmpv6_time = 1*HZ;
104324 net->ipv6.sysctl.flowlabel_consistency = 1;
104325 net->ipv6.sysctl.auto_flowlabels = 0;
104326- atomic_set(&net->ipv6.fib6_sernum, 1);
104327+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
104328
104329 err = ipv6_init_mibs(net);
104330 if (err)
104331diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
104332index ace8dac..bd6942d 100644
104333--- a/net/ipv6/datagram.c
104334+++ b/net/ipv6/datagram.c
104335@@ -957,5 +957,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
104336 0,
104337 sock_i_ino(sp),
104338 atomic_read(&sp->sk_refcnt), sp,
104339- atomic_read(&sp->sk_drops));
104340+ atomic_read_unchecked(&sp->sk_drops));
104341 }
104342diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
104343index a5e9519..16b7412 100644
104344--- a/net/ipv6/icmp.c
104345+++ b/net/ipv6/icmp.c
104346@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
104347
104348 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
104349 {
104350- struct ctl_table *table;
104351+ ctl_table_no_const *table;
104352
104353 table = kmemdup(ipv6_icmp_table_template,
104354 sizeof(ipv6_icmp_table_template),
104355diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
104356index 263ef41..88c7be8 100644
104357--- a/net/ipv6/ip6_fib.c
104358+++ b/net/ipv6/ip6_fib.c
104359@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
104360 int new, old;
104361
104362 do {
104363- old = atomic_read(&net->ipv6.fib6_sernum);
104364+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
104365 new = old < INT_MAX ? old + 1 : 1;
104366- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
104367+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
104368 old, new) != old);
104369 return new;
104370 }
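
fib6_new_sernum() is a lock-free generator: compute the successor, wrapping from INT_MAX back to 1 so 0 is never produced, and publish with cmpxchg, retrying on contention. The _unchecked ops mark the wrap as deliberate so REFCOUNT hardening ignores it. A userspace model with C11 atomics:

#include <stdatomic.h>
#include <limits.h>

static atomic_int sernum = 1;

static int new_sernum(void)
{
	int old, new;

	do {
		old = atomic_load(&sernum);
		new = old < INT_MAX ? old + 1 : 1;  /* deliberate wrap, skip 0 */
	} while (!atomic_compare_exchange_weak(&sernum, &old, new));
	return new;
}
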
104371diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
104372index bc28b7d..a08feea 100644
104373--- a/net/ipv6/ip6_gre.c
104374+++ b/net/ipv6/ip6_gre.c
104375@@ -71,8 +71,8 @@ struct ip6gre_net {
104376 struct net_device *fb_tunnel_dev;
104377 };
104378
104379-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
104380-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
104381+static struct rtnl_link_ops ip6gre_link_ops;
104382+static struct rtnl_link_ops ip6gre_tap_ops;
104383 static int ip6gre_tunnel_init(struct net_device *dev);
104384 static void ip6gre_tunnel_setup(struct net_device *dev);
104385 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
104386@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
104387 }
104388
104389
104390-static struct inet6_protocol ip6gre_protocol __read_mostly = {
104391+static struct inet6_protocol ip6gre_protocol = {
104392 .handler = ip6gre_rcv,
104393 .err_handler = ip6gre_err,
104394 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
104395@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
104396 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
104397 };
104398
104399-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104400+static struct rtnl_link_ops ip6gre_link_ops = {
104401 .kind = "ip6gre",
104402 .maxtype = IFLA_GRE_MAX,
104403 .policy = ip6gre_policy,
104404@@ -1665,7 +1665,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104405 .get_link_net = ip6_tnl_get_link_net,
104406 };
104407
104408-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
104409+static struct rtnl_link_ops ip6gre_tap_ops = {
104410 .kind = "ip6gretap",
104411 .maxtype = IFLA_GRE_MAX,
104412 .policy = ip6gre_policy,
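
Dropping __read_mostly from the rtnl_link_ops and inet6_protocol definitions here (and for ip6tnl, vti6 and sit below) appears tied to constification: an object lives in exactly one section, and the explicit .data..read_mostly placement would keep the structure in writable data, defeating the write protection the plugin is meant to apply. A compilable illustration of the section tension; the attribute wrapper name is hypothetical:

#define __model_read_mostly __attribute__((__section__(".data..read_mostly")))

struct ops { int (*handler)(void); };

/* Explicit writable-section placement ... */
static struct ops writable_ops __model_read_mostly = { 0 };
/* ... versus const data the toolchain can write-protect. One or the other. */
static const struct ops constified_ops = { 0 };
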
104413diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
104414index ddd94ec..b7cfefb 100644
104415--- a/net/ipv6/ip6_tunnel.c
104416+++ b/net/ipv6/ip6_tunnel.c
104417@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104418
104419 static int ip6_tnl_dev_init(struct net_device *dev);
104420 static void ip6_tnl_dev_setup(struct net_device *dev);
104421-static struct rtnl_link_ops ip6_link_ops __read_mostly;
104422+static struct rtnl_link_ops ip6_link_ops;
104423
104424 static int ip6_tnl_net_id __read_mostly;
104425 struct ip6_tnl_net {
104426@@ -1780,7 +1780,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
104427 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
104428 };
104429
104430-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
104431+static struct rtnl_link_ops ip6_link_ops = {
104432 .kind = "ip6tnl",
104433 .maxtype = IFLA_IPTUN_MAX,
104434 .policy = ip6_tnl_policy,
104435diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
104436index 5fb9e21..92bf04b 100644
104437--- a/net/ipv6/ip6_vti.c
104438+++ b/net/ipv6/ip6_vti.c
104439@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104440
104441 static int vti6_dev_init(struct net_device *dev);
104442 static void vti6_dev_setup(struct net_device *dev);
104443-static struct rtnl_link_ops vti6_link_ops __read_mostly;
104444+static struct rtnl_link_ops vti6_link_ops;
104445
104446 static int vti6_net_id __read_mostly;
104447 struct vti6_net {
104448@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
104449 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
104450 };
104451
104452-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
104453+static struct rtnl_link_ops vti6_link_ops = {
104454 .kind = "vti6",
104455 .maxtype = IFLA_VTI_MAX,
104456 .policy = vti6_policy,
104457diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
104458index 8d766d9..dcdfea7 100644
104459--- a/net/ipv6/ipv6_sockglue.c
104460+++ b/net/ipv6/ipv6_sockglue.c
104461@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
104462 if (sk->sk_type != SOCK_STREAM)
104463 return -ENOPROTOOPT;
104464
104465- msg.msg_control = optval;
104466+ msg.msg_control = (void __force_kernel *)optval;
104467 msg.msg_controllen = len;
104468 msg.msg_flags = flags;
104469
104470diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
104471index bb00c6f..16c90d7 100644
104472--- a/net/ipv6/netfilter/ip6_tables.c
104473+++ b/net/ipv6/netfilter/ip6_tables.c
104474@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
104475 #endif
104476
104477 static int get_info(struct net *net, void __user *user,
104478- const int *len, int compat)
104479+ int len, int compat)
104480 {
104481 char name[XT_TABLE_MAXNAMELEN];
104482 struct xt_table *t;
104483 int ret;
104484
104485- if (*len != sizeof(struct ip6t_getinfo)) {
104486- duprintf("length %u != %zu\n", *len,
104487+ if (len != sizeof(struct ip6t_getinfo)) {
104488+ duprintf("length %u != %zu\n", len,
104489 sizeof(struct ip6t_getinfo));
104490 return -EINVAL;
104491 }
104492@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
104493 info.size = private->size;
104494 strcpy(info.name, name);
104495
104496- if (copy_to_user(user, &info, *len) != 0)
104497+ if (copy_to_user(user, &info, len) != 0)
104498 ret = -EFAULT;
104499 else
104500 ret = 0;
104501@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104502
104503 switch (cmd) {
104504 case IP6T_SO_GET_INFO:
104505- ret = get_info(sock_net(sk), user, len, 1);
104506+ ret = get_info(sock_net(sk), user, *len, 1);
104507 break;
104508 case IP6T_SO_GET_ENTRIES:
104509 ret = compat_get_entries(sock_net(sk), user, len);
104510@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104511
104512 switch (cmd) {
104513 case IP6T_SO_GET_INFO:
104514- ret = get_info(sock_net(sk), user, len, 0);
104515+ ret = get_info(sock_net(sk), user, *len, 0);
104516 break;
104517
104518 case IP6T_SO_GET_ENTRIES:
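
get_info() now takes the length by value: the number that passes the sizeof(struct ip6t_getinfo) check is necessarily the number later handed to copy_to_user(), closing any window in which a re-fetched *len could differ — the classic double-fetch shape. A standalone illustration:

#include <stddef.h>
#include <string.h>

struct shared { volatile size_t len; };

static int copy_racy(char *dst, size_t dstsz,
		     const char *src, struct shared *s)
{
	if (s->len > dstsz)          /* fetch #1: check */
		return -1;
	memcpy(dst, src, s->len);    /* fetch #2: use -- may differ! */
	return 0;
}

static int copy_safe(char *dst, size_t dstsz,
		     const char *src, struct shared *s)
{
	size_t len = s->len;         /* single fetch, like pass-by-value */

	if (len > dstsz)
		return -1;
	memcpy(dst, src, len);
	return 0;
}
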
104519diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
104520index 6f187c8..34b367f 100644
104521--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
104522+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
104523@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
104524
104525 static int nf_ct_frag6_sysctl_register(struct net *net)
104526 {
104527- struct ctl_table *table;
104528+ ctl_table_no_const *table = NULL;
104529 struct ctl_table_header *hdr;
104530
104531- table = nf_ct_frag6_sysctl_table;
104532 if (!net_eq(net, &init_net)) {
104533- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
104534+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
104535 GFP_KERNEL);
104536 if (table == NULL)
104537 goto err_alloc;
104538@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104539 table[2].data = &net->nf_frag.frags.high_thresh;
104540 table[2].extra1 = &net->nf_frag.frags.low_thresh;
104541 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
104542- }
104543-
104544- hdr = register_net_sysctl(net, "net/netfilter", table);
104545+ hdr = register_net_sysctl(net, "net/netfilter", table);
104546+ } else
104547+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
104548 if (hdr == NULL)
104549 goto err_reg;
104550
104551@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104552 return 0;
104553
104554 err_reg:
104555- if (!net_eq(net, &init_net))
104556- kfree(table);
104557+ kfree(table);
104558 err_alloc:
104559 return -ENOMEM;
104560 }
104561diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
104562index a2dfff6..1e52e6d 100644
104563--- a/net/ipv6/ping.c
104564+++ b/net/ipv6/ping.c
104565@@ -241,6 +241,24 @@ static struct pernet_operations ping_v6_net_ops = {
104566 };
104567 #endif
104568
104569+static struct pingv6_ops real_pingv6_ops = {
104570+ .ipv6_recv_error = ipv6_recv_error,
104571+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
104572+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
104573+ .icmpv6_err_convert = icmpv6_err_convert,
104574+ .ipv6_icmp_error = ipv6_icmp_error,
104575+ .ipv6_chk_addr = ipv6_chk_addr,
104576+};
104577+
104578+static struct pingv6_ops dummy_pingv6_ops = {
104579+ .ipv6_recv_error = dummy_ipv6_recv_error,
104580+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
104581+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
104582+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
104583+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
104584+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
104585+};
104586+
104587 int __init pingv6_init(void)
104588 {
104589 #ifdef CONFIG_PROC_FS
104590@@ -248,13 +266,7 @@ int __init pingv6_init(void)
104591 if (ret)
104592 return ret;
104593 #endif
104594- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
104595- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
104596- pingv6_ops.ip6_datagram_recv_specific_ctl =
104597- ip6_datagram_recv_specific_ctl;
104598- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
104599- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
104600- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
104601+ pingv6_ops = &real_pingv6_ops;
104602 return inet6_register_protosw(&pingv6_protosw);
104603 }
104604
104605@@ -263,14 +275,9 @@ int __init pingv6_init(void)
104606 */
104607 void pingv6_exit(void)
104608 {
104609- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
104610- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
104611- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
104612- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
104613- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
104614- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
104615 #ifdef CONFIG_PROC_FS
104616 unregister_pernet_subsys(&ping_v6_net_ops);
104617 #endif
104618+ pingv6_ops = &dummy_pingv6_ops;
104619 inet6_unregister_protosw(&pingv6_protosw);
104620 }
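
Rather than patching six function pointers in a writable global one at a time, ping.c now keeps two fully initialized static ops tables and swaps a single pointer on init/exit; with constification the tables themselves can be read-only, shrinking the writable attack surface to one pointer. A userspace model:

#include <stdio.h>

struct pingops { void (*recv_error)(void); };

static void real_recv_error(void)  { puts("real");  }
static void dummy_recv_error(void) { puts("dummy"); }

static const struct pingops real_ops  = { real_recv_error  };
static const struct pingops dummy_ops = { dummy_recv_error };

static const struct pingops *ops = &dummy_ops;

int main(void)
{
	ops = &real_ops;     /* pingv6_init()  */
	ops->recv_error();
	ops = &dummy_ops;    /* pingv6_exit()  */
	ops->recv_error();
	return 0;
}
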
104621diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
104622index 679253d0..70b653c 100644
104623--- a/net/ipv6/proc.c
104624+++ b/net/ipv6/proc.c
104625@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
104626 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
104627 goto proc_snmp6_fail;
104628
104629- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
104630+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
104631 if (!net->mib.proc_net_devsnmp6)
104632 goto proc_dev_snmp6_fail;
104633 return 0;
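
proc_mkdir_restrict() is grsecurity's variant of proc_mkdir() that applies the GRKERNSEC_PROC visibility policy to the new directory; the same substitution appears for the ipx, irda and llc proc trees below. A sketch of the likely shape using the stock proc_mkdir_mode() API — the body is an assumption, not the patch's implementation:

struct proc_dir_entry;
struct proc_dir_entry *proc_mkdir_mode(const char *name, unsigned int mode,
				       struct proc_dir_entry *parent);

static struct proc_dir_entry *
proc_mkdir_restrict_sketch(const char *name, struct proc_dir_entry *parent)
{
	/* Tighten the mode so unprivileged users cannot list the dir;
	 * the real version presumably also honors the configured group. */
	return proc_mkdir_mode(name, 0500, parent);
}
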
104634diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
104635index dae7f1a..783b20d 100644
104636--- a/net/ipv6/raw.c
104637+++ b/net/ipv6/raw.c
104638@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
104639 {
104640 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
104641 skb_checksum_complete(skb)) {
104642- atomic_inc(&sk->sk_drops);
104643+ atomic_inc_unchecked(&sk->sk_drops);
104644 kfree_skb(skb);
104645 return NET_RX_DROP;
104646 }
104647@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104648 struct raw6_sock *rp = raw6_sk(sk);
104649
104650 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
104651- atomic_inc(&sk->sk_drops);
104652+ atomic_inc_unchecked(&sk->sk_drops);
104653 kfree_skb(skb);
104654 return NET_RX_DROP;
104655 }
104656@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104657
104658 if (inet->hdrincl) {
104659 if (skb_checksum_complete(skb)) {
104660- atomic_inc(&sk->sk_drops);
104661+ atomic_inc_unchecked(&sk->sk_drops);
104662 kfree_skb(skb);
104663 return NET_RX_DROP;
104664 }
104665@@ -609,7 +609,7 @@ out:
104666 return err;
104667 }
104668
104669-static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
104670+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, unsigned int length,
104671 struct flowi6 *fl6, struct dst_entry **dstp,
104672 unsigned int flags)
104673 {
104674@@ -915,12 +915,15 @@ do_confirm:
104675 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
104676 char __user *optval, int optlen)
104677 {
104678+ struct icmp6_filter filter;
104679+
104680 switch (optname) {
104681 case ICMPV6_FILTER:
104682 if (optlen > sizeof(struct icmp6_filter))
104683 optlen = sizeof(struct icmp6_filter);
104684- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
104685+ if (copy_from_user(&filter, optval, optlen))
104686 return -EFAULT;
104687+ raw6_sk(sk)->filter = filter;
104688 return 0;
104689 default:
104690 return -ENOPROTOOPT;
104691@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104692 char __user *optval, int __user *optlen)
104693 {
104694 int len;
104695+ struct icmp6_filter filter;
104696
104697 switch (optname) {
104698 case ICMPV6_FILTER:
104699@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104700 len = sizeof(struct icmp6_filter);
104701 if (put_user(len, optlen))
104702 return -EFAULT;
104703- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104704+ filter = raw6_sk(sk)->filter;
104705+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104706 return -EFAULT;
104707 return 0;
104708 default:
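
The rawv6 ICMPv6-filter paths now bounce through an on-stack struct icmp6_filter: the user copy lands in the local, and a single struct assignment publishes it; the getter additionally refuses len > sizeof filter. This keeps copy_*_user() away from the socket object itself and makes the bound explicit. The idiom standalone, with illustrative names:

#include <string.h>
#include <stddef.h>

struct filter { unsigned int data[8]; };
static struct filter sock_filter;   /* stands in for raw6_sk(sk)->filter */

static int set_filter(const void *uptr, size_t optlen)
{
	struct filter tmp;

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	memcpy(&tmp, uptr, optlen);     /* copy_from_user() in the patch */
	sock_filter = tmp;              /* single, bounded publish */
	return 0;
}
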
104709diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104710index d7d70e6..bd5e9fc 100644
104711--- a/net/ipv6/reassembly.c
104712+++ b/net/ipv6/reassembly.c
104713@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104714
104715 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104716 {
104717- struct ctl_table *table;
104718+ ctl_table_no_const *table = NULL;
104719 struct ctl_table_header *hdr;
104720
104721- table = ip6_frags_ns_ctl_table;
104722 if (!net_eq(net, &init_net)) {
104723- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104724+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104725 if (table == NULL)
104726 goto err_alloc;
104727
104728@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104729 /* Don't export sysctls to unprivileged users */
104730 if (net->user_ns != &init_user_ns)
104731 table[0].procname = NULL;
104732- }
104733+ hdr = register_net_sysctl(net, "net/ipv6", table);
104734+ } else
104735+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104736
104737- hdr = register_net_sysctl(net, "net/ipv6", table);
104738 if (hdr == NULL)
104739 goto err_reg;
104740
104741@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104742 return 0;
104743
104744 err_reg:
104745- if (!net_eq(net, &init_net))
104746- kfree(table);
104747+ kfree(table);
104748 err_alloc:
104749 return -ENOMEM;
104750 }
104751diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104752index 4688bd4..584453d 100644
104753--- a/net/ipv6/route.c
104754+++ b/net/ipv6/route.c
104755@@ -3029,7 +3029,7 @@ struct ctl_table ipv6_route_table_template[] = {
104756
104757 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104758 {
104759- struct ctl_table *table;
104760+ ctl_table_no_const *table;
104761
104762 table = kmemdup(ipv6_route_table_template,
104763 sizeof(ipv6_route_table_template),
104764diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104765index e4cbd57..02b1aaa 100644
104766--- a/net/ipv6/sit.c
104767+++ b/net/ipv6/sit.c
104768@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104769 static void ipip6_dev_free(struct net_device *dev);
104770 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104771 __be32 *v4dst);
104772-static struct rtnl_link_ops sit_link_ops __read_mostly;
104773+static struct rtnl_link_ops sit_link_ops;
104774
104775 static int sit_net_id __read_mostly;
104776 struct sit_net {
104777@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104778 unregister_netdevice_queue(dev, head);
104779 }
104780
104781-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104782+static struct rtnl_link_ops sit_link_ops = {
104783 .kind = "sit",
104784 .maxtype = IFLA_IPTUN_MAX,
104785 .policy = ipip6_policy,
104786diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104787index c5c10fa..2577d51 100644
104788--- a/net/ipv6/sysctl_net_ipv6.c
104789+++ b/net/ipv6/sysctl_net_ipv6.c
104790@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
104791
104792 static int __net_init ipv6_sysctl_net_init(struct net *net)
104793 {
104794- struct ctl_table *ipv6_table;
104795+ ctl_table_no_const *ipv6_table;
104796 struct ctl_table *ipv6_route_table;
104797 struct ctl_table *ipv6_icmp_table;
104798 int err;
104799diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104800index 1f5e622..8387d90 100644
104801--- a/net/ipv6/tcp_ipv6.c
104802+++ b/net/ipv6/tcp_ipv6.c
104803@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104804 }
104805 }
104806
104807+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104808+extern int grsec_enable_blackhole;
104809+#endif
104810+
104811 static void tcp_v6_hash(struct sock *sk)
104812 {
104813 if (sk->sk_state != TCP_CLOSE) {
104814@@ -1345,6 +1349,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104815 return 0;
104816
104817 reset:
104818+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104819+ if (!grsec_enable_blackhole)
104820+#endif
104821 tcp_v6_send_reset(sk, skb);
104822 discard:
104823 if (opt_skb)
104824@@ -1454,12 +1461,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104825
104826 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104827 inet6_iif(skb));
104828- if (!sk)
104829+ if (!sk) {
104830+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104831+ ret = 1;
104832+#endif
104833 goto no_tcp_socket;
104834+ }
104835
104836 process:
104837- if (sk->sk_state == TCP_TIME_WAIT)
104838+ if (sk->sk_state == TCP_TIME_WAIT) {
104839+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104840+ ret = 2;
104841+#endif
104842 goto do_time_wait;
104843+ }
104844
104845 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104846 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104847@@ -1510,6 +1525,10 @@ csum_error:
104848 bad_packet:
104849 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104850 } else {
104851+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104852+ if (!grsec_enable_blackhole || (ret == 1 &&
104853+ (skb->dev->flags & IFF_LOOPBACK)))
104854+#endif
104855 tcp_v6_send_reset(NULL, skb);
104856 }
104857
104858diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104859index d048d46..bf141c3 100644
104860--- a/net/ipv6/udp.c
104861+++ b/net/ipv6/udp.c
104862@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104863 udp_ipv6_hash_secret + net_hash_mix(net));
104864 }
104865
104866+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104867+extern int grsec_enable_blackhole;
104868+#endif
104869+
104870 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104871 {
104872 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104873@@ -448,7 +452,7 @@ try_again:
104874 if (unlikely(err)) {
104875 trace_kfree_skb(skb, udpv6_recvmsg);
104876 if (!peeked) {
104877- atomic_inc(&sk->sk_drops);
104878+ atomic_inc_unchecked(&sk->sk_drops);
104879 if (is_udp4)
104880 UDP_INC_STATS_USER(sock_net(sk),
104881 UDP_MIB_INERRORS,
104882@@ -714,7 +718,7 @@ csum_error:
104883 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104884 drop:
104885 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104886- atomic_inc(&sk->sk_drops);
104887+ atomic_inc_unchecked(&sk->sk_drops);
104888 kfree_skb(skb);
104889 return -1;
104890 }
104891@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104892 if (likely(skb1 == NULL))
104893 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104894 if (!skb1) {
104895- atomic_inc(&sk->sk_drops);
104896+ atomic_inc_unchecked(&sk->sk_drops);
104897 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104898 IS_UDPLITE(sk));
104899 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104900@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104901 goto csum_error;
104902
104903 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104904+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104905+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104906+#endif
104907 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104908
104909 kfree_skb(skb);
104910diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104911index 8d2d01b4..313511e 100644
104912--- a/net/ipv6/xfrm6_policy.c
104913+++ b/net/ipv6/xfrm6_policy.c
104914@@ -224,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104915 }
104916 }
104917
104918-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104919+static int xfrm6_garbage_collect(struct dst_ops *ops)
104920 {
104921 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104922
104923- xfrm6_policy_afinfo.garbage_collect(net);
104924+ xfrm_garbage_collect_deferred(net);
104925 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104926 }
104927
104928@@ -341,19 +341,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104929
104930 static int __net_init xfrm6_net_init(struct net *net)
104931 {
104932- struct ctl_table *table;
104933+ ctl_table_no_const *table = NULL;
104934 struct ctl_table_header *hdr;
104935
104936- table = xfrm6_policy_table;
104937 if (!net_eq(net, &init_net)) {
104938- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104939+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104940 if (!table)
104941 goto err_alloc;
104942
104943 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104944- }
104945+ hdr = register_net_sysctl(net, "net/ipv6", table);
104946+ } else
104947+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104948
104949- hdr = register_net_sysctl(net, "net/ipv6", table);
104950 if (!hdr)
104951 goto err_reg;
104952
104953@@ -361,8 +361,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104954 return 0;
104955
104956 err_reg:
104957- if (!net_eq(net, &init_net))
104958- kfree(table);
104959+ kfree(table);
104960 err_alloc:
104961 return -ENOMEM;
104962 }
104963diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104964index c1d247e..9e5949d 100644
104965--- a/net/ipx/ipx_proc.c
104966+++ b/net/ipx/ipx_proc.c
104967@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104968 struct proc_dir_entry *p;
104969 int rc = -ENOMEM;
104970
104971- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104972+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104973
104974 if (!ipx_proc_dir)
104975 goto out;
104976diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104977index 683346d..cb0e12d 100644
104978--- a/net/irda/ircomm/ircomm_tty.c
104979+++ b/net/irda/ircomm/ircomm_tty.c
104980@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104981 add_wait_queue(&port->open_wait, &wait);
104982
104983 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104984- __FILE__, __LINE__, tty->driver->name, port->count);
104985+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104986
104987 spin_lock_irqsave(&port->lock, flags);
104988- port->count--;
104989+ atomic_dec(&port->count);
104990 port->blocked_open++;
104991 spin_unlock_irqrestore(&port->lock, flags);
104992
104993@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104994 }
104995
104996 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104997- __FILE__, __LINE__, tty->driver->name, port->count);
104998+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104999
105000 schedule();
105001 }
105002@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105003
105004 spin_lock_irqsave(&port->lock, flags);
105005 if (!tty_hung_up_p(filp))
105006- port->count++;
105007+ atomic_inc(&port->count);
105008 port->blocked_open--;
105009 spin_unlock_irqrestore(&port->lock, flags);
105010
105011 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
105012- __FILE__, __LINE__, tty->driver->name, port->count);
105013+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105014
105015 if (!retval)
105016 port->flags |= ASYNC_NORMAL_ACTIVE;
105017@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
105018
105019 /* ++ is not atomic, so this should be protected - Jean II */
105020 spin_lock_irqsave(&self->port.lock, flags);
105021- self->port.count++;
105022+ atomic_inc(&self->port.count);
105023 spin_unlock_irqrestore(&self->port.lock, flags);
105024 tty_port_tty_set(&self->port, tty);
105025
105026 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
105027- self->line, self->port.count);
105028+ self->line, atomic_read(&self->port.count));
105029
105030 /* Not really used by us, but lets do it anyway */
105031 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
105032@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
105033 tty_kref_put(port->tty);
105034 }
105035 port->tty = NULL;
105036- port->count = 0;
105037+ atomic_set(&port->count, 0);
105038 spin_unlock_irqrestore(&port->lock, flags);
105039
105040 wake_up_interruptible(&port->open_wait);
105041@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
105042 seq_putc(m, '\n');
105043
105044 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
105045- seq_printf(m, "Open count: %d\n", self->port.count);
105046+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
105047 seq_printf(m, "Max data size: %d\n", self->max_data_size);
105048 seq_printf(m, "Max header size: %d\n", self->max_header_size);
105049
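
port->count in ircomm_tty is written under port->lock but printed from paths that do not take it, so the patch converts it to atomic_t: the increments and decrements stay where they were, and the lockless readers become atomic_read(). Modeled in userspace:

#include <stdatomic.h>

struct port {
	/* spinlock elided in this sketch */
	atomic_int count;               /* was: int count; */
};

static void port_open(struct port *p)  { atomic_fetch_add(&p->count, 1); }
static void port_close(struct port *p) { atomic_fetch_sub(&p->count, 1); }
static int  port_count(struct port *p) { return atomic_load(&p->count); }
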
105050diff --git a/net/irda/irproc.c b/net/irda/irproc.c
105051index b9ac598..f88cc56 100644
105052--- a/net/irda/irproc.c
105053+++ b/net/irda/irproc.c
105054@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
105055 {
105056 int i;
105057
105058- proc_irda = proc_mkdir("irda", init_net.proc_net);
105059+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
105060 if (proc_irda == NULL)
105061 return;
105062
105063diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
105064index 53d9311..cbaf99f 100644
105065--- a/net/iucv/af_iucv.c
105066+++ b/net/iucv/af_iucv.c
105067@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
105068 {
105069 char name[12];
105070
105071- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
105072+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105073 while (__iucv_get_sock_by_name(name)) {
105074 sprintf(name, "%08x",
105075- atomic_inc_return(&iucv_sk_list.autobind_name));
105076+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105077 }
105078 memcpy(iucv->src_name, name, 8);
105079 }
105080diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
105081index 2a6a1fd..6c112b0 100644
105082--- a/net/iucv/iucv.c
105083+++ b/net/iucv/iucv.c
105084@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
105085 return NOTIFY_OK;
105086 }
105087
105088-static struct notifier_block __refdata iucv_cpu_notifier = {
105089+static struct notifier_block iucv_cpu_notifier = {
105090 .notifier_call = iucv_cpu_notify,
105091 };
105092
105093diff --git a/net/key/af_key.c b/net/key/af_key.c
105094index f8ac939..1e189bf 100644
105095--- a/net/key/af_key.c
105096+++ b/net/key/af_key.c
105097@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
105098 static u32 get_acqseq(void)
105099 {
105100 u32 res;
105101- static atomic_t acqseq;
105102+ static atomic_unchecked_t acqseq;
105103
105104 do {
105105- res = atomic_inc_return(&acqseq);
105106+ res = atomic_inc_return_unchecked(&acqseq);
105107 } while (!res);
105108 return res;
105109 }
105110diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
105111index 781b3a2..73a7434 100644
105112--- a/net/l2tp/l2tp_eth.c
105113+++ b/net/l2tp/l2tp_eth.c
105114@@ -42,12 +42,12 @@ struct l2tp_eth {
105115 struct sock *tunnel_sock;
105116 struct l2tp_session *session;
105117 struct list_head list;
105118- atomic_long_t tx_bytes;
105119- atomic_long_t tx_packets;
105120- atomic_long_t tx_dropped;
105121- atomic_long_t rx_bytes;
105122- atomic_long_t rx_packets;
105123- atomic_long_t rx_errors;
105124+ atomic_long_unchecked_t tx_bytes;
105125+ atomic_long_unchecked_t tx_packets;
105126+ atomic_long_unchecked_t tx_dropped;
105127+ atomic_long_unchecked_t rx_bytes;
105128+ atomic_long_unchecked_t rx_packets;
105129+ atomic_long_unchecked_t rx_errors;
105130 };
105131
105132 /* via l2tp_session_priv() */
105133@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
105134 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
105135
105136 if (likely(ret == NET_XMIT_SUCCESS)) {
105137- atomic_long_add(len, &priv->tx_bytes);
105138- atomic_long_inc(&priv->tx_packets);
105139+ atomic_long_add_unchecked(len, &priv->tx_bytes);
105140+ atomic_long_inc_unchecked(&priv->tx_packets);
105141 } else {
105142- atomic_long_inc(&priv->tx_dropped);
105143+ atomic_long_inc_unchecked(&priv->tx_dropped);
105144 }
105145 return NETDEV_TX_OK;
105146 }
105147@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
105148 {
105149 struct l2tp_eth *priv = netdev_priv(dev);
105150
105151- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
105152- stats->tx_packets = atomic_long_read(&priv->tx_packets);
105153- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
105154- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
105155- stats->rx_packets = atomic_long_read(&priv->rx_packets);
105156- stats->rx_errors = atomic_long_read(&priv->rx_errors);
105157+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
105158+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
105159+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
105160+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
105161+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
105162+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
105163 return stats;
105164 }
105165
105166@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
105167 nf_reset(skb);
105168
105169 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
105170- atomic_long_inc(&priv->rx_packets);
105171- atomic_long_add(data_len, &priv->rx_bytes);
105172+ atomic_long_inc_unchecked(&priv->rx_packets);
105173+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
105174 } else {
105175- atomic_long_inc(&priv->rx_errors);
105176+ atomic_long_inc_unchecked(&priv->rx_errors);
105177 }
105178 return;
105179
105180 error:
105181- atomic_long_inc(&priv->rx_errors);
105182+ atomic_long_inc_unchecked(&priv->rx_errors);
105183 kfree_skb(skb);
105184 }
105185
105186diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
105187index 05dfc8aa..df6cfd7 100644
105188--- a/net/l2tp/l2tp_ip.c
105189+++ b/net/l2tp/l2tp_ip.c
105190@@ -608,7 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
105191 .ops = &l2tp_ip_ops,
105192 };
105193
105194-static struct net_protocol l2tp_ip_protocol __read_mostly = {
105195+static const struct net_protocol l2tp_ip_protocol = {
105196 .handler = l2tp_ip_recv,
105197 .netns_ok = 1,
105198 };
105199diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
105200index 8611f1b..bc60a2d 100644
105201--- a/net/l2tp/l2tp_ip6.c
105202+++ b/net/l2tp/l2tp_ip6.c
105203@@ -757,7 +757,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
105204 .ops = &l2tp_ip6_ops,
105205 };
105206
105207-static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
105208+static const struct inet6_protocol l2tp_ip6_protocol = {
105209 .handler = l2tp_ip6_recv,
105210 };
105211
105212diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
105213index 1a3c7e0..80f8b0c 100644
105214--- a/net/llc/llc_proc.c
105215+++ b/net/llc/llc_proc.c
105216@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
105217 int rc = -ENOMEM;
105218 struct proc_dir_entry *p;
105219
105220- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
105221+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
105222 if (!llc_proc_dir)
105223 goto out;
105224
105225diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
105226index dd4ff36..3462997 100644
105227--- a/net/mac80211/cfg.c
105228+++ b/net/mac80211/cfg.c
105229@@ -581,7 +581,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
105230 ret = ieee80211_vif_use_channel(sdata, chandef,
105231 IEEE80211_CHANCTX_EXCLUSIVE);
105232 }
105233- } else if (local->open_count == local->monitors) {
105234+ } else if (local_read(&local->open_count) == local->monitors) {
105235 local->_oper_chandef = *chandef;
105236 ieee80211_hw_config(local, 0);
105237 }
105238@@ -3468,7 +3468,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
105239 else
105240 local->probe_req_reg--;
105241
105242- if (!local->open_count)
105243+ if (!local_read(&local->open_count))
105244 break;
105245
105246 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
105247@@ -3603,8 +3603,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
105248 if (chanctx_conf) {
105249 *chandef = sdata->vif.bss_conf.chandef;
105250 ret = 0;
105251- } else if (local->open_count > 0 &&
105252- local->open_count == local->monitors &&
105253+ } else if (local_read(&local->open_count) > 0 &&
105254+ local_read(&local->open_count) == local->monitors &&
105255 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
105256 if (local->use_chanctx)
105257 *chandef = local->monitor_chandef;
105258diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
105259index 8d53d65..a4ac794 100644
105260--- a/net/mac80211/ieee80211_i.h
105261+++ b/net/mac80211/ieee80211_i.h
105262@@ -29,6 +29,7 @@
105263 #include <net/ieee80211_radiotap.h>
105264 #include <net/cfg80211.h>
105265 #include <net/mac80211.h>
105266+#include <asm/local.h>
105267 #include "key.h"
105268 #include "sta_info.h"
105269 #include "debug.h"
105270@@ -1126,7 +1127,7 @@ struct ieee80211_local {
105271 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
105272 spinlock_t queue_stop_reason_lock;
105273
105274- int open_count;
105275+ local_t open_count;
105276 int monitors, cooked_mntrs;
105277 /* number of interfaces with corresponding FIF_ flags */
105278 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
105279diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
105280index 81a2751..c06a026 100644
105281--- a/net/mac80211/iface.c
105282+++ b/net/mac80211/iface.c
105283@@ -544,7 +544,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105284 break;
105285 }
105286
105287- if (local->open_count == 0) {
105288+ if (local_read(&local->open_count) == 0) {
105289 res = drv_start(local);
105290 if (res)
105291 goto err_del_bss;
105292@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105293 res = drv_add_interface(local, sdata);
105294 if (res)
105295 goto err_stop;
105296- } else if (local->monitors == 0 && local->open_count == 0) {
105297+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
105298 res = ieee80211_add_virtual_monitor(local);
105299 if (res)
105300 goto err_stop;
105301@@ -701,7 +701,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105302 atomic_inc(&local->iff_promiscs);
105303
105304 if (coming_up)
105305- local->open_count++;
105306+ local_inc(&local->open_count);
105307
105308 if (hw_reconf_flags)
105309 ieee80211_hw_config(local, hw_reconf_flags);
105310@@ -739,7 +739,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105311 err_del_interface:
105312 drv_remove_interface(local, sdata);
105313 err_stop:
105314- if (!local->open_count)
105315+ if (!local_read(&local->open_count))
105316 drv_stop(local);
105317 err_del_bss:
105318 sdata->bss = NULL;
105319@@ -907,7 +907,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105320 }
105321
105322 if (going_down)
105323- local->open_count--;
105324+ local_dec(&local->open_count);
105325
105326 switch (sdata->vif.type) {
105327 case NL80211_IFTYPE_AP_VLAN:
105328@@ -969,7 +969,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105329 }
105330 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
105331
105332- if (local->open_count == 0)
105333+ if (local_read(&local->open_count) == 0)
105334 ieee80211_clear_tx_pending(local);
105335
105336 /*
105337@@ -1012,7 +1012,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105338 if (cancel_scan)
105339 flush_delayed_work(&local->scan_work);
105340
105341- if (local->open_count == 0) {
105342+ if (local_read(&local->open_count) == 0) {
105343 ieee80211_stop_device(local);
105344
105345 /* no reconfiguring after stop! */
105346@@ -1023,7 +1023,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105347 ieee80211_configure_filter(local);
105348 ieee80211_hw_config(local, hw_reconf_flags);
105349
105350- if (local->monitors == local->open_count)
105351+ if (local->monitors == local_read(&local->open_count))
105352 ieee80211_add_virtual_monitor(local);
105353 }
105354
105355diff --git a/net/mac80211/main.c b/net/mac80211/main.c
105356index 5e09d35..e2fdbe2 100644
105357--- a/net/mac80211/main.c
105358+++ b/net/mac80211/main.c
105359@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
105360 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
105361 IEEE80211_CONF_CHANGE_POWER);
105362
105363- if (changed && local->open_count) {
105364+ if (changed && local_read(&local->open_count)) {
105365 ret = drv_config(local, changed);
105366 /*
105367 * Goal:
105368diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
105369index ca405b6..6cc8bee 100644
105370--- a/net/mac80211/pm.c
105371+++ b/net/mac80211/pm.c
105372@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105373 struct ieee80211_sub_if_data *sdata;
105374 struct sta_info *sta;
105375
105376- if (!local->open_count)
105377+ if (!local_read(&local->open_count))
105378 goto suspend;
105379
105380 ieee80211_scan_cancel(local);
105381@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105382 cancel_work_sync(&local->dynamic_ps_enable_work);
105383 del_timer_sync(&local->dynamic_ps_timer);
105384
105385- local->wowlan = wowlan && local->open_count;
105386+ local->wowlan = wowlan && local_read(&local->open_count);
105387 if (local->wowlan) {
105388 int err = drv_suspend(local, wowlan);
105389 if (err < 0) {
105390@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105391 WARN_ON(!list_empty(&local->chanctx_list));
105392
105393 /* stop hardware - this must stop RX */
105394- if (local->open_count)
105395+ if (local_read(&local->open_count))
105396 ieee80211_stop_device(local);
105397
105398 suspend:
105399diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
105400index d53355b..21f583a 100644
105401--- a/net/mac80211/rate.c
105402+++ b/net/mac80211/rate.c
105403@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
105404
105405 ASSERT_RTNL();
105406
105407- if (local->open_count)
105408+ if (local_read(&local->open_count))
105409 return -EBUSY;
105410
105411 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
105412diff --git a/net/mac80211/util.c b/net/mac80211/util.c
105413index 747bdcf..eb2b981 100644
105414--- a/net/mac80211/util.c
105415+++ b/net/mac80211/util.c
105416@@ -1741,7 +1741,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105417 bool sched_scan_stopped = false;
105418
105419 /* nothing to do if HW shouldn't run */
105420- if (!local->open_count)
105421+ if (!local_read(&local->open_count))
105422 goto wake_up;
105423
105424 #ifdef CONFIG_PM
105425@@ -1993,7 +1993,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105426 local->in_reconfig = false;
105427 barrier();
105428
105429- if (local->monitors == local->open_count && local->monitors > 0)
105430+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
105431 ieee80211_add_virtual_monitor(local);
105432
105433 /*
105434@@ -2048,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105435 * If this is for hw restart things are still running.
105436 * We may want to change that later, however.
105437 */
105438- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
105439+ if (local_read(&local->open_count) && (!local->suspended || reconfig_due_to_wowlan))
105440 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
105441
105442 if (!local->suspended)
105443@@ -2072,7 +2072,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105444 flush_delayed_work(&local->scan_work);
105445 }
105446
105447- if (local->open_count && !reconfig_due_to_wowlan)
105448+ if (local_read(&local->open_count) && !reconfig_due_to_wowlan)
105449 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
105450
105451 list_for_each_entry(sdata, &local->interfaces, list) {
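
All of the mac80211 hunks above are one logical change: local->open_count becomes a local_t, and every access goes through the <asm/local.h> accessors so the PaX instrumentation can cover this count the same way it covers atomics. The accessor API in brief (a sketch with a hypothetical caller, not mac80211 code):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static void example_open_close(void)   /* hypothetical */
{
        local_inc(&open_count);               /* was: open_count++ */
        if (local_read(&open_count) == 0)     /* was: open_count == 0 */
                return;
        local_dec(&open_count);               /* was: open_count-- */
}
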
105452diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
105453index b02660f..c0f791c 100644
105454--- a/net/netfilter/Kconfig
105455+++ b/net/netfilter/Kconfig
105456@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
105457
105458 To compile it as a module, choose M here. If unsure, say N.
105459
105460+config NETFILTER_XT_MATCH_GRADM
105461+ tristate '"gradm" match support'
105462+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
105463+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
105464+ ---help---
105465+ The gradm match allows matching on whether grsecurity RBAC is
105466+ enabled. It is useful when iptables rules are applied early during
105467+ boot to prevent connections to the machine (except from a trusted
105468+ host) while the RBAC system is disabled.
105469+
105470 config NETFILTER_XT_MATCH_HASHLIMIT
105471 tristate '"hashlimit" match support'
105472 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
105473diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
105474index 89f73a9..e4e5bd9 100644
105475--- a/net/netfilter/Makefile
105476+++ b/net/netfilter/Makefile
105477@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
105478 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
105479 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
105480 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
105481+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
105482 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
105483 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
105484 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
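
With the Kconfig entry and Makefile rule above, the new xt_gradm.c (added at the end of this patch) plugs into xtables like any other match module. For orientation, a generic skeleton of the registration boilerplate such a match uses; the matchinfo size and the match body here are assumptions for illustration, not the real gradm internals:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        /* sketch: the real module tests the RBAC-enabled state */
        return true;
}

static struct xt_match gradm_mt_reg __read_mostly = {
        .name      = "gradm",
        .revision  = 0,
        .family    = NFPROTO_UNSPEC,
        .match     = gradm_mt,
        .matchsize = XT_ALIGN(4),   /* assumed flag word */
        .me        = THIS_MODULE,
};

static int __init gradm_mt_init(void)
{
        return xt_register_match(&gradm_mt_reg);
}

static void __exit gradm_mt_exit(void)
{
        xt_unregister_match(&gradm_mt_reg);
}

module_init(gradm_mt_init);
module_exit(gradm_mt_exit);
MODULE_LICENSE("GPL");
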
105485diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
105486index d259da3..6a32b2c 100644
105487--- a/net/netfilter/ipset/ip_set_core.c
105488+++ b/net/netfilter/ipset/ip_set_core.c
105489@@ -1952,7 +1952,7 @@ done:
105490 return ret;
105491 }
105492
105493-static struct nf_sockopt_ops so_set __read_mostly = {
105494+static struct nf_sockopt_ops so_set = {
105495 .pf = PF_INET,
105496 .get_optmin = SO_IP_SET,
105497 .get_optmax = SO_IP_SET + 1,
105498diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
105499index b0f7b62..0541842 100644
105500--- a/net/netfilter/ipvs/ip_vs_conn.c
105501+++ b/net/netfilter/ipvs/ip_vs_conn.c
105502@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
105503 /* Increase the refcnt counter of the dest */
105504 ip_vs_dest_hold(dest);
105505
105506- conn_flags = atomic_read(&dest->conn_flags);
105507+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
105508 if (cp->protocol != IPPROTO_UDP)
105509 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
105510 flags = cp->flags;
105511@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
105512
105513 cp->control = NULL;
105514 atomic_set(&cp->n_control, 0);
105515- atomic_set(&cp->in_pkts, 0);
105516+ atomic_set_unchecked(&cp->in_pkts, 0);
105517
105518 cp->packet_xmit = NULL;
105519 cp->app = NULL;
105520@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
105521
105522 /* Don't drop the entry if its number of incoming packets is not
105523 located in [0, 8] */
105524- i = atomic_read(&cp->in_pkts);
105525+ i = atomic_read_unchecked(&cp->in_pkts);
105526 if (i > 8 || i < 0) return 0;
105527
105528 if (!todrop_rate[i]) return 0;
105529diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
105530index b87ca32..76c7799 100644
105531--- a/net/netfilter/ipvs/ip_vs_core.c
105532+++ b/net/netfilter/ipvs/ip_vs_core.c
105533@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
105534 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
105535 /* do not touch skb anymore */
105536
105537- atomic_inc(&cp->in_pkts);
105538+ atomic_inc_unchecked(&cp->in_pkts);
105539 ip_vs_conn_put(cp);
105540 return ret;
105541 }
105542@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
105543 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
105544 pkts = sysctl_sync_threshold(ipvs);
105545 else
105546- pkts = atomic_add_return(1, &cp->in_pkts);
105547+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105548
105549 if (ipvs->sync_state & IP_VS_STATE_MASTER)
105550 ip_vs_sync_conn(net, cp, pkts);
105551diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
105552index ed99448..3ba6cad 100644
105553--- a/net/netfilter/ipvs/ip_vs_ctl.c
105554+++ b/net/netfilter/ipvs/ip_vs_ctl.c
105555@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
105556 */
105557 ip_vs_rs_hash(ipvs, dest);
105558 }
105559- atomic_set(&dest->conn_flags, conn_flags);
105560+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
105561
105562 /* bind the service */
105563 old_svc = rcu_dereference_protected(dest->svc, 1);
105564@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
105565 * align with netns init in ip_vs_control_net_init()
105566 */
105567
105568-static struct ctl_table vs_vars[] = {
105569+static ctl_table_no_const vs_vars[] __read_only = {
105570 {
105571 .procname = "amemthresh",
105572 .maxlen = sizeof(int),
105573@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105574 " %-7s %-6d %-10d %-10d\n",
105575 &dest->addr.in6,
105576 ntohs(dest->port),
105577- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105578+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105579 atomic_read(&dest->weight),
105580 atomic_read(&dest->activeconns),
105581 atomic_read(&dest->inactconns));
105582@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105583 "%-7s %-6d %-10d %-10d\n",
105584 ntohl(dest->addr.ip),
105585 ntohs(dest->port),
105586- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105587+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105588 atomic_read(&dest->weight),
105589 atomic_read(&dest->activeconns),
105590 atomic_read(&dest->inactconns));
105591@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
105592
105593 entry.addr = dest->addr.ip;
105594 entry.port = dest->port;
105595- entry.conn_flags = atomic_read(&dest->conn_flags);
105596+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
105597 entry.weight = atomic_read(&dest->weight);
105598 entry.u_threshold = dest->u_threshold;
105599 entry.l_threshold = dest->l_threshold;
105600@@ -3040,7 +3040,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
105601 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
105602 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
105603 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
105604- (atomic_read(&dest->conn_flags) &
105605+ (atomic_read_unchecked(&dest->conn_flags) &
105606 IP_VS_CONN_F_FWD_MASK)) ||
105607 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
105608 atomic_read(&dest->weight)) ||
105609@@ -3675,7 +3675,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
105610 {
105611 int idx;
105612 struct netns_ipvs *ipvs = net_ipvs(net);
105613- struct ctl_table *tbl;
105614+ ctl_table_no_const *tbl;
105615
105616 atomic_set(&ipvs->dropentry, 0);
105617 spin_lock_init(&ipvs->dropentry_lock);
105618diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
105619index 127f140..553d652 100644
105620--- a/net/netfilter/ipvs/ip_vs_lblc.c
105621+++ b/net/netfilter/ipvs/ip_vs_lblc.c
105622@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
105623 * IPVS LBLC sysctl table
105624 */
105625 #ifdef CONFIG_SYSCTL
105626-static struct ctl_table vs_vars_table[] = {
105627+static ctl_table_no_const vs_vars_table[] __read_only = {
105628 {
105629 .procname = "lblc_expiration",
105630 .data = NULL,
105631diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
105632index 2229d2d..b32b785 100644
105633--- a/net/netfilter/ipvs/ip_vs_lblcr.c
105634+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
105635@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
105636 * IPVS LBLCR sysctl table
105637 */
105638
105639-static struct ctl_table vs_vars_table[] = {
105640+static ctl_table_no_const vs_vars_table[] __read_only = {
105641 {
105642 .procname = "lblcr_expiration",
105643 .data = NULL,
105644diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
105645index d93ceeb..4556144 100644
105646--- a/net/netfilter/ipvs/ip_vs_sync.c
105647+++ b/net/netfilter/ipvs/ip_vs_sync.c
105648@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
105649 cp = cp->control;
105650 if (cp) {
105651 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105652- pkts = atomic_add_return(1, &cp->in_pkts);
105653+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105654 else
105655 pkts = sysctl_sync_threshold(ipvs);
105656 ip_vs_sync_conn(net, cp->control, pkts);
105657@@ -771,7 +771,7 @@ control:
105658 if (!cp)
105659 return;
105660 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105661- pkts = atomic_add_return(1, &cp->in_pkts);
105662+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105663 else
105664 pkts = sysctl_sync_threshold(ipvs);
105665 goto sloop;
105666@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
105667
105668 if (opt)
105669 memcpy(&cp->in_seq, opt, sizeof(*opt));
105670- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105671+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105672 cp->state = state;
105673 cp->old_state = cp->state;
105674 /*
105675diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
105676index 3aedbda..6a63567 100644
105677--- a/net/netfilter/ipvs/ip_vs_xmit.c
105678+++ b/net/netfilter/ipvs/ip_vs_xmit.c
105679@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
105680 else
105681 rc = NF_ACCEPT;
105682 /* do not touch skb anymore */
105683- atomic_inc(&cp->in_pkts);
105684+ atomic_inc_unchecked(&cp->in_pkts);
105685 goto out;
105686 }
105687
105688@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105689 else
105690 rc = NF_ACCEPT;
105691 /* do not touch skb anymore */
105692- atomic_inc(&cp->in_pkts);
105693+ atomic_inc_unchecked(&cp->in_pkts);
105694 goto out;
105695 }
105696
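
The IPVS hunks apply the same counter treatment to cp->in_pkts and dest->conn_flags: one is a monotonically growing packet tally, the other a flag word, and neither is a reference count, so both move to atomic_unchecked_t. The atomic_t side of the unchecked API mirrors the atomic_long_t sketch shown earlier:

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)          atomic_read(v)
#define atomic_set_unchecked(v, i)        atomic_set(v, i)
#define atomic_inc_unchecked(v)           atomic_inc(v)
#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
#endif
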
105697diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105698index a4b5e2a..13b1de3 100644
105699--- a/net/netfilter/nf_conntrack_acct.c
105700+++ b/net/netfilter/nf_conntrack_acct.c
105701@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105702 #ifdef CONFIG_SYSCTL
105703 static int nf_conntrack_acct_init_sysctl(struct net *net)
105704 {
105705- struct ctl_table *table;
105706+ ctl_table_no_const *table;
105707
105708 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105709 GFP_KERNEL);
105710diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105711index 13fad86..18c984c 100644
105712--- a/net/netfilter/nf_conntrack_core.c
105713+++ b/net/netfilter/nf_conntrack_core.c
105714@@ -1733,6 +1733,10 @@ void nf_conntrack_init_end(void)
105715 #define DYING_NULLS_VAL ((1<<30)+1)
105716 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105717
105718+#ifdef CONFIG_GRKERNSEC_HIDESYM
105719+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105720+#endif
105721+
105722 int nf_conntrack_init_net(struct net *net)
105723 {
105724 int ret = -ENOMEM;
105725@@ -1758,7 +1762,11 @@ int nf_conntrack_init_net(struct net *net)
105726 if (!net->ct.stat)
105727 goto err_pcpu_lists;
105728
105729+#ifdef CONFIG_GRKERNSEC_HIDESYM
105730+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105731+#else
105732 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105733+#endif
105734 if (!net->ct.slabname)
105735 goto err_slabname;
105736
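
The nf_conntrack_core hunk addresses an information leak rather than a counter: naming the per-netns slab cache with "%p" publishes the struct net address through /proc/slabinfo. Under GRKERNSEC_HIDESYM the name is derived from a plain counter instead. The counter variant in isolation (a sketch with a hypothetical helper):

/* A global counter yields unique, address-free cache names. */
static atomic_unchecked_t cache_id = ATOMIC_INIT(0);

static char *make_cache_name(void)   /* hypothetical */
{
        return kasprintf(GFP_KERNEL, "nf_conntrack_%08x",
                         atomic_inc_return_unchecked(&cache_id));
}
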
105737diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105738index 4e78c57..ec8fb74 100644
105739--- a/net/netfilter/nf_conntrack_ecache.c
105740+++ b/net/netfilter/nf_conntrack_ecache.c
105741@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105742 #ifdef CONFIG_SYSCTL
105743 static int nf_conntrack_event_init_sysctl(struct net *net)
105744 {
105745- struct ctl_table *table;
105746+ ctl_table_no_const *table;
105747
105748 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105749 GFP_KERNEL);
105750diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105751index bd9d315..989947e 100644
105752--- a/net/netfilter/nf_conntrack_helper.c
105753+++ b/net/netfilter/nf_conntrack_helper.c
105754@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105755
105756 static int nf_conntrack_helper_init_sysctl(struct net *net)
105757 {
105758- struct ctl_table *table;
105759+ ctl_table_no_const *table;
105760
105761 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105762 GFP_KERNEL);
105763diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105764index b65d586..beec902 100644
105765--- a/net/netfilter/nf_conntrack_proto.c
105766+++ b/net/netfilter/nf_conntrack_proto.c
105767@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105768
105769 static void
105770 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105771- struct ctl_table **table,
105772+ ctl_table_no_const **table,
105773 unsigned int users)
105774 {
105775 if (users > 0)
105776diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105777index fc823fa..8311af3 100644
105778--- a/net/netfilter/nf_conntrack_standalone.c
105779+++ b/net/netfilter/nf_conntrack_standalone.c
105780@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105781
105782 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105783 {
105784- struct ctl_table *table;
105785+ ctl_table_no_const *table;
105786
105787 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105788 GFP_KERNEL);
105789diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105790index 7a394df..bd91a8a 100644
105791--- a/net/netfilter/nf_conntrack_timestamp.c
105792+++ b/net/netfilter/nf_conntrack_timestamp.c
105793@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105794 #ifdef CONFIG_SYSCTL
105795 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105796 {
105797- struct ctl_table *table;
105798+ ctl_table_no_const *table;
105799
105800 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105801 GFP_KERNEL);
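
The run of netfilter sysctl hunks above is all the same constification move: struct ctl_table becomes read-only by default under the PaX constify plugin, and ctl_table_no_const is the writable twin for tables that are kmemdup()'d and patched per network namespace. The call-site shape, as a sketch with hypothetical field and table names (the real templates are the *_sysctl_table arrays in the hunks):

static int example_init_sysctl(struct net *net)   /* hypothetical */
{
        ctl_table_no_const *table;   /* runtime copy must stay writable */

        table = kmemdup(example_sysctl_table, sizeof(example_sysctl_table),
                        GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        table[0].data = &net->example_value;  /* per-netns pointer patch */

        net->example_header = register_net_sysctl(net, "net/example", table);
        if (!net->example_header) {
                kfree(table);
                return -ENOMEM;
        }
        return 0;
}
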
105802diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105803index 675d12c..b36e825 100644
105804--- a/net/netfilter/nf_log.c
105805+++ b/net/netfilter/nf_log.c
105806@@ -386,7 +386,7 @@ static const struct file_operations nflog_file_ops = {
105807
105808 #ifdef CONFIG_SYSCTL
105809 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105810-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105811+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105812
105813 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105814 void __user *buffer, size_t *lenp, loff_t *ppos)
105815@@ -417,13 +417,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105816 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105817 mutex_unlock(&nf_log_mutex);
105818 } else {
105819+ ctl_table_no_const nf_log_table = *table;
105820+
105821 mutex_lock(&nf_log_mutex);
105822 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
105823 if (!logger)
105824- table->data = "NONE";
105825+ nf_log_table.data = "NONE";
105826 else
105827- table->data = logger->name;
105828- r = proc_dostring(table, write, buffer, lenp, ppos);
105829+ nf_log_table.data = logger->name;
105830+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105831 mutex_unlock(&nf_log_mutex);
105832 }
105833
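
nf_log shows the other half of that constification: nf_log_sysctl_table is now __read_only, so the proc handler may no longer write the logger name into table->data in place. It fills an on-stack copy and hands that to proc_dostring() instead. The pattern extracted (a sketch, hypothetical handler name):

static int example_dostring(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        ctl_table_no_const tmp = *table;  /* writable stack copy */

        tmp.data = "NONE";                /* value chosen at runtime */
        return proc_dostring(&tmp, write, buffer, lenp, ppos);
}
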
105834diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105835index c68c1e5..8b5d670 100644
105836--- a/net/netfilter/nf_sockopt.c
105837+++ b/net/netfilter/nf_sockopt.c
105838@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105839 }
105840 }
105841
105842- list_add(&reg->list, &nf_sockopts);
105843+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105844 out:
105845 mutex_unlock(&nf_sockopt_mutex);
105846 return ret;
105847@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105848 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105849 {
105850 mutex_lock(&nf_sockopt_mutex);
105851- list_del(&reg->list);
105852+ pax_list_del((struct list_head *)&reg->list);
105853 mutex_unlock(&nf_sockopt_mutex);
105854 }
105855 EXPORT_SYMBOL(nf_unregister_sockopt);
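
The nf_sockopt registration list links through structures that the constify plugin makes read-only, so plain list_add()/list_del() would fault on them. pax_list_add()/pax_list_del() wrap the pointer updates in a temporary write window. Conceptually (a sketch; the real helpers are defined elsewhere in this patch):

void pax_list_add(struct list_head *new, struct list_head *head)
{
        pax_open_kernel();                 /* lift kernel write protection */
        __list_add(new, head, head->next);
        pax_close_kernel();                /* restore it */
}
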
105856diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105857index 11d85b3..7fcc420 100644
105858--- a/net/netfilter/nfnetlink_log.c
105859+++ b/net/netfilter/nfnetlink_log.c
105860@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105861 struct nfnl_log_net {
105862 spinlock_t instances_lock;
105863 struct hlist_head instance_table[INSTANCE_BUCKETS];
105864- atomic_t global_seq;
105865+ atomic_unchecked_t global_seq;
105866 };
105867
105868 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105869@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105870 /* global sequence number */
105871 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105872 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105873- htonl(atomic_inc_return(&log->global_seq))))
105874+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105875 goto nla_put_failure;
105876
105877 if (data_len) {
105878diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
105879index 65f3e2b..2e9d6a0 100644
105880--- a/net/netfilter/nft_compat.c
105881+++ b/net/netfilter/nft_compat.c
105882@@ -317,14 +317,7 @@ static void nft_match_eval(const struct nft_expr *expr,
105883 return;
105884 }
105885
105886- switch(ret) {
105887- case true:
105888- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
105889- break;
105890- case false:
105891- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
105892- break;
105893- }
105894+ data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
105895 }
105896
105897 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
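
The nft_compat hunk removes a switch whose case labels were the booleans true and false applied to an int return value; any ret other than 0 or 1 matched neither case and left the verdict register untouched. The ternary covers every value:

/* sketch: ret is the int result of an xtables ->match() hook */
data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
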
105898diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105899new file mode 100644
105900index 0000000..c566332
105901--- /dev/null
105902+++ b/net/netfilter/xt_gradm.c
105903@@ -0,0 +1,51 @@
105904+/*
105905+ * gradm match for netfilter
105906