grsecurity-3.1-4.0.1-201505042053.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 74b6c6d..eac0e77 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index bfcb1a6..2dae09b 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
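The grsec_proc_gid=, grsec_sysfs_restrict= and nopcid switches documented above are ordinary kernel command-line parameters. A minimal way to pin them at boot (the GID value below is made up for illustration) is via /etc/default/grub:

        GRUB_CMDLINE_LINUX="grsec_proc_gid=1001 grsec_sysfs_restrict=0 nopcid"

followed by update-grub: the named group is then exempted from the /proc restrictions, the sysfs lockdown is waived, and PCID use is disabled even on capable processors.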
344@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
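Boot switches of this kind are conventionally consumed very early via the kernel's early_param() hook. The sketch below shows how the documented 0|1|off|fast|full values of pax_sanitize_slab= might be parsed; the enum, variable and handler names are illustrative assumptions, not taken from this patch's body:

        #include <linux/init.h>
        #include <linux/errno.h>
        #include <linux/string.h>

        /* Illustrative state; the real patch wires up its own. */
        enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };
        static enum sanitize_mode sanitize_slab = SANITIZE_FAST;       /* default */

        static int __init sanitize_slab_setup(char *str)
        {
                if (!str)
                        return -EINVAL;
                if (!strcmp(str, "0") || !strcmp(str, "off"))
                        sanitize_slab = SANITIZE_OFF;
                else if (!strcmp(str, "1") || !strcmp(str, "fast"))
                        sanitize_slab = SANITIZE_FAST;
                else if (!strcmp(str, "full"))
                        sanitize_slab = SANITIZE_FULL;
                else
                        return -EINVAL; /* kernel warns about the malformed option */
                return 0;
        }
        early_param("pax_sanitize_slab", sanitize_slab_setup);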
375diff --git a/Makefile b/Makefile
376index f499cd2..37a187f 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,72 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
452+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
453+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
454+ifeq ($(KBUILD_EXTMOD),)
455+gcc-plugins:
456+ $(Q)$(MAKE) $(build)=tools/gcc
457+else
458+gcc-plugins: ;
459+endif
460+else
461+gcc-plugins:
462+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
463+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
464+else
465+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
466+endif
467+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
468+endif
469+endif
470+
471 ifdef CONFIG_READABLE_ASM
472 # Disable optimizations that make assembler listings hard to read.
473 # reorder blocks reorders the control in the function
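If the host gcc lacks plugin headers, the $(error ...) branch above aborts the build; the opt-out it names is a plain make variable:

        make DISABLE_PAX_PLUGINS=y

though, as the message says, the proper fix on Debian-family hosts is installing the matching gcc-<ver>-plugin-dev package so the plugin-hardened build can proceed.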
474@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
475 else
476 KBUILD_CFLAGS += -g
477 endif
478-KBUILD_AFLAGS += -Wa,-gdwarf-2
479+KBUILD_AFLAGS += -Wa,--gdwarf-2
480 endif
481 ifdef CONFIG_DEBUG_INFO_DWARF4
482 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
483@@ -884,7 +952,7 @@ export mod_sign_cmd
484
485
486 ifeq ($(KBUILD_EXTMOD),)
487-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
488+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
489
490 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
491 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
492@@ -934,6 +1002,8 @@ endif
493
494 # The actual objects are generated when descending,
495 # make sure no implicit rule kicks in
496+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
497+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
498 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499
500 # Handle descending into subdirectories listed in $(vmlinux-dirs)
501@@ -943,7 +1013,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
502 # Error messages still appears in the original language
503
504 PHONY += $(vmlinux-dirs)
505-$(vmlinux-dirs): prepare scripts
506+$(vmlinux-dirs): gcc-plugins prepare scripts
507 $(Q)$(MAKE) $(build)=$@
508
509 define filechk_kernel.release
510@@ -986,10 +1056,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
511
512 archprepare: archheaders archscripts prepare1 scripts_basic
513
514+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
515+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
516 prepare0: archprepare FORCE
517 $(Q)$(MAKE) $(build)=.
518
519 # All the preparing..
520+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
521 prepare: prepare0
522
523 # Generate some files
524@@ -1103,6 +1176,8 @@ all: modules
525 # using awk while concatenating to the final file.
526
527 PHONY += modules
528+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
529+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
530 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
531 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
532 @$(kecho) ' Building modules, stage 2.';
533@@ -1118,7 +1193,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
534
535 # Target to prepare building external modules
536 PHONY += modules_prepare
537-modules_prepare: prepare scripts
538+modules_prepare: gcc-plugins prepare scripts
539
540 # Target to install modules
541 PHONY += modules_install
542@@ -1184,7 +1259,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
543 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
544 signing_key.priv signing_key.x509 x509.genkey \
545 extra_certificates signing_key.x509.keyid \
546- signing_key.x509.signer vmlinux-gdb.py
547+ signing_key.x509.signer vmlinux-gdb.py \
548+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
549+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
550+ tools/gcc/randomize_layout_seed.h
551
552 # clean - Delete most, but leave enough to build external modules
553 #
554@@ -1223,7 +1301,7 @@ distclean: mrproper
555 @find $(srctree) $(RCS_FIND_IGNORE) \
556 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
557 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
558- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
559+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
560 -type f -print | xargs rm -f
561
562
563@@ -1389,6 +1467,8 @@ PHONY += $(module-dirs) modules
564 $(module-dirs): crmodverdir $(objtree)/Module.symvers
565 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
566
567+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
568+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
569 modules: $(module-dirs)
570 @$(kecho) ' Building modules, stage 2.';
571 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
572@@ -1529,17 +1609,21 @@ else
573 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
574 endif
575
576-%.s: %.c prepare scripts FORCE
577+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
578+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
579+%.s: %.c gcc-plugins prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581 %.i: %.c prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583-%.o: %.c prepare scripts FORCE
584+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
585+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
586+%.o: %.c gcc-plugins prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588 %.lst: %.c prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.s: %.S prepare scripts FORCE
591+%.s: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593-%.o: %.S prepare scripts FORCE
594+%.o: %.S gcc-plugins prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596 %.symtypes: %.c prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598@@ -1551,11 +1635,15 @@ endif
599 $(build)=$(build-dir)
600 # Make sure the latest headers are built for Documentation
601 Documentation/: headers_install
602-%/: prepare scripts FORCE
603+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
604+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
605+%/: gcc-plugins prepare scripts FORCE
606 $(cmd_crmodverdir)
607 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
608 $(build)=$(build-dir)
609-%.ko: prepare scripts FORCE
610+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
611+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
612+%.ko: gcc-plugins prepare scripts FORCE
613 $(cmd_crmodverdir)
614 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
615 $(build)=$(build-dir) $(@:.ko=.o)
616diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
617index 8f8eafb..3405f46 100644
618--- a/arch/alpha/include/asm/atomic.h
619+++ b/arch/alpha/include/asm/atomic.h
620@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
621 #define atomic_dec(v) atomic_sub(1,(v))
622 #define atomic64_dec(v) atomic64_sub(1,(v))
623
624+#define atomic64_read_unchecked(v) atomic64_read(v)
625+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
626+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
627+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
628+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
629+#define atomic64_inc_unchecked(v) atomic64_inc(v)
630+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
631+#define atomic64_dec_unchecked(v) atomic64_dec(v)
632+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
633+
634 #endif /* _ALPHA_ATOMIC_H */
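On alpha these _unchecked aliases are straight passthroughs, since PAX_REFCOUNT is not implemented for this architecture; on instrumented architectures the plain atomics trap on signed overflow while the _unchecked family deliberately wraps. A standalone userspace sketch of that split (names invented for illustration):

        #include <stdio.h>
        #include <limits.h>

        /* Checked increment: detects signed overflow, the behaviour
         * PAX_REFCOUNT gives atomic_inc() on instrumented arches. */
        static int inc_checked(int *v)
        {
                int res;
                if (__builtin_add_overflow(*v, 1, &res))
                        return -1;      /* would trap in-kernel */
                *v = res;
                return 0;
        }

        /* Unchecked increment: wraps silently, like atomic_inc_unchecked(),
         * for counters where wraparound is harmless (statistics etc.). */
        static void inc_unchecked(int *v)
        {
                *v = (int)((unsigned int)*v + 1u);
        }

        int main(void)
        {
                int a = INT_MAX, b = INT_MAX;
                printf("checked: %s\n", inc_checked(&a) ? "overflow caught" : "ok");
                inc_unchecked(&b);
                printf("unchecked wrapped to %d\n", b); /* INT_MIN */
                return 0;
        }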
635diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
636index ad368a9..fbe0f25 100644
637--- a/arch/alpha/include/asm/cache.h
638+++ b/arch/alpha/include/asm/cache.h
639@@ -4,19 +4,19 @@
640 #ifndef __ARCH_ALPHA_CACHE_H
641 #define __ARCH_ALPHA_CACHE_H
642
643+#include <linux/const.h>
644
645 /* Bytes per L1 (data) cache line. */
646 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
647-# define L1_CACHE_BYTES 64
648 # define L1_CACHE_SHIFT 6
649 #else
650 /* Both EV4 and EV5 are write-through, read-allocate,
651 direct-mapped, physical.
652 */
653-# define L1_CACHE_BYTES 32
654 # define L1_CACHE_SHIFT 5
655 #endif
656
657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
658 #define SMP_CACHE_BYTES L1_CACHE_BYTES
659
660 #endif
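The reason L1_CACHE_BYTES is rederived as _AC(1,UL) << L1_CACHE_SHIFT is that <linux/const.h> keeps the literal usable from both C (UL-suffixed) and assembler (bare constant). A self-contained sketch of the mechanism, with the two const.h helpers copied in:

        #include <stdio.h>

        /* From <uapi/linux/const.h>: under __ASSEMBLY__, _AC(X,Y) expands
         * to bare X; in C the suffix Y is token-pasted on. */
        #define __AC(X, Y)      (X##Y)
        #define _AC(X, Y)       __AC(X, Y)

        #define L1_CACHE_SHIFT  6       /* EV6-class alpha */
        #define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)

        int main(void)
        {
                printf("%lu bytes per L1 line\n", L1_CACHE_BYTES);      /* 64 */
                return 0;
        }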
661diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
662index 968d999..d36b2df 100644
663--- a/arch/alpha/include/asm/elf.h
664+++ b/arch/alpha/include/asm/elf.h
665@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
666
667 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
668
669+#ifdef CONFIG_PAX_ASLR
670+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
671+
672+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
673+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
674+#endif
675+
676 /* $0 is set by ld.so to a pointer to a function which might be
677 registered using atexit. This provides a mean for the dynamic
678 linker to call DT_FINI functions for shared libraries that have
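For scale, on the conventional PaX reading of PAX_DELTA_*_LEN as bits of page-granular randomisation (an interpretation assumed here, not spelled out in the hunk): with alpha's 8 KiB pages, the 64-bit values give 2^28 * 2^13 = 2 TiB of mmap-base jitter and 2^19 * 2^13 = 4 GiB for the stack, while ADDR_LIMIT_32BIT tasks get 2^14 * 2^13 = 128 MiB of each.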
679diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
680index aab14a0..b4fa3e7 100644
681--- a/arch/alpha/include/asm/pgalloc.h
682+++ b/arch/alpha/include/asm/pgalloc.h
683@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
684 pgd_set(pgd, pmd);
685 }
686
687+static inline void
688+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
689+{
690+ pgd_populate(mm, pgd, pmd);
691+}
692+
693 extern pgd_t *pgd_alloc(struct mm_struct *mm);
694
695 static inline void
696diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
697index a9a1195..e9b8417 100644
698--- a/arch/alpha/include/asm/pgtable.h
699+++ b/arch/alpha/include/asm/pgtable.h
700@@ -101,6 +101,17 @@ struct vm_area_struct;
701 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
702 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
703 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
704+
705+#ifdef CONFIG_PAX_PAGEEXEC
706+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
707+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
708+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
709+#else
710+# define PAGE_SHARED_NOEXEC PAGE_SHARED
711+# define PAGE_COPY_NOEXEC PAGE_COPY
712+# define PAGE_READONLY_NOEXEC PAGE_READONLY
713+#endif
714+
715 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
716
717 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
718diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
719index 2fd00b7..cfd5069 100644
720--- a/arch/alpha/kernel/module.c
721+++ b/arch/alpha/kernel/module.c
722@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
723
724 /* The small sections were sorted to the end of the segment.
725 The following should definitely cover them. */
726- gp = (u64)me->module_core + me->core_size - 0x8000;
727+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
728 got = sechdrs[me->arch.gotsecindex].sh_addr;
729
730 for (i = 0; i < n; i++) {
731diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
732index e51f578..16c64a3 100644
733--- a/arch/alpha/kernel/osf_sys.c
734+++ b/arch/alpha/kernel/osf_sys.c
735@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
736 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
737
738 static unsigned long
739-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
740- unsigned long limit)
741+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
742+ unsigned long limit, unsigned long flags)
743 {
744 struct vm_unmapped_area_info info;
745+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
746
747 info.flags = 0;
748 info.length = len;
749@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
750 info.high_limit = limit;
751 info.align_mask = 0;
752 info.align_offset = 0;
753+ info.threadstack_offset = offset;
754 return vm_unmapped_area(&info);
755 }
756
757@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
758 merely specific addresses, but regions of memory -- perhaps
759 this feature should be incorporated into all ports? */
760
761+#ifdef CONFIG_PAX_RANDMMAP
762+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
763+#endif
764+
765 if (addr) {
766- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
767+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
768 if (addr != (unsigned long) -ENOMEM)
769 return addr;
770 }
771
772 /* Next, try allocating at TASK_UNMAPPED_BASE. */
773- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
774- len, limit);
775+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
776+
777 if (addr != (unsigned long) -ENOMEM)
778 return addr;
779
780 /* Finally, try allocating in low memory. */
781- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
782+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
783
784 return addr;
785 }
786diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
787index 9d0ac09..479a962 100644
788--- a/arch/alpha/mm/fault.c
789+++ b/arch/alpha/mm/fault.c
790@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
791 __reload_thread(pcb);
792 }
793
794+#ifdef CONFIG_PAX_PAGEEXEC
795+/*
796+ * PaX: decide what to do with offenders (regs->pc = fault address)
797+ *
798+ * returns 1 when task should be killed
799+ * 2 when patched PLT trampoline was detected
800+ * 3 when unpatched PLT trampoline was detected
801+ */
802+static int pax_handle_fetch_fault(struct pt_regs *regs)
803+{
804+
805+#ifdef CONFIG_PAX_EMUPLT
806+ int err;
807+
808+ do { /* PaX: patched PLT emulation #1 */
809+ unsigned int ldah, ldq, jmp;
810+
811+ err = get_user(ldah, (unsigned int *)regs->pc);
812+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
813+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
814+
815+ if (err)
816+ break;
817+
818+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
819+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
820+ jmp == 0x6BFB0000U)
821+ {
822+ unsigned long r27, addr;
823+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
824+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
825+
826+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
827+ err = get_user(r27, (unsigned long *)addr);
828+ if (err)
829+ break;
830+
831+ regs->r27 = r27;
832+ regs->pc = r27;
833+ return 2;
834+ }
835+ } while (0);
836+
837+ do { /* PaX: patched PLT emulation #2 */
838+ unsigned int ldah, lda, br;
839+
840+ err = get_user(ldah, (unsigned int *)regs->pc);
841+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
842+ err |= get_user(br, (unsigned int *)(regs->pc+8));
843+
844+ if (err)
845+ break;
846+
847+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
848+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
849+ (br & 0xFFE00000U) == 0xC3E00000U)
850+ {
851+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
852+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
853+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
854+
855+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
856+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
857+ return 2;
858+ }
859+ } while (0);
860+
861+ do { /* PaX: unpatched PLT emulation */
862+ unsigned int br;
863+
864+ err = get_user(br, (unsigned int *)regs->pc);
865+
866+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
867+ unsigned int br2, ldq, nop, jmp;
868+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
869+
870+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
871+ err = get_user(br2, (unsigned int *)addr);
872+ err |= get_user(ldq, (unsigned int *)(addr+4));
873+ err |= get_user(nop, (unsigned int *)(addr+8));
874+ err |= get_user(jmp, (unsigned int *)(addr+12));
875+ err |= get_user(resolver, (unsigned long *)(addr+16));
876+
877+ if (err)
878+ break;
879+
880+ if (br2 == 0xC3600000U &&
881+ ldq == 0xA77B000CU &&
882+ nop == 0x47FF041FU &&
883+ jmp == 0x6B7B0000U)
884+ {
885+ regs->r28 = regs->pc+4;
886+ regs->r27 = addr+16;
887+ regs->pc = resolver;
888+ return 3;
889+ }
890+ }
891+ } while (0);
892+#endif
893+
894+ return 1;
895+}
896+
897+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
898+{
899+ unsigned long i;
900+
901+ printk(KERN_ERR "PAX: bytes at PC: ");
902+ for (i = 0; i < 5; i++) {
903+ unsigned int c;
904+ if (get_user(c, (unsigned int *)pc+i))
905+ printk(KERN_CONT "???????? ");
906+ else
907+ printk(KERN_CONT "%08x ", c);
908+ }
909+ printk("\n");
910+}
911+#endif
912
913 /*
914 * This routine handles page faults. It determines the address,
915@@ -133,8 +251,29 @@ retry:
916 good_area:
917 si_code = SEGV_ACCERR;
918 if (cause < 0) {
919- if (!(vma->vm_flags & VM_EXEC))
920+ if (!(vma->vm_flags & VM_EXEC)) {
921+
922+#ifdef CONFIG_PAX_PAGEEXEC
923+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
924+ goto bad_area;
925+
926+ up_read(&mm->mmap_sem);
927+ switch (pax_handle_fetch_fault(regs)) {
928+
929+#ifdef CONFIG_PAX_EMUPLT
930+ case 2:
931+ case 3:
932+ return;
933+#endif
934+
935+ }
936+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
937+ do_group_exit(SIGKILL);
938+#else
939 goto bad_area;
940+#endif
941+
942+ }
943 } else if (!cause) {
944 /* Allow reads even for write-only mappings */
945 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
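When the PAGEEXEC fault handler above cannot attribute an instruction-fetch fault to a PLT trampoline (return code 1; codes 2 and 3 are the patched and unpatched trampolines, which get emulated and resumed), pax_report_insns() dumps the five words at the faulting PC before the task is killed. Given its printk format, a log line would look something like the following, with the instruction words invented for illustration and ???????? standing in, as in the code, for a word that could not be read back:

        PAX: bytes at PC: 279c0012 a39c0008 6bfc0000 47ff041f ????????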
946diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
947index cf4c0c9..a87ecf5 100644
948--- a/arch/arm/Kconfig
949+++ b/arch/arm/Kconfig
950@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP
951
952 config UACCESS_WITH_MEMCPY
953 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
954- depends on MMU
955+ depends on MMU && !PAX_MEMORY_UDEREF
956 default y if CPU_FEROCEON
957 help
958 Implement faster copy_to_user and clear_user methods for CPU
959@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
960 config KEXEC
961 bool "Kexec system call (EXPERIMENTAL)"
962 depends on (!SMP || PM_SLEEP_SMP)
963+ depends on !GRKERNSEC_KMEM
964 help
965 kexec is a system call that implements the ability to shutdown your
966 current kernel, and to start another kernel. It is like a reboot
967diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
968index e22c119..abe7041 100644
969--- a/arch/arm/include/asm/atomic.h
970+++ b/arch/arm/include/asm/atomic.h
971@@ -18,17 +18,41 @@
972 #include <asm/barrier.h>
973 #include <asm/cmpxchg.h>
974
975+#ifdef CONFIG_GENERIC_ATOMIC64
976+#include <asm-generic/atomic64.h>
977+#endif
978+
979 #define ATOMIC_INIT(i) { (i) }
980
981 #ifdef __KERNEL__
982
983+#ifdef CONFIG_THUMB2_KERNEL
984+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
985+#else
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
987+#endif
988+
989+#define _ASM_EXTABLE(from, to) \
990+" .pushsection __ex_table,\"a\"\n"\
991+" .align 3\n" \
992+" .long " #from ", " #to"\n" \
993+" .popsection"
994+
995 /*
996 * On ARM, ordinary assignment (str instruction) doesn't clear the local
997 * strex/ldrex monitor on some implementations. The reason we can use it for
998 * atomic_set() is the clrex or dummy strex done on every exception return.
999 */
1000 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1001+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1002+{
1003+ return ACCESS_ONCE(v->counter);
1004+}
1005 #define atomic_set(v,i) (((v)->counter) = (i))
1006+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1007+{
1008+ v->counter = i;
1009+}
1010
1011 #if __LINUX_ARM_ARCH__ >= 6
1012
1013@@ -38,26 +62,50 @@
1014 * to ensure that the update happens.
1015 */
1016
1017-#define ATOMIC_OP(op, c_op, asm_op) \
1018-static inline void atomic_##op(int i, atomic_t *v) \
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+#define __OVERFLOW_POST \
1021+ " bvc 3f\n" \
1022+ "2: " REFCOUNT_TRAP_INSN "\n"\
1023+ "3:\n"
1024+#define __OVERFLOW_POST_RETURN \
1025+ " bvc 3f\n" \
1026+" mov %0, %1\n" \
1027+ "2: " REFCOUNT_TRAP_INSN "\n"\
1028+ "3:\n"
1029+#define __OVERFLOW_EXTABLE \
1030+ "4:\n" \
1031+ _ASM_EXTABLE(2b, 4b)
1032+#else
1033+#define __OVERFLOW_POST
1034+#define __OVERFLOW_POST_RETURN
1035+#define __OVERFLOW_EXTABLE
1036+#endif
1037+
1038+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1039+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1040 { \
1041 unsigned long tmp; \
1042 int result; \
1043 \
1044 prefetchw(&v->counter); \
1045- __asm__ __volatile__("@ atomic_" #op "\n" \
1046+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1047 "1: ldrex %0, [%3]\n" \
1048 " " #asm_op " %0, %0, %4\n" \
1049+ post_op \
1050 " strex %1, %0, [%3]\n" \
1051 " teq %1, #0\n" \
1052-" bne 1b" \
1053+" bne 1b\n" \
1054+ extable \
1055 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1056 : "r" (&v->counter), "Ir" (i) \
1057 : "cc"); \
1058 } \
1059
1060-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1061-static inline int atomic_##op##_return(int i, atomic_t *v) \
1062+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1063+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1064+
1065+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1066+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1067 { \
1068 unsigned long tmp; \
1069 int result; \
1070@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1071 smp_mb(); \
1072 prefetchw(&v->counter); \
1073 \
1074- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1075+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1076 "1: ldrex %0, [%3]\n" \
1077 " " #asm_op " %0, %0, %4\n" \
1078+ post_op \
1079 " strex %1, %0, [%3]\n" \
1080 " teq %1, #0\n" \
1081-" bne 1b" \
1082+" bne 1b\n" \
1083+ extable \
1084 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1085 : "r" (&v->counter), "Ir" (i) \
1086 : "cc"); \
1087@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1088 return result; \
1089 }
1090
1091+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1092+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1093+
1094 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1095 {
1096 int oldval;
1097@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 __asm__ __volatile__ ("@ atomic_add_unless\n"
1099 "1: ldrex %0, [%4]\n"
1100 " teq %0, %5\n"
1101-" beq 2f\n"
1102-" add %1, %0, %6\n"
1103+" beq 4f\n"
1104+" adds %1, %0, %6\n"
1105+
1106+#ifdef CONFIG_PAX_REFCOUNT
1107+" bvc 3f\n"
1108+"2: " REFCOUNT_TRAP_INSN "\n"
1109+"3:\n"
1110+#endif
1111+
1112 " strex %2, %1, [%4]\n"
1113 " teq %2, #0\n"
1114 " bne 1b\n"
1115-"2:"
1116+"4:"
1117+
1118+#ifdef CONFIG_PAX_REFCOUNT
1119+ _ASM_EXTABLE(2b, 4b)
1120+#endif
1121+
1122 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1123 : "r" (&v->counter), "r" (u), "r" (a)
1124 : "cc");
1125@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1126 return oldval;
1127 }
1128
1129+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1130+{
1131+ unsigned long oldval, res;
1132+
1133+ smp_mb();
1134+
1135+ do {
1136+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1137+ "ldrex %1, [%3]\n"
1138+ "mov %0, #0\n"
1139+ "teq %1, %4\n"
1140+ "strexeq %0, %5, [%3]\n"
1141+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1142+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1143+ : "cc");
1144+ } while (res);
1145+
1146+ smp_mb();
1147+
1148+ return oldval;
1149+}
1150+
1151 #else /* ARM_ARCH_6 */
1152
1153 #ifdef CONFIG_SMP
1154 #error SMP not supported on pre-ARMv6 CPUs
1155 #endif
1156
1157-#define ATOMIC_OP(op, c_op, asm_op) \
1158-static inline void atomic_##op(int i, atomic_t *v) \
1159+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1160+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1161 { \
1162 unsigned long flags; \
1163 \
1164@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1165 raw_local_irq_restore(flags); \
1166 } \
1167
1168-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1169-static inline int atomic_##op##_return(int i, atomic_t *v) \
1170+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1171+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1172+
1173+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1174+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1175 { \
1176 unsigned long flags; \
1177 int val; \
1178@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1179 return val; \
1180 }
1181
1182+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1183+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1184+
1185 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 {
1187 int ret;
1188@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1189 return ret;
1190 }
1191
1192+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1193+{
1194+ return atomic_cmpxchg((atomic_t *)v, old, new);
1195+}
1196+
1197 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1198 {
1199 int c, old;
1200@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1201
1202 #undef ATOMIC_OPS
1203 #undef ATOMIC_OP_RETURN
1204+#undef __ATOMIC_OP_RETURN
1205 #undef ATOMIC_OP
1206+#undef __ATOMIC_OP
1207
1208 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1209+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1210+{
1211+ return xchg(&v->counter, new);
1212+}
1213
1214 #define atomic_inc(v) atomic_add(1, v)
1215+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1216+{
1217+ atomic_add_unchecked(1, v);
1218+}
1219 #define atomic_dec(v) atomic_sub(1, v)
1220+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1221+{
1222+ atomic_sub_unchecked(1, v);
1223+}
1224
1225 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1226+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1227+{
1228+ return atomic_add_return_unchecked(1, v) == 0;
1229+}
1230 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1231 #define atomic_inc_return(v) (atomic_add_return(1, v))
1232+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1233+{
1234+ return atomic_add_return_unchecked(1, v);
1235+}
1236 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1237 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1238
1239@@ -216,6 +336,14 @@ typedef struct {
1240 long long counter;
1241 } atomic64_t;
1242
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+typedef struct {
1245+ long long counter;
1246+} atomic64_unchecked_t;
1247+#else
1248+typedef atomic64_t atomic64_unchecked_t;
1249+#endif
1250+
1251 #define ATOMIC64_INIT(i) { (i) }
1252
1253 #ifdef CONFIG_ARM_LPAE
1254@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1255 return result;
1256 }
1257
1258+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1259+{
1260+ long long result;
1261+
1262+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1263+" ldrd %0, %H0, [%1]"
1264+ : "=&r" (result)
1265+ : "r" (&v->counter), "Qo" (v->counter)
1266+ );
1267+
1268+ return result;
1269+}
1270+
1271 static inline void atomic64_set(atomic64_t *v, long long i)
1272 {
1273 __asm__ __volatile__("@ atomic64_set\n"
1274@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1275 : "r" (&v->counter), "r" (i)
1276 );
1277 }
1278+
1279+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1280+{
1281+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1282+" strd %2, %H2, [%1]"
1283+ : "=Qo" (v->counter)
1284+ : "r" (&v->counter), "r" (i)
1285+ );
1286+}
1287 #else
1288 static inline long long atomic64_read(const atomic64_t *v)
1289 {
1290@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1291 return result;
1292 }
1293
1294+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1295+{
1296+ long long result;
1297+
1298+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1299+" ldrexd %0, %H0, [%1]"
1300+ : "=&r" (result)
1301+ : "r" (&v->counter), "Qo" (v->counter)
1302+ );
1303+
1304+ return result;
1305+}
1306+
1307 static inline void atomic64_set(atomic64_t *v, long long i)
1308 {
1309 long long tmp;
1310@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1311 : "r" (&v->counter), "r" (i)
1312 : "cc");
1313 }
1314+
1315+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1316+{
1317+ long long tmp;
1318+
1319+ prefetchw(&v->counter);
1320+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1321+"1: ldrexd %0, %H0, [%2]\n"
1322+" strexd %0, %3, %H3, [%2]\n"
1323+" teq %0, #0\n"
1324+" bne 1b"
1325+ : "=&r" (tmp), "=Qo" (v->counter)
1326+ : "r" (&v->counter), "r" (i)
1327+ : "cc");
1328+}
1329 #endif
1330
1331-#define ATOMIC64_OP(op, op1, op2) \
1332-static inline void atomic64_##op(long long i, atomic64_t *v) \
1333+#undef __OVERFLOW_POST_RETURN
1334+#define __OVERFLOW_POST_RETURN \
1335+ " bvc 3f\n" \
1336+" mov %0, %1\n" \
1337+" mov %H0, %H1\n" \
1338+ "2: " REFCOUNT_TRAP_INSN "\n"\
1339+ "3:\n"
1340+
1341+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1342+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1343 { \
1344 long long result; \
1345 unsigned long tmp; \
1346 \
1347 prefetchw(&v->counter); \
1348- __asm__ __volatile__("@ atomic64_" #op "\n" \
1349+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1350 "1: ldrexd %0, %H0, [%3]\n" \
1351 " " #op1 " %Q0, %Q0, %Q4\n" \
1352 " " #op2 " %R0, %R0, %R4\n" \
1353+ post_op \
1354 " strexd %1, %0, %H0, [%3]\n" \
1355 " teq %1, #0\n" \
1356-" bne 1b" \
1357+" bne 1b\n" \
1358+ extable \
1359 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1360 : "r" (&v->counter), "r" (i) \
1361 : "cc"); \
1362 } \
1363
1364-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1365-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1366+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1367+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1368+
1369+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1370+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1371 { \
1372 long long result; \
1373 unsigned long tmp; \
1374@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1375 smp_mb(); \
1376 prefetchw(&v->counter); \
1377 \
1378- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1379+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1380 "1: ldrexd %0, %H0, [%3]\n" \
1381 " " #op1 " %Q0, %Q0, %Q4\n" \
1382 " " #op2 " %R0, %R0, %R4\n" \
1383+ post_op \
1384 " strexd %1, %0, %H0, [%3]\n" \
1385 " teq %1, #0\n" \
1386-" bne 1b" \
1387+" bne 1b\n" \
1388+ extable \
1389 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1390 : "r" (&v->counter), "r" (i) \
1391 : "cc"); \
1392@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1393 return result; \
1394 }
1395
1396+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1397+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1398+
1399 #define ATOMIC64_OPS(op, op1, op2) \
1400 ATOMIC64_OP(op, op1, op2) \
1401 ATOMIC64_OP_RETURN(op, op1, op2)
1402@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1403
1404 #undef ATOMIC64_OPS
1405 #undef ATOMIC64_OP_RETURN
1406+#undef __ATOMIC64_OP_RETURN
1407 #undef ATOMIC64_OP
1408+#undef __ATOMIC64_OP
1409+#undef __OVERFLOW_EXTABLE
1410+#undef __OVERFLOW_POST_RETURN
1411+#undef __OVERFLOW_POST
1412
1413 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1414 long long new)
1415@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 return oldval;
1417 }
1418
1419+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1420+ long long new)
1421+{
1422+ long long oldval;
1423+ unsigned long res;
1424+
1425+ smp_mb();
1426+
1427+ do {
1428+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1429+ "ldrexd %1, %H1, [%3]\n"
1430+ "mov %0, #0\n"
1431+ "teq %1, %4\n"
1432+ "teqeq %H1, %H4\n"
1433+ "strexdeq %0, %5, %H5, [%3]"
1434+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1435+ : "r" (&ptr->counter), "r" (old), "r" (new)
1436+ : "cc");
1437+ } while (res);
1438+
1439+ smp_mb();
1440+
1441+ return oldval;
1442+}
1443+
1444 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 {
1446 long long result;
1447@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1448 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1449 {
1450 long long result;
1451- unsigned long tmp;
1452+ u64 tmp;
1453
1454 smp_mb();
1455 prefetchw(&v->counter);
1456
1457 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1458-"1: ldrexd %0, %H0, [%3]\n"
1459-" subs %Q0, %Q0, #1\n"
1460-" sbc %R0, %R0, #0\n"
1461+"1: ldrexd %1, %H1, [%3]\n"
1462+" subs %Q0, %Q1, #1\n"
1463+" sbcs %R0, %R1, #0\n"
1464+
1465+#ifdef CONFIG_PAX_REFCOUNT
1466+" bvc 3f\n"
1467+" mov %Q0, %Q1\n"
1468+" mov %R0, %R1\n"
1469+"2: " REFCOUNT_TRAP_INSN "\n"
1470+"3:\n"
1471+#endif
1472+
1473 " teq %R0, #0\n"
1474-" bmi 2f\n"
1475+" bmi 4f\n"
1476 " strexd %1, %0, %H0, [%3]\n"
1477 " teq %1, #0\n"
1478 " bne 1b\n"
1479-"2:"
1480+"4:\n"
1481+
1482+#ifdef CONFIG_PAX_REFCOUNT
1483+ _ASM_EXTABLE(2b, 4b)
1484+#endif
1485+
1486 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1487 : "r" (&v->counter)
1488 : "cc");
1489@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1490 " teq %0, %5\n"
1491 " teqeq %H0, %H5\n"
1492 " moveq %1, #0\n"
1493-" beq 2f\n"
1494+" beq 4f\n"
1495 " adds %Q0, %Q0, %Q6\n"
1496-" adc %R0, %R0, %R6\n"
1497+" adcs %R0, %R0, %R6\n"
1498+
1499+#ifdef CONFIG_PAX_REFCOUNT
1500+" bvc 3f\n"
1501+"2: " REFCOUNT_TRAP_INSN "\n"
1502+"3:\n"
1503+#endif
1504+
1505 " strexd %2, %0, %H0, [%4]\n"
1506 " teq %2, #0\n"
1507 " bne 1b\n"
1508-"2:"
1509+"4:\n"
1510+
1511+#ifdef CONFIG_PAX_REFCOUNT
1512+ _ASM_EXTABLE(2b, 4b)
1513+#endif
1514+
1515 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1516 : "r" (&v->counter), "r" (u), "r" (a)
1517 : "cc");
1518@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1519
1520 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1521 #define atomic64_inc(v) atomic64_add(1LL, (v))
1522+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1523 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1524+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1525 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1526 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1527 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1528+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1529 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1530 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1531 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1532diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1533index d2f81e6..3c4dba5 100644
1534--- a/arch/arm/include/asm/barrier.h
1535+++ b/arch/arm/include/asm/barrier.h
1536@@ -67,7 +67,7 @@
1537 do { \
1538 compiletime_assert_atomic_type(*p); \
1539 smp_mb(); \
1540- ACCESS_ONCE(*p) = (v); \
1541+ ACCESS_ONCE_RW(*p) = (v); \
1542 } while (0)
1543
1544 #define smp_load_acquire(p) \
1545diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1546index 75fe66b..ba3dee4 100644
1547--- a/arch/arm/include/asm/cache.h
1548+++ b/arch/arm/include/asm/cache.h
1549@@ -4,8 +4,10 @@
1550 #ifndef __ASMARM_CACHE_H
1551 #define __ASMARM_CACHE_H
1552
1553+#include <linux/const.h>
1554+
1555 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1558
1559 /*
1560 * Memory returned by kmalloc() may be used for DMA, so we must make
1561@@ -24,5 +26,6 @@
1562 #endif
1563
1564 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1565+#define __read_only __attribute__ ((__section__(".data..read_only")))
1566
1567 #endif
1568diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1569index 2d46862..a35415b 100644
1570--- a/arch/arm/include/asm/cacheflush.h
1571+++ b/arch/arm/include/asm/cacheflush.h
1572@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1573 void (*dma_unmap_area)(const void *, size_t, int);
1574
1575 void (*dma_flush_range)(const void *, const void *);
1576-};
1577+} __no_const;
1578
1579 /*
1580 * Select the calling method
1581diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1582index 5233151..87a71fa 100644
1583--- a/arch/arm/include/asm/checksum.h
1584+++ b/arch/arm/include/asm/checksum.h
1585@@ -37,7 +37,19 @@ __wsum
1586 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1587
1588 __wsum
1589-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1590+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1591+
1592+static inline __wsum
1593+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1594+{
1595+ __wsum ret;
1596+ pax_open_userland();
1597+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1598+ pax_close_userland();
1599+ return ret;
1600+}
1601+
1602+
1603
1604 /*
1605 * Fold a partial checksum without adding pseudo headers
1606diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1607index abb2c37..96db950 100644
1608--- a/arch/arm/include/asm/cmpxchg.h
1609+++ b/arch/arm/include/asm/cmpxchg.h
1610@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1611
1612 #define xchg(ptr,x) \
1613 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1614+#define xchg_unchecked(ptr,x) \
1615+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616
1617 #include <asm-generic/cmpxchg-local.h>
1618
1619diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1620index 6ddbe44..b5e38b1a 100644
1621--- a/arch/arm/include/asm/domain.h
1622+++ b/arch/arm/include/asm/domain.h
1623@@ -48,18 +48,37 @@
1624 * Domain types
1625 */
1626 #define DOMAIN_NOACCESS 0
1627-#define DOMAIN_CLIENT 1
1628 #ifdef CONFIG_CPU_USE_DOMAINS
1629+#define DOMAIN_USERCLIENT 1
1630+#define DOMAIN_KERNELCLIENT 1
1631 #define DOMAIN_MANAGER 3
1632+#define DOMAIN_VECTORS DOMAIN_USER
1633 #else
1634+
1635+#ifdef CONFIG_PAX_KERNEXEC
1636 #define DOMAIN_MANAGER 1
1637+#define DOMAIN_KERNEXEC 3
1638+#else
1639+#define DOMAIN_MANAGER 1
1640+#endif
1641+
1642+#ifdef CONFIG_PAX_MEMORY_UDEREF
1643+#define DOMAIN_USERCLIENT 0
1644+#define DOMAIN_UDEREF 1
1645+#define DOMAIN_VECTORS DOMAIN_KERNEL
1646+#else
1647+#define DOMAIN_USERCLIENT 1
1648+#define DOMAIN_VECTORS DOMAIN_USER
1649+#endif
1650+#define DOMAIN_KERNELCLIENT 1
1651+
1652 #endif
1653
1654 #define domain_val(dom,type) ((type) << (2*(dom)))
1655
1656 #ifndef __ASSEMBLY__
1657
1658-#ifdef CONFIG_CPU_USE_DOMAINS
1659+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1660 static inline void set_domain(unsigned val)
1661 {
1662 asm volatile(
1663@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1664 isb();
1665 }
1666
1667-#define modify_domain(dom,type) \
1668- do { \
1669- struct thread_info *thread = current_thread_info(); \
1670- unsigned int domain = thread->cpu_domain; \
1671- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1672- thread->cpu_domain = domain | domain_val(dom, type); \
1673- set_domain(thread->cpu_domain); \
1674- } while (0)
1675-
1676+extern void modify_domain(unsigned int dom, unsigned int type);
1677 #else
1678 static inline void set_domain(unsigned val) { }
1679 static inline void modify_domain(unsigned dom, unsigned type) { }
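Because domain_val(dom, type) packs a 2-bit access type per domain, the resulting DACR image is easy to compute by hand. A small standalone check, using the kernel/user/IO domain numbers from the unpatched portion of this header (assumed here for illustration):

        #include <stdio.h>

        #define DOMAIN_KERNEL   0
        #define DOMAIN_USER     1
        #define DOMAIN_IO       2

        #define DOMAIN_KERNELCLIENT     1       /* client = checked access */
        #define DOMAIN_UDEREF           1       /* user domain while opened */

        #define domain_val(dom, type)   ((type) << (2*(dom)))

        int main(void)
        {
                /* One plausible kernel-mode DACR with the user domain
                 * temporarily opened (cf. pax_open_userland elsewhere). */
                unsigned int dacr = domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
                                    domain_val(DOMAIN_USER, DOMAIN_UDEREF) |
                                    domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT);
                printf("DACR = 0x%08x\n", dacr);        /* 0x00000015 */
                return 0;
        }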
1680diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1681index afb9caf..9a0bac0 100644
1682--- a/arch/arm/include/asm/elf.h
1683+++ b/arch/arm/include/asm/elf.h
1684@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1685 the loader. We need to make sure that it is out of the way of the program
1686 that it will "exec", and that there is sufficient room for the brk. */
1687
1688-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1689+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1690+
1691+#ifdef CONFIG_PAX_ASLR
1692+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1693+
1694+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1695+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1696+#endif
1697
1698 /* When the program starts, a1 contains a pointer to a function to be
1699 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1700@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1701 extern void elf_set_personality(const struct elf32_hdr *);
1702 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1703
1704-struct mm_struct;
1705-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1706-#define arch_randomize_brk arch_randomize_brk
1707-
1708 #ifdef CONFIG_MMU
1709 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1710 struct linux_binprm;
1711diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1712index de53547..52b9a28 100644
1713--- a/arch/arm/include/asm/fncpy.h
1714+++ b/arch/arm/include/asm/fncpy.h
1715@@ -81,7 +81,9 @@
1716 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1717 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1718 \
1719+ pax_open_kernel(); \
1720 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1721+ pax_close_kernel(); \
1722 flush_icache_range((unsigned long)(dest_buf), \
1723 (unsigned long)(dest_buf) + (size)); \
1724 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 563b92f..689d58e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -39,7 +39,7 @@ struct outer_cache_fns {
1842 /* This is an ARM L2C thing */
1843 void (*write_sec)(unsigned long, unsigned);
1844 void (*configure)(const struct l2x0_regs *);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
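
Editor's note: the new __section_update() helper handles the non-LPAE case, where one Linux pmd entry is backed by a pair of 1 MiB hardware sections; bit 20 of the address selects which entry of the pair to modify. A sketch of the selection (not from the patch), assuming the usual SECTION_SIZE of 1 MiB:

#include <stdio.h>

#define SECTION_SHIFT 20
#define SECTION_SIZE  (1UL << SECTION_SHIFT)

/* which of the two hardware section entries covers this address */
static int section_index(unsigned long addr)
{
	return (addr & SECTION_SIZE) ? 1 : 0;
}

int main(void)
{
	printf("0xc0000000 -> pmdp[%d]\n", section_index(0xc0000000UL)); /* 0 */
	printf("0xc0100000 -> pmdp[%d]\n", section_index(0xc0100000UL)); /* 1 */
	return 0;
}
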
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index bfd662e..f6cbb02 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -127,6 +127,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a745a2a..481350a 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -80,6 +80,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -91,10 +92,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
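
Editor's note: L_PTE_PXN_HIGH mirrors the new bit 53 for assembly code that handles an LPAE descriptor as a lo/hi pair of 32-bit words, which is why the header keeps *_HIGH variants. A sketch of the split (not from the patch), showing that descriptor bit 53 lands on bit 21 of the high word:

#include <stdio.h>
#include <stdint.h>

#define L_PTE_PXN (UINT64_C(1) << 53)

int main(void)
{
	uint64_t pte = L_PTE_PXN;
	uint32_t lo = (uint32_t)pte;
	uint32_t hi = (uint32_t)(pte >> 32);

	/* hi == 1 << (53 - 32) == 1 << 21 */
	printf("lo = 0x%08x, hi = 0x%08x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}
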
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index f403541..b10df68 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be an 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
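
Editor's note: the pax_open_kernel()/pax_close_kernel() pair defined above is the write-enable bracket used by the later fncpy, set_tls, FIQ, text-patching and TCM hunks. Open switches DOMAIN_KERNEL to the hardware manager encoding (DOMAIN_KERNEXEC == 3) so permission bits are ignored and a write to read-only kernel text succeeds; close restores client mode; preemption stays disabled in between because the DACR is per-CPU state. A userspace mock of the discipline (the names below are illustrative, not kernel API):

#include <assert.h>
#include <stdio.h>

static int window_open;	/* stands in for DOMAIN_KERNEL == DOMAIN_KERNEXEC */

static void mock_open_kernel(void)  { assert(!window_open); window_open = 1; }
static void mock_close_kernel(void) { assert(window_open);  window_open = 0; }

static void patch_word(unsigned int *addr, unsigned int val)
{
	assert(window_open);	/* a write outside the window would fault */
	*addr = val;
}

int main(void)
{
	unsigned int text = 0xe1a00000;	/* ARM nop */

	mock_open_kernel();
	patch_word(&text, 0xe12fff1e);	/* bx lr */
	mock_close_kernel();

	printf("patched to 0x%08x\n", text);
	return 0;
}
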
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index 72812a1..335f4f3 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -77,9 +77,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 }
2125
2126 #define init_thread_info (init_thread_union.thread_info)
2127@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* within 8 bits of TIF_SYSCALL_TRACE
2133+ * to meet flexible second operand requirements
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
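
Editor's note: the "flexible second operand" comment refers to ARM immediate encoding. A data-processing immediate is an 8-bit value rotated right by an even amount, so the single tst in the syscall entry path can only test a mask whose set bits span at most eight positions; that is why TIF_GRSEC_SETXID is squeezed in at bit 12 next to the other syscall-work bits. A sketch of the encodability check (not from the patch), assuming TIF_SYSCALL_TRACE is bit 8 as in mainline:

#include <stdio.h>

static unsigned int rotl32(unsigned int x, unsigned int n)
{
	n &= 31;
	return n ? (x << n) | (x >> (32 - n)) : x;
}

/* true if x is an 8-bit value rotated right by an even amount */
static int arm_encodable(unsigned int x)
{
	for (unsigned int rot = 0; rot < 32; rot += 2)
		if (rotl32(x, rot) <= 0xff)
			return 1;
	return 0;
}

int main(void)
{
	unsigned int work = 0x1f00;	/* TIF bits 8..12: trace .. GRSEC_SETXID */
	unsigned int far  = 0x20f00;	/* hypothetical: same flag parked at bit 17 */

	printf("bits 8..12   : %s\n", arm_encodable(work) ? "one tst" : "extra instructions");
	printf("bits 8..11,17: %s\n", arm_encodable(far)  ? "one tst" : "extra instructions");
	return 0;
}
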
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index ce0786e..a80c264 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a, b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x, p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x, p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check((x), (p)); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x, p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x, p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check((x), (p)); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2260 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x, ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x), (ptr), __gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x, ptr, err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x), (ptr), err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x, ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x), (ptr), __pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x, ptr, err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x), (ptr), err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
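
Editor's note: the "(long)n < 0" guard added to copy_from_user() and copy_to_user() rejects any length with the top bit set before access_ok() or the copy loop ever see it; such values are almost always the result of an unsigned underflow. A sketch (not from the patch):

#include <stdio.h>
#include <stddef.h>

static size_t checked_copy(size_t n)
{
	if ((long)n < 0)
		return n;	/* report "n bytes not copied", copy nothing */
	/* ... access_ok() and the real copy would go here ... */
	return 0;
}

int main(void)
{
	size_t body = 16, hdr = 64;
	size_t n = body - hdr;	/* underflows to a huge value near SIZE_MAX */

	printf("n = %zu, not copied = %zu\n", n, checked_copy(n));
	return 0;
}
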
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 672b219..4aa120a 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -48,6 +48,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -90,11 +171,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -479,7 +576,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -513,11 +612,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -767,7 +871,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -776,7 +880,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
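
Editor's note: all of the DACR macros above locate current_thread_info() by clearing the low 13 bits of sp ("assume 8K pages", i.e. 8 KiB kernel stacks with thread_info at the base). 0x1fff is not an encodable ARM immediate, which is why each macro clears it with two bic instructions, 0x1fc0 and 0x3f. The equivalent arithmetic as a sketch (not from the patch):

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 0x2000UL	/* 8 KiB kernel stack, thread_info at its base */

int main(void)
{
	unsigned long sp = 0xc1234f7cUL;	/* some address inside a kernel stack */
	unsigned long ti = sp;

	ti &= ~0x1fc0UL;	/* bic r2, r2, #0x1fc0 */
	ti &= ~0x3fUL;		/* bic r2, r2, #0x3f   */

	assert(ti == (sp & ~(THREAD_SIZE - 1)));
	printf("sp 0x%08lx -> thread_info 0x%08lx\n", sp, ti);
	return 0;
}
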
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here to avoid a performance hit of wrapping the code above
2663+ * that directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 0196327..50ac8895 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -444,7 +444,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index 2e11961..07f0704 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
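
Editor's note: under KERNEXEC, module_alloc() now hands out non-executable memory and module_alloc_exec() supplies the read-execute counterpart, so module text is never writable and executable at the same time. A userspace analogue of that W^X life cycle using mmap/mprotect (illustrative only; the kernel allocator works on vmalloc space):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned char code[] = { 0x1e, 0xff, 0x2f, 0xe1 };	/* ARM "bx lr" */

	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memcpy(buf, code, sizeof(code));	/* fill while RW */

	if (mprotect(buf, len, PROT_READ | PROT_EXEC))	/* then RX, never RWX */
		return 1;

	puts("mapping is now r-x; further writes would fault");
	return 0;
}
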
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 69bda1a..755113a 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index fdfa3a7..5d208b8 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -207,6 +207,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -220,7 +221,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f73891b..cf3004e 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -28,7 +28,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index 1d60beb..4aa25d5 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3058 * Register 0 and check for VMSAv7 or PMSAv7 */
3059 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
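
Editor's note: the setup.c hunk gates the new PXN mask bits on ID_MMFR0. Per the ARM ARM, VMSA support values 4 and 5 in bits [3:0] indicate VMSAv7 with the PXN bit, value 5 additionally meaning long-descriptor support. A sketch of the decode (not from the patch):

#include <stdio.h>

static const char *vmsa_decode(unsigned int mmfr0)
{
	switch (mmfr0 & 0xf) {
	case 3:  return "VMSAv7, no PXN";
	case 4:  return "VMSAv7 + PXN";
	case 5:  return "VMSAv7 + PXN + long descriptors";
	default: return "other";
	}
}

int main(void)
{
	printf("MMFR0=0x...3 -> %s\n", vmsa_decode(0x3));
	printf("MMFR0=0x...5 -> %s\n", vmsa_decode(0x5));
	return 0;
}
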
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 023ac90..0a69950 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index 5560f74..1cc00ea 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1088,7 +1088,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
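
Editor's note: kvm_vmid_gen is a generation counter that is expected to wrap, so the patch moves it to the atomic64_*_unchecked variants. Under PAX_REFCOUNT the plain atomic helpers trap on signed overflow so a leaked reference cannot wrap a refcount; the _unchecked family keeps the old wrapping behaviour for counters where wrap-around is harmless. A sketch of the two behaviours (mock helpers using a GCC/Clang builtin, not the kernel API):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static long inc_checked(long v)
{
	long r;
	if (__builtin_add_overflow(v, 1, &r)) {
		fputs("refcount overflow detected, would trap\n", stderr);
		abort();
	}
	return r;
}

static long inc_unchecked(long v)
{
	return (long)((unsigned long)v + 1);	/* wraps silently */
}

int main(void)
{
	printf("unchecked LONG_MAX + 1 -> %ld\n", inc_unchecked(LONG_MAX));
	inc_checked(LONG_MAX);	/* aborts on purpose */
	return 0;
}
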
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 /* See rationale for this in __copy_to_user() above. */
3436 if (n < 64)
3437diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3438index 318d127..9aab0d1 100644
3439--- a/arch/arm/mach-exynos/suspend.c
3440+++ b/arch/arm/mach-exynos/suspend.c
3441@@ -18,6 +18,7 @@
3442 #include <linux/syscore_ops.h>
3443 #include <linux/cpu_pm.h>
3444 #include <linux/io.h>
3445+#include <linux/irq.h>
3446 #include <linux/irqchip/arm-gic.h>
3447 #include <linux/err.h>
3448 #include <linux/regulator/machine.h>
3449@@ -632,8 +633,10 @@ void __init exynos_pm_init(void)
3450 tmp |= pm_data->wake_disable_mask;
3451 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3452
3453- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3454- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3455+ pax_open_kernel();
3456+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3457+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3458+ pax_close_kernel();
3459
3460 register_syscore_ops(&exynos_pm_syscore_ops);
3461 suspend_set_ops(&exynos_suspend_ops);
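
Note: this exynos hunk introduces the write-window idiom used throughout the patch for structures that constification has made read-only: open the window, assign through a void-pointer cast (so the compiler accepts a store to the now-const member), and close the window again. A condensed sketch of the idiom, assuming pax_open_kernel()/pax_close_kernel() are the patch's kernel-write-window primitives and using a hypothetical ops struct:

        static struct my_ops {
                int (*suspend)(void);
                void (*resume)(void);
        } hooks __read_only;                            /* read-only after init */

        static void install_hooks(int (*s)(void), void (*r)(void))
        {
                pax_open_kernel();                      /* temporarily writable */
                *(void **)&hooks.suspend = s;           /* cast bypasses the const view */
                *(void **)&hooks.resume  = r;
                pax_close_kernel();                     /* back to read-only */
        }
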
3462diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3463index 0662087..004d163 100644
3464--- a/arch/arm/mach-keystone/keystone.c
3465+++ b/arch/arm/mach-keystone/keystone.c
3466@@ -27,7 +27,7 @@
3467
3468 #include "keystone.h"
3469
3470-static struct notifier_block platform_nb;
3471+static notifier_block_no_const platform_nb;
3472 static unsigned long keystone_dma_pfn_offset __read_mostly;
3473
3474 static int keystone_platform_notifier(struct notifier_block *nb,
3475diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3476index e46e9ea..9141c83 100644
3477--- a/arch/arm/mach-mvebu/coherency.c
3478+++ b/arch/arm/mach-mvebu/coherency.c
3479@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3480
3481 /*
3482 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3483- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3484+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3485 * is needed as a workaround for a deadlock issue between the PCIe
3486 * interface and the cache controller.
3487 */
3488@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3489 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3490
3491 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3492- mtype = MT_UNCACHED;
3493+ mtype = MT_UNCACHED_RW;
3494
3495 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3496 }
3497diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3498index b6443a4..20a0b74 100644
3499--- a/arch/arm/mach-omap2/board-n8x0.c
3500+++ b/arch/arm/mach-omap2/board-n8x0.c
3501@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3502 }
3503 #endif
3504
3505-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3506+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3507 .late_init = n8x0_menelaus_late_init,
3508 };
3509
3510diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3511index 79f49d9..70bf184 100644
3512--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3513+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3514@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3515 void (*resume)(void);
3516 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3517 void (*hotplug_restart)(void);
3518-};
3519+} __no_const;
3520
3521 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3522 static struct powerdomain *mpuss_pd;
3523@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3524 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3525 {}
3526
3527-struct cpu_pm_ops omap_pm_ops = {
3528+static struct cpu_pm_ops omap_pm_ops __read_only = {
3529 .finish_suspend = default_finish_suspend,
3530 .resume = dummy_cpu_resume,
3531 .scu_prepare = dummy_scu_prepare,
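
Note: the two annotations in this hunk work as a pair: __no_const on the struct type tells the constify plugin not to force the type const (its function pointers are legitimately reassigned at boot), while __read_only on the omap_pm_ops instance still write-protects it after init. A sketch of how __no_const is typically wired up; this is an assumption, the real definition lives in the patch's compiler.h and the constify GCC plugin:

        #ifdef CONSTIFY_PLUGIN
        #define __no_const __attribute__((no_const))    /* plugin-recognized marker */
        #else
        #define __no_const                               /* no-op without the plugin */
        #endif

        /* condensed from the hunk above */
        struct cpu_pm_ops {
                int (*finish_suspend)(unsigned long cpu_state);
                void (*resume)(void);
        } __no_const;                                    /* exempt from constification */
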
3532diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3533index 5305ec7..6d74045 100644
3534--- a/arch/arm/mach-omap2/omap-smp.c
3535+++ b/arch/arm/mach-omap2/omap-smp.c
3536@@ -19,6 +19,7 @@
3537 #include <linux/device.h>
3538 #include <linux/smp.h>
3539 #include <linux/io.h>
3540+#include <linux/irq.h>
3541 #include <linux/irqchip/arm-gic.h>
3542
3543 #include <asm/smp_scu.h>
3544diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3545index f961c46..4a453dc 100644
3546--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3547+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3548@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3549 return NOTIFY_OK;
3550 }
3551
3552-static struct notifier_block __refdata irq_hotplug_notifier = {
3553+static struct notifier_block irq_hotplug_notifier = {
3554 .notifier_call = irq_cpu_hotplug_notify,
3555 };
3556
3557diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3558index be9541e..821805f 100644
3559--- a/arch/arm/mach-omap2/omap_device.c
3560+++ b/arch/arm/mach-omap2/omap_device.c
3561@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3562 struct platform_device __init *omap_device_build(const char *pdev_name,
3563 int pdev_id,
3564 struct omap_hwmod *oh,
3565- void *pdata, int pdata_len)
3566+ const void *pdata, int pdata_len)
3567 {
3568 struct omap_hwmod *ohs[] = { oh };
3569
3570@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3571 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3572 int pdev_id,
3573 struct omap_hwmod **ohs,
3574- int oh_cnt, void *pdata,
3575+ int oh_cnt, const void *pdata,
3576 int pdata_len)
3577 {
3578 int ret = -ENOMEM;
3579diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3580index 78c02b3..c94109a 100644
3581--- a/arch/arm/mach-omap2/omap_device.h
3582+++ b/arch/arm/mach-omap2/omap_device.h
3583@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3584 /* Core code interface */
3585
3586 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3587- struct omap_hwmod *oh, void *pdata,
3588+ struct omap_hwmod *oh, const void *pdata,
3589 int pdata_len);
3590
3591 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3592 struct omap_hwmod **oh, int oh_cnt,
3593- void *pdata, int pdata_len);
3594+ const void *pdata, int pdata_len);
3595
3596 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3597 struct omap_hwmod **ohs, int oh_cnt);
3598diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3599index 355b089..2c9d7c3 100644
3600--- a/arch/arm/mach-omap2/omap_hwmod.c
3601+++ b/arch/arm/mach-omap2/omap_hwmod.c
3602@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3603 int (*init_clkdm)(struct omap_hwmod *oh);
3604 void (*update_context_lost)(struct omap_hwmod *oh);
3605 int (*get_context_lost)(struct omap_hwmod *oh);
3606-};
3607+} __no_const;
3608
3609 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3610-static struct omap_hwmod_soc_ops soc_ops;
3611+static struct omap_hwmod_soc_ops soc_ops __read_only;
3612
3613 /* omap_hwmod_list contains all registered struct omap_hwmods */
3614 static LIST_HEAD(omap_hwmod_list);
3615diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3616index 95fee54..cfa9cf1 100644
3617--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3618+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3619@@ -10,6 +10,7 @@
3620
3621 #include <linux/kernel.h>
3622 #include <linux/init.h>
3623+#include <asm/pgtable.h>
3624
3625 #include "powerdomain.h"
3626
3627@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3628
3629 void __init am43xx_powerdomains_init(void)
3630 {
3631- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3632+ pax_open_kernel();
3633+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3634+ pax_close_kernel();
3635 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3636 pwrdm_register_pwrdms(powerdomains_am43xx);
3637 pwrdm_complete_init();
3638diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3639index ff0a68c..b312aa0 100644
3640--- a/arch/arm/mach-omap2/wd_timer.c
3641+++ b/arch/arm/mach-omap2/wd_timer.c
3642@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3643 struct omap_hwmod *oh;
3644 char *oh_name = "wd_timer2";
3645 char *dev_name = "omap_wdt";
3646- struct omap_wd_timer_platform_data pdata;
3647+ static struct omap_wd_timer_platform_data pdata = {
3648+ .read_reset_sources = prm_read_reset_sources
3649+ };
3650
3651 if (!cpu_class_is_omap2() || of_have_populated_dt())
3652 return 0;
3653@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3654 return -EINVAL;
3655 }
3656
3657- pdata.read_reset_sources = prm_read_reset_sources;
3658-
3659 pdev = omap_device_build(dev_name, id, oh, &pdata,
3660 sizeof(struct omap_wd_timer_platform_data));
3661 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3662diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3663index 4f25a7c..a81be85 100644
3664--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3665+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3666@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3667 bool entered_lp2 = false;
3668
3669 if (tegra_pending_sgi())
3670- ACCESS_ONCE(abort_flag) = true;
3671+ ACCESS_ONCE_RW(abort_flag) = true;
3672
3673 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3674
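
Note: under constification, ACCESS_ONCE() is given a const-qualified cast so it can only read; ACCESS_ONCE_RW() keeps the writable cast for the few sites, like abort_flag here, that genuinely store through it. The assumed macro pair, per the patch's compiler.h changes:

        #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* read-only view */
        #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* writable view */

        /* usage: */
        ACCESS_ONCE_RW(abort_flag) = true;   /* compiles */
        /* ACCESS_ONCE(abort_flag) = true;      would fail: assignment to const */
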
3675diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3676index ab95f53..4b977a7 100644
3677--- a/arch/arm/mach-tegra/irq.c
3678+++ b/arch/arm/mach-tegra/irq.c
3679@@ -20,6 +20,7 @@
3680 #include <linux/cpu_pm.h>
3681 #include <linux/interrupt.h>
3682 #include <linux/io.h>
3683+#include <linux/irq.h>
3684 #include <linux/irqchip/arm-gic.h>
3685 #include <linux/irq.h>
3686 #include <linux/kernel.h>
3687diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3688index 2cb587b..6ddfebf 100644
3689--- a/arch/arm/mach-ux500/pm.c
3690+++ b/arch/arm/mach-ux500/pm.c
3691@@ -10,6 +10,7 @@
3692 */
3693
3694 #include <linux/kernel.h>
3695+#include <linux/irq.h>
3696 #include <linux/irqchip/arm-gic.h>
3697 #include <linux/delay.h>
3698 #include <linux/io.h>
3699diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3700index 2dea8b5..6499da2 100644
3701--- a/arch/arm/mach-ux500/setup.h
3702+++ b/arch/arm/mach-ux500/setup.h
3703@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3704 .type = MT_DEVICE, \
3705 }
3706
3707-#define __MEM_DEV_DESC(x, sz) { \
3708- .virtual = IO_ADDRESS(x), \
3709- .pfn = __phys_to_pfn(x), \
3710- .length = sz, \
3711- .type = MT_MEMORY_RWX, \
3712-}
3713-
3714 extern struct smp_operations ux500_smp_ops;
3715 extern void ux500_cpu_die(unsigned int cpu);
3716
3717diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3718index 52d768f..5f93180 100644
3719--- a/arch/arm/mach-zynq/platsmp.c
3720+++ b/arch/arm/mach-zynq/platsmp.c
3721@@ -24,6 +24,7 @@
3722 #include <linux/io.h>
3723 #include <asm/cacheflush.h>
3724 #include <asm/smp_scu.h>
3725+#include <linux/irq.h>
3726 #include <linux/irqchip/arm-gic.h>
3727 #include "common.h"
3728
3729diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3730index 9b4f29e..bbf3bfa 100644
3731--- a/arch/arm/mm/Kconfig
3732+++ b/arch/arm/mm/Kconfig
3733@@ -446,6 +446,7 @@ config CPU_32v5
3734
3735 config CPU_32v6
3736 bool
3737+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3738 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3739
3740 config CPU_32v6K
3741@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3742
3743 config CPU_USE_DOMAINS
3744 bool
3745+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3746 help
3747 This option enables or disables the use of domain switching
3748 via the set_fs() function.
3749@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3750
3751 config KUSER_HELPERS
3752 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3753- depends on MMU
3754+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3755 default y
3756 help
3757 Warning: disabling this option may break user programs.
3758@@ -812,7 +814,7 @@ config KUSER_HELPERS
3759 See Documentation/arm/kernel_user_helpers.txt for details.
3760
3761 However, the fixed address nature of these helpers can be used
3762- by ROP (return orientated programming) authors when creating
3763+ by ROP (Return Oriented Programming) authors when creating
3764 exploits.
3765
3766 If all of the binaries and libraries which run on your platform
3767diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3768index 2c0c541..4585df9 100644
3769--- a/arch/arm/mm/alignment.c
3770+++ b/arch/arm/mm/alignment.c
3771@@ -216,10 +216,12 @@ union offset_union {
3772 #define __get16_unaligned_check(ins,val,addr) \
3773 do { \
3774 unsigned int err = 0, v, a = addr; \
3775+ pax_open_userland(); \
3776 __get8_unaligned_check(ins,v,a,err); \
3777 val = v << ((BE) ? 8 : 0); \
3778 __get8_unaligned_check(ins,v,a,err); \
3779 val |= v << ((BE) ? 0 : 8); \
3780+ pax_close_userland(); \
3781 if (err) \
3782 goto fault; \
3783 } while (0)
3784@@ -233,6 +235,7 @@ union offset_union {
3785 #define __get32_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 24 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792@@ -241,6 +244,7 @@ union offset_union {
3793 val |= v << ((BE) ? 8 : 16); \
3794 __get8_unaligned_check(ins,v,a,err); \
3795 val |= v << ((BE) ? 0 : 24); \
3796+ pax_close_userland(); \
3797 if (err) \
3798 goto fault; \
3799 } while (0)
3800@@ -254,6 +258,7 @@ union offset_union {
3801 #define __put16_unaligned_check(ins,val,addr) \
3802 do { \
3803 unsigned int err = 0, v = val, a = addr; \
3804+ pax_open_userland(); \
3805 __asm__( FIRST_BYTE_16 \
3806 ARM( "1: "ins" %1, [%2], #1\n" ) \
3807 THUMB( "1: "ins" %1, [%2]\n" ) \
3808@@ -273,6 +278,7 @@ union offset_union {
3809 " .popsection\n" \
3810 : "=r" (err), "=&r" (v), "=&r" (a) \
3811 : "0" (err), "1" (v), "2" (a)); \
3812+ pax_close_userland(); \
3813 if (err) \
3814 goto fault; \
3815 } while (0)
3816@@ -286,6 +292,7 @@ union offset_union {
3817 #define __put32_unaligned_check(ins,val,addr) \
3818 do { \
3819 unsigned int err = 0, v = val, a = addr; \
3820+ pax_open_userland(); \
3821 __asm__( FIRST_BYTE_32 \
3822 ARM( "1: "ins" %1, [%2], #1\n" ) \
3823 THUMB( "1: "ins" %1, [%2]\n" ) \
3824@@ -315,6 +322,7 @@ union offset_union {
3825 " .popsection\n" \
3826 : "=r" (err), "=&r" (v), "=&r" (a) \
3827 : "0" (err), "1" (v), "2" (a)); \
3828+ pax_close_userland(); \
3829 if (err) \
3830 goto fault; \
3831 } while (0)
3832diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3833index 8f15f70..d599a2b 100644
3834--- a/arch/arm/mm/cache-l2x0.c
3835+++ b/arch/arm/mm/cache-l2x0.c
3836@@ -43,7 +43,7 @@ struct l2c_init_data {
3837 void (*save)(void __iomem *);
3838 void (*configure)(void __iomem *);
3839 struct outer_cache_fns outer_cache;
3840-};
3841+} __do_const;
3842
3843 #define CACHE_LINE_SIZE 32
3844
3845diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3846index 845769e..4278fd7 100644
3847--- a/arch/arm/mm/context.c
3848+++ b/arch/arm/mm/context.c
3849@@ -43,7 +43,7 @@
3850 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3851
3852 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3853-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3854+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3855 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3856
3857 static DEFINE_PER_CPU(atomic64_t, active_asids);
3858@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3859 {
3860 static u32 cur_idx = 1;
3861 u64 asid = atomic64_read(&mm->context.id);
3862- u64 generation = atomic64_read(&asid_generation);
3863+ u64 generation = atomic64_read_unchecked(&asid_generation);
3864
3865 if (asid != 0) {
3866 /*
3867@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3868 */
3869 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3870 if (asid == NUM_USER_ASIDS) {
3871- generation = atomic64_add_return(ASID_FIRST_VERSION,
3872+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3873 &asid_generation);
3874 flush_context(cpu);
3875 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3876@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3877 cpu_set_reserved_ttbr0();
3878
3879 asid = atomic64_read(&mm->context.id);
3880- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3881+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3882 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3883 goto switch_mm_fastpath;
3884
3885 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3886 /* Check that our ASID belongs to the current generation. */
3887 asid = atomic64_read(&mm->context.id);
3888- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3889+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3890 asid = new_context(mm, cpu);
3891 atomic64_set(&mm->context.id, asid);
3892 }
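
Note: PAX_REFCOUNT instruments atomic64_t so that overflows trap; the ASID generation counter wraps by design, so it is converted to the _unchecked variant, which opts out of that instrumentation. A sketch of the intended split, with assumed semantics:

        static atomic64_t obj_refs = ATOMIC64_INIT(1);        /* an overflow here would trap */
        static atomic64_unchecked_t gen = ATOMIC64_INIT(0);   /* may wrap harmlessly */

        static u64 next_generation(void)
        {
                /* a rollover here is part of the algorithm, not a refcount bug */
                return atomic64_add_return_unchecked(1, &gen);
        }
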
3893diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3894index 6333d9c..fd09b46 100644
3895--- a/arch/arm/mm/fault.c
3896+++ b/arch/arm/mm/fault.c
3897@@ -25,6 +25,7 @@
3898 #include <asm/system_misc.h>
3899 #include <asm/system_info.h>
3900 #include <asm/tlbflush.h>
3901+#include <asm/sections.h>
3902
3903 #include "fault.h"
3904
3905@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3906 if (fixup_exception(regs))
3907 return;
3908
3909+#ifdef CONFIG_PAX_MEMORY_UDEREF
3910+ if (addr < TASK_SIZE) {
3911+ if (current->signal->curr_ip)
3912+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3913+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3914+ else
3915+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3916+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3917+ }
3918+#endif
3919+
3920+#ifdef CONFIG_PAX_KERNEXEC
3921+ if ((fsr & FSR_WRITE) &&
3922+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3923+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3924+ {
3925+ if (current->signal->curr_ip)
3926+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3927+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3928+ else
3929+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3930+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3931+ }
3932+#endif
3933+
3934 /*
3935 * No handler, we'll have to terminate things with extreme prejudice.
3936 */
3937@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3938 }
3939 #endif
3940
3941+#ifdef CONFIG_PAX_PAGEEXEC
3942+ if (fsr & FSR_LNX_PF) {
3943+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3944+ do_group_exit(SIGKILL);
3945+ }
3946+#endif
3947+
3948 tsk->thread.address = addr;
3949 tsk->thread.error_code = fsr;
3950 tsk->thread.trap_no = 14;
3951@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3952 }
3953 #endif /* CONFIG_MMU */
3954
3955+#ifdef CONFIG_PAX_PAGEEXEC
3956+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3957+{
3958+ long i;
3959+
3960+ printk(KERN_ERR "PAX: bytes at PC: ");
3961+ for (i = 0; i < 20; i++) {
3962+ unsigned char c;
3963+ if (get_user(c, (__force unsigned char __user *)pc+i))
3964+ printk(KERN_CONT "?? ");
3965+ else
3966+ printk(KERN_CONT "%02x ", c);
3967+ }
3968+ printk("\n");
3969+
3970+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3971+ for (i = -1; i < 20; i++) {
3972+ unsigned long c;
3973+ if (get_user(c, (__force unsigned long __user *)sp+i))
3974+ printk(KERN_CONT "???????? ");
3975+ else
3976+ printk(KERN_CONT "%08lx ", c);
3977+ }
3978+ printk("\n");
3979+}
3980+#endif
3981+
3982 /*
3983 * First Level Translation Fault Handler
3984 *
3985@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3986 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3987 struct siginfo info;
3988
3989+#ifdef CONFIG_PAX_MEMORY_UDEREF
3990+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3991+ if (current->signal->curr_ip)
3992+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3993+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3994+ else
3995+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3996+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3997+ goto die;
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, fsr, addr);
4007 show_pte(current->mm, addr);
4008@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4009 ifsr_info[nr].name = name;
4010 }
4011
4012+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4013+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4014+
4015 asmlinkage void __exception
4016 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4017 {
4018 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4019 struct siginfo info;
4020+ unsigned long pc = instruction_pointer(regs);
4021+
4022+ if (user_mode(regs)) {
4023+ unsigned long sigpage = current->mm->context.sigpage;
4024+
4025+ if (sigpage <= pc && pc < sigpage + 7*4) {
4026+ if (pc < sigpage + 3*4)
4027+ sys_sigreturn(regs);
4028+ else
4029+ sys_rt_sigreturn(regs);
4030+ return;
4031+ }
4032+ if (pc == 0xffff0f60UL) {
4033+ /*
4034+ * PaX: __kuser_cmpxchg64 emulation
4035+ */
4036+ // TODO
4037+ //regs->ARM_pc = regs->ARM_lr;
4038+ //return;
4039+ }
4040+ if (pc == 0xffff0fa0UL) {
4041+ /*
4042+ * PaX: __kuser_memory_barrier emulation
4043+ */
4044+ // dmb(); implied by the exception
4045+ regs->ARM_pc = regs->ARM_lr;
4046+ return;
4047+ }
4048+ if (pc == 0xffff0fc0UL) {
4049+ /*
4050+ * PaX: __kuser_cmpxchg emulation
4051+ */
4052+ // TODO
4053+ //long new;
4054+ //int op;
4055+
4056+ //op = FUTEX_OP_SET << 28;
4057+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4058+ //regs->ARM_r0 = old != new;
4059+ //regs->ARM_pc = regs->ARM_lr;
4060+ //return;
4061+ }
4062+ if (pc == 0xffff0fe0UL) {
4063+ /*
4064+ * PaX: __kuser_get_tls emulation
4065+ */
4066+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4067+ regs->ARM_pc = regs->ARM_lr;
4068+ return;
4069+ }
4070+ }
4071+
4072+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4073+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4074+ if (current->signal->curr_ip)
4075+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4077+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4078+ else
4079+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4080+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4081+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4082+ goto die;
4083+ }
4084+#endif
4085+
4086+#ifdef CONFIG_PAX_REFCOUNT
4087+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4088+#ifdef CONFIG_THUMB2_KERNEL
4089+ unsigned short bkpt;
4090+
4091+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4092+#else
4093+ unsigned int bkpt;
4094+
4095+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4096+#endif
4097+ current->thread.error_code = ifsr;
4098+ current->thread.trap_no = 0;
4099+ pax_report_refcount_overflow(regs);
4100+ fixup_exception(regs);
4101+ return;
4102+ }
4103+ }
4104+#endif
4105
4106 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4107 return;
4108
4109+die:
4110 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4111 inf->name, ifsr, addr);
4112
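
Note: the prefetch-abort changes above emulate the fixed-address kuser helpers in the kernel. This matters once the KUSER_HELPERS Kconfig change later in this patch removes them from the vector page on v6+ CPUs: userland calls to the well-known addresses then fault into this handler. The addresses decoded above follow the standard helper layout:

        /* fixed kuser helper entry points in the vector page (ARM ABI):
         *   0xffff0f60  __kuser_cmpxchg64       (emulation left as TODO above)
         *   0xffff0fa0  __kuser_memory_barrier  (emulated: dmb implied by the trap)
         *   0xffff0fc0  __kuser_cmpxchg         (emulation left as TODO above)
         *   0xffff0fe0  __kuser_get_tls         (emulated from tp_value[0])
         */
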
4113diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4114index cf08bdf..772656c 100644
4115--- a/arch/arm/mm/fault.h
4116+++ b/arch/arm/mm/fault.h
4117@@ -3,6 +3,7 @@
4118
4119 /*
4120 * Fault status register encodings. We steal bit 31 for our own purposes.
4121+ * Set when the FSR value is from an instruction fault.
4122 */
4123 #define FSR_LNX_PF (1 << 31)
4124 #define FSR_WRITE (1 << 11)
4125@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4126 }
4127 #endif
4128
4129+/* valid for LPAE and !LPAE */
4130+static inline int is_xn_fault(unsigned int fsr)
4131+{
4132+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4133+}
4134+
4135+static inline int is_domain_fault(unsigned int fsr)
4136+{
4137+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4138+}
4139+
4140 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4141 unsigned long search_exception_table(unsigned long addr);
4142
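
Note: both helpers test the 5-bit fault-status code returned by fsr_fs(). Worked out: (fs & 0x3c) == 0xc admits fs = 0x0c..0x0f, which covers the permission-fault encodings in both the LPAE and the short-descriptor formats (hence the "valid for LPAE and !LPAE" comment), while (fs & 0xD) == 0x9 admits fs = 0x9 and 0xb, the section and page domain faults. In binary:

        /* is_xn_fault:     fs & 0b111100 == 0b001100  ->  fs in {0x0c..0x0f} */
        /* is_domain_fault: fs & 0b001101 == 0b001001  ->  fs in {0x09, 0x0b} */
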
4143diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4144index 1609b02..def0785 100644
4145--- a/arch/arm/mm/init.c
4146+++ b/arch/arm/mm/init.c
4147@@ -755,7 +755,46 @@ void free_tcmmem(void)
4148 {
4149 #ifdef CONFIG_HAVE_TCM
4150 extern char __tcm_start, __tcm_end;
4151+#endif
4152
4153+#ifdef CONFIG_PAX_KERNEXEC
4154+ unsigned long addr;
4155+ pgd_t *pgd;
4156+ pud_t *pud;
4157+ pmd_t *pmd;
4158+ int cpu_arch = cpu_architecture();
4159+ unsigned int cr = get_cr();
4160+
4161+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4162+ /* make page tables, etc. before .text NX */
4163+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4164+ pgd = pgd_offset_k(addr);
4165+ pud = pud_offset(pgd, addr);
4166+ pmd = pmd_offset(pud, addr);
4167+ __section_update(pmd, addr, PMD_SECT_XN);
4168+ }
4169+ /* make init NX */
4170+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4171+ pgd = pgd_offset_k(addr);
4172+ pud = pud_offset(pgd, addr);
4173+ pmd = pmd_offset(pud, addr);
4174+ __section_update(pmd, addr, PMD_SECT_XN);
4175+ }
4176+ /* make kernel code/rodata RX */
4177+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4178+ pgd = pgd_offset_k(addr);
4179+ pud = pud_offset(pgd, addr);
4180+ pmd = pmd_offset(pud, addr);
4181+#ifdef CONFIG_ARM_LPAE
4182+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4183+#else
4184+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4185+#endif
4186+ }
4187+ }
4188+#endif
4189+
4190+#ifdef CONFIG_HAVE_TCM
4191 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4192 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4193 #endif
4194diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4195index d1e5ad7..84dcbf2 100644
4196--- a/arch/arm/mm/ioremap.c
4197+++ b/arch/arm/mm/ioremap.c
4198@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4199 unsigned int mtype;
4200
4201 if (cached)
4202- mtype = MT_MEMORY_RWX;
4203+ mtype = MT_MEMORY_RX;
4204 else
4205- mtype = MT_MEMORY_RWX_NONCACHED;
4206+ mtype = MT_MEMORY_RX_NONCACHED;
4207
4208 return __arm_ioremap_caller(phys_addr, size, mtype,
4209 __builtin_return_address(0));
4210diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4211index 5e85ed3..b10a7ed 100644
4212--- a/arch/arm/mm/mmap.c
4213+++ b/arch/arm/mm/mmap.c
4214@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4215 struct vm_area_struct *vma;
4216 int do_align = 0;
4217 int aliasing = cache_is_vipt_aliasing();
4218+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4219 struct vm_unmapped_area_info info;
4220
4221 /*
4222@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4223 if (len > TASK_SIZE)
4224 return -ENOMEM;
4225
4226+#ifdef CONFIG_PAX_RANDMMAP
4227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4228+#endif
4229+
4230 if (addr) {
4231 if (do_align)
4232 addr = COLOUR_ALIGN(addr, pgoff);
4233@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4234 addr = PAGE_ALIGN(addr);
4235
4236 vma = find_vma(mm, addr);
4237- if (TASK_SIZE - len >= addr &&
4238- (!vma || addr + len <= vma->vm_start))
4239+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4240 return addr;
4241 }
4242
4243@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 info.high_limit = TASK_SIZE;
4245 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4246 info.align_offset = pgoff << PAGE_SHIFT;
4247+ info.threadstack_offset = offset;
4248 return vm_unmapped_area(&info);
4249 }
4250
4251@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4252 unsigned long addr = addr0;
4253 int do_align = 0;
4254 int aliasing = cache_is_vipt_aliasing();
4255+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4256 struct vm_unmapped_area_info info;
4257
4258 /*
4259@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4260 return addr;
4261 }
4262
4263+#ifdef CONFIG_PAX_RANDMMAP
4264+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4265+#endif
4266+
4267 /* requesting a specific address */
4268 if (addr) {
4269 if (do_align)
4270@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 else
4272 addr = PAGE_ALIGN(addr);
4273 vma = find_vma(mm, addr);
4274- if (TASK_SIZE - len >= addr &&
4275- (!vma || addr + len <= vma->vm_start))
4276+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4277 return addr;
4278 }
4279
4280@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 info.high_limit = mm->mmap_base;
4282 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4283 info.align_offset = pgoff << PAGE_SHIFT;
4284+ info.threadstack_offset = offset;
4285 addr = vm_unmapped_area(&info);
4286
4287 /*
4288@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4289 {
4290 unsigned long random_factor = 0UL;
4291
4292+#ifdef CONFIG_PAX_RANDMMAP
4293+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4294+#endif
4295+
4296 /* 8 bits of randomness in 20 address space bits */
4297 if ((current->flags & PF_RANDOMIZE) &&
4298 !(current->personality & ADDR_NO_RANDOMIZE))
4299@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4300
4301 if (mmap_is_legacy()) {
4302 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4303+
4304+#ifdef CONFIG_PAX_RANDMMAP
4305+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4306+ mm->mmap_base += mm->delta_mmap;
4307+#endif
4308+
4309 mm->get_unmapped_area = arch_get_unmapped_area;
4310 } else {
4311 mm->mmap_base = mmap_base(random_factor);
4312+
4313+#ifdef CONFIG_PAX_RANDMMAP
4314+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4315+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4316+#endif
4317+
4318 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4319 }
4320 }
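
Note: the braceless #ifdef CONFIG_PAX_RANDMMAP guards in this file are deliberate: each block contributes only an if (...) line, so after preprocessing the very next statement becomes its body. In arch_get_unmapped_area*() that skips honouring the caller's address hint for randomized tasks, and in arch_pick_mmap_layout() it skips the kernel's own random_factor in favour of the PaX deltas applied just below. How the first instance reads once preprocessed, with RANDMMAP compiled in (condensed):

        if (!(mm->pax_flags & MF_PAX_RANDMMAP))     /* from the #ifdef block */
                if (addr) {                         /* hint honoured only when the */
                        addr = PAGE_ALIGN(addr);    /* task is not randomized      */
                        /* ... vma gap checks ... */
                }
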
4321diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4322index 4e6ef89..21c27f2 100644
4323--- a/arch/arm/mm/mmu.c
4324+++ b/arch/arm/mm/mmu.c
4325@@ -41,6 +41,22 @@
4326 #include "mm.h"
4327 #include "tcm.h"
4328
4329+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4330+void modify_domain(unsigned int dom, unsigned int type)
4331+{
4332+ struct thread_info *thread = current_thread_info();
4333+ unsigned int domain = thread->cpu_domain;
4334+ /*
4335+ * DOMAIN_MANAGER might be defined to some other value,
4336+ * use the arch-defined constant
4337+ */
4338+ domain &= ~domain_val(dom, 3);
4339+ thread->cpu_domain = domain | domain_val(dom, type);
4340+ set_domain(thread->cpu_domain);
4341+}
4342+EXPORT_SYMBOL(modify_domain);
4343+#endif
4344+
4345 /*
4346 * empty_zero_page is a special page that is used for
4347 * zero-initialized data and COW.
4348@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4349 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4350 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4351
4352-static struct mem_type mem_types[] = {
4353+#ifdef CONFIG_PAX_KERNEXEC
4354+#define L_PTE_KERNEXEC L_PTE_RDONLY
4355+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4356+#else
4357+#define L_PTE_KERNEXEC L_PTE_DIRTY
4358+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4359+#endif
4360+
4361+static struct mem_type mem_types[] __read_only = {
4362 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4363 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4364 L_PTE_SHARED,
4365@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4366 .prot_sect = PROT_SECT_DEVICE,
4367 .domain = DOMAIN_IO,
4368 },
4369- [MT_UNCACHED] = {
4370+ [MT_UNCACHED_RW] = {
4371 .prot_pte = PROT_PTE_DEVICE,
4372 .prot_l1 = PMD_TYPE_TABLE,
4373 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4374 .domain = DOMAIN_IO,
4375 },
4376- [MT_CACHECLEAN] = {
4377- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4378+ [MT_CACHECLEAN_RO] = {
4379+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4380 .domain = DOMAIN_KERNEL,
4381 },
4382 #ifndef CONFIG_ARM_LPAE
4383- [MT_MINICLEAN] = {
4384- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4385+ [MT_MINICLEAN_RO] = {
4386+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4387 .domain = DOMAIN_KERNEL,
4388 },
4389 #endif
4390@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4391 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4392 L_PTE_RDONLY,
4393 .prot_l1 = PMD_TYPE_TABLE,
4394- .domain = DOMAIN_USER,
4395+ .domain = DOMAIN_VECTORS,
4396 },
4397 [MT_HIGH_VECTORS] = {
4398 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4399 L_PTE_USER | L_PTE_RDONLY,
4400 .prot_l1 = PMD_TYPE_TABLE,
4401- .domain = DOMAIN_USER,
4402+ .domain = DOMAIN_VECTORS,
4403 },
4404- [MT_MEMORY_RWX] = {
4405+ [__MT_MEMORY_RWX] = {
4406 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4407 .prot_l1 = PMD_TYPE_TABLE,
4408 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4409@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4410 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4411 .domain = DOMAIN_KERNEL,
4412 },
4413- [MT_ROM] = {
4414- .prot_sect = PMD_TYPE_SECT,
4415+ [MT_MEMORY_RX] = {
4416+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4417+ .prot_l1 = PMD_TYPE_TABLE,
4418+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4419+ .domain = DOMAIN_KERNEL,
4420+ },
4421+ [MT_ROM_RX] = {
4422+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4423 .domain = DOMAIN_KERNEL,
4424 },
4425- [MT_MEMORY_RWX_NONCACHED] = {
4426+ [MT_MEMORY_RW_NONCACHED] = {
4427 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4428 L_PTE_MT_BUFFERABLE,
4429 .prot_l1 = PMD_TYPE_TABLE,
4430 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4431 .domain = DOMAIN_KERNEL,
4432 },
4433+ [MT_MEMORY_RX_NONCACHED] = {
4434+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4435+ L_PTE_MT_BUFFERABLE,
4436+ .prot_l1 = PMD_TYPE_TABLE,
4437+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4438+ .domain = DOMAIN_KERNEL,
4439+ },
4440 [MT_MEMORY_RW_DTCM] = {
4441 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4442 L_PTE_XN,
4443@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4444 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4445 .domain = DOMAIN_KERNEL,
4446 },
4447- [MT_MEMORY_RWX_ITCM] = {
4448- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4449+ [MT_MEMORY_RX_ITCM] = {
4450+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4451 .prot_l1 = PMD_TYPE_TABLE,
4452+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4453 .domain = DOMAIN_KERNEL,
4454 },
4455 [MT_MEMORY_RW_SO] = {
4456@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4457 * Mark cache clean areas and XIP ROM read only
4458 * from SVC mode and no access from userspace.
4459 */
4460- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4461- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4462- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4463+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4464+#ifdef CONFIG_PAX_KERNEXEC
4465+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4466+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4467+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4468+#endif
4469+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4470+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471 #endif
4472
4473 /*
4474@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4475 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4476 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4477 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4478- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4479- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4480+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4481+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4482 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4483 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4484+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4485+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4487- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4488- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4489+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4490+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4493 }
4494 }
4495
4496@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4497 if (cpu_arch >= CPU_ARCH_ARMv6) {
4498 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4499 /* Non-cacheable Normal is XCB = 001 */
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4501+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4502+ PMD_SECT_BUFFERED;
4503+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4504 PMD_SECT_BUFFERED;
4505 } else {
4506 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4507- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4508+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4509+ PMD_SECT_TEX(1);
4510+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4511 PMD_SECT_TEX(1);
4512 }
4513 } else {
4514- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4515+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4517 }
4518
4519 #ifdef CONFIG_ARM_LPAE
4520@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4521 user_pgprot |= PTE_EXT_PXN;
4522 #endif
4523
4524+ user_pgprot |= __supported_pte_mask;
4525+
4526 for (i = 0; i < 16; i++) {
4527 pteval_t v = pgprot_val(protection_map[i]);
4528 protection_map[i] = __pgprot(v | user_pgprot);
4529@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4530
4531 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4532 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4533- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4534- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4535+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4536+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4537 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4538 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4539+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4540+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4541 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4542- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4543- mem_types[MT_ROM].prot_sect |= cp->pmd;
4544+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4545+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4546+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4547
4548 switch (cp->pmd) {
4549 case PMD_SECT_WT:
4550- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4551+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4552 break;
4553 case PMD_SECT_WB:
4554 case PMD_SECT_WBWA:
4555- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4556+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4557 break;
4558 }
4559 pr_info("Memory policy: %sData cache %s\n",
4560@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4561 return;
4562 }
4563
4564- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4565+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4566 md->virtual >= PAGE_OFFSET &&
4567 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4568 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4569@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4570 * called function. This means you can't use any function or debugging
4571 * method which may touch any device, otherwise the kernel _will_ crash.
4572 */
4573+
4574+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4575+
4576 static void __init devicemaps_init(const struct machine_desc *mdesc)
4577 {
4578 struct map_desc map;
4579 unsigned long addr;
4580- void *vectors;
4581
4582- /*
4583- * Allocate the vector page early.
4584- */
4585- vectors = early_alloc(PAGE_SIZE * 2);
4586-
4587- early_trap_init(vectors);
4588+ early_trap_init(&vectors);
4589
4590 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4591 pmd_clear(pmd_off_k(addr));
4592@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4593 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4594 map.virtual = MODULES_VADDR;
4595 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4596- map.type = MT_ROM;
4597+ map.type = MT_ROM_RX;
4598 create_mapping(&map);
4599 #endif
4600
4601@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4602 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4603 map.virtual = FLUSH_BASE;
4604 map.length = SZ_1M;
4605- map.type = MT_CACHECLEAN;
4606+ map.type = MT_CACHECLEAN_RO;
4607 create_mapping(&map);
4608 #endif
4609 #ifdef FLUSH_BASE_MINICACHE
4610 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4611 map.virtual = FLUSH_BASE_MINICACHE;
4612 map.length = SZ_1M;
4613- map.type = MT_MINICLEAN;
4614+ map.type = MT_MINICLEAN_RO;
4615 create_mapping(&map);
4616 #endif
4617
4618@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4619 * location (0xffff0000). If we aren't using high-vectors, also
4620 * create a mapping at the low-vectors virtual address.
4621 */
4622- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4623+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4624 map.virtual = 0xffff0000;
4625 map.length = PAGE_SIZE;
4626 #ifdef CONFIG_KUSER_HELPERS
4627@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4628 static void __init map_lowmem(void)
4629 {
4630 struct memblock_region *reg;
4631+#ifndef CONFIG_PAX_KERNEXEC
4632 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4633 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4634+#endif
4635
4636 /* Map all the lowmem memory banks. */
4637 for_each_memblock(memory, reg) {
4638@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4639 if (start >= end)
4640 break;
4641
4642+#ifdef CONFIG_PAX_KERNEXEC
4643+ map.pfn = __phys_to_pfn(start);
4644+ map.virtual = __phys_to_virt(start);
4645+ map.length = end - start;
4646+
4647+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4648+ struct map_desc kernel;
4649+ struct map_desc initmap;
4650+
4651+ /* when freeing initmem we will make this RW */
4652+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4653+ initmap.virtual = (unsigned long)__init_begin;
4654+ initmap.length = _sdata - __init_begin;
4655+ initmap.type = __MT_MEMORY_RWX;
4656+ create_mapping(&initmap);
4657+
4658+ /* when freeing initmem we will make this RX */
4659+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4660+ kernel.virtual = (unsigned long)_stext;
4661+ kernel.length = __init_begin - _stext;
4662+ kernel.type = __MT_MEMORY_RWX;
4663+ create_mapping(&kernel);
4664+
4665+ if (map.virtual < (unsigned long)_stext) {
4666+ map.length = (unsigned long)_stext - map.virtual;
4667+ map.type = __MT_MEMORY_RWX;
4668+ create_mapping(&map);
4669+ }
4670+
4671+ map.pfn = __phys_to_pfn(__pa(_sdata));
4672+ map.virtual = (unsigned long)_sdata;
4673+ map.length = end - __pa(_sdata);
4674+ }
4675+
4676+ map.type = MT_MEMORY_RW;
4677+ create_mapping(&map);
4678+#else
4679 if (end < kernel_x_start) {
4680 map.pfn = __phys_to_pfn(start);
4681 map.virtual = __phys_to_virt(start);
4682 map.length = end - start;
4683- map.type = MT_MEMORY_RWX;
4684+ map.type = __MT_MEMORY_RWX;
4685
4686 create_mapping(&map);
4687 } else if (start >= kernel_x_end) {
4688@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4689 map.pfn = __phys_to_pfn(kernel_x_start);
4690 map.virtual = __phys_to_virt(kernel_x_start);
4691 map.length = kernel_x_end - kernel_x_start;
4692- map.type = MT_MEMORY_RWX;
4693+ map.type = __MT_MEMORY_RWX;
4694
4695 create_mapping(&map);
4696
4697@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4698 create_mapping(&map);
4699 }
4700 }
4701+#endif
4702 }
4703 }
4704
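
Note: the mem_types[] rework above splits every formerly-RWX memory type into separate RW and RX variants and renames the read-only types with an _RO suffix, so a mapping's name now states its W^X policy; under PAX_KERNEXEC the executable types additionally drop write permission via L_PTE_KERNEXEC/PMD_SECT_KERNEXEC. A condensed view of the renaming (illustrative, not the full table):

        /* old name                -> new name(s)                      policy */
        /* MT_UNCACHED             -> MT_UNCACHED_RW                   rw-    */
        /* MT_CACHECLEAN           -> MT_CACHECLEAN_RO                 r--    */
        /* MT_MINICLEAN            -> MT_MINICLEAN_RO                  r--    */
        /* MT_ROM                  -> MT_ROM_RX                        r-x    */
        /* MT_MEMORY_RWX           -> __MT_MEMORY_RWX (kept rwx only   rwx    */
        /*                            until initmem is freed)                 */
        /* MT_MEMORY_RWX_NONCACHED -> MT_MEMORY_RW_NONCACHED           rw-    */
        /*                            MT_MEMORY_RX_NONCACHED           r-x    */
        /* MT_MEMORY_RWX_ITCM      -> MT_MEMORY_RX_ITCM                r-x    */
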
4705diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4706index e1268f9..a9755a7 100644
4707--- a/arch/arm/net/bpf_jit_32.c
4708+++ b/arch/arm/net/bpf_jit_32.c
4709@@ -20,6 +20,7 @@
4710 #include <asm/cacheflush.h>
4711 #include <asm/hwcap.h>
4712 #include <asm/opcodes.h>
4713+#include <asm/pgtable.h>
4714
4715 #include "bpf_jit_32.h"
4716
4717@@ -71,7 +72,11 @@ struct jit_ctx {
4718 #endif
4719 };
4720
4721+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4722+int bpf_jit_enable __read_only;
4723+#else
4724 int bpf_jit_enable __read_mostly;
4725+#endif
4726
4727 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4728 {
4729@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4730 {
4731 u32 *ptr;
4732 /* We are guaranteed to have aligned memory. */
4733+ pax_open_kernel();
4734 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4735 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4736+ pax_close_kernel();
4737 }
4738
4739 static void build_prologue(struct jit_ctx *ctx)
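
Note: bpf_jit_enable is a sysctl-reachable flag and a classic target for attackers with an arbitrary kernel write, so GRKERNSEC_BPF_HARDEN moves it from __read_mostly (purely a cache-locality hint) to __read_only (write-protected after init); jit_fill_hole() likewise gains the pax_open_kernel() window, since under KERNEXEC the JIT area is presumably not writable in place. Assumed shape of the two section attributes; the exact definitions live in the patch's compiler/cache headers:

        /* groups rarely-written data for cache locality; still writable */
        #define __read_mostly __attribute__((__section__(".data..read_mostly")))

        /* placed in a section the kernel write-protects after boot */
        #define __read_only   __attribute__((__section__(".data..read_only")))
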
4740diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4741index 5b217f4..c23f40e 100644
4742--- a/arch/arm/plat-iop/setup.c
4743+++ b/arch/arm/plat-iop/setup.c
4744@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4745 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4746 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4747 .length = IOP3XX_PERIPHERAL_SIZE,
4748- .type = MT_UNCACHED,
4749+ .type = MT_UNCACHED_RW,
4750 },
4751 };
4752
4753diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4754index a5bc92d..0bb4730 100644
4755--- a/arch/arm/plat-omap/sram.c
4756+++ b/arch/arm/plat-omap/sram.c
4757@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4758 * Looks like we need to preserve some bootloader code at the
4759 * beginning of SRAM for jumping to flash for reboot to work...
4760 */
4761+ pax_open_kernel();
4762 memset_io(omap_sram_base + omap_sram_skip, 0,
4763 omap_sram_size - omap_sram_skip);
4764+ pax_close_kernel();
4765 }
4766diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4767index 7047051..44e8675 100644
4768--- a/arch/arm64/include/asm/atomic.h
4769+++ b/arch/arm64/include/asm/atomic.h
4770@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4771 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4772 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 #endif
4785 #endif
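
Note: arm64 gets no REFCOUNT instrumentation in this patch, so the _unchecked API is supplied as one-to-one aliases; generic code can then call the unchecked variants unconditionally and still build on architectures where checked and unchecked atomics are the same thing. Usage sketch, assuming the patch's usual typedef atomic64_t atomic64_unchecked_t; fallback:

        static atomic64_unchecked_t events = ATOMIC64_INIT(0);

        static void record_event(void)
        {
                atomic64_inc_unchecked(&events);   /* expands to atomic64_inc() here */
        }
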
4786diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4787index a5abb00..9cbca9a 100644
4788--- a/arch/arm64/include/asm/barrier.h
4789+++ b/arch/arm64/include/asm/barrier.h
4790@@ -44,7 +44,7 @@
4791 do { \
4792 compiletime_assert_atomic_type(*p); \
4793 barrier(); \
4794- ACCESS_ONCE(*p) = (v); \
4795+ ACCESS_ONCE_RW(*p) = (v); \
4796 } while (0)
4797
4798 #define smp_load_acquire(p) \
4799diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4800index 4fde8c1..441f84f 100644
4801--- a/arch/arm64/include/asm/percpu.h
4802+++ b/arch/arm64/include/asm/percpu.h
4803@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4804 {
4805 switch (size) {
4806 case 1:
4807- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4808+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4809 break;
4810 case 2:
4811- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4812+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4813 break;
4814 case 4:
4815- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4816+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4817 break;
4818 case 8:
4819- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4820+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4821 break;
4822 default:
4823 BUILD_BUG();
4824diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4825index e20df38..027ede3 100644
4826--- a/arch/arm64/include/asm/pgalloc.h
4827+++ b/arch/arm64/include/asm/pgalloc.h
4828@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4829 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4830 }
4831
4832+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4833+{
4834+ pud_populate(mm, pud, pmd);
4835+}
4836+
4837 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4838
4839 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4840diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4841index 07e1ba44..ec8cbbb 100644
4842--- a/arch/arm64/include/asm/uaccess.h
4843+++ b/arch/arm64/include/asm/uaccess.h
4844@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4845 flag; \
4846 })
4847
4848+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4849 #define access_ok(type, addr, size) __range_ok(addr, size)
4850 #define user_addr_max get_fs
4851
4852diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4853index ef7d112..08cd35f 100644
4854--- a/arch/arm64/mm/dma-mapping.c
4855+++ b/arch/arm64/mm/dma-mapping.c
4856@@ -137,7 +137,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4857 phys_to_page(paddr),
4858 size >> PAGE_SHIFT);
4859 if (!freed)
4860- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4861+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4862 }
4863
4864 static void *__dma_alloc(struct device *dev, size_t size,
4865diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4866index c3a58a1..78fbf54 100644
4867--- a/arch/avr32/include/asm/cache.h
4868+++ b/arch/avr32/include/asm/cache.h
4869@@ -1,8 +1,10 @@
4870 #ifndef __ASM_AVR32_CACHE_H
4871 #define __ASM_AVR32_CACHE_H
4872
4873+#include <linux/const.h>
4874+
4875 #define L1_CACHE_SHIFT 5
4876-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4877+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4878
4879 /*
4880 * Memory returned by kmalloc() may be used for DMA, so we must make
4881diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4882index d232888..87c8df1 100644
4883--- a/arch/avr32/include/asm/elf.h
4884+++ b/arch/avr32/include/asm/elf.h
4885@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4886 the loader. We need to make sure that it is out of the way of the program
4887 that it will "exec", and that there is sufficient room for the brk. */
4888
4889-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4890+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4891
4892+#ifdef CONFIG_PAX_ASLR
4893+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4894+
4895+#define PAX_DELTA_MMAP_LEN 15
4896+#define PAX_DELTA_STACK_LEN 15
4897+#endif
4898
4899 /* This yields a mask that user programs can use to figure out what
4900 instruction set this CPU supports. This could be done in user space,
4901diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4902index 479330b..53717a8 100644
4903--- a/arch/avr32/include/asm/kmap_types.h
4904+++ b/arch/avr32/include/asm/kmap_types.h
4905@@ -2,9 +2,9 @@
4906 #define __ASM_AVR32_KMAP_TYPES_H
4907
4908 #ifdef CONFIG_DEBUG_HIGHMEM
4909-# define KM_TYPE_NR 29
4910+# define KM_TYPE_NR 30
4911 #else
4912-# define KM_TYPE_NR 14
4913+# define KM_TYPE_NR 15
4914 #endif
4915
4916 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4917diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4918index d223a8b..69c5210 100644
4919--- a/arch/avr32/mm/fault.c
4920+++ b/arch/avr32/mm/fault.c
4921@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4922
4923 int exception_trace = 1;
4924
4925+#ifdef CONFIG_PAX_PAGEEXEC
4926+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4927+{
4928+ unsigned long i;
4929+
4930+ printk(KERN_ERR "PAX: bytes at PC: ");
4931+ for (i = 0; i < 20; i++) {
4932+ unsigned char c;
4933+ if (get_user(c, (unsigned char *)pc+i))
4934+ printk(KERN_CONT "???????? ");
4935+ else
4936+ printk(KERN_CONT "%02x ", c);
4937+ }
4938+ printk("\n");
4939+}
4940+#endif
4941+
4942 /*
4943 * This routine handles page faults. It determines the address and the
4944 * problem, and then passes it off to one of the appropriate routines.
4945@@ -178,6 +195,16 @@ bad_area:
4946 up_read(&mm->mmap_sem);
4947
4948 if (user_mode(regs)) {
4949+
4950+#ifdef CONFIG_PAX_PAGEEXEC
4951+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4952+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4953+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4954+ do_group_exit(SIGKILL);
4955+ }
4956+ }
4957+#endif
4958+
4959 if (exception_trace && printk_ratelimit())
4960 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4961 "sp %08lx ecr %lu\n",
4962diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4963index 568885a..f8008df 100644
4964--- a/arch/blackfin/include/asm/cache.h
4965+++ b/arch/blackfin/include/asm/cache.h
4966@@ -7,6 +7,7 @@
4967 #ifndef __ARCH_BLACKFIN_CACHE_H
4968 #define __ARCH_BLACKFIN_CACHE_H
4969
4970+#include <linux/const.h>
4971 #include <linux/linkage.h> /* for asmlinkage */
4972
4973 /*
4974@@ -14,7 +15,7 @@
4975 * Blackfin loads 32 bytes for cache
4976 */
4977 #define L1_CACHE_SHIFT 5
4978-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4979+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4980 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4981
4982 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4983diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4984index aea2718..3639a60 100644
4985--- a/arch/cris/include/arch-v10/arch/cache.h
4986+++ b/arch/cris/include/arch-v10/arch/cache.h
4987@@ -1,8 +1,9 @@
4988 #ifndef _ASM_ARCH_CACHE_H
4989 #define _ASM_ARCH_CACHE_H
4990
4991+#include <linux/const.h>
4992 /* Etrax 100LX have 32-byte cache-lines. */
4993-#define L1_CACHE_BYTES 32
4994 #define L1_CACHE_SHIFT 5
4995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4996
4997 #endif /* _ASM_ARCH_CACHE_H */
4998diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4999index 7caf25d..ee65ac5 100644
5000--- a/arch/cris/include/arch-v32/arch/cache.h
5001+++ b/arch/cris/include/arch-v32/arch/cache.h
5002@@ -1,11 +1,12 @@
5003 #ifndef _ASM_CRIS_ARCH_CACHE_H
5004 #define _ASM_CRIS_ARCH_CACHE_H
5005
5006+#include <linux/const.h>
5007 #include <arch/hwregs/dma.h>
5008
5009 /* A cache-line is 32 bytes. */
5010-#define L1_CACHE_BYTES 32
5011 #define L1_CACHE_SHIFT 5
5012+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5013
5014 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5015
5016diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5017index 102190a..5334cea 100644
5018--- a/arch/frv/include/asm/atomic.h
5019+++ b/arch/frv/include/asm/atomic.h
5020@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5021 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5022 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5023
5024+#define atomic64_read_unchecked(v) atomic64_read(v)
5025+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5026+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5027+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5028+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5029+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5030+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5031+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5032+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5033+
5034 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5035 {
5036 int c, old;
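frv has no PAX_REFCOUNT instrumentation, but generic code still calls the *_unchecked API, so the hunk defines every variant as a plain alias of the checked operation. A reduced sketch of the type split behind this, reproduced from memory of the patched include/linux/types.h (treat the details as an assumption):

    /* with PAX_REFCOUNT: a distinct type whose ops never trap on overflow;
     * without it (as on frv): effectively another name for the plain type */
    typedef struct { long counter; } atomic64_demo_t;
    typedef atomic64_demo_t atomic64_unchecked_demo_t;

    #define atomic64_add_demo(a, v)           ((v)->counter += (a))
    #define atomic64_add_unchecked_demo(a, v) atomic64_add_demo((a), (v))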
5037diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5038index 2797163..c2a401df9 100644
5039--- a/arch/frv/include/asm/cache.h
5040+++ b/arch/frv/include/asm/cache.h
5041@@ -12,10 +12,11 @@
5042 #ifndef __ASM_CACHE_H
5043 #define __ASM_CACHE_H
5044
5045+#include <linux/const.h>
5046
5047 /* bytes per L1 cache line */
5048 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5049-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5050+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5051
5052 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5053 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5054diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5055index 43901f2..0d8b865 100644
5056--- a/arch/frv/include/asm/kmap_types.h
5057+++ b/arch/frv/include/asm/kmap_types.h
5058@@ -2,6 +2,6 @@
5059 #ifndef _ASM_KMAP_TYPES_H
5060 #define _ASM_KMAP_TYPES_H
5061
5062-#define KM_TYPE_NR 17
5063+#define KM_TYPE_NR 18
5064
5065 #endif
5066diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5067index 836f147..4cf23f5 100644
5068--- a/arch/frv/mm/elf-fdpic.c
5069+++ b/arch/frv/mm/elf-fdpic.c
5070@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5071 {
5072 struct vm_area_struct *vma;
5073 struct vm_unmapped_area_info info;
5074+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5075
5076 if (len > TASK_SIZE)
5077 return -ENOMEM;
5078@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5079 if (addr) {
5080 addr = PAGE_ALIGN(addr);
5081 vma = find_vma(current->mm, addr);
5082- if (TASK_SIZE - len >= addr &&
5083- (!vma || addr + len <= vma->vm_start))
5084+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5085 goto success;
5086 }
5087
5088@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5089 info.high_limit = (current->mm->start_stack - 0x00200000);
5090 info.align_mask = 0;
5091 info.align_offset = 0;
5092+ info.threadstack_offset = offset;
5093 addr = vm_unmapped_area(&info);
5094 if (!(addr & ~PAGE_MASK))
5095 goto success;
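gr_rand_threadstack_offset() and check_heap_stack_gap() come from the grsecurity core rather than this hunk: the former adds a per-exec random pad between thread stacks and nearby mappings, the latter replaces the bare "!vma || addr + len <= vma->vm_start" test with one that also demands a guard gap below stack-like VMAs. A much-simplified standalone sketch of the property being enforced (field names, the gap parameter and the exact policy are assumptions, not the real signature):

    #include <stdbool.h>
    #include <stdio.h>

    struct vma_demo { unsigned long vm_start; bool grows_down; };

    static bool check_gap_demo(const struct vma_demo *vma, unsigned long addr,
                               unsigned long len, unsigned long gap)
    {
        if (!vma)
            return true;                      /* nothing above the candidate */
        if (vma->grows_down)                  /* stack-like: demand a guard gap */
            return addr + len + gap <= vma->vm_start;
        return addr + len <= vma->vm_start;   /* otherwise: just no overlap */
    }

    int main(void)
    {
        struct vma_demo stack = { 0x200000, true };
        printf("%d %d\n",
               check_gap_demo(&stack, 0x100000, 0x1000, 0x10000),   /* 1: gap kept */
               check_gap_demo(&stack, 0x1f8000, 0x1000, 0x10000));  /* 0: too close */
        return 0;
    }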
5096diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5097index 69952c1..4fa2908 100644
5098--- a/arch/hexagon/include/asm/cache.h
5099+++ b/arch/hexagon/include/asm/cache.h
5100@@ -21,9 +21,11 @@
5101 #ifndef __ASM_CACHE_H
5102 #define __ASM_CACHE_H
5103
5104+#include <linux/const.h>
5105+
5106 /* Bytes per L1 cache line */
5107-#define L1_CACHE_SHIFT (5)
5108-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5109+#define L1_CACHE_SHIFT 5
5110+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5111
5112 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5113
5114diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5115index 074e52b..76afdac 100644
5116--- a/arch/ia64/Kconfig
5117+++ b/arch/ia64/Kconfig
5118@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5119 config KEXEC
5120 bool "kexec system call"
5121 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5122+ depends on !GRKERNSEC_KMEM
5123 help
5124 kexec is a system call that implements the ability to shutdown your
5125 current kernel, and to start another kernel. It is like a reboot
5126diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5127index 970d0bd..e750b9b 100644
5128--- a/arch/ia64/Makefile
5129+++ b/arch/ia64/Makefile
5130@@ -98,5 +98,6 @@ endef
5131 archprepare: make_nr_irqs_h FORCE
5132 PHONY += make_nr_irqs_h FORCE
5133
5134+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5135 make_nr_irqs_h: FORCE
5136 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5137diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5138index 0bf0350..2ad1957 100644
5139--- a/arch/ia64/include/asm/atomic.h
5140+++ b/arch/ia64/include/asm/atomic.h
5141@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5142 #define atomic64_inc(v) atomic64_add(1, (v))
5143 #define atomic64_dec(v) atomic64_sub(1, (v))
5144
5145+#define atomic64_read_unchecked(v) atomic64_read(v)
5146+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5147+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5148+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5149+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5150+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5151+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5152+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5153+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5154+
5155 #endif /* _ASM_IA64_ATOMIC_H */
5156diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5157index f6769eb..1cdb590 100644
5158--- a/arch/ia64/include/asm/barrier.h
5159+++ b/arch/ia64/include/asm/barrier.h
5160@@ -66,7 +66,7 @@
5161 do { \
5162 compiletime_assert_atomic_type(*p); \
5163 barrier(); \
5164- ACCESS_ONCE(*p) = (v); \
5165+ ACCESS_ONCE_RW(*p) = (v); \
5166 } while (0)
5167
5168 #define smp_load_acquire(p) \
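The ACCESS_ONCE_RW() conversion here (and in the spinlock and other barrier headers below) pairs with a change elsewhere in this patch that constifies ACCESS_ONCE() so that stores through it fail to compile. A sketch of the two definitions as I understand the patched include/linux/compiler.h (reproduced from memory, so an assumption):

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))  /* loads only */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* loads and stores */

    /* With these, "ACCESS_ONCE(*p) = v" is an assignment through a const
     * lvalue and is rejected at compile time, which is why the store in
     * smp_store_release() above must use the _RW form. */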
5169diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5170index 988254a..e1ee885 100644
5171--- a/arch/ia64/include/asm/cache.h
5172+++ b/arch/ia64/include/asm/cache.h
5173@@ -1,6 +1,7 @@
5174 #ifndef _ASM_IA64_CACHE_H
5175 #define _ASM_IA64_CACHE_H
5176
5177+#include <linux/const.h>
5178
5179 /*
5180 * Copyright (C) 1998-2000 Hewlett-Packard Co
5181@@ -9,7 +10,7 @@
5182
5183 /* Bytes per L1 (data) cache line. */
5184 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5185-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5186+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5187
5188 #ifdef CONFIG_SMP
5189 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5190diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5191index 5a83c5c..4d7f553 100644
5192--- a/arch/ia64/include/asm/elf.h
5193+++ b/arch/ia64/include/asm/elf.h
5194@@ -42,6 +42,13 @@
5195 */
5196 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5197
5198+#ifdef CONFIG_PAX_ASLR
5199+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5200+
5201+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5202+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5203+#endif
5204+
5205 #define PT_IA_64_UNWIND 0x70000001
5206
5207 /* IA-64 relocations: */
5208diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5209index 5767cdf..7462574 100644
5210--- a/arch/ia64/include/asm/pgalloc.h
5211+++ b/arch/ia64/include/asm/pgalloc.h
5212@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5213 pgd_val(*pgd_entry) = __pa(pud);
5214 }
5215
5216+static inline void
5217+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5218+{
5219+ pgd_populate(mm, pgd_entry, pud);
5220+}
5221+
5222 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5223 {
5224 return quicklist_alloc(0, GFP_KERNEL, NULL);
5225@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5226 pud_val(*pud_entry) = __pa(pmd);
5227 }
5228
5229+static inline void
5230+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5231+{
5232+ pud_populate(mm, pud_entry, pmd);
5233+}
5234+
5235 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5236 {
5237 return quicklist_alloc(0, GFP_KERNEL, NULL);
5238diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5239index 7b6f880..ac8e008 100644
5240--- a/arch/ia64/include/asm/pgtable.h
5241+++ b/arch/ia64/include/asm/pgtable.h
5242@@ -12,7 +12,7 @@
5243 * David Mosberger-Tang <davidm@hpl.hp.com>
5244 */
5245
5246-
5247+#include <linux/const.h>
5248 #include <asm/mman.h>
5249 #include <asm/page.h>
5250 #include <asm/processor.h>
5251@@ -139,6 +139,17 @@
5252 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5253 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5254 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5255+
5256+#ifdef CONFIG_PAX_PAGEEXEC
5257+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5258+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5259+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5260+#else
5261+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5262+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5263+# define PAGE_COPY_NOEXEC PAGE_COPY
5264+#endif
5265+
5266 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5267 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5268 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5269diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5270index 45698cd..e8e2dbc 100644
5271--- a/arch/ia64/include/asm/spinlock.h
5272+++ b/arch/ia64/include/asm/spinlock.h
5273@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5274 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5275
5276 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5277- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5278+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5279 }
5280
5281 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5282diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5283index 4f3fb6cc..254055e 100644
5284--- a/arch/ia64/include/asm/uaccess.h
5285+++ b/arch/ia64/include/asm/uaccess.h
5286@@ -70,6 +70,7 @@
5287 && ((segment).seg == KERNEL_DS.seg \
5288 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5289 })
5290+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5291 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5292
5293 /*
5294@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5295 static inline unsigned long
5296 __copy_to_user (void __user *to, const void *from, unsigned long count)
5297 {
5298+ if (count > INT_MAX)
5299+ return count;
5300+
5301+ if (!__builtin_constant_p(count))
5302+ check_object_size(from, count, true);
5303+
5304 return __copy_user(to, (__force void __user *) from, count);
5305 }
5306
5307 static inline unsigned long
5308 __copy_from_user (void *to, const void __user *from, unsigned long count)
5309 {
5310+ if (count > INT_MAX)
5311+ return count;
5312+
5313+ if (!__builtin_constant_p(count))
5314+ check_object_size(to, count, false);
5315+
5316 return __copy_user((__force void __user *) to, from, count);
5317 }
5318
5319@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5320 ({ \
5321 void __user *__cu_to = (to); \
5322 const void *__cu_from = (from); \
5323- long __cu_len = (n); \
5324+ unsigned long __cu_len = (n); \
5325 \
5326- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5327+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5328+ if (!__builtin_constant_p(n)) \
5329+ check_object_size(__cu_from, __cu_len, true); \
5330 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5331+ } \
5332 __cu_len; \
5333 })
5334
5335@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5336 ({ \
5337 void *__cu_to = (to); \
5338 const void __user *__cu_from = (from); \
5339- long __cu_len = (n); \
5340+ unsigned long __cu_len = (n); \
5341 \
5342 __chk_user_ptr(__cu_from); \
5343- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5344+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5345+ if (!__builtin_constant_p(n)) \
5346+ check_object_size(__cu_to, __cu_len, false); \
5347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5348+ } \
5349 __cu_len; \
5350 })
5351
5352diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5353index 29754aa..06d2838 100644
5354--- a/arch/ia64/kernel/module.c
5355+++ b/arch/ia64/kernel/module.c
5356@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5357 }
5358
5359 static inline int
5360+in_init_rx (const struct module *mod, uint64_t addr)
5361+{
5362+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5363+}
5364+
5365+static inline int
5366+in_init_rw (const struct module *mod, uint64_t addr)
5367+{
5368+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5369+}
5370+
5371+static inline int
5372 in_init (const struct module *mod, uint64_t addr)
5373 {
5374- return addr - (uint64_t) mod->module_init < mod->init_size;
5375+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5376+}
5377+
5378+static inline int
5379+in_core_rx (const struct module *mod, uint64_t addr)
5380+{
5381+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5382+}
5383+
5384+static inline int
5385+in_core_rw (const struct module *mod, uint64_t addr)
5386+{
5387+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5388 }
5389
5390 static inline int
5391 in_core (const struct module *mod, uint64_t addr)
5392 {
5393- return addr - (uint64_t) mod->module_core < mod->core_size;
5394+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5395 }
5396
5397 static inline int
5398@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5399 break;
5400
5401 case RV_BDREL:
5402- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5403+ if (in_init_rx(mod, val))
5404+ val -= (uint64_t) mod->module_init_rx;
5405+ else if (in_init_rw(mod, val))
5406+ val -= (uint64_t) mod->module_init_rw;
5407+ else if (in_core_rx(mod, val))
5408+ val -= (uint64_t) mod->module_core_rx;
5409+ else if (in_core_rw(mod, val))
5410+ val -= (uint64_t) mod->module_core_rw;
5411 break;
5412
5413 case RV_LTV:
5414@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5415 * addresses have been selected...
5416 */
5417 uint64_t gp;
5418- if (mod->core_size > MAX_LTOFF)
5419+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5420 /*
5421 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5422 * at the end of the module.
5423 */
5424- gp = mod->core_size - MAX_LTOFF / 2;
5425+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5426 else
5427- gp = mod->core_size / 2;
5428- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5429+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5430+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5431 mod->arch.gp = gp;
5432 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5433 }
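All the new in_init_rx()/in_core_rw() style helpers rely on one unsigned-subtraction idiom to bounds-check an address against a [base, base+size) range in a single comparison. A standalone sketch of why it works:

    #include <stdio.h>
    #include <stdint.h>

    static int in_range(uint64_t addr, uint64_t base, uint64_t size)
    {
        /* For unsigned values, (addr - base < size) is exactly
         * (addr >= base && addr < base + size): when addr < base the
         * subtraction wraps to a huge value that can never be < size. */
        return addr - base < size;
    }

    int main(void)
    {
        printf("%d %d %d\n", in_range(0x1010, 0x1000, 0x100),   /* 1: inside   */
                             in_range(0x0fff, 0x1000, 0x100),   /* 0: below    */
                             in_range(0x1100, 0x1000, 0x100));  /* 0: one past */
        return 0;
    }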
5434diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5435index c39c3cd..3c77738 100644
5436--- a/arch/ia64/kernel/palinfo.c
5437+++ b/arch/ia64/kernel/palinfo.c
5438@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5439 return NOTIFY_OK;
5440 }
5441
5442-static struct notifier_block __refdata palinfo_cpu_notifier =
5443+static struct notifier_block palinfo_cpu_notifier =
5444 {
5445 .notifier_call = palinfo_cpu_callback,
5446 .priority = 0,
5447diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5448index 41e33f8..65180b2a 100644
5449--- a/arch/ia64/kernel/sys_ia64.c
5450+++ b/arch/ia64/kernel/sys_ia64.c
5451@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5452 unsigned long align_mask = 0;
5453 struct mm_struct *mm = current->mm;
5454 struct vm_unmapped_area_info info;
5455+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5456
5457 if (len > RGN_MAP_LIMIT)
5458 return -ENOMEM;
5459@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5460 if (REGION_NUMBER(addr) == RGN_HPAGE)
5461 addr = 0;
5462 #endif
5463+
5464+#ifdef CONFIG_PAX_RANDMMAP
5465+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5466+ addr = mm->free_area_cache;
5467+ else
5468+#endif
5469+
5470 if (!addr)
5471 addr = TASK_UNMAPPED_BASE;
5472
5473@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5474 info.high_limit = TASK_SIZE;
5475 info.align_mask = align_mask;
5476 info.align_offset = 0;
5477+ info.threadstack_offset = offset;
5478 return vm_unmapped_area(&info);
5479 }
5480
5481diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5482index 84f8a52..7c76178 100644
5483--- a/arch/ia64/kernel/vmlinux.lds.S
5484+++ b/arch/ia64/kernel/vmlinux.lds.S
5485@@ -192,7 +192,7 @@ SECTIONS {
5486 /* Per-cpu data: */
5487 . = ALIGN(PERCPU_PAGE_SIZE);
5488 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5489- __phys_per_cpu_start = __per_cpu_load;
5490+ __phys_per_cpu_start = per_cpu_load;
5491 /*
5492 * ensure percpu data fits
5493 * into percpu page size
5494diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5495index ba5ba7a..36e9d3a 100644
5496--- a/arch/ia64/mm/fault.c
5497+++ b/arch/ia64/mm/fault.c
5498@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5499 return pte_present(pte);
5500 }
5501
5502+#ifdef CONFIG_PAX_PAGEEXEC
5503+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5504+{
5505+ unsigned long i;
5506+
5507+ printk(KERN_ERR "PAX: bytes at PC: ");
5508+ for (i = 0; i < 8; i++) {
5509+ unsigned int c;
5510+ if (get_user(c, (unsigned int *)pc+i))
5511+ printk(KERN_CONT "???????? ");
5512+ else
5513+ printk(KERN_CONT "%08x ", c);
5514+ }
5515+ printk("\n");
5516+}
5517+#endif
5518+
5519 # define VM_READ_BIT 0
5520 # define VM_WRITE_BIT 1
5521 # define VM_EXEC_BIT 2
5522@@ -151,8 +168,21 @@ retry:
5523 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5524 goto bad_area;
5525
5526- if ((vma->vm_flags & mask) != mask)
5527+ if ((vma->vm_flags & mask) != mask) {
5528+
5529+#ifdef CONFIG_PAX_PAGEEXEC
5530+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5531+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5532+ goto bad_area;
5533+
5534+ up_read(&mm->mmap_sem);
5535+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5536+ do_group_exit(SIGKILL);
5537+ }
5538+#endif
5539+
5540 goto bad_area;
5541+ }
5542
5543 /*
5544 * If for any reason at all we couldn't handle the fault, make
5545diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5546index 52b7604b..455cb85 100644
5547--- a/arch/ia64/mm/hugetlbpage.c
5548+++ b/arch/ia64/mm/hugetlbpage.c
5549@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5550 unsigned long pgoff, unsigned long flags)
5551 {
5552 struct vm_unmapped_area_info info;
5553+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5554
5555 if (len > RGN_MAP_LIMIT)
5556 return -ENOMEM;
5557@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5558 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5559 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5560 info.align_offset = 0;
5561+ info.threadstack_offset = offset;
5562 return vm_unmapped_area(&info);
5563 }
5564
5565diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5566index 6b33457..88b5124 100644
5567--- a/arch/ia64/mm/init.c
5568+++ b/arch/ia64/mm/init.c
5569@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5570 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5571 vma->vm_end = vma->vm_start + PAGE_SIZE;
5572 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5573+
5574+#ifdef CONFIG_PAX_PAGEEXEC
5575+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5576+ vma->vm_flags &= ~VM_EXEC;
5577+
5578+#ifdef CONFIG_PAX_MPROTECT
5579+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5580+ vma->vm_flags &= ~VM_MAYEXEC;
5581+#endif
5582+
5583+ }
5584+#endif
5585+
5586 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5587 down_write(&current->mm->mmap_sem);
5588 if (insert_vm_struct(current->mm, vma)) {
5589@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5590 gate_vma.vm_start = FIXADDR_USER_START;
5591 gate_vma.vm_end = FIXADDR_USER_END;
5592 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5593- gate_vma.vm_page_prot = __P101;
5594+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5595
5596 return 0;
5597 }
5598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5599index 40b3ee98..8c2c112 100644
5600--- a/arch/m32r/include/asm/cache.h
5601+++ b/arch/m32r/include/asm/cache.h
5602@@ -1,8 +1,10 @@
5603 #ifndef _ASM_M32R_CACHE_H
5604 #define _ASM_M32R_CACHE_H
5605
5606+#include <linux/const.h>
5607+
5608 /* L1 cache line size */
5609 #define L1_CACHE_SHIFT 4
5610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5612
5613 #endif /* _ASM_M32R_CACHE_H */
5614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5615index 82abd15..d95ae5d 100644
5616--- a/arch/m32r/lib/usercopy.c
5617+++ b/arch/m32r/lib/usercopy.c
5618@@ -14,6 +14,9 @@
5619 unsigned long
5620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5621 {
5622+ if ((long)n < 0)
5623+ return n;
5624+
5625 prefetch(from);
5626 if (access_ok(VERIFY_WRITE, to, n))
5627 __copy_user(to,from,n);
5628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5629 unsigned long
5630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5631 {
5632+ if ((long)n < 0)
5633+ return n;
5634+
5635 prefetchw(to);
5636 if (access_ok(VERIFY_READ, from, n))
5637 __copy_user_zeroing(to,from,n);
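The (long)n < 0 guard catches the classic length-underflow bug: a caller computes something like end - start, gets a negative value, and the implicit conversion to unsigned long turns it into a near-ULONG_MAX size. A small demo of the check (scaffolding is mine):

    #include <stdio.h>

    static unsigned long guarded(unsigned long n)
    {
        if ((long)n < 0)
            return n;        /* report everything as uncopied */
        return 0;            /* pretend the copy succeeded */
    }

    int main(void)
    {
        unsigned long n = (unsigned long)(10 - 20);   /* underflowed length */
        printf("n = %lu, uncopied = %lu\n", n, guarded(n));
        return 0;
    }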
5638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5639index 0395c51..5f26031 100644
5640--- a/arch/m68k/include/asm/cache.h
5641+++ b/arch/m68k/include/asm/cache.h
5642@@ -4,9 +4,11 @@
5643 #ifndef __ARCH_M68K_CACHE_H
5644 #define __ARCH_M68K_CACHE_H
5645
5646+#include <linux/const.h>
5647+
5648 /* bytes per L1 cache line */
5649 #define L1_CACHE_SHIFT 4
5650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5652
5653 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5654
5655diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5656index d703d8e..a8e2d70 100644
5657--- a/arch/metag/include/asm/barrier.h
5658+++ b/arch/metag/include/asm/barrier.h
5659@@ -90,7 +90,7 @@ static inline void fence(void)
5660 do { \
5661 compiletime_assert_atomic_type(*p); \
5662 smp_mb(); \
5663- ACCESS_ONCE(*p) = (v); \
5664+ ACCESS_ONCE_RW(*p) = (v); \
5665 } while (0)
5666
5667 #define smp_load_acquire(p) \
5668diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5669index 7ca80ac..794ba72 100644
5670--- a/arch/metag/mm/hugetlbpage.c
5671+++ b/arch/metag/mm/hugetlbpage.c
5672@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5673 info.high_limit = TASK_SIZE;
5674 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5675 info.align_offset = 0;
5676+ info.threadstack_offset = 0;
5677 return vm_unmapped_area(&info);
5678 }
5679
5680diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5681index 4efe96a..60e8699 100644
5682--- a/arch/microblaze/include/asm/cache.h
5683+++ b/arch/microblaze/include/asm/cache.h
5684@@ -13,11 +13,12 @@
5685 #ifndef _ASM_MICROBLAZE_CACHE_H
5686 #define _ASM_MICROBLAZE_CACHE_H
5687
5688+#include <linux/const.h>
5689 #include <asm/registers.h>
5690
5691 #define L1_CACHE_SHIFT 5
5692 /* word-granular cache in microblaze */
5693-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5694+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5695
5696 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5697
5698diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5699index c7a1690..28c24b6 100644
5700--- a/arch/mips/Kconfig
5701+++ b/arch/mips/Kconfig
5702@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
5703
5704 config KEXEC
5705 bool "Kexec system call"
5706+ depends on !GRKERNSEC_KMEM
5707 help
5708 kexec is a system call that implements the ability to shutdown your
5709 current kernel, and to start another kernel. It is like a reboot
5710diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5711index 7d89878..57c55b7 100644
5712--- a/arch/mips/cavium-octeon/dma-octeon.c
5713+++ b/arch/mips/cavium-octeon/dma-octeon.c
5714@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5715 if (dma_release_from_coherent(dev, order, vaddr))
5716 return;
5717
5718- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5719+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5720 }
5721
5722 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5723diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5724index 26d4363..3c9a82e 100644
5725--- a/arch/mips/include/asm/atomic.h
5726+++ b/arch/mips/include/asm/atomic.h
5727@@ -22,15 +22,39 @@
5728 #include <asm/cmpxchg.h>
5729 #include <asm/war.h>
5730
5731+#ifdef CONFIG_GENERIC_ATOMIC64
5732+#include <asm-generic/atomic64.h>
5733+#endif
5734+
5735 #define ATOMIC_INIT(i) { (i) }
5736
5737+#ifdef CONFIG_64BIT
5738+#define _ASM_EXTABLE(from, to) \
5739+" .section __ex_table,\"a\"\n" \
5740+" .dword " #from ", " #to"\n" \
5741+" .previous\n"
5742+#else
5743+#define _ASM_EXTABLE(from, to) \
5744+" .section __ex_table,\"a\"\n" \
5745+" .word " #from ", " #to"\n" \
5746+" .previous\n"
5747+#endif
5748+
5749 /*
5750 * atomic_read - read atomic variable
5751 * @v: pointer of type atomic_t
5752 *
5753 * Atomically reads the value of @v.
5754 */
5755-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5756+static inline int atomic_read(const atomic_t *v)
5757+{
5758+ return ACCESS_ONCE(v->counter);
5759+}
5760+
5761+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5762+{
5763+ return ACCESS_ONCE(v->counter);
5764+}
5765
5766 /*
5767 * atomic_set - set atomic variable
5768@@ -39,47 +63,77 @@
5769 *
5770 * Atomically sets the value of @v to @i.
5771 */
5772-#define atomic_set(v, i) ((v)->counter = (i))
5773+static inline void atomic_set(atomic_t *v, int i)
5774+{
5775+ v->counter = i;
5776+}
5777
5778-#define ATOMIC_OP(op, c_op, asm_op) \
5779-static __inline__ void atomic_##op(int i, atomic_t * v) \
5780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5781+{
5782+ v->counter = i;
5783+}
5784+
5785+#ifdef CONFIG_PAX_REFCOUNT
5786+#define __OVERFLOW_POST \
5787+ " b 4f \n" \
5788+ " .set noreorder \n" \
5789+ "3: b 5f \n" \
5790+ " move %0, %1 \n" \
5791+ " .set reorder \n"
5792+#define __OVERFLOW_EXTABLE \
5793+ "3:\n" \
5794+ _ASM_EXTABLE(2b, 3b)
5795+#else
5796+#define __OVERFLOW_POST
5797+#define __OVERFLOW_EXTABLE
5798+#endif
5799+
5800+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5801+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5802 { \
5803 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5804 int temp; \
5805 \
5806 __asm__ __volatile__( \
5807- " .set arch=r4000 \n" \
5808- "1: ll %0, %1 # atomic_" #op " \n" \
5809- " " #asm_op " %0, %2 \n" \
5810+ " .set mips3 \n" \
5811+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5812+ "2: " #asm_op " %0, %2 \n" \
5813 " sc %0, %1 \n" \
5814 " beqzl %0, 1b \n" \
5815+ extable \
5816 " .set mips0 \n" \
5817 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5818 : "Ir" (i)); \
5819 } else if (kernel_uses_llsc) { \
5820 int temp; \
5821 \
5822- do { \
5823- __asm__ __volatile__( \
5824- " .set "MIPS_ISA_LEVEL" \n" \
5825- " ll %0, %1 # atomic_" #op "\n" \
5826- " " #asm_op " %0, %2 \n" \
5827- " sc %0, %1 \n" \
5828- " .set mips0 \n" \
5829- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5830- : "Ir" (i)); \
5831- } while (unlikely(!temp)); \
5832+ __asm__ __volatile__( \
5833+ " .set "MIPS_ISA_LEVEL" \n" \
5834+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5835+ "2: " #asm_op " %0, %2 \n" \
5836+ " sc %0, %1 \n" \
5837+ " beqz %0, 1b \n" \
5838+ extable \
5839+ " .set mips0 \n" \
5840+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5841+ : "Ir" (i)); \
5842 } else { \
5843 unsigned long flags; \
5844 \
5845 raw_local_irq_save(flags); \
5846- v->counter c_op i; \
5847+ __asm__ __volatile__( \
5848+ "2: " #asm_op " %0, %1 \n" \
5849+ extable \
5850+ : "+r" (v->counter) : "Ir" (i)); \
5851 raw_local_irq_restore(flags); \
5852 } \
5853 }
5854
5855-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5856-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5857+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5858+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5859+
5860+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5861+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5862 { \
5863 int result; \
5864 \
5865@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5866 int temp; \
5867 \
5868 __asm__ __volatile__( \
5869- " .set arch=r4000 \n" \
5870- "1: ll %1, %2 # atomic_" #op "_return \n" \
5871- " " #asm_op " %0, %1, %3 \n" \
5872+ " .set mips3 \n" \
5873+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5874+ "2: " #asm_op " %0, %1, %3 \n" \
5875 " sc %0, %2 \n" \
5876 " beqzl %0, 1b \n" \
5877- " " #asm_op " %0, %1, %3 \n" \
5878+ post_op \
5879+ extable \
5880+ "4: " #asm_op " %0, %1, %3 \n" \
5881+ "5: \n" \
5882 " .set mips0 \n" \
5883 : "=&r" (result), "=&r" (temp), \
5884 "+" GCC_OFF_SMALL_ASM() (v->counter) \
5885@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5886 } else if (kernel_uses_llsc) { \
5887 int temp; \
5888 \
5889- do { \
5890- __asm__ __volatile__( \
5891- " .set "MIPS_ISA_LEVEL" \n" \
5892- " ll %1, %2 # atomic_" #op "_return \n" \
5893- " " #asm_op " %0, %1, %3 \n" \
5894- " sc %0, %2 \n" \
5895- " .set mips0 \n" \
5896- : "=&r" (result), "=&r" (temp), \
5897- "+" GCC_OFF_SMALL_ASM() (v->counter) \
5898- : "Ir" (i)); \
5899- } while (unlikely(!result)); \
5900+ __asm__ __volatile__( \
5901+ " .set "MIPS_ISA_LEVEL" \n" \
5902+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5903+ "2: " #asm_op " %0, %1, %3 \n" \
5904+ " sc %0, %2 \n" \
5905+ post_op \
5906+ extable \
5907+ "4: " #asm_op " %0, %1, %3 \n" \
5908+ "5: \n" \
5909+ " .set mips0 \n" \
5910+ : "=&r" (result), "=&r" (temp), \
5911+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
5912+ : "Ir" (i)); \
5913 \
5914 result = temp; result c_op i; \
5915 } else { \
5916 unsigned long flags; \
5917 \
5918 raw_local_irq_save(flags); \
5919- result = v->counter; \
5920- result c_op i; \
5921- v->counter = result; \
5922+ __asm__ __volatile__( \
5923+ " lw %0, %1 \n" \
5924+ "2: " #asm_op " %0, %1, %2 \n" \
5925+ " sw %0, %1 \n" \
5926+ "3: \n" \
5927+ extable \
5928+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5929+ : "Ir" (i)); \
5930 raw_local_irq_restore(flags); \
5931 } \
5932 \
5933@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5934 return result; \
5935 }
5936
5937-#define ATOMIC_OPS(op, c_op, asm_op) \
5938- ATOMIC_OP(op, c_op, asm_op) \
5939- ATOMIC_OP_RETURN(op, c_op, asm_op)
5940+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5941+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5942
5943-ATOMIC_OPS(add, +=, addu)
5944-ATOMIC_OPS(sub, -=, subu)
5945+#define ATOMIC_OPS(op, asm_op) \
5946+ ATOMIC_OP(op, asm_op) \
5947+ ATOMIC_OP_RETURN(op, asm_op)
5948+
5949+ATOMIC_OPS(add, add)
5950+ATOMIC_OPS(sub, sub)
5951
5952 #undef ATOMIC_OPS
5953 #undef ATOMIC_OP_RETURN
5954+#undef __ATOMIC_OP_RETURN
5955 #undef ATOMIC_OP
5956+#undef __ATOMIC_OP
5957
5958 /*
5959 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5960@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5961 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5962 * The function returns the old value of @v minus @i.
5963 */
5964-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5965+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5966 {
5967 int result;
5968
5969@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5970 int temp;
5971
5972 __asm__ __volatile__(
5973- " .set arch=r4000 \n"
5974+ " .set "MIPS_ISA_LEVEL" \n"
5975 "1: ll %1, %2 # atomic_sub_if_positive\n"
5976 " subu %0, %1, %3 \n"
5977 " bltz %0, 1f \n"
5978@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5979 return result;
5980 }
5981
5982-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5983-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5984+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5985+{
5986+ return cmpxchg(&v->counter, old, new);
5987+}
5988+
5989+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5990+ int new)
5991+{
5992+ return cmpxchg(&(v->counter), old, new);
5993+}
5994+
5995+static inline int atomic_xchg(atomic_t *v, int new)
5996+{
5997+ return xchg(&v->counter, new);
5998+}
5999+
6000+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6001+{
6002+ return xchg(&(v->counter), new);
6003+}
6004
6005 /**
6006 * __atomic_add_unless - add unless the number is a given value
6007@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6008
6009 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6010 #define atomic_inc_return(v) atomic_add_return(1, (v))
6011+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6012+{
6013+ return atomic_add_return_unchecked(1, v);
6014+}
6015
6016 /*
6017 * atomic_sub_and_test - subtract value from variable and test result
6018@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6019 * other cases.
6020 */
6021 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6022+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6023+{
6024+ return atomic_add_return_unchecked(1, v) == 0;
6025+}
6026
6027 /*
6028 * atomic_dec_and_test - decrement by 1 and test
6029@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6030 * Atomically increments @v by 1.
6031 */
6032 #define atomic_inc(v) atomic_add(1, (v))
6033+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6034+{
6035+ atomic_add_unchecked(1, v);
6036+}
6037
6038 /*
6039 * atomic_dec - decrement and test
6040@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6041 * Atomically decrements @v by 1.
6042 */
6043 #define atomic_dec(v) atomic_sub(1, (v))
6044+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6045+{
6046+ atomic_sub_unchecked(1, v);
6047+}
6048
6049 /*
6050 * atomic_add_negative - add and test if negative
6051@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6052 * @v: pointer of type atomic64_t
6053 *
6054 */
6055-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6056+static inline long atomic64_read(const atomic64_t *v)
6057+{
6058+ return ACCESS_ONCE(v->counter);
6059+}
6060+
6061+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6062+{
6063+ return ACCESS_ONCE(v->counter);
6064+}
6065
6066 /*
6067 * atomic64_set - set atomic variable
6068 * @v: pointer of type atomic64_t
6069 * @i: required value
6070 */
6071-#define atomic64_set(v, i) ((v)->counter = (i))
6072+static inline void atomic64_set(atomic64_t *v, long i)
6073+{
6074+ v->counter = i;
6075+}
6076
6077-#define ATOMIC64_OP(op, c_op, asm_op) \
6078-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6079+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6080+{
6081+ v->counter = i;
6082+}
6083+
6084+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6085+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6086 { \
6087 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6088 long temp; \
6089 \
6090 __asm__ __volatile__( \
6091- " .set arch=r4000 \n" \
6092- "1: lld %0, %1 # atomic64_" #op " \n" \
6093- " " #asm_op " %0, %2 \n" \
6094+ " .set "MIPS_ISA_LEVEL" \n" \
6095+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6096+ "2: " #asm_op " %0, %2 \n" \
6097 " scd %0, %1 \n" \
6098 " beqzl %0, 1b \n" \
6099+ extable \
6100 " .set mips0 \n" \
6101 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6102 : "Ir" (i)); \
6103 } else if (kernel_uses_llsc) { \
6104 long temp; \
6105 \
6106- do { \
6107- __asm__ __volatile__( \
6108- " .set "MIPS_ISA_LEVEL" \n" \
6109- " lld %0, %1 # atomic64_" #op "\n" \
6110- " " #asm_op " %0, %2 \n" \
6111- " scd %0, %1 \n" \
6112- " .set mips0 \n" \
6113- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6114- : "Ir" (i)); \
6115- } while (unlikely(!temp)); \
6116+ __asm__ __volatile__( \
6117+ " .set "MIPS_ISA_LEVEL" \n" \
6118+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6119+ "2: " #asm_op " %0, %2 \n" \
6120+ " scd %0, %1 \n" \
6121+ " beqz %0, 1b \n" \
6122+ extable \
6123+ " .set mips0 \n" \
6124+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6125+ : "Ir" (i)); \
6126 } else { \
6127 unsigned long flags; \
6128 \
6129 raw_local_irq_save(flags); \
6130- v->counter c_op i; \
6131+ __asm__ __volatile__( \
6132+ "2: " #asm_op " %0, %1 \n" \
6133+ extable \
6134+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6135 raw_local_irq_restore(flags); \
6136 } \
6137 }
6138
6139-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6140-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6141+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6142+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6143+
6144+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6145+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6146 { \
6147 long result; \
6148 \
6149@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6150 long temp; \
6151 \
6152 __asm__ __volatile__( \
6153- " .set arch=r4000 \n" \
6154+ " .set mips3 \n" \
6155 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6156- " " #asm_op " %0, %1, %3 \n" \
6157+ "2: " #asm_op " %0, %1, %3 \n" \
6158 " scd %0, %2 \n" \
6159 " beqzl %0, 1b \n" \
6160- " " #asm_op " %0, %1, %3 \n" \
6161+ post_op \
6162+ extable \
6163+ "4: " #asm_op " %0, %1, %3 \n" \
6164+ "5: \n" \
6165 " .set mips0 \n" \
6166 : "=&r" (result), "=&r" (temp), \
6167 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6168@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6169 } else if (kernel_uses_llsc) { \
6170 long temp; \
6171 \
6172- do { \
6173- __asm__ __volatile__( \
6174- " .set "MIPS_ISA_LEVEL" \n" \
6175- " lld %1, %2 # atomic64_" #op "_return\n" \
6176- " " #asm_op " %0, %1, %3 \n" \
6177- " scd %0, %2 \n" \
6178- " .set mips0 \n" \
6179- : "=&r" (result), "=&r" (temp), \
6180- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6181- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6182- : "memory"); \
6183- } while (unlikely(!result)); \
6184+ __asm__ __volatile__( \
6185+ " .set "MIPS_ISA_LEVEL" \n" \
6186+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6187+ "2: " #asm_op " %0, %1, %3 \n" \
6188+ " scd %0, %2 \n" \
6189+ " beqz %0, 1b \n" \
6190+ post_op \
6191+ extable \
6192+ "4: " #asm_op " %0, %1, %3 \n" \
6193+ "5: \n" \
6194+ " .set mips0 \n" \
6195+ : "=&r" (result), "=&r" (temp), \
6196+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6197+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6198+ : "memory"); \
6199 \
6200 result = temp; result c_op i; \
6201 } else { \
6202 unsigned long flags; \
6203 \
6204 raw_local_irq_save(flags); \
6205- result = v->counter; \
6206- result c_op i; \
6207- v->counter = result; \
6208+ __asm__ __volatile__( \
6209+ " ld %0, %1 \n" \
6210+ "2: " #asm_op " %0, %1, %2 \n" \
6211+ " sd %0, %1 \n" \
6212+ "3: \n" \
6213+ extable \
6214+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6215+ : "Ir" (i)); \
6216 raw_local_irq_restore(flags); \
6217 } \
6218 \
6219@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6220 return result; \
6221 }
6222
6223-#define ATOMIC64_OPS(op, c_op, asm_op) \
6224- ATOMIC64_OP(op, c_op, asm_op) \
6225- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6226+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6227+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6228
6229-ATOMIC64_OPS(add, +=, daddu)
6230-ATOMIC64_OPS(sub, -=, dsubu)
6231+#define ATOMIC64_OPS(op, asm_op) \
6232+ ATOMIC64_OP(op, asm_op) \
6233+ ATOMIC64_OP_RETURN(op, asm_op)
6234+
6235+ATOMIC64_OPS(add, dadd)
6236+ATOMIC64_OPS(sub, dsub)
6237
6238 #undef ATOMIC64_OPS
6239 #undef ATOMIC64_OP_RETURN
6240+#undef __ATOMIC64_OP_RETURN
6241 #undef ATOMIC64_OP
6242+#undef __ATOMIC64_OP
6243+#undef __OVERFLOW_EXTABLE
6244+#undef __OVERFLOW_POST
6245
6246 /*
6247 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6248@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6249 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6250 * The function returns the old value of @v minus @i.
6251 */
6252-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6253+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6254 {
6255 long result;
6256
6257@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6258 long temp;
6259
6260 __asm__ __volatile__(
6261- " .set arch=r4000 \n"
6262+ " .set "MIPS_ISA_LEVEL" \n"
6263 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6264 " dsubu %0, %1, %3 \n"
6265 " bltz %0, 1f \n"
6266@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6267 return result;
6268 }
6269
6270-#define atomic64_cmpxchg(v, o, n) \
6271- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6272-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6273+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6274+{
6275+ return cmpxchg(&v->counter, old, new);
6276+}
6277+
6278+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6279+ long new)
6280+{
6281+ return cmpxchg(&(v->counter), old, new);
6282+}
6283+
6284+static inline long atomic64_xchg(atomic64_t *v, long new)
6285+{
6286+ return xchg(&v->counter, new);
6287+}
6288+
6289+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6290+{
6291+ return xchg(&(v->counter), new);
6292+}
6293
6294 /**
6295 * atomic64_add_unless - add unless the number is a given value
6296@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6297
6298 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6299 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6300+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6301
6302 /*
6303 * atomic64_sub_and_test - subtract value from variable and test result
6304@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305 * other cases.
6306 */
6307 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6308+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6309
6310 /*
6311 * atomic64_dec_and_test - decrement by 1 and test
6312@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * Atomically increments @v by 1.
6314 */
6315 #define atomic64_inc(v) atomic64_add(1, (v))
6316+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6317
6318 /*
6319 * atomic64_dec - decrement and test
6320@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically decrements @v by 1.
6322 */
6323 #define atomic64_dec(v) atomic64_sub(1, (v))
6324+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_add_negative - add and test if negative
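The key to this whole MIPS rewrite is the instruction pairing: ATOMIC_OPS(add, add) expands the checked variant with the trapping add/dadd forms, which raise an integer-overflow exception on signed wrap, and the _unchecked variant with addu/daddu via the asm_op##u paste, which never trap. The __ex_table entries emitted by __OVERFLOW_EXTABLE then let the PAX_REFCOUNT fault handler catch the trap and unwind the increment. A userspace sketch of the two semantics only (MIPS traps cannot be shown portably, so the trapping path is described in comments):

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        int counter = INT_MAX;
        /* atomic_add_unchecked(): "addu", silent two's-complement wrap */
        unsigned wrapped = (unsigned)counter + 1;
        printf("unchecked: %d + 1 -> %d\n", counter, (int)wrapped);
        /* atomic_add() under PAX_REFCOUNT: the same step executes "add",
         * which faults on overflow; the fixup restores the old value and
         * the handler reports a refcount overflow instead of wrapping. */
        return 0;
    }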
6328diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6329index 2b8bbbc..4556df6 100644
6330--- a/arch/mips/include/asm/barrier.h
6331+++ b/arch/mips/include/asm/barrier.h
6332@@ -133,7 +133,7 @@
6333 do { \
6334 compiletime_assert_atomic_type(*p); \
6335 smp_mb(); \
6336- ACCESS_ONCE(*p) = (v); \
6337+ ACCESS_ONCE_RW(*p) = (v); \
6338 } while (0)
6339
6340 #define smp_load_acquire(p) \
6341diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6342index b4db69f..8f3b093 100644
6343--- a/arch/mips/include/asm/cache.h
6344+++ b/arch/mips/include/asm/cache.h
6345@@ -9,10 +9,11 @@
6346 #ifndef _ASM_CACHE_H
6347 #define _ASM_CACHE_H
6348
6349+#include <linux/const.h>
6350 #include <kmalloc.h>
6351
6352 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6353-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6354+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6355
6356 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6357 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6358diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6359index 535f196..2ab029e 100644
6360--- a/arch/mips/include/asm/elf.h
6361+++ b/arch/mips/include/asm/elf.h
6362@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6363 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6364 #endif
6365
6366+#ifdef CONFIG_PAX_ASLR
6367+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6368+
6369+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6370+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6371+#endif
6372+
6373 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6374 struct linux_binprm;
6375 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6376 int uses_interp);
6377
6378-struct mm_struct;
6379-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6380-#define arch_randomize_brk arch_randomize_brk
6381-
6382 struct arch_elf_state {
6383 int fp_abi;
6384 int interp_fp_abi;
6385diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6386index c1f6afa..38cc6e9 100644
6387--- a/arch/mips/include/asm/exec.h
6388+++ b/arch/mips/include/asm/exec.h
6389@@ -12,6 +12,6 @@
6390 #ifndef _ASM_EXEC_H
6391 #define _ASM_EXEC_H
6392
6393-extern unsigned long arch_align_stack(unsigned long sp);
6394+#define arch_align_stack(x) ((x) & ~0xfUL)
6395
6396 #endif /* _ASM_EXEC_H */
6397diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6398index 9e8ef59..1139d6b 100644
6399--- a/arch/mips/include/asm/hw_irq.h
6400+++ b/arch/mips/include/asm/hw_irq.h
6401@@ -10,7 +10,7 @@
6402
6403 #include <linux/atomic.h>
6404
6405-extern atomic_t irq_err_count;
6406+extern atomic_unchecked_t irq_err_count;
6407
6408 /*
6409 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6410diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6411index 8feaed6..1bd8a64 100644
6412--- a/arch/mips/include/asm/local.h
6413+++ b/arch/mips/include/asm/local.h
6414@@ -13,15 +13,25 @@ typedef struct
6415 atomic_long_t a;
6416 } local_t;
6417
6418+typedef struct {
6419+ atomic_long_unchecked_t a;
6420+} local_unchecked_t;
6421+
6422 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6423
6424 #define local_read(l) atomic_long_read(&(l)->a)
6425+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6426 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6427+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6428
6429 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6430+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6431 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6432+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6433 #define local_inc(l) atomic_long_inc(&(l)->a)
6434+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6435 #define local_dec(l) atomic_long_dec(&(l)->a)
6436+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6437
6438 /*
6439 * Same as above, but return the result value
6440@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6441 return result;
6442 }
6443
6444+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6445+{
6446+ unsigned long result;
6447+
6448+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6449+ unsigned long temp;
6450+
6451+ __asm__ __volatile__(
6452+ " .set mips3 \n"
6453+ "1:" __LL "%1, %2 # local_add_return \n"
6454+ " addu %0, %1, %3 \n"
6455+ __SC "%0, %2 \n"
6456+ " beqzl %0, 1b \n"
6457+ " addu %0, %1, %3 \n"
6458+ " .set mips0 \n"
6459+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6460+ : "Ir" (i), "m" (l->a.counter)
6461+ : "memory");
6462+ } else if (kernel_uses_llsc) {
6463+ unsigned long temp;
6464+
6465+ __asm__ __volatile__(
6466+ " .set mips3 \n"
6467+ "1:" __LL "%1, %2 # local_add_return \n"
6468+ " addu %0, %1, %3 \n"
6469+ __SC "%0, %2 \n"
6470+ " beqz %0, 1b \n"
6471+ " addu %0, %1, %3 \n"
6472+ " .set mips0 \n"
6473+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6474+ : "Ir" (i), "m" (l->a.counter)
6475+ : "memory");
6476+ } else {
6477+ unsigned long flags;
6478+
6479+ local_irq_save(flags);
6480+ result = l->a.counter;
6481+ result += i;
6482+ l->a.counter = result;
6483+ local_irq_restore(flags);
6484+ }
6485+
6486+ return result;
6487+}
6488+
6489 static __inline__ long local_sub_return(long i, local_t * l)
6490 {
6491 unsigned long result;
6492@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6493
6494 #define local_cmpxchg(l, o, n) \
6495 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6496+#define local_cmpxchg_unchecked(l, o, n) \
6497+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6498 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6499
6500 /**
6501diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6502index 154b70a..426ae3d 100644
6503--- a/arch/mips/include/asm/page.h
6504+++ b/arch/mips/include/asm/page.h
6505@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6506 #ifdef CONFIG_CPU_MIPS32
6507 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6508 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6509- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6510+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6511 #else
6512 typedef struct { unsigned long long pte; } pte_t;
6513 #define pte_val(x) ((x).pte)
6514diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6515index b336037..5b874cc 100644
6516--- a/arch/mips/include/asm/pgalloc.h
6517+++ b/arch/mips/include/asm/pgalloc.h
6518@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6519 {
6520 set_pud(pud, __pud((unsigned long)pmd));
6521 }
6522+
6523+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6524+{
6525+ pud_populate(mm, pud, pmd);
6526+}
6527 #endif
6528
6529 /*
6530diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6531index bef782c..d99df93 100644
6532--- a/arch/mips/include/asm/pgtable.h
6533+++ b/arch/mips/include/asm/pgtable.h
6534@@ -20,6 +20,9 @@
6535 #include <asm/io.h>
6536 #include <asm/pgtable-bits.h>
6537
6538+#define ktla_ktva(addr) (addr)
6539+#define ktva_ktla(addr) (addr)
6540+
6541 struct mm_struct;
6542 struct vm_area_struct;
6543
6544diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6545index 55ed660..3dc9422 100644
6546--- a/arch/mips/include/asm/thread_info.h
6547+++ b/arch/mips/include/asm/thread_info.h
6548@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
6549 #define TIF_SECCOMP 4 /* secure computing */
6550 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6551 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6552+/* li takes a 32bit immediate */
6553+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6554+
6555 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6556 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6557 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6558@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
6559 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6560 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6561 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6562+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6563
6564 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6565 _TIF_SYSCALL_AUDIT | \
6566- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6567+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6568+ _TIF_GRSEC_SETXID)
6569
6570 /* work to do in syscall_trace_leave() */
6571 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6572- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6573+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6574
6575 /* work to do on interrupt/exception return */
6576 #define _TIF_WORK_MASK \
6577@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
6578 /* work to do on any return to u-space */
6579 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6580 _TIF_WORK_SYSCALL_EXIT | \
6581- _TIF_SYSCALL_TRACEPOINT)
6582+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6583
6584 /*
6585 * We stash processor id into a COP0 register to retrieve it fast
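TIF_GRSEC_SETXID is folded into the syscall entry/exit work masks so that the tracing slow path runs whenever the flag is set; there, grsecurity's gr_delayed_cred_worker() applies a credential change that another thread requested, at a safe syscall boundary rather than mid-syscall. A userspace sketch of that deferred-work pattern (all names below are demo stand-ins, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool tif_grsec_setxid;   /* stands in for the per-thread TIF bit */
    static int  pending_uid = -1;   /* the deferred credential change */
    static int  current_uid = 0;

    static void delayed_cred_worker_demo(void)
    {
        current_uid = pending_uid;
        tif_grsec_setxid = false;
    }

    static void syscall_entry_demo(void)
    {
        /* mirrors the _TIF_WORK_SYSCALL_ENTRY check in the trace path */
        if (tif_grsec_setxid)
            delayed_cred_worker_demo();
    }

    int main(void)
    {
        pending_uid = 1000;          /* another thread requests a uid switch */
        tif_grsec_setxid = true;
        syscall_entry_demo();        /* applied at the next syscall boundary */
        printf("uid now %d\n", current_uid);
        return 0;
    }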
6586diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6587index bf8b324..cec5705 100644
6588--- a/arch/mips/include/asm/uaccess.h
6589+++ b/arch/mips/include/asm/uaccess.h
6590@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6591 __ok == 0; \
6592 })
6593
6594+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6595 #define access_ok(type, addr, size) \
6596 likely(__access_ok((addr), (size), __access_mask))
6597
6598diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6599index 1188e00..41cf144 100644
6600--- a/arch/mips/kernel/binfmt_elfn32.c
6601+++ b/arch/mips/kernel/binfmt_elfn32.c
6602@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6603 #undef ELF_ET_DYN_BASE
6604 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6605
6606+#ifdef CONFIG_PAX_ASLR
6607+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6608+
6609+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6610+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6611+#endif
6612+
6613 #include <asm/processor.h>
6614 #include <linux/module.h>
6615 #include <linux/elfcore.h>
6616diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6617index 9287678..f870e47 100644
6618--- a/arch/mips/kernel/binfmt_elfo32.c
6619+++ b/arch/mips/kernel/binfmt_elfo32.c
6620@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6621 #undef ELF_ET_DYN_BASE
6622 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6623
6624+#ifdef CONFIG_PAX_ASLR
6625+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6626+
6627+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6628+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6629+#endif
6630+
6631 #include <asm/processor.h>
6632
6633 #include <linux/module.h>
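
The two binfmt hunks give PaX ASLR its MIPS parameters: a fixed PAX_ELF_ET_DYN_BASE of 0x00400000 and mmap/stack randomization widths of 27-PAGE_SHIFT bits for 32-bit tasks and 36-PAGE_SHIFT bits for 64-bit tasks. The widths count random page bits, so the byte spread of the randomized base is the width plus PAGE_SHIFT. A quick worked example, assuming 4 KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                          /* 4 KB pages assumed */
    #define PAX_DELTA_MMAP_LEN (27 - PAGE_SHIFT)   /* 32-bit task: 15 bits */

    int main(void)
    {
        unsigned long pages = 1UL << PAX_DELTA_MMAP_LEN;
        printf("%d random page bits -> %lu pages -> %lu MB of base spread\n",
               PAX_DELTA_MMAP_LEN, pages, (pages << PAGE_SHIFT) >> 20);
        /* prints: 15 random page bits -> 32768 pages -> 128 MB of base spread */
        return 0;
    }
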
6634diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6635index a74ec3a..4f06f18 100644
6636--- a/arch/mips/kernel/i8259.c
6637+++ b/arch/mips/kernel/i8259.c
6638@@ -202,7 +202,7 @@ spurious_8259A_irq:
6639 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6640 spurious_irq_mask |= irqmask;
6641 }
6642- atomic_inc(&irq_err_count);
6643+ atomic_inc_unchecked(&irq_err_count);
6644 /*
6645 * Theoretically we do not have to handle this IRQ,
6646 * but in Linux this does not cause problems and is
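
This is the first of many irq_err_count conversions in this patch, and they all follow one rule: under PAX_REFCOUNT, plain atomic_t arithmetic is instrumented to detect overflow, so a leaked reference count cannot silently wrap into a use-after-free, while counters that may legitimately wrap, pure statistics like irq_err_count, are moved to atomic_unchecked_t, which keeps the old unchecked semantics. A userspace sketch of the split, using compiler builtins in place of the kernel's inline assembly:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { _Atomic int counter; } atomic_t;            /* checked  */
    typedef struct { _Atomic int counter; } atomic_unchecked_t;  /* may wrap */

    static void atomic_inc(atomic_t *v)
    {
        int old = atomic_load(&v->counter), next;
        if (__builtin_add_overflow(old, 1, &next)) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();                      /* the kernel reports and kills */
        }
        atomic_store(&v->counter, next);  /* model only, not a real LL/SC loop */
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        atomic_fetch_add(&v->counter, 1); /* C11 atomics: wraps, no trap */
    }

    int main(void)
    {
        atomic_t ref = { 1 };
        atomic_inc(&ref);                 /* fine: 1 -> 2 */

        atomic_unchecked_t err_count = { INT_MAX };
        atomic_inc_unchecked(&err_count); /* statistics counter: wrap is fine */
        printf("ref=%d err_count=%d\n",
               atomic_load(&ref.counter), atomic_load(&err_count.counter));
        return 0;
    }
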
6647diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6648index 44a1f79..2bd6aa3 100644
6649--- a/arch/mips/kernel/irq-gt641xx.c
6650+++ b/arch/mips/kernel/irq-gt641xx.c
6651@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6652 }
6653 }
6654
6655- atomic_inc(&irq_err_count);
6656+ atomic_inc_unchecked(&irq_err_count);
6657 }
6658
6659 void __init gt641xx_irq_init(void)
6660diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6661index d2bfbc2..a8eacd2 100644
6662--- a/arch/mips/kernel/irq.c
6663+++ b/arch/mips/kernel/irq.c
6664@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6665 printk("unexpected IRQ # %d\n", irq);
6666 }
6667
6668-atomic_t irq_err_count;
6669+atomic_unchecked_t irq_err_count;
6670
6671 int arch_show_interrupts(struct seq_file *p, int prec)
6672 {
6673- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6674+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6675 return 0;
6676 }
6677
6678 asmlinkage void spurious_interrupt(void)
6679 {
6680- atomic_inc(&irq_err_count);
6681+ atomic_inc_unchecked(&irq_err_count);
6682 }
6683
6684 void __init init_IRQ(void)
6685@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6686 #endif
6687 }
6688
6689+
6690 #ifdef DEBUG_STACKOVERFLOW
6691+extern void gr_handle_kernel_exploit(void);
6692+
6693 static inline void check_stack_overflow(void)
6694 {
6695 unsigned long sp;
6696@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6697 printk("do_IRQ: stack overflow: %ld\n",
6698 sp - sizeof(struct thread_info));
6699 dump_stack();
6700+ gr_handle_kernel_exploit();
6701 }
6702 }
6703 #else
6704diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6705index 0614717..002fa43 100644
6706--- a/arch/mips/kernel/pm-cps.c
6707+++ b/arch/mips/kernel/pm-cps.c
6708@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6709 nc_core_ready_count = nc_addr;
6710
6711 /* Ensure ready_count is zero-initialised before the assembly runs */
6712- ACCESS_ONCE(*nc_core_ready_count) = 0;
6713+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6714 coupled_barrier(&per_cpu(pm_barrier, core), online);
6715
6716 /* Run the generated entry code */
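
ACCESS_ONCE_RW is the writable counterpart grsecurity pairs with a const-qualified ACCESS_ONCE elsewhere in this patch (include/linux/compiler.h): once the read form gains const, a store through it is a compile error, so every intentional write has to name the _RW form explicitly. Roughly, under those assumed definitions:

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    static int ready;

    void example(void)
    {
        int r = ACCESS_ONCE(ready);    /* read: fine through either form */
        (void)r;
        ACCESS_ONCE_RW(ready) = 1;     /* write: must use the _RW form */
        /* ACCESS_ONCE(ready) = 1;        rejected: assignment to const */
    }
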
6717diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6718index bf85cc1..b365c61 100644
6719--- a/arch/mips/kernel/process.c
6720+++ b/arch/mips/kernel/process.c
6721@@ -535,18 +535,6 @@ out:
6722 return pc;
6723 }
6724
6725-/*
6726- * Don't forget that the stack pointer must be aligned on a 8 bytes
6727- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6728- */
6729-unsigned long arch_align_stack(unsigned long sp)
6730-{
6731- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6732- sp -= get_random_int() & ~PAGE_MASK;
6733-
6734- return sp & ALMASK;
6735-}
6736-
6737 static void arch_dump_stack(void *info)
6738 {
6739 struct pt_regs *regs;
6740diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6741index 5104528..950bbdc 100644
6742--- a/arch/mips/kernel/ptrace.c
6743+++ b/arch/mips/kernel/ptrace.c
6744@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6745 return ret;
6746 }
6747
6748+#ifdef CONFIG_GRKERNSEC_SETXID
6749+extern void gr_delayed_cred_worker(void);
6750+#endif
6751+
6752 /*
6753 * Notification of system call entry/exit
6754 * - triggered by current->work.syscall_trace
6755@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6756 tracehook_report_syscall_entry(regs))
6757 ret = -1;
6758
6759+#ifdef CONFIG_GRKERNSEC_SETXID
6760+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6761+ gr_delayed_cred_worker();
6762+#endif
6763+
6764 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6765 trace_sys_enter(regs, regs->regs[2]);
6766
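
The ptrace.c hook is the consuming end of TIF_GRSEC_SETXID from the thread_info.h hunk earlier: when a task's uid/gid change needs to propagate, the affected threads are flagged, and each one applies the new credentials via gr_delayed_cred_worker() the next time it crosses a syscall boundary, which is why the flag was added to the entry and exit work masks. A userspace model of the pattern (all names here are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int pending_cred_update[4];  /* one TIF-style flag per thread */
    static _Atomic unsigned current_uid = 1000;

    static void syscall_boundary(int tid)
    {
        /* like test_and_clear_thread_flag(TIF_GRSEC_SETXID) */
        if (atomic_exchange(&pending_cred_update[tid], 0))
            printf("thread %d: applying delayed uid change to %u\n",
                   tid, atomic_load(&current_uid));
    }

    static void *worker(void *arg)
    {
        int tid = (int)(long)arg;
        for (int i = 0; i < 3; i++)
            syscall_boundary(tid);     /* each "syscall" checks the flag */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];

        /* the "setuid" thread: change the shared state, then flag every
         * sibling instead of rewriting their credentials from afar */
        atomic_store(&current_uid, 0);
        for (int i = 0; i < 4; i++)
            atomic_store(&pending_cred_update[i], 1);

        for (long i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
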
6767diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6768index 07fc524..b9d7f28 100644
6769--- a/arch/mips/kernel/reset.c
6770+++ b/arch/mips/kernel/reset.c
6771@@ -13,6 +13,7 @@
6772 #include <linux/reboot.h>
6773
6774 #include <asm/reboot.h>
6775+#include <asm/bug.h>
6776
6777 /*
6778 * Urgs ... Too many MIPS machines to handle this in a generic way.
6779@@ -29,16 +30,19 @@ void machine_restart(char *command)
6780 {
6781 if (_machine_restart)
6782 _machine_restart(command);
6783+ BUG();
6784 }
6785
6786 void machine_halt(void)
6787 {
6788 if (_machine_halt)
6789 _machine_halt();
6790+ BUG();
6791 }
6792
6793 void machine_power_off(void)
6794 {
6795 if (pm_power_off)
6796 pm_power_off();
6797+ BUG();
6798 }
6799diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6800index 2242bdd..b284048 100644
6801--- a/arch/mips/kernel/sync-r4k.c
6802+++ b/arch/mips/kernel/sync-r4k.c
6803@@ -18,8 +18,8 @@
6804 #include <asm/mipsregs.h>
6805
6806 static atomic_t count_start_flag = ATOMIC_INIT(0);
6807-static atomic_t count_count_start = ATOMIC_INIT(0);
6808-static atomic_t count_count_stop = ATOMIC_INIT(0);
6809+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6810+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6811 static atomic_t count_reference = ATOMIC_INIT(0);
6812
6813 #define COUNTON 100
6814@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6815
6816 for (i = 0; i < NR_LOOPS; i++) {
6817 /* slaves loop on '!= 2' */
6818- while (atomic_read(&count_count_start) != 1)
6819+ while (atomic_read_unchecked(&count_count_start) != 1)
6820 mb();
6821- atomic_set(&count_count_stop, 0);
6822+ atomic_set_unchecked(&count_count_stop, 0);
6823 smp_wmb();
6824
6825 /* this lets the slaves write their count register */
6826- atomic_inc(&count_count_start);
6827+ atomic_inc_unchecked(&count_count_start);
6828
6829 /*
6830 * Everyone initialises count in the last loop:
6831@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6832 /*
6833 * Wait for all slaves to leave the synchronization point:
6834 */
6835- while (atomic_read(&count_count_stop) != 1)
6836+ while (atomic_read_unchecked(&count_count_stop) != 1)
6837 mb();
6838- atomic_set(&count_count_start, 0);
6839+ atomic_set_unchecked(&count_count_start, 0);
6840 smp_wmb();
6841- atomic_inc(&count_count_stop);
6842+ atomic_inc_unchecked(&count_count_stop);
6843 }
6844 /* Arrange for an interrupt in a short while */
6845 write_c0_compare(read_c0_count() + COUNTON);
6846@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6847 initcount = atomic_read(&count_reference);
6848
6849 for (i = 0; i < NR_LOOPS; i++) {
6850- atomic_inc(&count_count_start);
6851- while (atomic_read(&count_count_start) != 2)
6852+ atomic_inc_unchecked(&count_count_start);
6853+ while (atomic_read_unchecked(&count_count_start) != 2)
6854 mb();
6855
6856 /*
6857@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6858 if (i == NR_LOOPS-1)
6859 write_c0_count(initcount);
6860
6861- atomic_inc(&count_count_stop);
6862- while (atomic_read(&count_count_stop) != 2)
6863+ atomic_inc_unchecked(&count_count_stop);
6864+ while (atomic_read_unchecked(&count_count_stop) != 2)
6865 mb();
6866 }
6867 /* Arrange for an interrupt in a short while */
6868diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6869index 33984c0..666a96d 100644
6870--- a/arch/mips/kernel/traps.c
6871+++ b/arch/mips/kernel/traps.c
6872@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6873 siginfo_t info;
6874
6875 prev_state = exception_enter();
6876- die_if_kernel("Integer overflow", regs);
6877+ if (unlikely(!user_mode(regs))) {
6878+
6879+#ifdef CONFIG_PAX_REFCOUNT
6880+ if (fixup_exception(regs)) {
6881+ pax_report_refcount_overflow(regs);
6882+ exception_exit(prev_state);
6883+ return;
6884+ }
6885+#endif
6886+
6887+ die("Integer overflow", regs);
6888+ }
6889
6890 info.si_code = FPE_INTOVF;
6891 info.si_signo = SIGFPE;
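
With PAX_REFCOUNT, the instrumented MIPS atomics use trapping arithmetic, so a refcount overflow lands in do_ov() from kernel mode. The handler therefore grows a kernel-mode branch: if the faulting instruction has an exception-table entry, fixup_exception() redirects the saved PC to the recovery stub and pax_report_refcount_overflow() logs the event; a kernel overflow with no fixup now dies outright instead of being ignored. A toy model of the lookup fixup_exception() performs:

    #include <stdio.h>

    struct exception_table_entry { unsigned long insn, fixup; };

    static const struct exception_table_entry extable[] = {
        { 0x1000, 0x2000 },   /* trapping atomic op -> its recovery stub */
    };

    static unsigned long search_extable(unsigned long pc)
    {
        for (unsigned i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
            if (extable[i].insn == pc)
                return extable[i].fixup;
        return 0;                       /* no fixup: the kernel dies */
    }

    int main(void)
    {
        unsigned long pc = 0x1000, fixup = search_extable(pc);
        if (fixup)
            printf("refcount overflow at %#lx, resuming at %#lx\n", pc, fixup);
        return 0;
    }
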
6892diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6893index c9eccf5..3903621 100644
6894--- a/arch/mips/kvm/mips.c
6895+++ b/arch/mips/kvm/mips.c
6896@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6897 return r;
6898 }
6899
6900-int kvm_arch_init(void *opaque)
6901+int kvm_arch_init(const void *opaque)
6902 {
6903 if (kvm_mips_callbacks) {
6904 kvm_err("kvm: module already exists\n");
6905diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6906index 7ff8637..6004edb 100644
6907--- a/arch/mips/mm/fault.c
6908+++ b/arch/mips/mm/fault.c
6909@@ -31,6 +31,23 @@
6910
6911 int show_unhandled_signals = 1;
6912
6913+#ifdef CONFIG_PAX_PAGEEXEC
6914+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6915+{
6916+ unsigned long i;
6917+
6918+ printk(KERN_ERR "PAX: bytes at PC: ");
6919+ for (i = 0; i < 5; i++) {
6920+ unsigned int c;
6921+ if (get_user(c, (unsigned int *)pc+i))
6922+ printk(KERN_CONT "???????? ");
6923+ else
6924+ printk(KERN_CONT "%08x ", c);
6925+ }
6926+ printk("\n");
6927+}
6928+#endif
6929+
6930 /*
6931 * This routine handles page faults. It determines the address,
6932 * and the problem, and then passes it off to one of the appropriate
6933@@ -206,6 +223,14 @@ bad_area:
6934 bad_area_nosemaphore:
6935 /* User mode accesses just cause a SIGSEGV */
6936 if (user_mode(regs)) {
6937+
6938+#ifdef CONFIG_PAX_PAGEEXEC
6939+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6940+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6941+ do_group_exit(SIGKILL);
6942+ }
6943+#endif
6944+
6945 tsk->thread.cp0_badvaddr = address;
6946 tsk->thread.error_code = write;
6947 if (show_unhandled_signals &&
6948diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6949index f1baadd..5472dca 100644
6950--- a/arch/mips/mm/mmap.c
6951+++ b/arch/mips/mm/mmap.c
6952@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6953 struct vm_area_struct *vma;
6954 unsigned long addr = addr0;
6955 int do_color_align;
6956+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6957 struct vm_unmapped_area_info info;
6958
6959 if (unlikely(len > TASK_SIZE))
6960@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 do_color_align = 1;
6962
6963 /* requesting a specific address */
6964+
6965+#ifdef CONFIG_PAX_RANDMMAP
6966+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6967+#endif
6968+
6969 if (addr) {
6970 if (do_color_align)
6971 addr = COLOUR_ALIGN(addr, pgoff);
6972@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6973 addr = PAGE_ALIGN(addr);
6974
6975 vma = find_vma(mm, addr);
6976- if (TASK_SIZE - len >= addr &&
6977- (!vma || addr + len <= vma->vm_start))
6978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6979 return addr;
6980 }
6981
6982 info.length = len;
6983 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6984 info.align_offset = pgoff << PAGE_SHIFT;
6985+ info.threadstack_offset = offset;
6986
6987 if (dir == DOWN) {
6988 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6989@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6990 {
6991 unsigned long random_factor = 0UL;
6992
6993+#ifdef CONFIG_PAX_RANDMMAP
6994+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6995+#endif
6996+
6997 if (current->flags & PF_RANDOMIZE) {
6998 random_factor = get_random_int();
6999 random_factor = random_factor << PAGE_SHIFT;
7000@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7001
7002 if (mmap_is_legacy()) {
7003 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7004+
7005+#ifdef CONFIG_PAX_RANDMMAP
7006+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7007+ mm->mmap_base += mm->delta_mmap;
7008+#endif
7009+
7010 mm->get_unmapped_area = arch_get_unmapped_area;
7011 } else {
7012 mm->mmap_base = mmap_base(random_factor);
7013+
7014+#ifdef CONFIG_PAX_RANDMMAP
7015+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7016+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7017+#endif
7018+
7019 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7020 }
7021 }
7022
7023-static inline unsigned long brk_rnd(void)
7024-{
7025- unsigned long rnd = get_random_int();
7026-
7027- rnd = rnd << PAGE_SHIFT;
7028- /* 8MB for 32bit, 256MB for 64bit */
7029- if (TASK_IS_32BIT_ADDR)
7030- rnd = rnd & 0x7ffffful;
7031- else
7032- rnd = rnd & 0xffffffful;
7033-
7034- return rnd;
7035-}
7036-
7037-unsigned long arch_randomize_brk(struct mm_struct *mm)
7038-{
7039- unsigned long base = mm->brk;
7040- unsigned long ret;
7041-
7042- ret = PAGE_ALIGN(base + brk_rnd());
7043-
7044- if (ret < mm->brk)
7045- return mm->brk;
7046-
7047- return ret;
7048-}
7049-
7050 int __virt_addr_valid(const volatile void *kaddr)
7051 {
7052 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7053diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7054index a2358b4..7cead4f 100644
7055--- a/arch/mips/sgi-ip27/ip27-nmi.c
7056+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7057@@ -187,9 +187,9 @@ void
7058 cont_nmi_dump(void)
7059 {
7060 #ifndef REAL_NMI_SIGNAL
7061- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7062+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7063
7064- atomic_inc(&nmied_cpus);
7065+ atomic_inc_unchecked(&nmied_cpus);
7066 #endif
7067 /*
7068 * Only allow 1 cpu to proceed
7069@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7070 udelay(10000);
7071 }
7072 #else
7073- while (atomic_read(&nmied_cpus) != num_online_cpus());
7074+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7075 #endif
7076
7077 /*
7078diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7079index a046b30..6799527 100644
7080--- a/arch/mips/sni/rm200.c
7081+++ b/arch/mips/sni/rm200.c
7082@@ -270,7 +270,7 @@ spurious_8259A_irq:
7083 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7084 spurious_irq_mask |= irqmask;
7085 }
7086- atomic_inc(&irq_err_count);
7087+ atomic_inc_unchecked(&irq_err_count);
7088 /*
7089 * Theoretically we do not have to handle this IRQ,
7090 * but in Linux this does not cause problems and is
7091diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7092index 41e873b..34d33a7 100644
7093--- a/arch/mips/vr41xx/common/icu.c
7094+++ b/arch/mips/vr41xx/common/icu.c
7095@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7096
7097 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7098
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101
7102 return -1;
7103 }
7104diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7105index ae0e4ee..e8f0692 100644
7106--- a/arch/mips/vr41xx/common/irq.c
7107+++ b/arch/mips/vr41xx/common/irq.c
7108@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7109 irq_cascade_t *cascade;
7110
7111 if (irq >= NR_IRQS) {
7112- atomic_inc(&irq_err_count);
7113+ atomic_inc_unchecked(&irq_err_count);
7114 return;
7115 }
7116
7117@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7118 ret = cascade->get_irq(irq);
7119 irq = ret;
7120 if (ret < 0)
7121- atomic_inc(&irq_err_count);
7122+ atomic_inc_unchecked(&irq_err_count);
7123 else
7124 irq_dispatch(irq);
7125 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7126diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7127index 967d144..db12197 100644
7128--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7129+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7130@@ -11,12 +11,14 @@
7131 #ifndef _ASM_PROC_CACHE_H
7132 #define _ASM_PROC_CACHE_H
7133
7134+#include <linux/const.h>
7135+
7136 /* L1 cache */
7137
7138 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7139 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7140-#define L1_CACHE_BYTES 16 /* bytes per entry */
7141 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7143 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7144
7145 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
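
This cache.h pattern repeats across mn10300, openrisc, parisc, and powerpc below: L1_CACHE_BYTES is derived from L1_CACHE_SHIFT through _AC(), so the two constants can never drift apart, the value is UL-typed in C (avoiding signedness surprises in size arithmetic), and it still expands to a bare integer in assembly, where type suffixes are illegal. What <linux/const.h> does, re-created for illustration:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X           /* asm: no suffix allowed */
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)  /* C: paste the UL suffix on */
    #endif

    #define L1_CACHE_SHIFT 4
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)  /* 16UL in C, 16 in asm */
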
7146diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7147index bcb5df2..84fabd2 100644
7148--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7149+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7150@@ -16,13 +16,15 @@
7151 #ifndef _ASM_PROC_CACHE_H
7152 #define _ASM_PROC_CACHE_H
7153
7154+#include <linux/const.h>
7155+
7156 /*
7157 * L1 cache
7158 */
7159 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7160 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7161-#define L1_CACHE_BYTES 32 /* bytes per entry */
7162 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7164 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7165
7166 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7167diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7168index 4ce7a01..449202a 100644
7169--- a/arch/openrisc/include/asm/cache.h
7170+++ b/arch/openrisc/include/asm/cache.h
7171@@ -19,11 +19,13 @@
7172 #ifndef __ASM_OPENRISC_CACHE_H
7173 #define __ASM_OPENRISC_CACHE_H
7174
7175+#include <linux/const.h>
7176+
7177 /* FIXME: How can we replace these with values from the CPU...
7178 * they shouldn't be hard-coded!
7179 */
7180
7181-#define L1_CACHE_BYTES 16
7182 #define L1_CACHE_SHIFT 4
7183+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7184
7185 #endif /* __ASM_OPENRISC_CACHE_H */
7186diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7187index 226f8ca..9d9b87d 100644
7188--- a/arch/parisc/include/asm/atomic.h
7189+++ b/arch/parisc/include/asm/atomic.h
7190@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7191 return dec;
7192 }
7193
7194+#define atomic64_read_unchecked(v) atomic64_read(v)
7195+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7196+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7197+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7198+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7199+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7200+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7201+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7202+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7203+
7204 #endif /* !CONFIG_64BIT */
7205
7206
7207diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7208index 47f11c7..3420df2 100644
7209--- a/arch/parisc/include/asm/cache.h
7210+++ b/arch/parisc/include/asm/cache.h
7211@@ -5,6 +5,7 @@
7212 #ifndef __ARCH_PARISC_CACHE_H
7213 #define __ARCH_PARISC_CACHE_H
7214
7215+#include <linux/const.h>
7216
7217 /*
7218 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7219@@ -15,13 +16,13 @@
7220 * just ruin performance.
7221 */
7222 #ifdef CONFIG_PA20
7223-#define L1_CACHE_BYTES 64
7224 #define L1_CACHE_SHIFT 6
7225 #else
7226-#define L1_CACHE_BYTES 32
7227 #define L1_CACHE_SHIFT 5
7228 #endif
7229
7230+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7231+
7232 #ifndef __ASSEMBLY__
7233
7234 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7235diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7236index 3391d06..c23a2cc 100644
7237--- a/arch/parisc/include/asm/elf.h
7238+++ b/arch/parisc/include/asm/elf.h
7239@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7240
7241 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7242
7243+#ifdef CONFIG_PAX_ASLR
7244+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7245+
7246+#define PAX_DELTA_MMAP_LEN 16
7247+#define PAX_DELTA_STACK_LEN 16
7248+#endif
7249+
7250 /* This yields a mask that user programs can use to figure out what
7251 instruction set this CPU supports. This could be done in user space,
7252 but it's not easy, and we've already done it here. */
7253diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7254index d174372..f27fe5c 100644
7255--- a/arch/parisc/include/asm/pgalloc.h
7256+++ b/arch/parisc/include/asm/pgalloc.h
7257@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7258 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7259 }
7260
7261+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7262+{
7263+ pgd_populate(mm, pgd, pmd);
7264+}
7265+
7266 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7267 {
7268 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7269@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7270 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7271 #define pmd_free(mm, x) do { } while (0)
7272 #define pgd_populate(mm, pmd, pte) BUG()
7273+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7274
7275 #endif
7276
7277diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7278index 15207b9..3209e65 100644
7279--- a/arch/parisc/include/asm/pgtable.h
7280+++ b/arch/parisc/include/asm/pgtable.h
7281@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7282 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7283 #define PAGE_COPY PAGE_EXECREAD
7284 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7288+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7289+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7290+#else
7291+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7292+# define PAGE_COPY_NOEXEC PAGE_COPY
7293+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7294+#endif
7295+
7296 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7297 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7298 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7299diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7300index 0abdd4c..1af92f0 100644
7301--- a/arch/parisc/include/asm/uaccess.h
7302+++ b/arch/parisc/include/asm/uaccess.h
7303@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7304 const void __user *from,
7305 unsigned long n)
7306 {
7307- int sz = __compiletime_object_size(to);
7308+ size_t sz = __compiletime_object_size(to);
7309 int ret = -EFAULT;
7310
7311- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7312+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7313 ret = __copy_from_user(to, from, n);
7314 else
7315 copy_from_user_overflow();
7316diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7317index 3c63a82..b1d6ee9 100644
7318--- a/arch/parisc/kernel/module.c
7319+++ b/arch/parisc/kernel/module.c
7320@@ -98,16 +98,38 @@
7321
7322 /* three functions to determine where in the module core
7323 * or init pieces the location is */
7324+static inline int in_init_rx(struct module *me, void *loc)
7325+{
7326+ return (loc >= me->module_init_rx &&
7327+ loc < (me->module_init_rx + me->init_size_rx));
7328+}
7329+
7330+static inline int in_init_rw(struct module *me, void *loc)
7331+{
7332+ return (loc >= me->module_init_rw &&
7333+ loc < (me->module_init_rw + me->init_size_rw));
7334+}
7335+
7336 static inline int in_init(struct module *me, void *loc)
7337 {
7338- return (loc >= me->module_init &&
7339- loc <= (me->module_init + me->init_size));
7340+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7341+}
7342+
7343+static inline int in_core_rx(struct module *me, void *loc)
7344+{
7345+ return (loc >= me->module_core_rx &&
7346+ loc < (me->module_core_rx + me->core_size_rx));
7347+}
7348+
7349+static inline int in_core_rw(struct module *me, void *loc)
7350+{
7351+ return (loc >= me->module_core_rw &&
7352+ loc < (me->module_core_rw + me->core_size_rw));
7353 }
7354
7355 static inline int in_core(struct module *me, void *loc)
7356 {
7357- return (loc >= me->module_core &&
7358- loc <= (me->module_core + me->core_size));
7359+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7360 }
7361
7362 static inline int in_local(struct module *me, void *loc)
7363@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7364 }
7365
7366 /* align things a bit */
7367- me->core_size = ALIGN(me->core_size, 16);
7368- me->arch.got_offset = me->core_size;
7369- me->core_size += gots * sizeof(struct got_entry);
7370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7371+ me->arch.got_offset = me->core_size_rw;
7372+ me->core_size_rw += gots * sizeof(struct got_entry);
7373
7374- me->core_size = ALIGN(me->core_size, 16);
7375- me->arch.fdesc_offset = me->core_size;
7376- me->core_size += fdescs * sizeof(Elf_Fdesc);
7377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7378+ me->arch.fdesc_offset = me->core_size_rw;
7379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7380
7381 me->arch.got_max = gots;
7382 me->arch.fdesc_max = fdescs;
7383@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7384
7385 BUG_ON(value == 0);
7386
7387- got = me->module_core + me->arch.got_offset;
7388+ got = me->module_core_rw + me->arch.got_offset;
7389 for (i = 0; got[i].addr; i++)
7390 if (got[i].addr == value)
7391 goto out;
7392@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7393 #ifdef CONFIG_64BIT
7394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7395 {
7396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7398
7399 if (!value) {
7400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7401@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7402
7403 /* Create new one */
7404 fdesc->addr = value;
7405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7407 return (Elf_Addr)fdesc;
7408 }
7409 #endif /* CONFIG_64BIT */
7410@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7411
7412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7413 end = table + sechdrs[me->arch.unwind_section].sh_size;
7414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7416
7417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7418 me->arch.unwind_section, table, end, gp);
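
The module.c rework backs the module W^X split: struct module's single core/init region becomes an rx (text) pair and an rw (data) pair, the writable GOT and function descriptors move to the rw side, and in_init()/in_core() become unions of the per-region tests. Note that the bounds also tighten from the old inclusive "loc <=" to the correct exclusive "loc <". In miniature, under hypothetical names mirroring the fields grsecurity adds:

    #include <stdbool.h>
    #include <stdio.h>

    struct module_layout {
        char *core_rx; unsigned long size_rx;  /* text: read + execute */
        char *core_rw; unsigned long size_rw;  /* data, GOT, fdescs: read + write */
    };

    static bool in_region(char *base, unsigned long size, void *loc)
    {
        /* exclusive upper bound, fixing the old "loc <= end" test */
        return (char *)loc >= base && (char *)loc < base + size;
    }

    static bool in_core(struct module_layout *m, void *loc)
    {
        return in_region(m->core_rx, m->size_rx, loc) ||
               in_region(m->core_rw, m->size_rw, loc);
    }

    int main(void)
    {
        static char text[4096], data[4096];
        struct module_layout m = { text, sizeof text, data, sizeof data };
        printf("%d %d\n", in_core(&m, text + 16), in_core(&m, data + 4096));
        /* prints: 1 0  (the one-past-end pointer is outside the region) */
        return 0;
    }
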
7419diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7420index e1ffea2..46ed66e 100644
7421--- a/arch/parisc/kernel/sys_parisc.c
7422+++ b/arch/parisc/kernel/sys_parisc.c
7423@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7424 unsigned long task_size = TASK_SIZE;
7425 int do_color_align, last_mmap;
7426 struct vm_unmapped_area_info info;
7427+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7428
7429 if (len > task_size)
7430 return -ENOMEM;
7431@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 goto found_addr;
7433 }
7434
7435+#ifdef CONFIG_PAX_RANDMMAP
7436+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7437+#endif
7438+
7439 if (addr) {
7440 if (do_color_align && last_mmap)
7441 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7442@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7443 info.high_limit = mmap_upper_limit();
7444 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7445 info.align_offset = shared_align_offset(last_mmap, pgoff);
7446+ info.threadstack_offset = offset;
7447 addr = vm_unmapped_area(&info);
7448
7449 found_addr:
7450@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7451 unsigned long addr = addr0;
7452 int do_color_align, last_mmap;
7453 struct vm_unmapped_area_info info;
7454+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7455
7456 #ifdef CONFIG_64BIT
7457 /* This should only ever run for 32-bit processes. */
7458@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 }
7460
7461 /* requesting a specific address */
7462+#ifdef CONFIG_PAX_RANDMMAP
7463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7464+#endif
7465+
7466 if (addr) {
7467 if (do_color_align && last_mmap)
7468 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7469@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7470 info.high_limit = mm->mmap_base;
7471 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7472 info.align_offset = shared_align_offset(last_mmap, pgoff);
7473+ info.threadstack_offset = offset;
7474 addr = vm_unmapped_area(&info);
7475 if (!(addr & ~PAGE_MASK))
7476 goto found_addr;
7477@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7478 mm->mmap_legacy_base = mmap_legacy_base();
7479 mm->mmap_base = mmap_upper_limit();
7480
7481+#ifdef CONFIG_PAX_RANDMMAP
7482+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7483+ mm->mmap_legacy_base += mm->delta_mmap;
7484+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7485+ }
7486+#endif
7487+
7488 if (mmap_is_legacy()) {
7489 mm->mmap_base = mm->mmap_legacy_base;
7490 mm->get_unmapped_area = arch_get_unmapped_area;
7491diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7492index 47ee620..1107387 100644
7493--- a/arch/parisc/kernel/traps.c
7494+++ b/arch/parisc/kernel/traps.c
7495@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7496
7497 down_read(&current->mm->mmap_sem);
7498 vma = find_vma(current->mm,regs->iaoq[0]);
7499- if (vma && (regs->iaoq[0] >= vma->vm_start)
7500- && (vma->vm_flags & VM_EXEC)) {
7501-
7502+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7503 fault_address = regs->iaoq[0];
7504 fault_space = regs->iasq[0];
7505
7506diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7507index e5120e6..8ddb5cc 100644
7508--- a/arch/parisc/mm/fault.c
7509+++ b/arch/parisc/mm/fault.c
7510@@ -15,6 +15,7 @@
7511 #include <linux/sched.h>
7512 #include <linux/interrupt.h>
7513 #include <linux/module.h>
7514+#include <linux/unistd.h>
7515
7516 #include <asm/uaccess.h>
7517 #include <asm/traps.h>
7518@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7519 static unsigned long
7520 parisc_acctyp(unsigned long code, unsigned int inst)
7521 {
7522- if (code == 6 || code == 16)
7523+ if (code == 6 || code == 7 || code == 16)
7524 return VM_EXEC;
7525
7526 switch (inst & 0xf0000000) {
7527@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7528 }
7529 #endif
7530
7531+#ifdef CONFIG_PAX_PAGEEXEC
7532+/*
7533+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7534+ *
7535+ * returns 1 when task should be killed
7536+ * 2 when rt_sigreturn trampoline was detected
7537+ * 3 when unpatched PLT trampoline was detected
7538+ */
7539+static int pax_handle_fetch_fault(struct pt_regs *regs)
7540+{
7541+
7542+#ifdef CONFIG_PAX_EMUPLT
7543+ int err;
7544+
7545+ do { /* PaX: unpatched PLT emulation */
7546+ unsigned int bl, depwi;
7547+
7548+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7549+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7550+
7551+ if (err)
7552+ break;
7553+
7554+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7555+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7556+
7557+ err = get_user(ldw, (unsigned int *)addr);
7558+ err |= get_user(bv, (unsigned int *)(addr+4));
7559+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7560+
7561+ if (err)
7562+ break;
7563+
7564+ if (ldw == 0x0E801096U &&
7565+ bv == 0xEAC0C000U &&
7566+ ldw2 == 0x0E881095U)
7567+ {
7568+ unsigned int resolver, map;
7569+
7570+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7571+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7572+ if (err)
7573+ break;
7574+
7575+ regs->gr[20] = instruction_pointer(regs)+8;
7576+ regs->gr[21] = map;
7577+ regs->gr[22] = resolver;
7578+ regs->iaoq[0] = resolver | 3UL;
7579+ regs->iaoq[1] = regs->iaoq[0] + 4;
7580+ return 3;
7581+ }
7582+ }
7583+ } while (0);
7584+#endif
7585+
7586+#ifdef CONFIG_PAX_EMUTRAMP
7587+
7588+#ifndef CONFIG_PAX_EMUSIGRT
7589+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7590+ return 1;
7591+#endif
7592+
7593+ do { /* PaX: rt_sigreturn emulation */
7594+ unsigned int ldi1, ldi2, bel, nop;
7595+
7596+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7597+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7598+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7599+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7600+
7601+ if (err)
7602+ break;
7603+
7604+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7605+ ldi2 == 0x3414015AU &&
7606+ bel == 0xE4008200U &&
7607+ nop == 0x08000240U)
7608+ {
7609+ regs->gr[25] = (ldi1 & 2) >> 1;
7610+ regs->gr[20] = __NR_rt_sigreturn;
7611+ regs->gr[31] = regs->iaoq[1] + 16;
7612+ regs->sr[0] = regs->iasq[1];
7613+ regs->iaoq[0] = 0x100UL;
7614+ regs->iaoq[1] = regs->iaoq[0] + 4;
7615+ regs->iasq[0] = regs->sr[2];
7616+ regs->iasq[1] = regs->sr[2];
7617+ return 2;
7618+ }
7619+ } while (0);
7620+#endif
7621+
7622+ return 1;
7623+}
7624+
7625+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7626+{
7627+ unsigned long i;
7628+
7629+ printk(KERN_ERR "PAX: bytes at PC: ");
7630+ for (i = 0; i < 5; i++) {
7631+ unsigned int c;
7632+ if (get_user(c, (unsigned int *)pc+i))
7633+ printk(KERN_CONT "???????? ");
7634+ else
7635+ printk(KERN_CONT "%08x ", c);
7636+ }
7637+ printk("\n");
7638+}
7639+#endif
7640+
7641 int fixup_exception(struct pt_regs *regs)
7642 {
7643 const struct exception_table_entry *fix;
7644@@ -234,8 +345,33 @@ retry:
7645
7646 good_area:
7647
7648- if ((vma->vm_flags & acc_type) != acc_type)
7649+ if ((vma->vm_flags & acc_type) != acc_type) {
7650+
7651+#ifdef CONFIG_PAX_PAGEEXEC
7652+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7653+ (address & ~3UL) == instruction_pointer(regs))
7654+ {
7655+ up_read(&mm->mmap_sem);
7656+ switch (pax_handle_fetch_fault(regs)) {
7657+
7658+#ifdef CONFIG_PAX_EMUPLT
7659+ case 3:
7660+ return;
7661+#endif
7662+
7663+#ifdef CONFIG_PAX_EMUTRAMP
7664+ case 2:
7665+ return;
7666+#endif
7667+
7668+ }
7669+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7670+ do_group_exit(SIGKILL);
7671+ }
7672+#endif
7673+
7674 goto bad_area;
7675+ }
7676
7677 /*
7678 * If for any reason at all we couldn't handle the fault, make
7679diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7680index 22b0940..309f790 100644
7681--- a/arch/powerpc/Kconfig
7682+++ b/arch/powerpc/Kconfig
7683@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7684 config KEXEC
7685 bool "kexec system call"
7686 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7687+ depends on !GRKERNSEC_KMEM
7688 help
7689 kexec is a system call that implements the ability to shutdown your
7690 current kernel, and to start another kernel. It is like a reboot
7691diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7692index 512d278..d31fadd 100644
7693--- a/arch/powerpc/include/asm/atomic.h
7694+++ b/arch/powerpc/include/asm/atomic.h
7695@@ -12,6 +12,11 @@
7696
7697 #define ATOMIC_INIT(i) { (i) }
7698
7699+#define _ASM_EXTABLE(from, to) \
7700+" .section __ex_table,\"a\"\n" \
7701+ PPC_LONG" " #from ", " #to"\n" \
7702+" .previous\n"
7703+
7704 static __inline__ int atomic_read(const atomic_t *v)
7705 {
7706 int t;
7707@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7708 return t;
7709 }
7710
7711+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7712+{
7713+ int t;
7714+
7715+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7716+
7717+ return t;
7718+}
7719+
7720 static __inline__ void atomic_set(atomic_t *v, int i)
7721 {
7722 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7723 }
7724
7725-#define ATOMIC_OP(op, asm_op) \
7726-static __inline__ void atomic_##op(int a, atomic_t *v) \
7727+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7728+{
7729+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7730+}
7731+
7732+#ifdef CONFIG_PAX_REFCOUNT
7733+#define __REFCOUNT_OP(op) op##o.
7734+#define __OVERFLOW_PRE \
7735+ " mcrxr cr0\n"
7736+#define __OVERFLOW_POST \
7737+ " bf 4*cr0+so, 3f\n" \
7738+ "2: .long 0x00c00b00\n" \
7739+ "3:\n"
7740+#define __OVERFLOW_EXTABLE \
7741+ "\n4:\n"
7742+ _ASM_EXTABLE(2b, 4b)
7743+#else
7744+#define __REFCOUNT_OP(op) op
7745+#define __OVERFLOW_PRE
7746+#define __OVERFLOW_POST
7747+#define __OVERFLOW_EXTABLE
7748+#endif
7749+
7750+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7751+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7752 { \
7753 int t; \
7754 \
7755 __asm__ __volatile__( \
7756-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7757+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7758+ pre_op \
7759 #asm_op " %0,%2,%0\n" \
7760+ post_op \
7761 PPC405_ERR77(0,%3) \
7762 " stwcx. %0,0,%3 \n" \
7763 " bne- 1b\n" \
7764+ extable \
7765 : "=&r" (t), "+m" (v->counter) \
7766 : "r" (a), "r" (&v->counter) \
7767 : "cc"); \
7768 } \
7769
7770-#define ATOMIC_OP_RETURN(op, asm_op) \
7771-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7772+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7773+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7774+
7775+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7776+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7777 { \
7778 int t; \
7779 \
7780 __asm__ __volatile__( \
7781 PPC_ATOMIC_ENTRY_BARRIER \
7782-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7783+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7784+ pre_op \
7785 #asm_op " %0,%1,%0\n" \
7786+ post_op \
7787 PPC405_ERR77(0,%2) \
7788 " stwcx. %0,0,%2 \n" \
7789 " bne- 1b\n" \
7790+ extable \
7791 PPC_ATOMIC_EXIT_BARRIER \
7792 : "=&r" (t) \
7793 : "r" (a), "r" (&v->counter) \
7794@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7795 return t; \
7796 }
7797
7798+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7799+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7800+
7801 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7802
7803 ATOMIC_OPS(add, add)
7804@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7805
7806 #undef ATOMIC_OPS
7807 #undef ATOMIC_OP_RETURN
7808+#undef __ATOMIC_OP_RETURN
7809 #undef ATOMIC_OP
7810+#undef __ATOMIC_OP
7811
7812 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7813
7814-static __inline__ void atomic_inc(atomic_t *v)
7815-{
7816- int t;
7817+/*
7818+ * atomic_inc - increment atomic variable
7819+ * @v: pointer of type atomic_t
7820+ *
7821+ * Atomically increments @v by 1
7822+ */
7823+#define atomic_inc(v) atomic_add(1, (v))
7824+#define atomic_inc_return(v) atomic_add_return(1, (v))
7825
7826- __asm__ __volatile__(
7827-"1: lwarx %0,0,%2 # atomic_inc\n\
7828- addic %0,%0,1\n"
7829- PPC405_ERR77(0,%2)
7830-" stwcx. %0,0,%2 \n\
7831- bne- 1b"
7832- : "=&r" (t), "+m" (v->counter)
7833- : "r" (&v->counter)
7834- : "cc", "xer");
7835+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7836+{
7837+ atomic_add_unchecked(1, v);
7838 }
7839
7840-static __inline__ int atomic_inc_return(atomic_t *v)
7841+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7842 {
7843- int t;
7844-
7845- __asm__ __volatile__(
7846- PPC_ATOMIC_ENTRY_BARRIER
7847-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7848- addic %0,%0,1\n"
7849- PPC405_ERR77(0,%1)
7850-" stwcx. %0,0,%1 \n\
7851- bne- 1b"
7852- PPC_ATOMIC_EXIT_BARRIER
7853- : "=&r" (t)
7854- : "r" (&v->counter)
7855- : "cc", "xer", "memory");
7856-
7857- return t;
7858+ return atomic_add_return_unchecked(1, v);
7859 }
7860
7861 /*
7862@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7863 */
7864 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7865
7866-static __inline__ void atomic_dec(atomic_t *v)
7867+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7868 {
7869- int t;
7870-
7871- __asm__ __volatile__(
7872-"1: lwarx %0,0,%2 # atomic_dec\n\
7873- addic %0,%0,-1\n"
7874- PPC405_ERR77(0,%2)\
7875-" stwcx. %0,0,%2\n\
7876- bne- 1b"
7877- : "=&r" (t), "+m" (v->counter)
7878- : "r" (&v->counter)
7879- : "cc", "xer");
7880+ return atomic_add_return_unchecked(1, v) == 0;
7881 }
7882
7883-static __inline__ int atomic_dec_return(atomic_t *v)
7884+/*
7885+ * atomic_dec - decrement atomic variable
7886+ * @v: pointer of type atomic_t
7887+ *
7888+ * Atomically decrements @v by 1
7889+ */
7890+#define atomic_dec(v) atomic_sub(1, (v))
7891+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7892+
7893+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7894 {
7895- int t;
7896-
7897- __asm__ __volatile__(
7898- PPC_ATOMIC_ENTRY_BARRIER
7899-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7900- addic %0,%0,-1\n"
7901- PPC405_ERR77(0,%1)
7902-" stwcx. %0,0,%1\n\
7903- bne- 1b"
7904- PPC_ATOMIC_EXIT_BARRIER
7905- : "=&r" (t)
7906- : "r" (&v->counter)
7907- : "cc", "xer", "memory");
7908-
7909- return t;
7910+ atomic_sub_unchecked(1, v);
7911 }
7912
7913 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7915
7916+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7917+{
7918+ return cmpxchg(&(v->counter), old, new);
7919+}
7920+
7921+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7922+{
7923+ return xchg(&(v->counter), new);
7924+}
7925+
7926 /**
7927 * __atomic_add_unless - add unless the number is a given value
7928 * @v: pointer of type atomic_t
7929@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7930 PPC_ATOMIC_ENTRY_BARRIER
7931 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7932 cmpw 0,%0,%3 \n\
7933- beq- 2f \n\
7934- add %0,%2,%0 \n"
7935+ beq- 2f \n"
7936+
7937+#ifdef CONFIG_PAX_REFCOUNT
7938+" mcrxr cr0\n"
7939+" addo. %0,%2,%0\n"
7940+" bf 4*cr0+so, 4f\n"
7941+"3:.long " "0x00c00b00""\n"
7942+"4:\n"
7943+#else
7944+ "add %0,%2,%0 \n"
7945+#endif
7946+
7947 PPC405_ERR77(0,%2)
7948 " stwcx. %0,0,%1 \n\
7949 bne- 1b \n"
7950+"5:"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ _ASM_EXTABLE(3b, 5b)
7954+#endif
7955+
7956 PPC_ATOMIC_EXIT_BARRIER
7957 " subf %0,%2,%0 \n\
7958 2:"
7959@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7960 }
7961 #define atomic_dec_if_positive atomic_dec_if_positive
7962
7963+#define smp_mb__before_atomic_dec() smp_mb()
7964+#define smp_mb__after_atomic_dec() smp_mb()
7965+#define smp_mb__before_atomic_inc() smp_mb()
7966+#define smp_mb__after_atomic_inc() smp_mb()
7967+
7968 #ifdef __powerpc64__
7969
7970 #define ATOMIC64_INIT(i) { (i) }
7971@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7972 return t;
7973 }
7974
7975+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7976+{
7977+ long t;
7978+
7979+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7980+
7981+ return t;
7982+}
7983+
7984 static __inline__ void atomic64_set(atomic64_t *v, long i)
7985 {
7986 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7987 }
7988
7989-#define ATOMIC64_OP(op, asm_op) \
7990-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7991+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7992+{
7993+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7994+}
7995+
7996+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7997+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
7998 { \
7999 long t; \
8000 \
8001 __asm__ __volatile__( \
8002 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8003+ pre_op \
8004 #asm_op " %0,%2,%0\n" \
8005+ post_op \
8006 " stdcx. %0,0,%3 \n" \
8007 " bne- 1b\n" \
8008+ extable \
8009 : "=&r" (t), "+m" (v->counter) \
8010 : "r" (a), "r" (&v->counter) \
8011 : "cc"); \
8012 }
8013
8014-#define ATOMIC64_OP_RETURN(op, asm_op) \
8015-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8016+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8017+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8018+
8019+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8020+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8021 { \
8022 long t; \
8023 \
8024 __asm__ __volatile__( \
8025 PPC_ATOMIC_ENTRY_BARRIER \
8026 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8027+ pre_op \
8028 #asm_op " %0,%1,%0\n" \
8029+ post_op \
8030 " stdcx. %0,0,%2 \n" \
8031 " bne- 1b\n" \
8032+ extable \
8033 PPC_ATOMIC_EXIT_BARRIER \
8034 : "=&r" (t) \
8035 : "r" (a), "r" (&v->counter) \
8036@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8037 return t; \
8038 }
8039
8040+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8041+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8042+
8043 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8044
8045 ATOMIC64_OPS(add, add)
8046@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8047
8048 #undef ATOMIC64_OPS
8049 #undef ATOMIC64_OP_RETURN
8050+#undef __ATOMIC64_OP_RETURN
8051 #undef ATOMIC64_OP
8052+#undef __ATOMIC64_OP
8053+#undef __OVERFLOW_EXTABLE
8054+#undef __OVERFLOW_POST
8055+#undef __OVERFLOW_PRE
8056+#undef __REFCOUNT_OP
8057
8058 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8059
8060-static __inline__ void atomic64_inc(atomic64_t *v)
8061-{
8062- long t;
8063+/*
8064+ * atomic64_inc - increment atomic variable
8065+ * @v: pointer of type atomic64_t
8066+ *
8067+ * Atomically increments @v by 1
8068+ */
8069+#define atomic64_inc(v) atomic64_add(1, (v))
8070+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8071
8072- __asm__ __volatile__(
8073-"1: ldarx %0,0,%2 # atomic64_inc\n\
8074- addic %0,%0,1\n\
8075- stdcx. %0,0,%2 \n\
8076- bne- 1b"
8077- : "=&r" (t), "+m" (v->counter)
8078- : "r" (&v->counter)
8079- : "cc", "xer");
8080+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8081+{
8082+ atomic64_add_unchecked(1, v);
8083 }
8084
8085-static __inline__ long atomic64_inc_return(atomic64_t *v)
8086+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8087 {
8088- long t;
8089-
8090- __asm__ __volatile__(
8091- PPC_ATOMIC_ENTRY_BARRIER
8092-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8093- addic %0,%0,1\n\
8094- stdcx. %0,0,%1 \n\
8095- bne- 1b"
8096- PPC_ATOMIC_EXIT_BARRIER
8097- : "=&r" (t)
8098- : "r" (&v->counter)
8099- : "cc", "xer", "memory");
8100-
8101- return t;
8102+ return atomic64_add_return_unchecked(1, v);
8103 }
8104
8105 /*
8106@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8107 */
8108 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8109
8110-static __inline__ void atomic64_dec(atomic64_t *v)
8111+/*
8112+ * atomic64_dec - decrement atomic variable
8113+ * @v: pointer of type atomic64_t
8114+ *
8115+ * Atomically decrements @v by 1
8116+ */
8117+#define atomic64_dec(v) atomic64_sub(1, (v))
8118+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8119+
8120+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8121 {
8122- long t;
8123-
8124- __asm__ __volatile__(
8125-"1: ldarx %0,0,%2 # atomic64_dec\n\
8126- addic %0,%0,-1\n\
8127- stdcx. %0,0,%2\n\
8128- bne- 1b"
8129- : "=&r" (t), "+m" (v->counter)
8130- : "r" (&v->counter)
8131- : "cc", "xer");
8132-}
8133-
8134-static __inline__ long atomic64_dec_return(atomic64_t *v)
8135-{
8136- long t;
8137-
8138- __asm__ __volatile__(
8139- PPC_ATOMIC_ENTRY_BARRIER
8140-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8141- addic %0,%0,-1\n\
8142- stdcx. %0,0,%1\n\
8143- bne- 1b"
8144- PPC_ATOMIC_EXIT_BARRIER
8145- : "=&r" (t)
8146- : "r" (&v->counter)
8147- : "cc", "xer", "memory");
8148-
8149- return t;
8150+ atomic64_sub_unchecked(1, v);
8151 }
8152
8153 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8154@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8155 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8156 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8157
8158+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8159+{
8160+ return cmpxchg(&(v->counter), old, new);
8161+}
8162+
8163+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8164+{
8165+ return xchg(&(v->counter), new);
8166+}
8167+
8168 /**
8169 * atomic64_add_unless - add unless the number is a given value
8170 * @v: pointer of type atomic64_t
8171@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8172
8173 __asm__ __volatile__ (
8174 PPC_ATOMIC_ENTRY_BARRIER
8175-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8176+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8177 cmpd 0,%0,%3 \n\
8178- beq- 2f \n\
8179- add %0,%2,%0 \n"
8180+ beq- 2f \n"
8181+
8182+#ifdef CONFIG_PAX_REFCOUNT
8183+" mcrxr cr0\n"
8184+" addo. %0,%2,%0\n"
8185+" bf 4*cr0+so, 4f\n"
8186+"3:.long " "0x00c00b00""\n"
8187+"4:\n"
8188+#else
8189+ "add %0,%2,%0 \n"
8190+#endif
8191+
8192 " stdcx. %0,0,%1 \n\
8193 bne- 1b \n"
8194 PPC_ATOMIC_EXIT_BARRIER
8195+"5:"
8196+
8197+#ifdef CONFIG_PAX_REFCOUNT
8198+ _ASM_EXTABLE(3b, 5b)
8199+#endif
8200+
8201 " subf %0,%2,%0 \n\
8202 2:"
8203 : "=&r" (t)
8204diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8205index a3bf5be..e03ba81 100644
8206--- a/arch/powerpc/include/asm/barrier.h
8207+++ b/arch/powerpc/include/asm/barrier.h
8208@@ -76,7 +76,7 @@
8209 do { \
8210 compiletime_assert_atomic_type(*p); \
8211 smp_lwsync(); \
8212- ACCESS_ONCE(*p) = (v); \
8213+ ACCESS_ONCE_RW(*p) = (v); \
8214 } while (0)
8215
8216 #define smp_load_acquire(p) \
8217diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8218index 34a05a1..a1f2c67 100644
8219--- a/arch/powerpc/include/asm/cache.h
8220+++ b/arch/powerpc/include/asm/cache.h
8221@@ -4,6 +4,7 @@
8222 #ifdef __KERNEL__
8223
8224 #include <asm/reg.h>
8225+#include <linux/const.h>
8226
8227 /* bytes per L1 cache line */
8228 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8229@@ -23,7 +24,7 @@
8230 #define L1_CACHE_SHIFT 7
8231 #endif
8232
8233-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8234+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8235
8236 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8237
8238diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8239index 57d289a..b36c98c 100644
8240--- a/arch/powerpc/include/asm/elf.h
8241+++ b/arch/powerpc/include/asm/elf.h
8242@@ -30,6 +30,18 @@
8243
8244 #define ELF_ET_DYN_BASE 0x20000000
8245
8246+#ifdef CONFIG_PAX_ASLR
8247+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8248+
8249+#ifdef __powerpc64__
8250+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8251+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8252+#else
8253+#define PAX_DELTA_MMAP_LEN 15
8254+#define PAX_DELTA_STACK_LEN 15
8255+#endif
8256+#endif
8257+
8258 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8259
8260 /*
8261@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8262 (0x7ff >> (PAGE_SHIFT - 12)) : \
8263 (0x3ffff >> (PAGE_SHIFT - 12)))
8264
8265-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8266-#define arch_randomize_brk arch_randomize_brk
8267-
8268-
8269 #ifdef CONFIG_SPU_BASE
8270 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8271 #define NT_SPU 1
8272diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8273index 8196e9c..d83a9f3 100644
8274--- a/arch/powerpc/include/asm/exec.h
8275+++ b/arch/powerpc/include/asm/exec.h
8276@@ -4,6 +4,6 @@
8277 #ifndef _ASM_POWERPC_EXEC_H
8278 #define _ASM_POWERPC_EXEC_H
8279
8280-extern unsigned long arch_align_stack(unsigned long sp);
8281+#define arch_align_stack(x) ((x) & ~0xfUL)
8282
8283 #endif /* _ASM_POWERPC_EXEC_H */
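
arch_align_stack() loses its randomization on powerpc just as it did on MIPS in the process.c hunk earlier: PaX supplies its own stack randomization (RANDUSTACK), so the arch hook is reduced to pure 16-byte alignment. The new macro's behavior, checked in two lines:

    #include <assert.h>

    #define arch_align_stack(x) ((x) & ~0xfUL)

    int main(void)
    {
        assert(arch_align_stack(0x7fff1234UL) == 0x7fff1230UL);
        assert(arch_align_stack(0x7fff1230UL) == 0x7fff1230UL);  /* idempotent */
        return 0;
    }
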
8284diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8285index 5acabbd..7ea14fa 100644
8286--- a/arch/powerpc/include/asm/kmap_types.h
8287+++ b/arch/powerpc/include/asm/kmap_types.h
8288@@ -10,7 +10,7 @@
8289 * 2 of the License, or (at your option) any later version.
8290 */
8291
8292-#define KM_TYPE_NR 16
8293+#define KM_TYPE_NR 17
8294
8295 #endif /* __KERNEL__ */
8296 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8297diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8298index b8da913..c02b593 100644
8299--- a/arch/powerpc/include/asm/local.h
8300+++ b/arch/powerpc/include/asm/local.h
8301@@ -9,21 +9,65 @@ typedef struct
8302 atomic_long_t a;
8303 } local_t;
8304
8305+typedef struct
8306+{
8307+ atomic_long_unchecked_t a;
8308+} local_unchecked_t;
8309+
8310 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8311
8312 #define local_read(l) atomic_long_read(&(l)->a)
8313+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8314 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8315+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8316
8317 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8318+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8319 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8320+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8321 #define local_inc(l) atomic_long_inc(&(l)->a)
8322+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8323 #define local_dec(l) atomic_long_dec(&(l)->a)
8324+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8325
8326 static __inline__ long local_add_return(long a, local_t *l)
8327 {
8328 long t;
8329
8330 __asm__ __volatile__(
8331+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8332+
8333+#ifdef CONFIG_PAX_REFCOUNT
8334+" mcrxr cr0\n"
8335+" addo. %0,%1,%0\n"
8336+" bf 4*cr0+so, 3f\n"
8337+"2:.long " "0x00c00b00""\n"
8338+#else
8339+" add %0,%1,%0\n"
8340+#endif
8341+
8342+"3:\n"
8343+ PPC405_ERR77(0,%2)
8344+ PPC_STLCX "%0,0,%2 \n\
8345+ bne- 1b"
8346+
8347+#ifdef CONFIG_PAX_REFCOUNT
8348+"\n4:\n"
8349+ _ASM_EXTABLE(2b, 4b)
8350+#endif
8351+
8352+ : "=&r" (t)
8353+ : "r" (a), "r" (&(l->a.counter))
8354+ : "cc", "memory");
8355+
8356+ return t;
8357+}
8358+
8359+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8360+{
8361+ long t;
8362+
8363+ __asm__ __volatile__(
8364 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8365 add %0,%1,%0\n"
8366 PPC405_ERR77(0,%2)
8367@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8368
8369 #define local_cmpxchg(l, o, n) \
8370 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8371+#define local_cmpxchg_unchecked(l, o, n) \
8372+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8373 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8374
8375 /**
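The CONFIG_PAX_REFCOUNT branch above is the PaX overflow-trapping pattern: mcrxr clears the sticky XER[SO]/[OV] bits (copying them to cr0), addo. performs the add while setting XER[SO]/[OV] on signed overflow and, via the record bit, mirroring SO into cr0[so]; bf 4*cr0+so skips the trap word when SO is clear. Otherwise execution hits .long 0x00c00b00, an illegal instruction that the program-check handler added later in this patch recognizes. In C terms, roughly (using GCC's __builtin_add_overflow purely for exposition):

    /* Sketch of local_add_return() semantics under PAX_REFCOUNT. */
    long old = l->a.counter, sum;
    if (__builtin_add_overflow(old, a, &sum)) {
            /* trap word executes: the handler logs the overflow and the
             * _ASM_EXTABLE(2b,4b) fixup skips the PPC_STLCX store, so
             * the counter is left unmodified (saturation by refusal) */
            return sum;
    }
    l->a.counter = sum;     /* really an llarx/stlcx. retry loop */
    return sum;

Note that local_add_return_unchecked() keeps the plain add for counters that are allowed to wrap (statistics and the like).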
8376diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8377index 8565c25..2865190 100644
8378--- a/arch/powerpc/include/asm/mman.h
8379+++ b/arch/powerpc/include/asm/mman.h
8380@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8381 }
8382 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8383
8384-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8385+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8386 {
8387 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8388 }
8389diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8390index 69c0598..2c56964 100644
8391--- a/arch/powerpc/include/asm/page.h
8392+++ b/arch/powerpc/include/asm/page.h
8393@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8394 * and needs to be executable. This means the whole heap ends
8395 * up being executable.
8396 */
8397-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8398- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8399+#define VM_DATA_DEFAULT_FLAGS32 \
8400+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8401+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8402
8403 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8404 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8405@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8406 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8407 #endif
8408
8409+#define ktla_ktva(addr) (addr)
8410+#define ktva_ktla(addr) (addr)
8411+
8412 #ifndef CONFIG_PPC_BOOK3S_64
8413 /*
8414 * Use the top bit of the higher-level page table entries to indicate whether
8415diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8416index d908a46..3753f71 100644
8417--- a/arch/powerpc/include/asm/page_64.h
8418+++ b/arch/powerpc/include/asm/page_64.h
8419@@ -172,15 +172,18 @@ do { \
8420 * stack by default, so in the absence of a PT_GNU_STACK program header
8421 * we turn execute permission off.
8422 */
8423-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8424- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8425+#define VM_STACK_DEFAULT_FLAGS32 \
8426+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8427+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8428
8429 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8430 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8431
8432+#ifndef CONFIG_PAX_PAGEEXEC
8433 #define VM_STACK_DEFAULT_FLAGS \
8434 (is_32bit_task() ? \
8435 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8436+#endif
8437
8438 #include <asm-generic/getorder.h>
8439
8440diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8441index 4b0be20..c15a27d 100644
8442--- a/arch/powerpc/include/asm/pgalloc-64.h
8443+++ b/arch/powerpc/include/asm/pgalloc-64.h
8444@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8445 #ifndef CONFIG_PPC_64K_PAGES
8446
8447 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8448+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8449
8450 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8451 {
8452@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8453 pud_set(pud, (unsigned long)pmd);
8454 }
8455
8456+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8457+{
8458+ pud_populate(mm, pud, pmd);
8459+}
8460+
8461 #define pmd_populate(mm, pmd, pte_page) \
8462 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8463 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8464@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8465 #endif
8466
8467 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8468+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8469
8470 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8471 pte_t *pte)
8472diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8473index 9835ac4..900430f 100644
8474--- a/arch/powerpc/include/asm/pgtable.h
8475+++ b/arch/powerpc/include/asm/pgtable.h
8476@@ -2,6 +2,7 @@
8477 #define _ASM_POWERPC_PGTABLE_H
8478 #ifdef __KERNEL__
8479
8480+#include <linux/const.h>
8481 #ifndef __ASSEMBLY__
8482 #include <linux/mmdebug.h>
8483 #include <linux/mmzone.h>
8484diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8485index 62cfb0c..50c6402 100644
8486--- a/arch/powerpc/include/asm/pte-hash32.h
8487+++ b/arch/powerpc/include/asm/pte-hash32.h
8488@@ -20,6 +20,7 @@
8489 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8490 #define _PAGE_USER 0x004 /* usermode access allowed */
8491 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8492+#define _PAGE_EXEC _PAGE_GUARDED
8493 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8494 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8495 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
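The hash-32 PTE format has no hardware no-execute bit, so PaX repurposes the guarded-storage bit: an instruction fetch from guarded storage raises a fault flagged with 0x10000000, which the reg.h hunk below names DSISR_GUARDED and the fault handler turns into a PAGEEXEC violation. A sketch of the check this enables (mirroring the arch/powerpc/mm/fault.c hunk further down):

    /* Illustration: classify an exec fault on a guarded page. */
    static int pax_pageexec_fault(unsigned long error_code, int is_exec)
    {
            return is_exec && (error_code & DSISR_GUARDED); /* 0x10000000 */
    }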
8496diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8497index af56b5c..f86f3f6 100644
8498--- a/arch/powerpc/include/asm/reg.h
8499+++ b/arch/powerpc/include/asm/reg.h
8500@@ -253,6 +253,7 @@
8501 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8502 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8503 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8504+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8505 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8506 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8507 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8508diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8509index d607df5..08dc9ae 100644
8510--- a/arch/powerpc/include/asm/smp.h
8511+++ b/arch/powerpc/include/asm/smp.h
8512@@ -51,7 +51,7 @@ struct smp_ops_t {
8513 int (*cpu_disable)(void);
8514 void (*cpu_die)(unsigned int nr);
8515 int (*cpu_bootable)(unsigned int nr);
8516-};
8517+} __no_const;
8518
8519 extern void smp_send_debugger_break(void);
8520 extern void start_secondary_resume(void);
8521diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8522index 4dbe072..b803275 100644
8523--- a/arch/powerpc/include/asm/spinlock.h
8524+++ b/arch/powerpc/include/asm/spinlock.h
8525@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8526 __asm__ __volatile__(
8527 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8528 __DO_SIGN_EXTEND
8529-" addic. %0,%0,1\n\
8530- ble- 2f\n"
8531+
8532+#ifdef CONFIG_PAX_REFCOUNT
8533+" mcrxr cr0\n"
8534+" addico. %0,%0,1\n"
8535+" bf 4*cr0+so, 3f\n"
8536+"2:.long " "0x00c00b00""\n"
8537+#else
8538+" addic. %0,%0,1\n"
8539+#endif
8540+
8541+"3:\n"
8542+ "ble- 4f\n"
8543 PPC405_ERR77(0,%1)
8544 " stwcx. %0,0,%1\n\
8545 bne- 1b\n"
8546 PPC_ACQUIRE_BARRIER
8547-"2:" : "=&r" (tmp)
8548+"4:"
8549+
8550+#ifdef CONFIG_PAX_REFCOUNT
8551+ _ASM_EXTABLE(2b,4b)
8552+#endif
8553+
8554+ : "=&r" (tmp)
8555 : "r" (&rw->lock)
8556 : "cr0", "xer", "memory");
8557
8558@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8559 __asm__ __volatile__(
8560 "# read_unlock\n\t"
8561 PPC_RELEASE_BARRIER
8562-"1: lwarx %0,0,%1\n\
8563- addic %0,%0,-1\n"
8564+"1: lwarx %0,0,%1\n"
8565+
8566+#ifdef CONFIG_PAX_REFCOUNT
8567+" mcrxr cr0\n"
8568+" addico. %0,%0,-1\n"
8569+" bf 4*cr0+so, 3f\n"
8570+"2:.long " "0x00c00b00""\n"
8571+#else
8572+" addic. %0,%0,-1\n"
8573+#endif
8574+
8575+"3:\n"
8576 PPC405_ERR77(0,%1)
8577 " stwcx. %0,0,%1\n\
8578 bne- 1b"
8579+
8580+#ifdef CONFIG_PAX_REFCOUNT
8581+"\n4:\n"
8582+ _ASM_EXTABLE(2b, 4b)
8583+#endif
8584+
8585 : "=&r"(tmp)
8586 : "r"(&rw->lock)
8587 : "cr0", "xer", "memory");
8588diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8589index 7248979..80b75de 100644
8590--- a/arch/powerpc/include/asm/thread_info.h
8591+++ b/arch/powerpc/include/asm/thread_info.h
8592@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
8593 #if defined(CONFIG_PPC64)
8594 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8595 #endif
8596+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8597+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8598
8599 /* as above, but as bit values */
8600 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8601@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
8602 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8603 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8604 #define _TIF_NOHZ (1<<TIF_NOHZ)
8605+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8606 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8607 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8608- _TIF_NOHZ)
8609+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8610
8611 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8612 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
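The comment above is the reason TIF_GRSEC_SETXID reuses low bit 6 rather than taking the next free high bit: the syscall entry path tests _TIF_SYSCALL_DOTRACE with andi., whose immediate operand is an unsigned 16-bit field, so every flag in that mask must live in bits 0-15. For illustration:

    #define _TIF_GRSEC_SETXID (1 << 6)   /* 0x0040: fits andi.'s 16-bit immediate */
    /* a hypothetical (1 << 19) == 0x80000 would not be encodable there */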
8613diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8614index a0c071d..49cdc7f 100644
8615--- a/arch/powerpc/include/asm/uaccess.h
8616+++ b/arch/powerpc/include/asm/uaccess.h
8617@@ -58,6 +58,7 @@
8618
8619 #endif
8620
8621+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8622 #define access_ok(type, addr, size) \
8623 (__chk_user_ptr(addr), \
8624 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8625@@ -318,52 +319,6 @@ do { \
8626 extern unsigned long __copy_tofrom_user(void __user *to,
8627 const void __user *from, unsigned long size);
8628
8629-#ifndef __powerpc64__
8630-
8631-static inline unsigned long copy_from_user(void *to,
8632- const void __user *from, unsigned long n)
8633-{
8634- unsigned long over;
8635-
8636- if (access_ok(VERIFY_READ, from, n))
8637- return __copy_tofrom_user((__force void __user *)to, from, n);
8638- if ((unsigned long)from < TASK_SIZE) {
8639- over = (unsigned long)from + n - TASK_SIZE;
8640- return __copy_tofrom_user((__force void __user *)to, from,
8641- n - over) + over;
8642- }
8643- return n;
8644-}
8645-
8646-static inline unsigned long copy_to_user(void __user *to,
8647- const void *from, unsigned long n)
8648-{
8649- unsigned long over;
8650-
8651- if (access_ok(VERIFY_WRITE, to, n))
8652- return __copy_tofrom_user(to, (__force void __user *)from, n);
8653- if ((unsigned long)to < TASK_SIZE) {
8654- over = (unsigned long)to + n - TASK_SIZE;
8655- return __copy_tofrom_user(to, (__force void __user *)from,
8656- n - over) + over;
8657- }
8658- return n;
8659-}
8660-
8661-#else /* __powerpc64__ */
8662-
8663-#define __copy_in_user(to, from, size) \
8664- __copy_tofrom_user((to), (from), (size))
8665-
8666-extern unsigned long copy_from_user(void *to, const void __user *from,
8667- unsigned long n);
8668-extern unsigned long copy_to_user(void __user *to, const void *from,
8669- unsigned long n);
8670-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8671- unsigned long n);
8672-
8673-#endif /* __powerpc64__ */
8674-
8675 static inline unsigned long __copy_from_user_inatomic(void *to,
8676 const void __user *from, unsigned long n)
8677 {
8678@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8679 if (ret == 0)
8680 return 0;
8681 }
8682+
8683+ if (!__builtin_constant_p(n))
8684+ check_object_size(to, n, false);
8685+
8686 return __copy_tofrom_user((__force void __user *)to, from, n);
8687 }
8688
8689@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8690 if (ret == 0)
8691 return 0;
8692 }
8693+
8694+ if (!__builtin_constant_p(n))
8695+ check_object_size(from, n, true);
8696+
8697 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8698 }
8699
8700@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8701 return __copy_to_user_inatomic(to, from, size);
8702 }
8703
8704+#ifndef __powerpc64__
8705+
8706+static inline unsigned long __must_check copy_from_user(void *to,
8707+ const void __user *from, unsigned long n)
8708+{
8709+ unsigned long over;
8710+
8711+ if ((long)n < 0)
8712+ return n;
8713+
8714+ if (access_ok(VERIFY_READ, from, n)) {
8715+ if (!__builtin_constant_p(n))
8716+ check_object_size(to, n, false);
8717+ return __copy_tofrom_user((__force void __user *)to, from, n);
8718+ }
8719+ if ((unsigned long)from < TASK_SIZE) {
8720+ over = (unsigned long)from + n - TASK_SIZE;
8721+ if (!__builtin_constant_p(n - over))
8722+ check_object_size(to, n - over, false);
8723+ return __copy_tofrom_user((__force void __user *)to, from,
8724+ n - over) + over;
8725+ }
8726+ return n;
8727+}
8728+
8729+static inline unsigned long __must_check copy_to_user(void __user *to,
8730+ const void *from, unsigned long n)
8731+{
8732+ unsigned long over;
8733+
8734+ if ((long)n < 0)
8735+ return n;
8736+
8737+ if (access_ok(VERIFY_WRITE, to, n)) {
8738+ if (!__builtin_constant_p(n))
8739+ check_object_size(from, n, true);
8740+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8741+ }
8742+ if ((unsigned long)to < TASK_SIZE) {
8743+ over = (unsigned long)to + n - TASK_SIZE;
8744+ if (!__builtin_constant_p(n - over))
8745+ check_object_size(from, n - over, true);
8746+ return __copy_tofrom_user(to, (__force void __user *)from,
8747+ n - over) + over;
8748+ }
8749+ return n;
8750+}
8751+
8752+#else /* __powerpc64__ */
8753+
8754+#define __copy_in_user(to, from, size) \
8755+ __copy_tofrom_user((to), (from), (size))
8756+
8757+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8758+{
8759+ if ((long)n < 0 || n > INT_MAX)
8760+ return n;
8761+
8762+ if (!__builtin_constant_p(n))
8763+ check_object_size(to, n, false);
8764+
8765+ if (likely(access_ok(VERIFY_READ, from, n)))
8766+ n = __copy_from_user(to, from, n);
8767+ else
8768+ memset(to, 0, n);
8769+ return n;
8770+}
8771+
8772+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8773+{
8774+ if ((long)n < 0 || n > INT_MAX)
8775+ return n;
8776+
8777+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8778+ if (!__builtin_constant_p(n))
8779+ check_object_size(from, n, true);
8780+ n = __copy_to_user(to, from, n);
8781+ }
8782+ return n;
8783+}
8784+
8785+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8786+ unsigned long n);
8787+
8788+#endif /* __powerpc64__ */
8789+
8790 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8791
8792 static inline unsigned long clear_user(void __user *addr, unsigned long size)
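The reworked copy helpers above add two layers of hardening. First, the (long)n < 0 guard (plus the n > INT_MAX check on 64-bit) catches callers that pass a negative length through an unsigned parameter, where it would otherwise become an enormous copy. Second, check_object_size() is the PAX_USERCOPY entry point that validates the kernel-side buffer, rejecting copies that span slab objects or run off the current stack frame; compile-time-constant sizes are skipped because they are verifiable statically. For illustration (ubuf and kbuf are placeholders, not names from this patch):

    /* A negative length wraps when converted to unsigned long: */
    int len = -1;
    copy_to_user(ubuf, kbuf, len);  /* n == ULONG_MAX: now returns n
                                       immediately instead of leaking */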
8793diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8794index 502cf69..53936a1 100644
8795--- a/arch/powerpc/kernel/Makefile
8796+++ b/arch/powerpc/kernel/Makefile
8797@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8798 CFLAGS_btext.o += -fPIC
8799 endif
8800
8801+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8802+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8803+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8804+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8805+
8806 ifdef CONFIG_FUNCTION_TRACER
8807 # Do not trace early boot code
8808 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8809@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8810 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8811 endif
8812
8813+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8814+
8815 obj-y := cputable.o ptrace.o syscalls.o \
8816 irq.o align.o signal_32.o pmc.o vdso.o \
8817 process.o systbl.o idle.o \
8818diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8819index 3e68d1c..72a5ee6 100644
8820--- a/arch/powerpc/kernel/exceptions-64e.S
8821+++ b/arch/powerpc/kernel/exceptions-64e.S
8822@@ -1010,6 +1010,7 @@ storage_fault_common:
8823 std r14,_DAR(r1)
8824 std r15,_DSISR(r1)
8825 addi r3,r1,STACK_FRAME_OVERHEAD
8826+ bl save_nvgprs
8827 mr r4,r14
8828 mr r5,r15
8829 ld r14,PACA_EXGEN+EX_R14(r13)
8830@@ -1018,8 +1019,7 @@ storage_fault_common:
8831 cmpdi r3,0
8832 bne- 1f
8833 b ret_from_except_lite
8834-1: bl save_nvgprs
8835- mr r5,r3
8836+1: mr r5,r3
8837 addi r3,r1,STACK_FRAME_OVERHEAD
8838 ld r4,_DAR(r1)
8839 bl bad_page_fault
8840diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8841index 9519e6b..13f6c38 100644
8842--- a/arch/powerpc/kernel/exceptions-64s.S
8843+++ b/arch/powerpc/kernel/exceptions-64s.S
8844@@ -1599,10 +1599,10 @@ handle_page_fault:
8845 11: ld r4,_DAR(r1)
8846 ld r5,_DSISR(r1)
8847 addi r3,r1,STACK_FRAME_OVERHEAD
8848+ bl save_nvgprs
8849 bl do_page_fault
8850 cmpdi r3,0
8851 beq+ 12f
8852- bl save_nvgprs
8853 mr r5,r3
8854 addi r3,r1,STACK_FRAME_OVERHEAD
8855 lwz r4,_DAR(r1)
8856diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8857index 4509603..cdb491f 100644
8858--- a/arch/powerpc/kernel/irq.c
8859+++ b/arch/powerpc/kernel/irq.c
8860@@ -460,6 +460,8 @@ void migrate_irqs(void)
8861 }
8862 #endif
8863
8864+extern void gr_handle_kernel_exploit(void);
8865+
8866 static inline void check_stack_overflow(void)
8867 {
8868 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8869@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8870 pr_err("do_IRQ: stack overflow: %ld\n",
8871 sp - sizeof(struct thread_info));
8872 dump_stack();
8873+ gr_handle_kernel_exploit();
8874 }
8875 #endif
8876 }
8877diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8878index c94d2e0..992a9ce 100644
8879--- a/arch/powerpc/kernel/module_32.c
8880+++ b/arch/powerpc/kernel/module_32.c
8881@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8882 me->arch.core_plt_section = i;
8883 }
8884 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8885- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8886+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8887 return -ENOEXEC;
8888 }
8889
8890@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8891
8892 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8893 /* Init, or core PLT? */
8894- if (location >= mod->module_core
8895- && location < mod->module_core + mod->core_size)
8896+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8897+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8898 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8899- else
8900+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8901+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8902 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8903+ else {
8904+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8905+ return ~0UL;
8906+ }
8907
8908 /* Find this entry, or if that fails, the next avail. entry */
8909 while (entry->jump[0]) {
8910@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8911 }
8912 #ifdef CONFIG_DYNAMIC_FTRACE
8913 module->arch.tramp =
8914- do_plt_call(module->module_core,
8915+ do_plt_call(module->module_core_rx,
8916 (unsigned long)ftrace_caller,
8917 sechdrs, module);
8918 #endif
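Under grsecurity each module is split into an rx (text) region and an rw (data) region, so the old two-range PLT classification above becomes a four-range test, and an address in none of them is now reported and rejected instead of being silently attributed to the init PLT. A hedged sketch of the containment test being open-coded in do_plt_call():

    /* Illustration of the per-region check used above. */
    static int in_region(const void *loc, const void *base, unsigned long size)
    {
            return base && loc >= base && loc < base + size;
    }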
8919diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8920index b4cc7be..1fe8bb3 100644
8921--- a/arch/powerpc/kernel/process.c
8922+++ b/arch/powerpc/kernel/process.c
8923@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8924 * Lookup NIP late so we have the best change of getting the
8925 * above info out without failing
8926 */
8927- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8928- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8929+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8930+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8931 #endif
8932 show_stack(current, (unsigned long *) regs->gpr[1]);
8933 if (!user_mode(regs))
8934@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8935 newsp = stack[0];
8936 ip = stack[STACK_FRAME_LR_SAVE];
8937 if (!firstframe || ip != lr) {
8938- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8939+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8940 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8941 if ((ip == rth) && curr_frame >= 0) {
8942- printk(" (%pS)",
8943+ printk(" (%pA)",
8944 (void *)current->ret_stack[curr_frame].ret);
8945 curr_frame--;
8946 }
8947@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8948 struct pt_regs *regs = (struct pt_regs *)
8949 (sp + STACK_FRAME_OVERHEAD);
8950 lr = regs->link;
8951- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8952+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8953 regs->trap, (void *)regs->nip, (void *)lr);
8954 firstframe = 1;
8955 }
8956@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8957 mtspr(SPRN_CTRLT, ctrl);
8958 }
8959 #endif /* CONFIG_PPC64 */
8960-
8961-unsigned long arch_align_stack(unsigned long sp)
8962-{
8963- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8964- sp -= get_random_int() & ~PAGE_MASK;
8965- return sp & ~0xf;
8966-}
8967-
8968-static inline unsigned long brk_rnd(void)
8969-{
8970- unsigned long rnd = 0;
8971-
8972- /* 8MB for 32bit, 1GB for 64bit */
8973- if (is_32bit_task())
8974- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8975- else
8976- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8977-
8978- return rnd << PAGE_SHIFT;
8979-}
8980-
8981-unsigned long arch_randomize_brk(struct mm_struct *mm)
8982-{
8983- unsigned long base = mm->brk;
8984- unsigned long ret;
8985-
8986-#ifdef CONFIG_PPC_STD_MMU_64
8987- /*
8988- * If we are using 1TB segments and we are allowed to randomise
8989- * the heap, we can put it above 1TB so it is backed by a 1TB
8990- * segment. Otherwise the heap will be in the bottom 1TB
8991- * which always uses 256MB segments and this may result in a
8992- * performance penalty.
8993- */
8994- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8995- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8996-#endif
8997-
8998- ret = PAGE_ALIGN(base + brk_rnd());
8999-
9000- if (ret < mm->brk)
9001- return mm->brk;
9002-
9003- return ret;
9004-}
9005-
9006diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9007index f21897b..28c0428 100644
9008--- a/arch/powerpc/kernel/ptrace.c
9009+++ b/arch/powerpc/kernel/ptrace.c
9010@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9011 return ret;
9012 }
9013
9014+#ifdef CONFIG_GRKERNSEC_SETXID
9015+extern void gr_delayed_cred_worker(void);
9016+#endif
9017+
9018 /*
9019 * We must return the syscall number to actually look up in the table.
9020 * This can be -1L to skip running any syscall at all.
9021@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9022
9023 secure_computing_strict(regs->gpr[0]);
9024
9025+#ifdef CONFIG_GRKERNSEC_SETXID
9026+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9027+ gr_delayed_cred_worker();
9028+#endif
9029+
9030 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9031 tracehook_report_syscall_entry(regs))
9032 /*
9033@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9034 {
9035 int step;
9036
9037+#ifdef CONFIG_GRKERNSEC_SETXID
9038+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9039+ gr_delayed_cred_worker();
9040+#endif
9041+
9042 audit_syscall_exit(regs);
9043
9044 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9045diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9046index d3a831a..3a33123 100644
9047--- a/arch/powerpc/kernel/signal_32.c
9048+++ b/arch/powerpc/kernel/signal_32.c
9049@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9050 /* Save user registers on the stack */
9051 frame = &rt_sf->uc.uc_mcontext;
9052 addr = frame;
9053- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9054+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9055 sigret = 0;
9056 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9057 } else {
9058diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9059index c7c24d2..1bf7039 100644
9060--- a/arch/powerpc/kernel/signal_64.c
9061+++ b/arch/powerpc/kernel/signal_64.c
9062@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9063 current->thread.fp_state.fpscr = 0;
9064
9065 /* Set up to return from userspace. */
9066- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9067+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9068 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9069 } else {
9070 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9071diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9072index 19e4744..28a8d7b 100644
9073--- a/arch/powerpc/kernel/traps.c
9074+++ b/arch/powerpc/kernel/traps.c
9075@@ -36,6 +36,7 @@
9076 #include <linux/debugfs.h>
9077 #include <linux/ratelimit.h>
9078 #include <linux/context_tracking.h>
9079+#include <linux/uaccess.h>
9080
9081 #include <asm/emulated_ops.h>
9082 #include <asm/pgtable.h>
9083@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9084 return flags;
9085 }
9086
9087+extern void gr_handle_kernel_exploit(void);
9088+
9089 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9090 int signr)
9091 {
9092@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9093 panic("Fatal exception in interrupt");
9094 if (panic_on_oops)
9095 panic("Fatal exception");
9096+
9097+ gr_handle_kernel_exploit();
9098+
9099 do_exit(signr);
9100 }
9101
9102@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9103 enum ctx_state prev_state = exception_enter();
9104 unsigned int reason = get_reason(regs);
9105
9106+#ifdef CONFIG_PAX_REFCOUNT
9107+ unsigned int bkpt;
9108+ const struct exception_table_entry *entry;
9109+
9110+ if (reason & REASON_ILLEGAL) {
9111+ /* Check if PaX bad instruction */
9112+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9113+ current->thread.trap_nr = 0;
9114+ pax_report_refcount_overflow(regs);
9115+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9116+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9117+ regs->nip = entry->fixup;
9118+ return;
9119+ }
9120+ /* fixup_exception() could not handle */
9121+ goto bail;
9122+ }
9123+ }
9124+#endif
9125+
9126 /* We can now get here via a FP Unavailable exception if the core
9127 * has no FPU, in that case the reason flags will be 0 */
9128
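This hunk is the receiving end of the 0x00c00b00 trap word planted by the REFCOUNT asm earlier in this patch. A compact model of the added path:

    /* Minimal model of the PAX_REFCOUNT branch above (illustration). */
    static int handle_pax_refcount_trap(struct pt_regs *regs)
    {
            unsigned int insn;
            const struct exception_table_entry *entry;

            if (probe_kernel_address(regs->nip, insn) || insn != 0xc00b00)
                    return 0;                    /* not the PaX trap word */
            current->thread.trap_nr = 0;
            pax_report_refcount_overflow(regs);
            entry = search_exception_tables(regs->nip);
            if (entry)
                    regs->nip = entry->fixup;    /* resume past the poisoned store */
            return 1;
    }

Since powerpc has no fixup_exception(), the patch consults the exception table directly and falls through to the normal oops path when no fixup entry exists.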
9129diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9130index 305eb0d..accc5b40 100644
9131--- a/arch/powerpc/kernel/vdso.c
9132+++ b/arch/powerpc/kernel/vdso.c
9133@@ -34,6 +34,7 @@
9134 #include <asm/vdso.h>
9135 #include <asm/vdso_datapage.h>
9136 #include <asm/setup.h>
9137+#include <asm/mman.h>
9138
9139 #undef DEBUG
9140
9141@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9142 vdso_base = VDSO32_MBASE;
9143 #endif
9144
9145- current->mm->context.vdso_base = 0;
9146+ current->mm->context.vdso_base = ~0UL;
9147
9148 /* vDSO has a problem and was disabled, just don't "enable" it for the
9149 * process
9150@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9151 vdso_base = get_unmapped_area(NULL, vdso_base,
9152 (vdso_pages << PAGE_SHIFT) +
9153 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9154- 0, 0);
9155+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9156 if (IS_ERR_VALUE(vdso_base)) {
9157 rc = vdso_base;
9158 goto fail_mmapsem;
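Two changes here: vdso_base now uses ~0UL rather than 0 as the "no vDSO" sentinel (0 is a conceivable mapping address, and the signal_32.c/signal_64.c hunks below are updated to test against ~0UL accordingly), and the get_unmapped_area() call passes MAP_PRIVATE | MAP_EXECUTABLE so PaX's area-selection logic knows it is placing an executable mapping. The consumer side, mirroring the signal code below:

    /* Illustration of the consumer in the signal paths below: */
    if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
            sigret = 0;
            tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
    } else {
            /* no vDSO: a sigreturn trampoline is written to the stack frame */
    }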
9159diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9160index 27c0fac..6ec4a32 100644
9161--- a/arch/powerpc/kvm/powerpc.c
9162+++ b/arch/powerpc/kvm/powerpc.c
9163@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9164 }
9165 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9166
9167-int kvm_arch_init(void *opaque)
9168+int kvm_arch_init(const void *opaque)
9169 {
9170 return 0;
9171 }
9172diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9173index 5eea6f3..5d10396 100644
9174--- a/arch/powerpc/lib/usercopy_64.c
9175+++ b/arch/powerpc/lib/usercopy_64.c
9176@@ -9,22 +9,6 @@
9177 #include <linux/module.h>
9178 #include <asm/uaccess.h>
9179
9180-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9181-{
9182- if (likely(access_ok(VERIFY_READ, from, n)))
9183- n = __copy_from_user(to, from, n);
9184- else
9185- memset(to, 0, n);
9186- return n;
9187-}
9188-
9189-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9190-{
9191- if (likely(access_ok(VERIFY_WRITE, to, n)))
9192- n = __copy_to_user(to, from, n);
9193- return n;
9194-}
9195-
9196 unsigned long copy_in_user(void __user *to, const void __user *from,
9197 unsigned long n)
9198 {
9199@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9200 return n;
9201 }
9202
9203-EXPORT_SYMBOL(copy_from_user);
9204-EXPORT_SYMBOL(copy_to_user);
9205 EXPORT_SYMBOL(copy_in_user);
9206
9207diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9208index b396868..3eb6b9f 100644
9209--- a/arch/powerpc/mm/fault.c
9210+++ b/arch/powerpc/mm/fault.c
9211@@ -33,6 +33,10 @@
9212 #include <linux/ratelimit.h>
9213 #include <linux/context_tracking.h>
9214 #include <linux/hugetlb.h>
9215+#include <linux/slab.h>
9216+#include <linux/pagemap.h>
9217+#include <linux/compiler.h>
9218+#include <linux/unistd.h>
9219
9220 #include <asm/firmware.h>
9221 #include <asm/page.h>
9222@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9223 }
9224 #endif
9225
9226+#ifdef CONFIG_PAX_PAGEEXEC
9227+/*
9228+ * PaX: decide what to do with offenders (regs->nip = fault address)
9229+ *
9230+ * returns 1 when task should be killed
9231+ */
9232+static int pax_handle_fetch_fault(struct pt_regs *regs)
9233+{
9234+ return 1;
9235+}
9236+
9237+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9238+{
9239+ unsigned long i;
9240+
9241+ printk(KERN_ERR "PAX: bytes at PC: ");
9242+ for (i = 0; i < 5; i++) {
9243+ unsigned int c;
9244+ if (get_user(c, (unsigned int __user *)pc+i))
9245+ printk(KERN_CONT "???????? ");
9246+ else
9247+ printk(KERN_CONT "%08x ", c);
9248+ }
9249+ printk("\n");
9250+}
9251+#endif
9252+
9253 /*
9254 * Check whether the instruction at regs->nip is a store using
9255 * an update addressing form which will update r1.
9256@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9257 * indicate errors in DSISR but can validly be set in SRR1.
9258 */
9259 if (trap == 0x400)
9260- error_code &= 0x48200000;
9261+ error_code &= 0x58200000;
9262 else
9263 is_write = error_code & DSISR_ISSTORE;
9264 #else
9265@@ -383,12 +414,16 @@ good_area:
9266 * "undefined". Of those that can be set, this is the only
9267 * one which seems bad.
9268 */
9269- if (error_code & 0x10000000)
9270+ if (error_code & DSISR_GUARDED)
9271 /* Guarded storage error. */
9272 goto bad_area;
9273 #endif /* CONFIG_8xx */
9274
9275 if (is_exec) {
9276+#ifdef CONFIG_PPC_STD_MMU
9277+ if (error_code & DSISR_GUARDED)
9278+ goto bad_area;
9279+#endif
9280 /*
9281 * Allow execution from readable areas if the MMU does not
9282 * provide separate controls over reading and executing.
9283@@ -483,6 +518,23 @@ bad_area:
9284 bad_area_nosemaphore:
9285 /* User mode accesses cause a SIGSEGV */
9286 if (user_mode(regs)) {
9287+
9288+#ifdef CONFIG_PAX_PAGEEXEC
9289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9290+#ifdef CONFIG_PPC_STD_MMU
9291+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9292+#else
9293+ if (is_exec && regs->nip == address) {
9294+#endif
9295+ switch (pax_handle_fetch_fault(regs)) {
9296+ }
9297+
9298+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9299+ do_group_exit(SIGKILL);
9300+ }
9301+ }
9302+#endif
9303+
9304 _exception(SIGSEGV, regs, code, address);
9305 goto bail;
9306 }
9307diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9308index cb8bdbe..cde4bc7 100644
9309--- a/arch/powerpc/mm/mmap.c
9310+++ b/arch/powerpc/mm/mmap.c
9311@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9312 return sysctl_legacy_va_layout;
9313 }
9314
9315-static unsigned long mmap_rnd(void)
9316+static unsigned long mmap_rnd(struct mm_struct *mm)
9317 {
9318 unsigned long rnd = 0;
9319
9320+#ifdef CONFIG_PAX_RANDMMAP
9321+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9322+#endif
9323+
9324 if (current->flags & PF_RANDOMIZE) {
9325 /* 8MB for 32bit, 1GB for 64bit */
9326 if (is_32bit_task())
9327@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9328 return rnd << PAGE_SHIFT;
9329 }
9330
9331-static inline unsigned long mmap_base(void)
9332+static inline unsigned long mmap_base(struct mm_struct *mm)
9333 {
9334 unsigned long gap = rlimit(RLIMIT_STACK);
9335
9336@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9337 else if (gap > MAX_GAP)
9338 gap = MAX_GAP;
9339
9340- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9341+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9342 }
9343
9344 /*
9345@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9346 */
9347 if (mmap_is_legacy()) {
9348 mm->mmap_base = TASK_UNMAPPED_BASE;
9349+
9350+#ifdef CONFIG_PAX_RANDMMAP
9351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9352+ mm->mmap_base += mm->delta_mmap;
9353+#endif
9354+
9355 mm->get_unmapped_area = arch_get_unmapped_area;
9356 } else {
9357- mm->mmap_base = mmap_base();
9358+ mm->mmap_base = mmap_base(mm);
9359+
9360+#ifdef CONFIG_PAX_RANDMMAP
9361+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9362+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9363+#endif
9364+
9365 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9366 }
9367 }
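With MF_PAX_RANDMMAP set, the kernel's own PF_RANDOMIZE entropy is suppressed (the #ifdef above makes mmap_rnd() contribute 0) and PaX's pre-computed per-mm deltas are applied instead. Condensed, the resulting top-down base is:

    /* Condensed restatement of the layout math above. */
    static unsigned long pax_mmap_base(struct mm_struct *mm, unsigned long gap)
    {
            unsigned long base = PAGE_ALIGN(TASK_SIZE - gap);  /* mmap_rnd() == 0 */
            if (mm->pax_flags & MF_PAX_RANDMMAP)
                    base -= mm->delta_mmap + mm->delta_stack;  /* PaX entropy */
            return base;
    }

The legacy (bottom-up) case adds delta_mmap to TASK_UNMAPPED_BASE instead.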
9368diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9369index 0f432a7..abfe841 100644
9370--- a/arch/powerpc/mm/slice.c
9371+++ b/arch/powerpc/mm/slice.c
9372@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9373 if ((mm->task_size - len) < addr)
9374 return 0;
9375 vma = find_vma(mm, addr);
9376- return (!vma || (addr + len) <= vma->vm_start);
9377+ return check_heap_stack_gap(vma, addr, len, 0);
9378 }
9379
9380 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9381@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9382 info.align_offset = 0;
9383
9384 addr = TASK_UNMAPPED_BASE;
9385+
9386+#ifdef CONFIG_PAX_RANDMMAP
9387+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9388+ addr += mm->delta_mmap;
9389+#endif
9390+
9391 while (addr < TASK_SIZE) {
9392 info.low_limit = addr;
9393 if (!slice_scan_available(addr, available, 1, &addr))
9394@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9395 if (fixed && addr > (mm->task_size - len))
9396 return -ENOMEM;
9397
9398+#ifdef CONFIG_PAX_RANDMMAP
9399+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9400+ addr = 0;
9401+#endif
9402+
9403 /* If hint, make sure it matches our alignment restrictions */
9404 if (!fixed && addr) {
9405 addr = _ALIGN_UP(addr, 1ul << pshift);
9406diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9407index d966bbe..372124a 100644
9408--- a/arch/powerpc/platforms/cell/spufs/file.c
9409+++ b/arch/powerpc/platforms/cell/spufs/file.c
9410@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9411 return VM_FAULT_NOPAGE;
9412 }
9413
9414-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9415+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9416 unsigned long address,
9417- void *buf, int len, int write)
9418+ void *buf, size_t len, int write)
9419 {
9420 struct spu_context *ctx = vma->vm_file->private_data;
9421 unsigned long offset = address - vma->vm_start;
9422diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9423index fa934fe..c296056 100644
9424--- a/arch/s390/include/asm/atomic.h
9425+++ b/arch/s390/include/asm/atomic.h
9426@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9427 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9428 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9429
9430+#define atomic64_read_unchecked(v) atomic64_read(v)
9431+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9432+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9433+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9434+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9435+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9436+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9437+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9438+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9439+
9440 #endif /* __ARCH_S390_ATOMIC__ */
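s390 gets no overflow-trapping instrumentation in this patch, so the *_unchecked operations are defined as plain aliases; they exist so that generic code converted to the checked/unchecked split compiles on every architecture. For illustration (assuming the linux/types.h change elsewhere in this patch that makes atomic64_unchecked_t an alias of atomic64_t on such architectures):

    atomic64_unchecked_t events = ATOMIC64_INIT(0);
    atomic64_inc_unchecked(&events);   /* expands to atomic64_inc(&events) */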
9441diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9442index 8d72471..5322500 100644
9443--- a/arch/s390/include/asm/barrier.h
9444+++ b/arch/s390/include/asm/barrier.h
9445@@ -42,7 +42,7 @@
9446 do { \
9447 compiletime_assert_atomic_type(*p); \
9448 barrier(); \
9449- ACCESS_ONCE(*p) = (v); \
9450+ ACCESS_ONCE_RW(*p) = (v); \
9451 } while (0)
9452
9453 #define smp_load_acquire(p) \
9454diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9455index 4d7ccac..d03d0ad 100644
9456--- a/arch/s390/include/asm/cache.h
9457+++ b/arch/s390/include/asm/cache.h
9458@@ -9,8 +9,10 @@
9459 #ifndef __ARCH_S390_CACHE_H
9460 #define __ARCH_S390_CACHE_H
9461
9462-#define L1_CACHE_BYTES 256
9463+#include <linux/const.h>
9464+
9465 #define L1_CACHE_SHIFT 8
9466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9467 #define NET_SKB_PAD 32
9468
9469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9470diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9471index c9c875d..b4b0e4c 100644
9472--- a/arch/s390/include/asm/elf.h
9473+++ b/arch/s390/include/asm/elf.h
9474@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9475 the loader. We need to make sure that it is out of the way of the program
9476 that it will "exec", and that there is sufficient room for the brk. */
9477
9478-extern unsigned long randomize_et_dyn(void);
9479-#define ELF_ET_DYN_BASE randomize_et_dyn()
9480+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9481+
9482+#ifdef CONFIG_PAX_ASLR
9483+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9484+
9485+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9486+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9487+#endif
9488
9489 /* This yields a mask that user programs can use to figure out what
9490 instruction set this CPU supports. */
9491@@ -225,9 +231,6 @@ struct linux_binprm;
9492 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9493 int arch_setup_additional_pages(struct linux_binprm *, int);
9494
9495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9496-#define arch_randomize_brk arch_randomize_brk
9497-
9498 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9499
9500 #endif
9501diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9502index c4a93d6..4d2a9b4 100644
9503--- a/arch/s390/include/asm/exec.h
9504+++ b/arch/s390/include/asm/exec.h
9505@@ -7,6 +7,6 @@
9506 #ifndef __ASM_EXEC_H
9507 #define __ASM_EXEC_H
9508
9509-extern unsigned long arch_align_stack(unsigned long sp);
9510+#define arch_align_stack(x) ((x) & ~0xfUL)
9511
9512 #endif /* __ASM_EXEC_H */
9513diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9514index cd4c68e..6764641 100644
9515--- a/arch/s390/include/asm/uaccess.h
9516+++ b/arch/s390/include/asm/uaccess.h
9517@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9518 __range_ok((unsigned long)(addr), (size)); \
9519 })
9520
9521+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9522 #define access_ok(type, addr, size) __access_ok(addr, size)
9523
9524 /*
9525@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9526 copy_to_user(void __user *to, const void *from, unsigned long n)
9527 {
9528 might_fault();
9529+
9530+ if ((long)n < 0)
9531+ return n;
9532+
9533 return __copy_to_user(to, from, n);
9534 }
9535
9536@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9537 static inline unsigned long __must_check
9538 copy_from_user(void *to, const void __user *from, unsigned long n)
9539 {
9540- unsigned int sz = __compiletime_object_size(to);
9541+ size_t sz = __compiletime_object_size(to);
9542
9543 might_fault();
9544- if (unlikely(sz != -1 && sz < n)) {
9545+
9546+ if ((long)n < 0)
9547+ return n;
9548+
9549+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9550 copy_from_user_overflow();
9551 return n;
9552 }
9553diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9554index 2ca9586..55682a9 100644
9555--- a/arch/s390/kernel/module.c
9556+++ b/arch/s390/kernel/module.c
9557@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9558
9559 /* Increase core size by size of got & plt and set start
9560 offsets for got and plt. */
9561- me->core_size = ALIGN(me->core_size, 4);
9562- me->arch.got_offset = me->core_size;
9563- me->core_size += me->arch.got_size;
9564- me->arch.plt_offset = me->core_size;
9565- me->core_size += me->arch.plt_size;
9566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9567+ me->arch.got_offset = me->core_size_rw;
9568+ me->core_size_rw += me->arch.got_size;
9569+ me->arch.plt_offset = me->core_size_rx;
9570+ me->core_size_rx += me->arch.plt_size;
9571 return 0;
9572 }
9573
9574@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9575 if (info->got_initialized == 0) {
9576 Elf_Addr *gotent;
9577
9578- gotent = me->module_core + me->arch.got_offset +
9579+ gotent = me->module_core_rw + me->arch.got_offset +
9580 info->got_offset;
9581 *gotent = val;
9582 info->got_initialized = 1;
9583@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9584 rc = apply_rela_bits(loc, val, 0, 64, 0);
9585 else if (r_type == R_390_GOTENT ||
9586 r_type == R_390_GOTPLTENT) {
9587- val += (Elf_Addr) me->module_core - loc;
9588+ val += (Elf_Addr) me->module_core_rw - loc;
9589 rc = apply_rela_bits(loc, val, 1, 32, 1);
9590 }
9591 break;
9592@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9594 if (info->plt_initialized == 0) {
9595 unsigned int *ip;
9596- ip = me->module_core + me->arch.plt_offset +
9597+ ip = me->module_core_rx + me->arch.plt_offset +
9598 info->plt_offset;
9599 #ifndef CONFIG_64BIT
9600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9601@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9602 val - loc + 0xffffUL < 0x1ffffeUL) ||
9603 (r_type == R_390_PLT32DBL &&
9604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9605- val = (Elf_Addr) me->module_core +
9606+ val = (Elf_Addr) me->module_core_rx +
9607 me->arch.plt_offset +
9608 info->plt_offset;
9609 val += rela->r_addend - loc;
9610@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9613 val = val + rela->r_addend -
9614- ((Elf_Addr) me->module_core + me->arch.got_offset);
9615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9616 if (r_type == R_390_GOTOFF16)
9617 rc = apply_rela_bits(loc, val, 0, 16, 0);
9618 else if (r_type == R_390_GOTOFF32)
9619@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9620 break;
9621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9625 rela->r_addend - loc;
9626 if (r_type == R_390_GOTPC)
9627 rc = apply_rela_bits(loc, val, 1, 32, 0);
9628diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9629index 13fc097..84d375f 100644
9630--- a/arch/s390/kernel/process.c
9631+++ b/arch/s390/kernel/process.c
9632@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
9633 }
9634 return 0;
9635 }
9636-
9637-unsigned long arch_align_stack(unsigned long sp)
9638-{
9639- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9640- sp -= get_random_int() & ~PAGE_MASK;
9641- return sp & ~0xf;
9642-}
9643-
9644-static inline unsigned long brk_rnd(void)
9645-{
9646- /* 8MB for 32bit, 1GB for 64bit */
9647- if (is_32bit_task())
9648- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9649- else
9650- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9651-}
9652-
9653-unsigned long arch_randomize_brk(struct mm_struct *mm)
9654-{
9655- unsigned long ret;
9656-
9657- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9658- return (ret > mm->brk) ? ret : mm->brk;
9659-}
9660diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9661index 179a2c2..371e85c 100644
9662--- a/arch/s390/mm/mmap.c
9663+++ b/arch/s390/mm/mmap.c
9664@@ -204,9 +204,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9665 */
9666 if (mmap_is_legacy()) {
9667 mm->mmap_base = mmap_base_legacy();
9668+
9669+#ifdef CONFIG_PAX_RANDMMAP
9670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9671+ mm->mmap_base += mm->delta_mmap;
9672+#endif
9673+
9674 mm->get_unmapped_area = arch_get_unmapped_area;
9675 } else {
9676 mm->mmap_base = mmap_base();
9677+
9678+#ifdef CONFIG_PAX_RANDMMAP
9679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9681+#endif
9682+
9683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9684 }
9685 }
9686@@ -279,9 +291,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9687 */
9688 if (mmap_is_legacy()) {
9689 mm->mmap_base = mmap_base_legacy();
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base += mm->delta_mmap;
9694+#endif
9695+
9696 mm->get_unmapped_area = s390_get_unmapped_area;
9697 } else {
9698 mm->mmap_base = mmap_base();
9699+
9700+#ifdef CONFIG_PAX_RANDMMAP
9701+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9702+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9703+#endif
9704+
9705 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9706 }
9707 }
9708diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9709index ae3d59f..f65f075 100644
9710--- a/arch/score/include/asm/cache.h
9711+++ b/arch/score/include/asm/cache.h
9712@@ -1,7 +1,9 @@
9713 #ifndef _ASM_SCORE_CACHE_H
9714 #define _ASM_SCORE_CACHE_H
9715
9716+#include <linux/const.h>
9717+
9718 #define L1_CACHE_SHIFT 4
9719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9721
9722 #endif /* _ASM_SCORE_CACHE_H */
9723diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9724index f9f3cd5..58ff438 100644
9725--- a/arch/score/include/asm/exec.h
9726+++ b/arch/score/include/asm/exec.h
9727@@ -1,6 +1,6 @@
9728 #ifndef _ASM_SCORE_EXEC_H
9729 #define _ASM_SCORE_EXEC_H
9730
9731-extern unsigned long arch_align_stack(unsigned long sp);
9732+#define arch_align_stack(x) (x)
9733
9734 #endif /* _ASM_SCORE_EXEC_H */
9735diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9736index a1519ad3..e8ac1ff 100644
9737--- a/arch/score/kernel/process.c
9738+++ b/arch/score/kernel/process.c
9739@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9740
9741 return task_pt_regs(task)->cp0_epc;
9742 }
9743-
9744-unsigned long arch_align_stack(unsigned long sp)
9745-{
9746- return sp;
9747-}
9748diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9749index ef9e555..331bd29 100644
9750--- a/arch/sh/include/asm/cache.h
9751+++ b/arch/sh/include/asm/cache.h
9752@@ -9,10 +9,11 @@
9753 #define __ASM_SH_CACHE_H
9754 #ifdef __KERNEL__
9755
9756+#include <linux/const.h>
9757 #include <linux/init.h>
9758 #include <cpu/cache.h>
9759
9760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9761+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9762
9763 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9764
9765diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9766index 6777177..cb5e44f 100644
9767--- a/arch/sh/mm/mmap.c
9768+++ b/arch/sh/mm/mmap.c
9769@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9770 struct mm_struct *mm = current->mm;
9771 struct vm_area_struct *vma;
9772 int do_colour_align;
9773+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9774 struct vm_unmapped_area_info info;
9775
9776 if (flags & MAP_FIXED) {
9777@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9778 if (filp || (flags & MAP_SHARED))
9779 do_colour_align = 1;
9780
9781+#ifdef CONFIG_PAX_RANDMMAP
9782+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9783+#endif
9784+
9785 if (addr) {
9786 if (do_colour_align)
9787 addr = COLOUR_ALIGN(addr, pgoff);
9788@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9789 addr = PAGE_ALIGN(addr);
9790
9791 vma = find_vma(mm, addr);
9792- if (TASK_SIZE - len >= addr &&
9793- (!vma || addr + len <= vma->vm_start))
9794+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9795 return addr;
9796 }
9797
9798 info.flags = 0;
9799 info.length = len;
9800- info.low_limit = TASK_UNMAPPED_BASE;
9801+ info.low_limit = mm->mmap_base;
9802 info.high_limit = TASK_SIZE;
9803 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9804 info.align_offset = pgoff << PAGE_SHIFT;
9805@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9806 struct mm_struct *mm = current->mm;
9807 unsigned long addr = addr0;
9808 int do_colour_align;
9809+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9810 struct vm_unmapped_area_info info;
9811
9812 if (flags & MAP_FIXED) {
9813@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9814 if (filp || (flags & MAP_SHARED))
9815 do_colour_align = 1;
9816
9817+#ifdef CONFIG_PAX_RANDMMAP
9818+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9819+#endif
9820+
9821 /* requesting a specific address */
9822 if (addr) {
9823 if (do_colour_align)
9824@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9825 addr = PAGE_ALIGN(addr);
9826
9827 vma = find_vma(mm, addr);
9828- if (TASK_SIZE - len >= addr &&
9829- (!vma || addr + len <= vma->vm_start))
9830+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9831 return addr;
9832 }
9833
9834@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9835 VM_BUG_ON(addr != -ENOMEM);
9836 info.flags = 0;
9837 info.low_limit = TASK_UNMAPPED_BASE;
9838+
9839+#ifdef CONFIG_PAX_RANDMMAP
9840+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9841+ info.low_limit += mm->delta_mmap;
9842+#endif
9843+
9844 info.high_limit = TASK_SIZE;
9845 addr = vm_unmapped_area(&info);
9846 }
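check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test: besides plain overlap it enforces a configurable gap below stack VMAs, with gr_rand_threadstack_offset() supplying an extra per-thread offset. A hedged sketch of the intended semantics (not the exact grsecurity implementation, which lives in the mm changes elsewhere in this patch; sysctl_heap_stack_gap is grsecurity's tunable for the reserved gap):

    /* Illustration: reject mappings that crowd a downward-growing stack. */
    static int check_heap_stack_gap(const struct vm_area_struct *vma,
                                    unsigned long addr, unsigned long len,
                                    unsigned long offset)
    {
            if (!vma)
                    return 1;
            if (addr + len > vma->vm_start)
                    return 0;                            /* overlaps next VMA */
            if (vma->vm_flags & VM_GROWSDOWN)            /* keep a guard gap  */
                    return addr + len + sysctl_heap_stack_gap + offset <= vma->vm_start;
            return 1;
    }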
9847diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9848index 4082749..fd97781 100644
9849--- a/arch/sparc/include/asm/atomic_64.h
9850+++ b/arch/sparc/include/asm/atomic_64.h
9851@@ -15,18 +15,38 @@
9852 #define ATOMIC64_INIT(i) { (i) }
9853
9854 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9855+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9856+{
9857+ return ACCESS_ONCE(v->counter);
9858+}
9859 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9860+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9861+{
9862+ return ACCESS_ONCE(v->counter);
9863+}
9864
9865 #define atomic_set(v, i) (((v)->counter) = i)
9866+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9867+{
9868+ v->counter = i;
9869+}
9870 #define atomic64_set(v, i) (((v)->counter) = i)
9871+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9872+{
9873+ v->counter = i;
9874+}
9875
9876-#define ATOMIC_OP(op) \
9877-void atomic_##op(int, atomic_t *); \
9878-void atomic64_##op(long, atomic64_t *);
9879+#define __ATOMIC_OP(op, suffix) \
9880+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9881+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9882
9883-#define ATOMIC_OP_RETURN(op) \
9884-int atomic_##op##_return(int, atomic_t *); \
9885-long atomic64_##op##_return(long, atomic64_t *);
9886+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9887+
9888+#define __ATOMIC_OP_RETURN(op, suffix) \
9889+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9890+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9891+
9892+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9893
9894 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9895
9896@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9897
9898 #undef ATOMIC_OPS
9899 #undef ATOMIC_OP_RETURN
9900+#undef __ATOMIC_OP_RETURN
9901 #undef ATOMIC_OP
9902+#undef __ATOMIC_OP
9903
9904 #define atomic_dec_return(v) atomic_sub_return(1, v)
9905 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9906
9907 #define atomic_inc_return(v) atomic_add_return(1, v)
9908+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9909+{
9910+ return atomic_add_return_unchecked(1, v);
9911+}
9912 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9913+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9914+{
9915+ return atomic64_add_return_unchecked(1, v);
9916+}
9917
9918 /*
9919 * atomic_inc_and_test - increment and test
9920@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9921 * other cases.
9922 */
9923 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9924+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9925+{
9926+ return atomic_inc_return_unchecked(v) == 0;
9927+}
9928 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9929
9930 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9931@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9932 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9933
9934 #define atomic_inc(v) atomic_add(1, v)
9935+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9936+{
9937+ atomic_add_unchecked(1, v);
9938+}
9939 #define atomic64_inc(v) atomic64_add(1, v)
9940+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9941+{
9942+ atomic64_add_unchecked(1, v);
9943+}
9944
9945 #define atomic_dec(v) atomic_sub(1, v)
9946+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9947+{
9948+ atomic_sub_unchecked(1, v);
9949+}
9950 #define atomic64_dec(v) atomic64_sub(1, v)
9951+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9952+{
9953+ atomic64_sub_unchecked(1, v);
9954+}
9955
9956 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9957 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9958
9959 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9960+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9961+{
9962+ return cmpxchg(&v->counter, old, new);
9963+}
9964 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9966+{
9967+ return xchg(&v->counter, new);
9968+}
9969
9970 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9971 {
9972- int c, old;
9973+ int c, old, new;
9974 c = atomic_read(v);
9975 for (;;) {
9976- if (unlikely(c == (u)))
9977+ if (unlikely(c == u))
9978 break;
9979- old = atomic_cmpxchg((v), c, c + (a));
9980+
9981+ asm volatile("addcc %2, %0, %0\n"
9982+
9983+#ifdef CONFIG_PAX_REFCOUNT
9984+ "tvs %%icc, 6\n"
9985+#endif
9986+
9987+ : "=r" (new)
9988+ : "0" (c), "ir" (a)
9989+ : "cc");
9990+
9991+ old = atomic_cmpxchg(v, c, new);
9992 if (likely(old == c))
9993 break;
9994 c = old;
9995@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9996 #define atomic64_cmpxchg(v, o, n) \
9997 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9998 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9999+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10000+{
10001+ return xchg(&v->counter, new);
10002+}
10003
10004 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10005 {
10006- long c, old;
10007+ long c, old, new;
10008 c = atomic64_read(v);
10009 for (;;) {
10010- if (unlikely(c == (u)))
10011+ if (unlikely(c == u))
10012 break;
10013- old = atomic64_cmpxchg((v), c, c + (a));
10014+
10015+ asm volatile("addcc %2, %0, %0\n"
10016+
10017+#ifdef CONFIG_PAX_REFCOUNT
10018+ "tvs %%xcc, 6\n"
10019+#endif
10020+
10021+ : "=r" (new)
10022+ : "0" (c), "ir" (a)
10023+ : "cc");
10024+
10025+ old = atomic64_cmpxchg(v, c, new);
10026 if (likely(old == c))
10027 break;
10028 c = old;
10029 }
10030- return c != (u);
10031+ return c != u;
10032 }
10033
10034 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
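
[Editor's note] atomic_unchecked_t/atomic64_unchecked_t are the PaX REFCOUNT opt-out types: plain atomic_t arithmetic now traps on signed overflow (the addcc/tvs pairs added above), so counters that are allowed to wrap — statistics, sequence numbers — must be ported to the _unchecked API, which preserves the old modular semantics. Minimal usage sketch (the counter names are illustrative only):

	static atomic_unchecked_t pkt_seq = ATOMIC_INIT(0);	/* may wrap, by design */
	static atomic_t obj_refs = ATOMIC_INIT(1);	/* overflow would be a bug: keep checked */

	static inline int next_seq(void)
	{
		return atomic_inc_return_unchecked(&pkt_seq);	/* no overflow trap */
	}
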
10035diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10036index 7664894..45a974b 100644
10037--- a/arch/sparc/include/asm/barrier_64.h
10038+++ b/arch/sparc/include/asm/barrier_64.h
10039@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10040 do { \
10041 compiletime_assert_atomic_type(*p); \
10042 barrier(); \
10043- ACCESS_ONCE(*p) = (v); \
10044+ ACCESS_ONCE_RW(*p) = (v); \
10045 } while (0)
10046
10047 #define smp_load_acquire(p) \
10048diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10049index 5bb6991..5c2132e 100644
10050--- a/arch/sparc/include/asm/cache.h
10051+++ b/arch/sparc/include/asm/cache.h
10052@@ -7,10 +7,12 @@
10053 #ifndef _SPARC_CACHE_H
10054 #define _SPARC_CACHE_H
10055
10056+#include <linux/const.h>
10057+
10058 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10059
10060 #define L1_CACHE_SHIFT 5
10061-#define L1_CACHE_BYTES 32
10062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10063
10064 #ifdef CONFIG_SPARC32
10065 #define SMP_CACHE_BYTES_SHIFT 5
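
[Editor's note] Expressing L1_CACHE_BYTES as _AC(1,UL) << L1_CACHE_SHIFT (hence the new <linux/const.h> include) makes the constant usable from both C and assembly while giving it unsigned long type in C, which keeps address arithmetic with it from ever being signed. The relevant shape of the stock header:

	/* <uapi/linux/const.h> */
	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X		/* asm cannot parse C suffixes */
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)	/* C sees 1UL */
	#endif
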
10066diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10067index a24e41f..47677ff 100644
10068--- a/arch/sparc/include/asm/elf_32.h
10069+++ b/arch/sparc/include/asm/elf_32.h
10070@@ -114,6 +114,13 @@ typedef struct {
10071
10072 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10073
10074+#ifdef CONFIG_PAX_ASLR
10075+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10076+
10077+#define PAX_DELTA_MMAP_LEN 16
10078+#define PAX_DELTA_STACK_LEN 16
10079+#endif
10080+
10081 /* This yields a mask that user programs can use to figure out what
10082 instruction set this cpu supports. This can NOT be done in userspace
10083 on Sparc. */
10084diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10085index 370ca1e..d4f4a98 100644
10086--- a/arch/sparc/include/asm/elf_64.h
10087+++ b/arch/sparc/include/asm/elf_64.h
10088@@ -189,6 +189,13 @@ typedef struct {
10089 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10090 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10091
10092+#ifdef CONFIG_PAX_ASLR
10093+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10094+
10095+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10096+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10097+#endif
10098+
10099 extern unsigned long sparc64_elf_hwcap;
10100 #define ELF_HWCAP sparc64_elf_hwcap
10101
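[Editor's note] PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX folds into mmap and stack placement (14/15 for compat tasks, 28/29 for 64-bit ones). The per-mm deltas derived from them are computed in the ELF loader part of the patch; the usual shape, shown here as an illustrative sketch rather than the exact patch code (pax_get_random_long() is assumed), is:

	mm->delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1))
				<< PAGE_SHIFT;	/* PAX_DELTA_MMAP_LEN bits, page-aligned */
	mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1))
				<< PAGE_SHIFT;
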
10102diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10103index a3890da..f6a408e 100644
10104--- a/arch/sparc/include/asm/pgalloc_32.h
10105+++ b/arch/sparc/include/asm/pgalloc_32.h
10106@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10107 }
10108
10109 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10110+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10111
10112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10113 unsigned long address)
10114diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10115index 5e31871..13469c6 100644
10116--- a/arch/sparc/include/asm/pgalloc_64.h
10117+++ b/arch/sparc/include/asm/pgalloc_64.h
10118@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10119 }
10120
10121 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10122+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10123
10124 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10125 {
10126@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10127 }
10128
10129 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10130+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10131
10132 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10133 {
10134diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10135index 59ba6f6..4518128 100644
10136--- a/arch/sparc/include/asm/pgtable.h
10137+++ b/arch/sparc/include/asm/pgtable.h
10138@@ -5,4 +5,8 @@
10139 #else
10140 #include <asm/pgtable_32.h>
10141 #endif
10142+
10143+#define ktla_ktva(addr) (addr)
10144+#define ktva_ktla(addr) (addr)
10145+
10146 #endif
10147diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10148index f06b36a..bca3189 100644
10149--- a/arch/sparc/include/asm/pgtable_32.h
10150+++ b/arch/sparc/include/asm/pgtable_32.h
10151@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10152 #define PAGE_SHARED SRMMU_PAGE_SHARED
10153 #define PAGE_COPY SRMMU_PAGE_COPY
10154 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10155+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10156+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10157+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10158 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10159
10160 /* Top-level page directory - dummy used by init-mm.
10161@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10162
10163 /* xwr */
10164 #define __P000 PAGE_NONE
10165-#define __P001 PAGE_READONLY
10166-#define __P010 PAGE_COPY
10167-#define __P011 PAGE_COPY
10168+#define __P001 PAGE_READONLY_NOEXEC
10169+#define __P010 PAGE_COPY_NOEXEC
10170+#define __P011 PAGE_COPY_NOEXEC
10171 #define __P100 PAGE_READONLY
10172 #define __P101 PAGE_READONLY
10173 #define __P110 PAGE_COPY
10174 #define __P111 PAGE_COPY
10175
10176 #define __S000 PAGE_NONE
10177-#define __S001 PAGE_READONLY
10178-#define __S010 PAGE_SHARED
10179-#define __S011 PAGE_SHARED
10180+#define __S001 PAGE_READONLY_NOEXEC
10181+#define __S010 PAGE_SHARED_NOEXEC
10182+#define __S011 PAGE_SHARED_NOEXEC
10183 #define __S100 PAGE_READONLY
10184 #define __S101 PAGE_READONLY
10185 #define __S110 PAGE_SHARED
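
[Editor's note] The __P/__S arrays are indexed by the low mmap protection bits (read/write/exec, private vs shared), so removing SRMMU_EXEC from every entry whose exec bit is clear is all it takes to give PROT_READ/PROT_WRITE mappings non-executable PTEs on SRMMU. The indexing is done by the stock allocator (simplified; the real 4.0 body also ORs in arch_vm_get_page_prot()):

	/* mm/mmap.c */
	pgprot_t protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]));
	}
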
10186diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10187index ae51a11..eadfd03 100644
10188--- a/arch/sparc/include/asm/pgtsrmmu.h
10189+++ b/arch/sparc/include/asm/pgtsrmmu.h
10190@@ -111,6 +111,11 @@
10191 SRMMU_EXEC | SRMMU_REF)
10192 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10193 SRMMU_EXEC | SRMMU_REF)
10194+
10195+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10196+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10197+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10198+
10199 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10200 SRMMU_DIRTY | SRMMU_REF)
10201
10202diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10203index 29d64b1..4272fe8 100644
10204--- a/arch/sparc/include/asm/setup.h
10205+++ b/arch/sparc/include/asm/setup.h
10206@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10207 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10208
10209 /* init_64.c */
10210-extern atomic_t dcpage_flushes;
10211-extern atomic_t dcpage_flushes_xcall;
10212+extern atomic_unchecked_t dcpage_flushes;
10213+extern atomic_unchecked_t dcpage_flushes_xcall;
10214
10215 extern int sysctl_tsb_ratio;
10216 #endif
10217diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10218index 9689176..63c18ea 100644
10219--- a/arch/sparc/include/asm/spinlock_64.h
10220+++ b/arch/sparc/include/asm/spinlock_64.h
10221@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10222
10223 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10224
10225-static void inline arch_read_lock(arch_rwlock_t *lock)
10226+static inline void arch_read_lock(arch_rwlock_t *lock)
10227 {
10228 unsigned long tmp1, tmp2;
10229
10230 __asm__ __volatile__ (
10231 "1: ldsw [%2], %0\n"
10232 " brlz,pn %0, 2f\n"
10233-"4: add %0, 1, %1\n"
10234+"4: addcc %0, 1, %1\n"
10235+
10236+#ifdef CONFIG_PAX_REFCOUNT
10237+" tvs %%icc, 6\n"
10238+#endif
10239+
10240 " cas [%2], %0, %1\n"
10241 " cmp %0, %1\n"
10242 " bne,pn %%icc, 1b\n"
10243@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10244 " .previous"
10245 : "=&r" (tmp1), "=&r" (tmp2)
10246 : "r" (lock)
10247- : "memory");
10248+ : "memory", "cc");
10249 }
10250
10251-static int inline arch_read_trylock(arch_rwlock_t *lock)
10252+static inline int arch_read_trylock(arch_rwlock_t *lock)
10253 {
10254 int tmp1, tmp2;
10255
10256@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10257 "1: ldsw [%2], %0\n"
10258 " brlz,a,pn %0, 2f\n"
10259 " mov 0, %0\n"
10260-" add %0, 1, %1\n"
10261+" addcc %0, 1, %1\n"
10262+
10263+#ifdef CONFIG_PAX_REFCOUNT
10264+" tvs %%icc, 6\n"
10265+#endif
10266+
10267 " cas [%2], %0, %1\n"
10268 " cmp %0, %1\n"
10269 " bne,pn %%icc, 1b\n"
10270@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10271 return tmp1;
10272 }
10273
10274-static void inline arch_read_unlock(arch_rwlock_t *lock)
10275+static inline void arch_read_unlock(arch_rwlock_t *lock)
10276 {
10277 unsigned long tmp1, tmp2;
10278
10279 __asm__ __volatile__(
10280 "1: lduw [%2], %0\n"
10281-" sub %0, 1, %1\n"
10282+" subcc %0, 1, %1\n"
10283+
10284+#ifdef CONFIG_PAX_REFCOUNT
10285+" tvs %%icc, 6\n"
10286+#endif
10287+
10288 " cas [%2], %0, %1\n"
10289 " cmp %0, %1\n"
10290 " bne,pn %%xcc, 1b\n"
10291@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10292 : "memory");
10293 }
10294
10295-static void inline arch_write_lock(arch_rwlock_t *lock)
10296+static inline void arch_write_lock(arch_rwlock_t *lock)
10297 {
10298 unsigned long mask, tmp1, tmp2;
10299
10300@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10301 : "memory");
10302 }
10303
10304-static void inline arch_write_unlock(arch_rwlock_t *lock)
10305+static inline void arch_write_unlock(arch_rwlock_t *lock)
10306 {
10307 __asm__ __volatile__(
10308 " stw %%g0, [%0]"
10309@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10310 : "memory");
10311 }
10312
10313-static int inline arch_write_trylock(arch_rwlock_t *lock)
10314+static inline int arch_write_trylock(arch_rwlock_t *lock)
10315 {
10316 unsigned long mask, tmp1, tmp2, result;
10317
10318diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10319index fd7bd0a..2e2fa7a 100644
10320--- a/arch/sparc/include/asm/thread_info_32.h
10321+++ b/arch/sparc/include/asm/thread_info_32.h
10322@@ -47,6 +47,7 @@ struct thread_info {
10323 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10324 unsigned long rwbuf_stkptrs[NSWINS];
10325 unsigned long w_saved;
10326+ unsigned long lowest_stack;
10327 };
10328
10329 /*
10330diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10331index ff45516..73001ab 100644
10332--- a/arch/sparc/include/asm/thread_info_64.h
10333+++ b/arch/sparc/include/asm/thread_info_64.h
10334@@ -61,6 +61,8 @@ struct thread_info {
10335 struct pt_regs *kern_una_regs;
10336 unsigned int kern_una_insn;
10337
10338+ unsigned long lowest_stack;
10339+
10340 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10341 __attribute__ ((aligned(64)));
10342 };
10343@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10344 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10345 /* flag bit 4 is available */
10346 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10347-/* flag bit 6 is available */
10348+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10349 #define TIF_32BIT 7 /* 32-bit binary */
10350 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10351 #define TIF_SECCOMP 9 /* secure computing */
10352 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10353 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10354+
10355 /* NOTE: Thread flags >= 12 should be ones we have no interest
10356 * in using in assembly, else we can't use the mask as
10357 * an immediate value in instructions such as andcc.
10358@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10359 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10360 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10361 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10362+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10363
10364 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10365 _TIF_DO_NOTIFY_RESUME_MASK | \
10366 _TIF_NEED_RESCHED)
10367 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10368
10369+#define _TIF_WORK_SYSCALL \
10370+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10371+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10372+
10373 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10374
10375 /*
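
[Editor's note] Folding TIF_GRSEC_SETXID into a single _TIF_WORK_SYSCALL mask matters on sparc64 because the mask is used as an andcc immediate in syscalls.S (patched further down), and sparc arithmetic immediates are 13-bit sign-extended — hence the file's own rule that flags >= 12 stay out of assembly paths. The C-side equivalent of that entry test is simply:

	if (current_thread_info()->flags & _TIF_WORK_SYSCALL)
		/* slow path: tracing, seccomp, audit, tracepoints,
		   nohz bookkeeping, and the delayed GRSEC cred update */
		syscall_trace_enter(regs);
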
10376diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10377index bd56c28..4b63d83 100644
10378--- a/arch/sparc/include/asm/uaccess.h
10379+++ b/arch/sparc/include/asm/uaccess.h
10380@@ -1,5 +1,6 @@
10381 #ifndef ___ASM_SPARC_UACCESS_H
10382 #define ___ASM_SPARC_UACCESS_H
10383+
10384 #if defined(__sparc__) && defined(__arch64__)
10385 #include <asm/uaccess_64.h>
10386 #else
10387diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10388index 64ee103..388aef0 100644
10389--- a/arch/sparc/include/asm/uaccess_32.h
10390+++ b/arch/sparc/include/asm/uaccess_32.h
10391@@ -47,6 +47,7 @@
10392 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10393 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10394 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10395+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10396 #define access_ok(type, addr, size) \
10397 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10398
10399@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10400
10401 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10402 {
10403- if (n && __access_ok((unsigned long) to, n))
10404+ if ((long)n < 0)
10405+ return n;
10406+
10407+ if (n && __access_ok((unsigned long) to, n)) {
10408+ if (!__builtin_constant_p(n))
10409+ check_object_size(from, n, true);
10410 return __copy_user(to, (__force void __user *) from, n);
10411- else
10412+ } else
10413 return n;
10414 }
10415
10416 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10417 {
10418+ if ((long)n < 0)
10419+ return n;
10420+
10421+ if (!__builtin_constant_p(n))
10422+ check_object_size(from, n, true);
10423+
10424 return __copy_user(to, (__force void __user *) from, n);
10425 }
10426
10427 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) from, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) from, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(to, n, false);
10436 return __copy_user((__force void __user *) to, from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447 return __copy_user((__force void __user *) to, from, n);
10448 }
10449
10450diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10451index a35194b..47dabc0d 100644
10452--- a/arch/sparc/include/asm/uaccess_64.h
10453+++ b/arch/sparc/include/asm/uaccess_64.h
10454@@ -10,6 +10,7 @@
10455 #include <linux/compiler.h>
10456 #include <linux/string.h>
10457 #include <linux/thread_info.h>
10458+#include <linux/kernel.h>
10459 #include <asm/asi.h>
10460 #include <asm/spitfire.h>
10461 #include <asm-generic/uaccess-unaligned.h>
10462@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10463 return 1;
10464 }
10465
10466+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10467+{
10468+ return 1;
10469+}
10470+
10471 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10472 {
10473 return 1;
10474@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10475 static inline unsigned long __must_check
10476 copy_from_user(void *to, const void __user *from, unsigned long size)
10477 {
10478- unsigned long ret = ___copy_from_user(to, from, size);
10479+ unsigned long ret;
10480
10481+ if ((long)size < 0 || size > INT_MAX)
10482+ return size;
10483+
10484+ if (!__builtin_constant_p(size))
10485+ check_object_size(to, size, false);
10486+
10487+ ret = ___copy_from_user(to, from, size);
10488 if (unlikely(ret))
10489 ret = copy_from_user_fixup(to, from, size);
10490
10491@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10492 static inline unsigned long __must_check
10493 copy_to_user(void __user *to, const void *from, unsigned long size)
10494 {
10495- unsigned long ret = ___copy_to_user(to, from, size);
10496+ unsigned long ret;
10497
10498+ if ((long)size < 0 || size > INT_MAX)
10499+ return size;
10500+
10501+ if (!__builtin_constant_p(size))
10502+ check_object_size(from, size, true);
10503+
10504+ ret = ___copy_to_user(to, from, size);
10505 if (unlikely(ret))
10506 ret = copy_to_user_fixup(to, from, size);
10507 return ret;
10508diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10509index 7cf9c6e..6206648 100644
10510--- a/arch/sparc/kernel/Makefile
10511+++ b/arch/sparc/kernel/Makefile
10512@@ -4,7 +4,7 @@
10513 #
10514
10515 asflags-y := -ansi
10516-ccflags-y := -Werror
10517+#ccflags-y := -Werror
10518
10519 extra-y := head_$(BITS).o
10520
10521diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10522index 50e7b62..79fae35 100644
10523--- a/arch/sparc/kernel/process_32.c
10524+++ b/arch/sparc/kernel/process_32.c
10525@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10526
10527 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10528 r->psr, r->pc, r->npc, r->y, print_tainted());
10529- printk("PC: <%pS>\n", (void *) r->pc);
10530+ printk("PC: <%pA>\n", (void *) r->pc);
10531 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10532 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10533 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10534 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10535 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10536 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10537- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10538+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10539
10540 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10541 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10542@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10543 rw = (struct reg_window32 *) fp;
10544 pc = rw->ins[7];
10545 printk("[%08lx : ", pc);
10546- printk("%pS ] ", (void *) pc);
10547+ printk("%pA ] ", (void *) pc);
10548 fp = rw->ins[6];
10549 } while (++count < 16);
10550 printk("\n");
10551diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10552index 46a5964..a35c62c 100644
10553--- a/arch/sparc/kernel/process_64.c
10554+++ b/arch/sparc/kernel/process_64.c
10555@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10556 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10557 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10558 if (regs->tstate & TSTATE_PRIV)
10559- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10560+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10561 }
10562
10563 void show_regs(struct pt_regs *regs)
10564@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10565
10566 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10567 regs->tpc, regs->tnpc, regs->y, print_tainted());
10568- printk("TPC: <%pS>\n", (void *) regs->tpc);
10569+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10570 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10571 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10572 regs->u_regs[3]);
10573@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10574 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10575 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10576 regs->u_regs[15]);
10577- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10578+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10579 show_regwindow(regs);
10580 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10581 }
10582@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10583 ((tp && tp->task) ? tp->task->pid : -1));
10584
10585 if (gp->tstate & TSTATE_PRIV) {
10586- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10587+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10588 (void *) gp->tpc,
10589 (void *) gp->o7,
10590 (void *) gp->i7,
10591diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10592index 79cc0d1..ec62734 100644
10593--- a/arch/sparc/kernel/prom_common.c
10594+++ b/arch/sparc/kernel/prom_common.c
10595@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10596
10597 unsigned int prom_early_allocated __initdata;
10598
10599-static struct of_pdt_ops prom_sparc_ops __initdata = {
10600+static struct of_pdt_ops prom_sparc_ops __initconst = {
10601 .nextprop = prom_common_nextprop,
10602 .getproplen = prom_getproplen,
10603 .getproperty = prom_getproperty,
10604diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10605index 9ddc492..27a5619 100644
10606--- a/arch/sparc/kernel/ptrace_64.c
10607+++ b/arch/sparc/kernel/ptrace_64.c
10608@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10609 return ret;
10610 }
10611
10612+#ifdef CONFIG_GRKERNSEC_SETXID
10613+extern void gr_delayed_cred_worker(void);
10614+#endif
10615+
10616 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10617 {
10618 int ret = 0;
10619@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10620 if (test_thread_flag(TIF_NOHZ))
10621 user_exit();
10622
10623+#ifdef CONFIG_GRKERNSEC_SETXID
10624+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10625+ gr_delayed_cred_worker();
10626+#endif
10627+
10628 if (test_thread_flag(TIF_SYSCALL_TRACE))
10629 ret = tracehook_report_syscall_entry(regs);
10630
10631@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10632 if (test_thread_flag(TIF_NOHZ))
10633 user_exit();
10634
10635+#ifdef CONFIG_GRKERNSEC_SETXID
10636+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10637+ gr_delayed_cred_worker();
10638+#endif
10639+
10640 audit_syscall_exit(regs);
10641
10642 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
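
[Editor's note] TIF_GRSEC_SETXID implements grsecurity's delayed credential propagation: when one thread changes its uids, its siblings get flagged, and each applies the new credentials at its next syscall boundary — both entry and exit are hooked above to keep that window short. Sketch of the setter side (helper name hypothetical; gr_delayed_cred_worker() is the real consumer shown in the hunk):

	static void gr_mark_siblings_for_cred_update(void)	/* hypothetical */
	{
		struct task_struct *t;

		rcu_read_lock();
		for_each_thread(current, t)
			set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
		rcu_read_unlock();
	}
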
10643diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10644index 61139d9..c1a5f28 100644
10645--- a/arch/sparc/kernel/smp_64.c
10646+++ b/arch/sparc/kernel/smp_64.c
10647@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10648 return;
10649
10650 #ifdef CONFIG_DEBUG_DCFLUSH
10651- atomic_inc(&dcpage_flushes);
10652+ atomic_inc_unchecked(&dcpage_flushes);
10653 #endif
10654
10655 this_cpu = get_cpu();
10656@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10657 xcall_deliver(data0, __pa(pg_addr),
10658 (u64) pg_addr, cpumask_of(cpu));
10659 #ifdef CONFIG_DEBUG_DCFLUSH
10660- atomic_inc(&dcpage_flushes_xcall);
10661+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10662 #endif
10663 }
10664 }
10665@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10666 preempt_disable();
10667
10668 #ifdef CONFIG_DEBUG_DCFLUSH
10669- atomic_inc(&dcpage_flushes);
10670+ atomic_inc_unchecked(&dcpage_flushes);
10671 #endif
10672 data0 = 0;
10673 pg_addr = page_address(page);
10674@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10675 xcall_deliver(data0, __pa(pg_addr),
10676 (u64) pg_addr, cpu_online_mask);
10677 #ifdef CONFIG_DEBUG_DCFLUSH
10678- atomic_inc(&dcpage_flushes_xcall);
10679+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10680 #endif
10681 }
10682 __local_flush_dcache_page(page);
10683diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10684index 646988d..b88905f 100644
10685--- a/arch/sparc/kernel/sys_sparc_32.c
10686+++ b/arch/sparc/kernel/sys_sparc_32.c
10687@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10688 if (len > TASK_SIZE - PAGE_SIZE)
10689 return -ENOMEM;
10690 if (!addr)
10691- addr = TASK_UNMAPPED_BASE;
10692+ addr = current->mm->mmap_base;
10693
10694 info.flags = 0;
10695 info.length = len;
10696diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10697index 30e7ddb..266a3b0 100644
10698--- a/arch/sparc/kernel/sys_sparc_64.c
10699+++ b/arch/sparc/kernel/sys_sparc_64.c
10700@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10701 struct vm_area_struct * vma;
10702 unsigned long task_size = TASK_SIZE;
10703 int do_color_align;
10704+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10705 struct vm_unmapped_area_info info;
10706
10707 if (flags & MAP_FIXED) {
10708 /* We do not accept a shared mapping if it would violate
10709 * cache aliasing constraints.
10710 */
10711- if ((flags & MAP_SHARED) &&
10712+ if ((filp || (flags & MAP_SHARED)) &&
10713 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10714 return -EINVAL;
10715 return addr;
10716@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10717 if (filp || (flags & MAP_SHARED))
10718 do_color_align = 1;
10719
10720+#ifdef CONFIG_PAX_RANDMMAP
10721+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10722+#endif
10723+
10724 if (addr) {
10725 if (do_color_align)
10726 addr = COLOR_ALIGN(addr, pgoff);
10727@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10728 addr = PAGE_ALIGN(addr);
10729
10730 vma = find_vma(mm, addr);
10731- if (task_size - len >= addr &&
10732- (!vma || addr + len <= vma->vm_start))
10733+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10734 return addr;
10735 }
10736
10737 info.flags = 0;
10738 info.length = len;
10739- info.low_limit = TASK_UNMAPPED_BASE;
10740+ info.low_limit = mm->mmap_base;
10741 info.high_limit = min(task_size, VA_EXCLUDE_START);
10742 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10743 info.align_offset = pgoff << PAGE_SHIFT;
10744+ info.threadstack_offset = offset;
10745 addr = vm_unmapped_area(&info);
10746
10747 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10748 VM_BUG_ON(addr != -ENOMEM);
10749 info.low_limit = VA_EXCLUDE_END;
10750+
10751+#ifdef CONFIG_PAX_RANDMMAP
10752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10753+ info.low_limit += mm->delta_mmap;
10754+#endif
10755+
10756 info.high_limit = task_size;
10757 addr = vm_unmapped_area(&info);
10758 }
10759@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10760 unsigned long task_size = STACK_TOP32;
10761 unsigned long addr = addr0;
10762 int do_color_align;
10763+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10764 struct vm_unmapped_area_info info;
10765
10766 /* This should only ever run for 32-bit processes. */
10767@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10768 /* We do not accept a shared mapping if it would violate
10769 * cache aliasing constraints.
10770 */
10771- if ((flags & MAP_SHARED) &&
10772+ if ((filp || (flags & MAP_SHARED)) &&
10773 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10774 return -EINVAL;
10775 return addr;
10776@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10777 if (filp || (flags & MAP_SHARED))
10778 do_color_align = 1;
10779
10780+#ifdef CONFIG_PAX_RANDMMAP
10781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10782+#endif
10783+
10784 /* requesting a specific address */
10785 if (addr) {
10786 if (do_color_align)
10787@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10788 addr = PAGE_ALIGN(addr);
10789
10790 vma = find_vma(mm, addr);
10791- if (task_size - len >= addr &&
10792- (!vma || addr + len <= vma->vm_start))
10793+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10794 return addr;
10795 }
10796
10797@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10798 info.high_limit = mm->mmap_base;
10799 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10800 info.align_offset = pgoff << PAGE_SHIFT;
10801+ info.threadstack_offset = offset;
10802 addr = vm_unmapped_area(&info);
10803
10804 /*
10805@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10806 VM_BUG_ON(addr != -ENOMEM);
10807 info.flags = 0;
10808 info.low_limit = TASK_UNMAPPED_BASE;
10809+
10810+#ifdef CONFIG_PAX_RANDMMAP
10811+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10812+ info.low_limit += mm->delta_mmap;
10813+#endif
10814+
10815 info.high_limit = STACK_TOP32;
10816 addr = vm_unmapped_area(&info);
10817 }
10818@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10819 EXPORT_SYMBOL(get_fb_unmapped_area);
10820
10821 /* Essentially the same as PowerPC. */
10822-static unsigned long mmap_rnd(void)
10823+static unsigned long mmap_rnd(struct mm_struct *mm)
10824 {
10825 unsigned long rnd = 0UL;
10826
10827+#ifdef CONFIG_PAX_RANDMMAP
10828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10829+#endif
10830+
10831 if (current->flags & PF_RANDOMIZE) {
10832 unsigned long val = get_random_int();
10833 if (test_thread_flag(TIF_32BIT))
10834@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10835
10836 void arch_pick_mmap_layout(struct mm_struct *mm)
10837 {
10838- unsigned long random_factor = mmap_rnd();
10839+ unsigned long random_factor = mmap_rnd(mm);
10840 unsigned long gap;
10841
10842 /*
10843@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10844 gap == RLIM_INFINITY ||
10845 sysctl_legacy_va_layout) {
10846 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10847+
10848+#ifdef CONFIG_PAX_RANDMMAP
10849+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10850+ mm->mmap_base += mm->delta_mmap;
10851+#endif
10852+
10853 mm->get_unmapped_area = arch_get_unmapped_area;
10854 } else {
10855 /* We know it's 32-bit */
10856@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10857 gap = (task_size / 6 * 5);
10858
10859 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10860+
10861+#ifdef CONFIG_PAX_RANDMMAP
10862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10863+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10864+#endif
10865+
10866 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10867 }
10868 }
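
[Editor's note] check_heap_stack_gap() replaces every open-coded "!vma || addr + len <= vma->vm_start" hint check in the hunks above; on top of the plain fit test it enforces a guard gap below stack-like vmas and accounts for the threadstack_offset randomization carried in struct vm_unmapped_area_info. Its real body lives in the mm part of the patch; a conceptual sketch consistent with these call sites (the sysctl name is an assumption, and the offset handling is elided):

	static bool check_heap_stack_gap(const struct vm_area_struct *vma,
					 unsigned long addr, unsigned long len,
					 unsigned long offset)
	{
		unsigned long gap = 0;

		if (!vma)
			return true;			/* nothing above the hint */
		if (vma->vm_flags & VM_GROWSDOWN)
			gap = sysctl_heap_stack_gap;	/* assumed sysctl knob */
		return addr + len + gap <= vma->vm_start;
	}
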
10869diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10870index bb00089..e0ea580 100644
10871--- a/arch/sparc/kernel/syscalls.S
10872+++ b/arch/sparc/kernel/syscalls.S
10873@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10874 #endif
10875 .align 32
10876 1: ldx [%g6 + TI_FLAGS], %l5
10877- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10878+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10879 be,pt %icc, rtrap
10880 nop
10881 call syscall_trace_leave
10882@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10883
10884 srl %i3, 0, %o3 ! IEU0
10885 srl %i2, 0, %o2 ! IEU0 Group
10886- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10887+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10888 bne,pn %icc, linux_syscall_trace32 ! CTI
10889 mov %i0, %l5 ! IEU1
10890 5: call %l7 ! CTI Group brk forced
10891@@ -218,7 +218,7 @@ linux_sparc_syscall:
10892
10893 mov %i3, %o3 ! IEU1
10894 mov %i4, %o4 ! IEU0 Group
10895- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10896+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10897 bne,pn %icc, linux_syscall_trace ! CTI Group
10898 mov %i0, %l5 ! IEU0
10899 2: call %l7 ! CTI Group brk forced
10900@@ -233,7 +233,7 @@ ret_sys_call:
10901
10902 cmp %o0, -ERESTART_RESTARTBLOCK
10903 bgeu,pn %xcc, 1f
10904- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10905+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10906 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10907
10908 2:
10909diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10910index 6fd386c5..6907d81 100644
10911--- a/arch/sparc/kernel/traps_32.c
10912+++ b/arch/sparc/kernel/traps_32.c
10913@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10914 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10915 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10916
10917+extern void gr_handle_kernel_exploit(void);
10918+
10919 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10920 {
10921 static int die_counter;
10922@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10923 count++ < 30 &&
10924 (((unsigned long) rw) >= PAGE_OFFSET) &&
10925 !(((unsigned long) rw) & 0x7)) {
10926- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10927+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10928 (void *) rw->ins[7]);
10929 rw = (struct reg_window32 *)rw->ins[6];
10930 }
10931 }
10932 printk("Instruction DUMP:");
10933 instruction_dump ((unsigned long *) regs->pc);
10934- if(regs->psr & PSR_PS)
10935+ if(regs->psr & PSR_PS) {
10936+ gr_handle_kernel_exploit();
10937 do_exit(SIGKILL);
10938+ }
10939 do_exit(SIGSEGV);
10940 }
10941
10942diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10943index 0e69974..0c15a6e 100644
10944--- a/arch/sparc/kernel/traps_64.c
10945+++ b/arch/sparc/kernel/traps_64.c
10946@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10947 i + 1,
10948 p->trapstack[i].tstate, p->trapstack[i].tpc,
10949 p->trapstack[i].tnpc, p->trapstack[i].tt);
10950- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10951+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10952 }
10953 }
10954
10955@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10956
10957 lvl -= 0x100;
10958 if (regs->tstate & TSTATE_PRIV) {
10959+
10960+#ifdef CONFIG_PAX_REFCOUNT
10961+ if (lvl == 6)
10962+ pax_report_refcount_overflow(regs);
10963+#endif
10964+
10965 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10966 die_if_kernel(buffer, regs);
10967 }
10968@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10969 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10970 {
10971 char buffer[32];
10972-
10973+
10974 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10975 0, lvl, SIGTRAP) == NOTIFY_STOP)
10976 return;
10977
10978+#ifdef CONFIG_PAX_REFCOUNT
10979+ if (lvl == 6)
10980+ pax_report_refcount_overflow(regs);
10981+#endif
10982+
10983 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10984
10985 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10986@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10987 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10988 printk("%s" "ERROR(%d): ",
10989 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10990- printk("TPC<%pS>\n", (void *) regs->tpc);
10991+ printk("TPC<%pA>\n", (void *) regs->tpc);
10992 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10993 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10994 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10995@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10996 smp_processor_id(),
10997 (type & 0x1) ? 'I' : 'D',
10998 regs->tpc);
10999- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11000+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11001 panic("Irrecoverable Cheetah+ parity error.");
11002 }
11003
11004@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11005 smp_processor_id(),
11006 (type & 0x1) ? 'I' : 'D',
11007 regs->tpc);
11008- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11009+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11010 }
11011
11012 struct sun4v_error_entry {
11013@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11014 /*0x38*/u64 reserved_5;
11015 };
11016
11017-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11018-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11019+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11020+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11021
11022 static const char *sun4v_err_type_to_str(u8 type)
11023 {
11024@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11025 }
11026
11027 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11028- int cpu, const char *pfx, atomic_t *ocnt)
11029+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11030 {
11031 u64 *raw_ptr = (u64 *) ent;
11032 u32 attrs;
11033@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11034
11035 show_regs(regs);
11036
11037- if ((cnt = atomic_read(ocnt)) != 0) {
11038- atomic_set(ocnt, 0);
11039+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11040+ atomic_set_unchecked(ocnt, 0);
11041 wmb();
11042 printk("%s: Queue overflowed %d times.\n",
11043 pfx, cnt);
11044@@ -2048,7 +2059,7 @@ out:
11045 */
11046 void sun4v_resum_overflow(struct pt_regs *regs)
11047 {
11048- atomic_inc(&sun4v_resum_oflow_cnt);
11049+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11050 }
11051
11052 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11053@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11054 /* XXX Actually even this can make not that much sense. Perhaps
11055 * XXX we should just pull the plug and panic directly from here?
11056 */
11057- atomic_inc(&sun4v_nonresum_oflow_cnt);
11058+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11059 }
11060
11061 static void sun4v_tlb_error(struct pt_regs *regs)
11062@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11063
11064 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11065 regs->tpc, tl);
11066- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11067+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11068 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11069- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11070+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11071 (void *) regs->u_regs[UREG_I7]);
11072 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11073 "pte[%lx] error[%lx]\n",
11074@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11075
11076 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11077 regs->tpc, tl);
11078- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11079+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11080 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11081- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11082+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11083 (void *) regs->u_regs[UREG_I7]);
11084 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11085 "pte[%lx] error[%lx]\n",
11086@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11087 fp = (unsigned long)sf->fp + STACK_BIAS;
11088 }
11089
11090- printk(" [%016lx] %pS\n", pc, (void *) pc);
11091+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11092 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11093 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11094 int index = tsk->curr_ret_stack;
11095 if (tsk->ret_stack && index >= graph) {
11096 pc = tsk->ret_stack[index - graph].ret;
11097- printk(" [%016lx] %pS\n", pc, (void *) pc);
11098+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11099 graph++;
11100 }
11101 }
11102@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11103 return (struct reg_window *) (fp + STACK_BIAS);
11104 }
11105
11106+extern void gr_handle_kernel_exploit(void);
11107+
11108 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11109 {
11110 static int die_counter;
11111@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11112 while (rw &&
11113 count++ < 30 &&
11114 kstack_valid(tp, (unsigned long) rw)) {
11115- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11116+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11117 (void *) rw->ins[7]);
11118
11119 rw = kernel_stack_up(rw);
11120@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11121 }
11122 if (panic_on_oops)
11123 panic("Fatal exception");
11124- if (regs->tstate & TSTATE_PRIV)
11125+ if (regs->tstate & TSTATE_PRIV) {
11126+ gr_handle_kernel_exploit();
11127 do_exit(SIGKILL);
11128+ }
11129 do_exit(SIGSEGV);
11130 }
11131 EXPORT_SYMBOL(die_if_kernel);
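
[Editor's note] The lvl == 6 cases added above close the PAX_REFCOUNT loop: the tvs %icc, 6 / tvs %xcc, 6 instructions emitted by the patched atomics raise software trap 6 on signed overflow, which arrives in bad_trap()/bad_trap_tl1() with lvl == 6 and is routed to pax_report_refcount_overflow() instead of dying as an anonymous bad sw trap. The reporter itself lives in the PaX core part of the patch; in outline (sketch, not the exact code):

	void pax_report_refcount_overflow(struct pt_regs *regs)
	{
		printk(KERN_EMERG "PAX: refcount overflow detected in: %s:%d\n",
		       current->comm, task_pid_nr(current));
		show_regs(regs);
		/* the offending task is killed; the saturated counter never
		   wraps, so the refcount-based use-after-free is averted */
	}
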
11132diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11133index 62098a8..547ab2c 100644
11134--- a/arch/sparc/kernel/unaligned_64.c
11135+++ b/arch/sparc/kernel/unaligned_64.c
11136@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11137 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11138
11139 if (__ratelimit(&ratelimit)) {
11140- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11141+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11142 regs->tpc, (void *) regs->tpc);
11143 }
11144 }
11145diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11146index 3269b02..64f5231 100644
11147--- a/arch/sparc/lib/Makefile
11148+++ b/arch/sparc/lib/Makefile
11149@@ -2,7 +2,7 @@
11150 #
11151
11152 asflags-y := -ansi -DST_DIV0=0x02
11153-ccflags-y := -Werror
11154+#ccflags-y := -Werror
11155
11156 lib-$(CONFIG_SPARC32) += ashrdi3.o
11157 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11158diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11159index 05dac43..76f8ed4 100644
11160--- a/arch/sparc/lib/atomic_64.S
11161+++ b/arch/sparc/lib/atomic_64.S
11162@@ -15,11 +15,22 @@
11163 * a value and does the barriers.
11164 */
11165
11166-#define ATOMIC_OP(op) \
11167-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11168+#ifdef CONFIG_PAX_REFCOUNT
11169+#define __REFCOUNT_OP(op) op##cc
11170+#define __OVERFLOW_IOP tvs %icc, 6;
11171+#define __OVERFLOW_XOP tvs %xcc, 6;
11172+#else
11173+#define __REFCOUNT_OP(op) op
11174+#define __OVERFLOW_IOP
11175+#define __OVERFLOW_XOP
11176+#endif
11177+
11178+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11179+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11180 BACKOFF_SETUP(%o2); \
11181 1: lduw [%o1], %g1; \
11182- op %g1, %o0, %g7; \
11183+ asm_op %g1, %o0, %g7; \
11184+ post_op \
11185 cas [%o1], %g1, %g7; \
11186 cmp %g1, %g7; \
11187 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11188@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11189 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11190 ENDPROC(atomic_##op); \
11191
11192-#define ATOMIC_OP_RETURN(op) \
11193-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11194+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11195+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11196+
11197+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11198+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11199 BACKOFF_SETUP(%o2); \
11200 1: lduw [%o1], %g1; \
11201- op %g1, %o0, %g7; \
11202+ asm_op %g1, %o0, %g7; \
11203+ post_op \
11204 cas [%o1], %g1, %g7; \
11205 cmp %g1, %g7; \
11206 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11207@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11209 ENDPROC(atomic_##op##_return);
11210
11211+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11212+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11213+
11214 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11215
11216 ATOMIC_OPS(add)
11217@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11218
11219 #undef ATOMIC_OPS
11220 #undef ATOMIC_OP_RETURN
11221+#undef __ATOMIC_OP_RETURN
11222 #undef ATOMIC_OP
11223+#undef __ATOMIC_OP
11224
11225-#define ATOMIC64_OP(op) \
11226-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11227+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11228+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11229 BACKOFF_SETUP(%o2); \
11230 1: ldx [%o1], %g1; \
11231- op %g1, %o0, %g7; \
11232+ asm_op %g1, %o0, %g7; \
11233+ post_op \
11234 casx [%o1], %g1, %g7; \
11235 cmp %g1, %g7; \
11236 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11237@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11238 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11239 ENDPROC(atomic64_##op); \
11240
11241-#define ATOMIC64_OP_RETURN(op) \
11242-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11243+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11244+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11245+
11246+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11247+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11248 BACKOFF_SETUP(%o2); \
11249 1: ldx [%o1], %g1; \
11250- op %g1, %o0, %g7; \
11251+ asm_op %g1, %o0, %g7; \
11252+ post_op \
11253 casx [%o1], %g1, %g7; \
11254 cmp %g1, %g7; \
11255 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11256@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11258 ENDPROC(atomic64_##op##_return);
11259
11260+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11261+	__ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11262+
11263 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11264
11265 ATOMIC64_OPS(add)
11266@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11267
11268 #undef ATOMIC64_OPS
11269 #undef ATOMIC64_OP_RETURN
11270+#undef __ATOMIC64_OP_RETURN
11271 #undef ATOMIC64_OP
11272+#undef __ATOMIC64_OP
11273+#undef __OVERFLOW_XOP
11274+#undef __OVERFLOW_IOP
11275+#undef __REFCOUNT_OP
11276
11277 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11278 BACKOFF_SETUP(%o2)
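
[Editor's note] For reference, the macros above generate the standard compare-and-swap retry loop; the checked (REFCOUNT) variants differ only in using the cc-setting add/sub and trapping on overflow. A C rendering of one generated function (illustrative, backoff elided):

	void atomic_add_unchecked(int i, atomic_unchecked_t *v)
	{
		int old, new;

		do {
			old = v->counter;	/* lduw  [%o1], %g1 */
			new = old + i;		/* add %g1, %o0, %g7 — the checked
						   variant uses addcc + tvs %icc, 6 */
		} while (cmpxchg(&v->counter, old, new) != old);
						/* cas [%o1], %g1, %g7, retry on race */
	}
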
11279diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11280index 1d649a9..fbc5bfc 100644
11281--- a/arch/sparc/lib/ksyms.c
11282+++ b/arch/sparc/lib/ksyms.c
11283@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11284 /* Atomic counter implementation. */
11285 #define ATOMIC_OP(op) \
11286 EXPORT_SYMBOL(atomic_##op); \
11287-EXPORT_SYMBOL(atomic64_##op);
11288+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11289+EXPORT_SYMBOL(atomic64_##op); \
11290+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11291
11292 #define ATOMIC_OP_RETURN(op) \
11293 EXPORT_SYMBOL(atomic_##op##_return); \
11294@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11295 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11296
11297 ATOMIC_OPS(add)
11298+EXPORT_SYMBOL(atomic_add_return_unchecked);
11299+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11300 ATOMIC_OPS(sub)
11301
11302 #undef ATOMIC_OPS
11303diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11304index 30c3ecc..736f015 100644
11305--- a/arch/sparc/mm/Makefile
11306+++ b/arch/sparc/mm/Makefile
11307@@ -2,7 +2,7 @@
11308 #
11309
11310 asflags-y := -ansi
11311-ccflags-y := -Werror
11312+#ccflags-y := -Werror
11313
11314 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11315 obj-y += fault_$(BITS).o
11316diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11317index 70d8171..274c6c0 100644
11318--- a/arch/sparc/mm/fault_32.c
11319+++ b/arch/sparc/mm/fault_32.c
11320@@ -21,6 +21,9 @@
11321 #include <linux/perf_event.h>
11322 #include <linux/interrupt.h>
11323 #include <linux/kdebug.h>
11324+#include <linux/slab.h>
11325+#include <linux/pagemap.h>
11326+#include <linux/compiler.h>
11327
11328 #include <asm/page.h>
11329 #include <asm/pgtable.h>
11330@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11331 return safe_compute_effective_address(regs, insn);
11332 }
11333
11334+#ifdef CONFIG_PAX_PAGEEXEC
11335+#ifdef CONFIG_PAX_DLRESOLVE
11336+static void pax_emuplt_close(struct vm_area_struct *vma)
11337+{
11338+ vma->vm_mm->call_dl_resolve = 0UL;
11339+}
11340+
11341+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11342+{
11343+ unsigned int *kaddr;
11344+
11345+ vmf->page = alloc_page(GFP_HIGHUSER);
11346+ if (!vmf->page)
11347+ return VM_FAULT_OOM;
11348+
11349+ kaddr = kmap(vmf->page);
11350+ memset(kaddr, 0, PAGE_SIZE);
11351+ kaddr[0] = 0x9DE3BFA8U; /* save */
11352+ flush_dcache_page(vmf->page);
11353+ kunmap(vmf->page);
11354+ return VM_FAULT_MAJOR;
11355+}
11356+
11357+static const struct vm_operations_struct pax_vm_ops = {
11358+ .close = pax_emuplt_close,
11359+ .fault = pax_emuplt_fault
11360+};
11361+
11362+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11363+{
11364+ int ret;
11365+
11366+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11367+ vma->vm_mm = current->mm;
11368+ vma->vm_start = addr;
11369+ vma->vm_end = addr + PAGE_SIZE;
11370+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11371+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11372+ vma->vm_ops = &pax_vm_ops;
11373+
11374+ ret = insert_vm_struct(current->mm, vma);
11375+ if (ret)
11376+ return ret;
11377+
11378+ ++current->mm->total_vm;
11379+ return 0;
11380+}
11381+#endif
11382+
11383+/*
11384+ * PaX: decide what to do with offenders (regs->pc = fault address)
11385+ *
11386+ * returns 1 when task should be killed
11387+ * 2 when patched PLT trampoline was detected
11388+ * 3 when unpatched PLT trampoline was detected
11389+ */
11390+static int pax_handle_fetch_fault(struct pt_regs *regs)
11391+{
11392+
11393+#ifdef CONFIG_PAX_EMUPLT
11394+ int err;
11395+
11396+ do { /* PaX: patched PLT emulation #1 */
11397+ unsigned int sethi1, sethi2, jmpl;
11398+
11399+ err = get_user(sethi1, (unsigned int *)regs->pc);
11400+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11401+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11402+
11403+ if (err)
11404+ break;
11405+
11406+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11407+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11408+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11409+ {
11410+ unsigned int addr;
11411+
11412+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11413+ addr = regs->u_regs[UREG_G1];
11414+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11415+ regs->pc = addr;
11416+ regs->npc = addr+4;
11417+ return 2;
11418+ }
11419+ } while (0);
11420+
11421+ do { /* PaX: patched PLT emulation #2 */
11422+ unsigned int ba;
11423+
11424+ err = get_user(ba, (unsigned int *)regs->pc);
11425+
11426+ if (err)
11427+ break;
11428+
11429+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11430+ unsigned int addr;
11431+
11432+ if ((ba & 0xFFC00000U) == 0x30800000U)
11433+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11434+ else
11435+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11436+ regs->pc = addr;
11437+ regs->npc = addr+4;
11438+ return 2;
11439+ }
11440+ } while (0);
11441+
11442+ do { /* PaX: patched PLT emulation #3 */
11443+ unsigned int sethi, bajmpl, nop;
11444+
11445+ err = get_user(sethi, (unsigned int *)regs->pc);
11446+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11447+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11448+
11449+ if (err)
11450+ break;
11451+
11452+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11453+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11454+ nop == 0x01000000U)
11455+ {
11456+ unsigned int addr;
11457+
11458+ addr = (sethi & 0x003FFFFFU) << 10;
11459+ regs->u_regs[UREG_G1] = addr;
11460+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11461+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11462+ else
11463+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11464+ regs->pc = addr;
11465+ regs->npc = addr+4;
11466+ return 2;
11467+ }
11468+ } while (0);
11469+
11470+ do { /* PaX: unpatched PLT emulation step 1 */
11471+ unsigned int sethi, ba, nop;
11472+
11473+ err = get_user(sethi, (unsigned int *)regs->pc);
11474+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11475+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11476+
11477+ if (err)
11478+ break;
11479+
11480+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11481+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11482+ nop == 0x01000000U)
11483+ {
11484+ unsigned int addr, save, call;
11485+
11486+ if ((ba & 0xFFC00000U) == 0x30800000U)
11487+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11488+ else
11489+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11490+
11491+ err = get_user(save, (unsigned int *)addr);
11492+ err |= get_user(call, (unsigned int *)(addr+4));
11493+ err |= get_user(nop, (unsigned int *)(addr+8));
11494+ if (err)
11495+ break;
11496+
11497+#ifdef CONFIG_PAX_DLRESOLVE
11498+ if (save == 0x9DE3BFA8U &&
11499+ (call & 0xC0000000U) == 0x40000000U &&
11500+ nop == 0x01000000U)
11501+ {
11502+ struct vm_area_struct *vma;
11503+ unsigned long call_dl_resolve;
11504+
11505+ down_read(&current->mm->mmap_sem);
11506+ call_dl_resolve = current->mm->call_dl_resolve;
11507+ up_read(&current->mm->mmap_sem);
11508+ if (likely(call_dl_resolve))
11509+ goto emulate;
11510+
11511+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11512+
11513+ down_write(&current->mm->mmap_sem);
11514+ if (current->mm->call_dl_resolve) {
11515+ call_dl_resolve = current->mm->call_dl_resolve;
11516+ up_write(&current->mm->mmap_sem);
11517+ if (vma)
11518+ kmem_cache_free(vm_area_cachep, vma);
11519+ goto emulate;
11520+ }
11521+
11522+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11523+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11524+ up_write(&current->mm->mmap_sem);
11525+ if (vma)
11526+ kmem_cache_free(vm_area_cachep, vma);
11527+ return 1;
11528+ }
11529+
11530+ if (pax_insert_vma(vma, call_dl_resolve)) {
11531+ up_write(&current->mm->mmap_sem);
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ return 1;
11534+ }
11535+
11536+ current->mm->call_dl_resolve = call_dl_resolve;
11537+ up_write(&current->mm->mmap_sem);
11538+
11539+emulate:
11540+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11541+ regs->pc = call_dl_resolve;
11542+ regs->npc = addr+4;
11543+ return 3;
11544+ }
11545+#endif
11546+
11547+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11548+ if ((save & 0xFFC00000U) == 0x05000000U &&
11549+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11550+ nop == 0x01000000U)
11551+ {
11552+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11553+ regs->u_regs[UREG_G2] = addr + 4;
11554+ addr = (save & 0x003FFFFFU) << 10;
11555+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11556+ regs->pc = addr;
11557+ regs->npc = addr+4;
11558+ return 3;
11559+ }
11560+ }
11561+ } while (0);
11562+
11563+ do { /* PaX: unpatched PLT emulation step 2 */
11564+ unsigned int save, call, nop;
11565+
11566+ err = get_user(save, (unsigned int *)(regs->pc-4));
11567+ err |= get_user(call, (unsigned int *)regs->pc);
11568+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11569+ if (err)
11570+ break;
11571+
11572+ if (save == 0x9DE3BFA8U &&
11573+ (call & 0xC0000000U) == 0x40000000U &&
11574+ nop == 0x01000000U)
11575+ {
11576+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11577+
11578+ regs->u_regs[UREG_RETPC] = regs->pc;
11579+ regs->pc = dl_resolve;
11580+ regs->npc = dl_resolve+4;
11581+ return 3;
11582+ }
11583+ } while (0);
11584+#endif
11585+
11586+ return 1;
11587+}
11588+
11589+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11590+{
11591+ unsigned long i;
11592+
11593+ printk(KERN_ERR "PAX: bytes at PC: ");
11594+ for (i = 0; i < 8; i++) {
11595+ unsigned int c;
11596+ if (get_user(c, (unsigned int *)pc+i))
11597+ printk(KERN_CONT "???????? ");
11598+ else
11599+ printk(KERN_CONT "%08x ", c);
11600+ }
11601+ printk("\n");
11602+}
11603+#endif
11604+
11605 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11606 int text_fault)
11607 {
11608@@ -226,6 +500,24 @@ good_area:
11609 if (!(vma->vm_flags & VM_WRITE))
11610 goto bad_area;
11611 } else {
11612+
11613+#ifdef CONFIG_PAX_PAGEEXEC
11614+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11615+ up_read(&mm->mmap_sem);
11616+ switch (pax_handle_fetch_fault(regs)) {
11617+
11618+#ifdef CONFIG_PAX_EMUPLT
11619+ case 2:
11620+ case 3:
11621+ return;
11622+#endif
11623+
11624+ }
11625+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11626+ do_group_exit(SIGKILL);
11627+ }
11628+#endif
11629+
11630 /* Allow reads even for write-only mappings */
11631 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11632 goto bad_area;
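The PLT-emulation hunks above lean on two SPARC encoding facts again and again: sethi carries a 22-bit immediate in the low 22 bits of the instruction word and deposits it shifted left by 10 (hence the repeated (sethi & 0x003FFFFFU) << 10), and the 13-bit simm13 field of jmpl is sign-extended with the "force the high bits on, then flip and re-add the sign bit" idiom. A minimal standalone sketch of that sign-extension step (illustrative userspace C, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Sign-extend the low 13 bits (simm13) of a SPARC instruction word,
 * mirroring the patch idiom: force all bits above bit 12 on, then
 * flipping and re-adding the sign bit leaves negative values intact
 * and wraps positive ones back down to their small magnitude. */
static uint32_t simm13(uint32_t insn)
{
	uint32_t x = insn | 0xFFFFE000U;
	return (x ^ 0x00001000U) + 0x00001000U;
}

int main(void)
{
	/* made-up instruction words; only the immediate field matters */
	printf("%08x\n", simm13(0x81C06008U)); /* simm13 = +8 -> 00000008 */
	printf("%08x\n", simm13(0x81C07FF8U)); /* simm13 = -8 -> fffffff8 */
	return 0;
}

For a positive immediate the +0x1000 carries out of bit 31 and cancels the forced high bits; for a negative one it restores the sign bit, so both cases fall out of the same unsigned arithmetic.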
11633diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11634index 4798232..f76e3aa 100644
11635--- a/arch/sparc/mm/fault_64.c
11636+++ b/arch/sparc/mm/fault_64.c
11637@@ -22,6 +22,9 @@
11638 #include <linux/kdebug.h>
11639 #include <linux/percpu.h>
11640 #include <linux/context_tracking.h>
11641+#include <linux/slab.h>
11642+#include <linux/pagemap.h>
11643+#include <linux/compiler.h>
11644
11645 #include <asm/page.h>
11646 #include <asm/pgtable.h>
11647@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11648 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11649 regs->tpc);
11650 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11651- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11652+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11653 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11654 dump_stack();
11655 unhandled_fault(regs->tpc, current, regs);
11656@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11657 show_regs(regs);
11658 }
11659
11660+#ifdef CONFIG_PAX_PAGEEXEC
11661+#ifdef CONFIG_PAX_DLRESOLVE
11662+static void pax_emuplt_close(struct vm_area_struct *vma)
11663+{
11664+ vma->vm_mm->call_dl_resolve = 0UL;
11665+}
11666+
11667+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11668+{
11669+ unsigned int *kaddr;
11670+
11671+ vmf->page = alloc_page(GFP_HIGHUSER);
11672+ if (!vmf->page)
11673+ return VM_FAULT_OOM;
11674+
11675+ kaddr = kmap(vmf->page);
11676+ memset(kaddr, 0, PAGE_SIZE);
11677+ kaddr[0] = 0x9DE3BFA8U; /* save */
11678+ flush_dcache_page(vmf->page);
11679+ kunmap(vmf->page);
11680+ return VM_FAULT_MAJOR;
11681+}
11682+
11683+static const struct vm_operations_struct pax_vm_ops = {
11684+ .close = pax_emuplt_close,
11685+ .fault = pax_emuplt_fault
11686+};
11687+
11688+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11689+{
11690+ int ret;
11691+
11692+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11693+ vma->vm_mm = current->mm;
11694+ vma->vm_start = addr;
11695+ vma->vm_end = addr + PAGE_SIZE;
11696+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11697+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11698+ vma->vm_ops = &pax_vm_ops;
11699+
11700+ ret = insert_vm_struct(current->mm, vma);
11701+ if (ret)
11702+ return ret;
11703+
11704+ ++current->mm->total_vm;
11705+ return 0;
11706+}
11707+#endif
11708+
11709+/*
11710+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11711+ *
11712+ * returns 1 when the task should be killed
11713+ * 2 when a patched PLT trampoline was detected
11714+ * 3 when an unpatched PLT trampoline was detected
11715+ */
11716+static int pax_handle_fetch_fault(struct pt_regs *regs)
11717+{
11718+
11719+#ifdef CONFIG_PAX_EMUPLT
11720+ int err;
11721+
11722+ do { /* PaX: patched PLT emulation #1 */
11723+ unsigned int sethi1, sethi2, jmpl;
11724+
11725+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11726+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11727+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11728+
11729+ if (err)
11730+ break;
11731+
11732+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11733+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11734+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11735+ {
11736+ unsigned long addr;
11737+
11738+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11739+ addr = regs->u_regs[UREG_G1];
11740+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11741+
11742+ if (test_thread_flag(TIF_32BIT))
11743+ addr &= 0xFFFFFFFFUL;
11744+
11745+ regs->tpc = addr;
11746+ regs->tnpc = addr+4;
11747+ return 2;
11748+ }
11749+ } while (0);
11750+
11751+ do { /* PaX: patched PLT emulation #2 */
11752+ unsigned int ba;
11753+
11754+ err = get_user(ba, (unsigned int *)regs->tpc);
11755+
11756+ if (err)
11757+ break;
11758+
11759+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11760+ unsigned long addr;
11761+
11762+ if ((ba & 0xFFC00000U) == 0x30800000U)
11763+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11764+ else
11765+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11766+
11767+ if (test_thread_flag(TIF_32BIT))
11768+ addr &= 0xFFFFFFFFUL;
11769+
11770+ regs->tpc = addr;
11771+ regs->tnpc = addr+4;
11772+ return 2;
11773+ }
11774+ } while (0);
11775+
11776+ do { /* PaX: patched PLT emulation #3 */
11777+ unsigned int sethi, bajmpl, nop;
11778+
11779+ err = get_user(sethi, (unsigned int *)regs->tpc);
11780+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11781+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11782+
11783+ if (err)
11784+ break;
11785+
11786+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11787+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11788+ nop == 0x01000000U)
11789+ {
11790+ unsigned long addr;
11791+
11792+ addr = (sethi & 0x003FFFFFU) << 10;
11793+ regs->u_regs[UREG_G1] = addr;
11794+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11795+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11796+ else
11797+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11798+
11799+ if (test_thread_flag(TIF_32BIT))
11800+ addr &= 0xFFFFFFFFUL;
11801+
11802+ regs->tpc = addr;
11803+ regs->tnpc = addr+4;
11804+ return 2;
11805+ }
11806+ } while (0);
11807+
11808+ do { /* PaX: patched PLT emulation #4 */
11809+ unsigned int sethi, mov1, call, mov2;
11810+
11811+ err = get_user(sethi, (unsigned int *)regs->tpc);
11812+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11814+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11815+
11816+ if (err)
11817+ break;
11818+
11819+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11820+ mov1 == 0x8210000FU &&
11821+ (call & 0xC0000000U) == 0x40000000U &&
11822+ mov2 == 0x9E100001U)
11823+ {
11824+ unsigned long addr;
11825+
11826+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11827+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11828+
11829+ if (test_thread_flag(TIF_32BIT))
11830+ addr &= 0xFFFFFFFFUL;
11831+
11832+ regs->tpc = addr;
11833+ regs->tnpc = addr+4;
11834+ return 2;
11835+ }
11836+ } while (0);
11837+
11838+ do { /* PaX: patched PLT emulation #5 */
11839+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11840+
11841+ err = get_user(sethi, (unsigned int *)regs->tpc);
11842+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11843+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11844+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11845+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11846+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11847+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11848+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11849+
11850+ if (err)
11851+ break;
11852+
11853+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11854+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11855+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11856+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11857+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11858+ sllx == 0x83287020U &&
11859+ jmpl == 0x81C04005U &&
11860+ nop == 0x01000000U)
11861+ {
11862+ unsigned long addr;
11863+
11864+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11865+ regs->u_regs[UREG_G1] <<= 32;
11866+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11867+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11868+ regs->tpc = addr;
11869+ regs->tnpc = addr+4;
11870+ return 2;
11871+ }
11872+ } while (0);
11873+
11874+ do { /* PaX: patched PLT emulation #6 */
11875+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11876+
11877+ err = get_user(sethi, (unsigned int *)regs->tpc);
11878+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11879+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11880+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11881+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11882+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11883+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11884+
11885+ if (err)
11886+ break;
11887+
11888+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11889+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11890+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11891+ sllx == 0x83287020U &&
11892+ (or & 0xFFFFE000U) == 0x8A116000U &&
11893+ jmpl == 0x81C04005U &&
11894+ nop == 0x01000000U)
11895+ {
11896+ unsigned long addr;
11897+
11898+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11899+ regs->u_regs[UREG_G1] <<= 32;
11900+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11901+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11902+ regs->tpc = addr;
11903+ regs->tnpc = addr+4;
11904+ return 2;
11905+ }
11906+ } while (0);
11907+
11908+ do { /* PaX: unpatched PLT emulation step 1 */
11909+ unsigned int sethi, ba, nop;
11910+
11911+ err = get_user(sethi, (unsigned int *)regs->tpc);
11912+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11913+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11914+
11915+ if (err)
11916+ break;
11917+
11918+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11919+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11920+ nop == 0x01000000U)
11921+ {
11922+ unsigned long addr;
11923+ unsigned int save, call;
11924+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11925+
11926+ if ((ba & 0xFFC00000U) == 0x30800000U)
11927+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11928+ else
11929+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11930+
11931+ if (test_thread_flag(TIF_32BIT))
11932+ addr &= 0xFFFFFFFFUL;
11933+
11934+ err = get_user(save, (unsigned int *)addr);
11935+ err |= get_user(call, (unsigned int *)(addr+4));
11936+ err |= get_user(nop, (unsigned int *)(addr+8));
11937+ if (err)
11938+ break;
11939+
11940+#ifdef CONFIG_PAX_DLRESOLVE
11941+ if (save == 0x9DE3BFA8U &&
11942+ (call & 0xC0000000U) == 0x40000000U &&
11943+ nop == 0x01000000U)
11944+ {
11945+ struct vm_area_struct *vma;
11946+ unsigned long call_dl_resolve;
11947+
11948+ down_read(&current->mm->mmap_sem);
11949+ call_dl_resolve = current->mm->call_dl_resolve;
11950+ up_read(&current->mm->mmap_sem);
11951+ if (likely(call_dl_resolve))
11952+ goto emulate;
11953+
11954+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11955+
11956+ down_write(&current->mm->mmap_sem);
11957+ if (current->mm->call_dl_resolve) {
11958+ call_dl_resolve = current->mm->call_dl_resolve;
11959+ up_write(&current->mm->mmap_sem);
11960+ if (vma)
11961+ kmem_cache_free(vm_area_cachep, vma);
11962+ goto emulate;
11963+ }
11964+
11965+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11966+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11967+ up_write(&current->mm->mmap_sem);
11968+ if (vma)
11969+ kmem_cache_free(vm_area_cachep, vma);
11970+ return 1;
11971+ }
11972+
11973+ if (pax_insert_vma(vma, call_dl_resolve)) {
11974+ up_write(&current->mm->mmap_sem);
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ return 1;
11977+ }
11978+
11979+ current->mm->call_dl_resolve = call_dl_resolve;
11980+ up_write(&current->mm->mmap_sem);
11981+
11982+emulate:
11983+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11984+ regs->tpc = call_dl_resolve;
11985+ regs->tnpc = addr+4;
11986+ return 3;
11987+ }
11988+#endif
11989+
11990+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11991+ if ((save & 0xFFC00000U) == 0x05000000U &&
11992+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11993+ nop == 0x01000000U)
11994+ {
11995+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11996+ regs->u_regs[UREG_G2] = addr + 4;
11997+ addr = (save & 0x003FFFFFU) << 10;
11998+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11999+
12000+ if (test_thread_flag(TIF_32BIT))
12001+ addr &= 0xFFFFFFFFUL;
12002+
12003+ regs->tpc = addr;
12004+ regs->tnpc = addr+4;
12005+ return 3;
12006+ }
12007+
12008+ /* PaX: 64-bit PLT stub */
12009+ err = get_user(sethi1, (unsigned int *)addr);
12010+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12011+ err |= get_user(or1, (unsigned int *)(addr+8));
12012+ err |= get_user(or2, (unsigned int *)(addr+12));
12013+ err |= get_user(sllx, (unsigned int *)(addr+16));
12014+ err |= get_user(add, (unsigned int *)(addr+20));
12015+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12016+ err |= get_user(nop, (unsigned int *)(addr+28));
12017+ if (err)
12018+ break;
12019+
12020+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12021+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12022+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12023+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12024+ sllx == 0x89293020U &&
12025+ add == 0x8A010005U &&
12026+ jmpl == 0x89C14000U &&
12027+ nop == 0x01000000U)
12028+ {
12029+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12030+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12031+ regs->u_regs[UREG_G4] <<= 32;
12032+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12033+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12034+ regs->u_regs[UREG_G4] = addr + 24;
12035+ addr = regs->u_regs[UREG_G5];
12036+ regs->tpc = addr;
12037+ regs->tnpc = addr+4;
12038+ return 3;
12039+ }
12040+ }
12041+ } while (0);
12042+
12043+#ifdef CONFIG_PAX_DLRESOLVE
12044+ do { /* PaX: unpatched PLT emulation step 2 */
12045+ unsigned int save, call, nop;
12046+
12047+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12048+ err |= get_user(call, (unsigned int *)regs->tpc);
12049+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12050+ if (err)
12051+ break;
12052+
12053+ if (save == 0x9DE3BFA8U &&
12054+ (call & 0xC0000000U) == 0x40000000U &&
12055+ nop == 0x01000000U)
12056+ {
12057+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12058+
12059+ if (test_thread_flag(TIF_32BIT))
12060+ dl_resolve &= 0xFFFFFFFFUL;
12061+
12062+ regs->u_regs[UREG_RETPC] = regs->tpc;
12063+ regs->tpc = dl_resolve;
12064+ regs->tnpc = dl_resolve+4;
12065+ return 3;
12066+ }
12067+ } while (0);
12068+#endif
12069+
12070+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12071+ unsigned int sethi, ba, nop;
12072+
12073+ err = get_user(sethi, (unsigned int *)regs->tpc);
12074+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12075+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12076+
12077+ if (err)
12078+ break;
12079+
12080+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12081+ (ba & 0xFFF00000U) == 0x30600000U &&
12082+ nop == 0x01000000U)
12083+ {
12084+ unsigned long addr;
12085+
12086+ addr = (sethi & 0x003FFFFFU) << 10;
12087+ regs->u_regs[UREG_G1] = addr;
12088+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12089+
12090+ if (test_thread_flag(TIF_32BIT))
12091+ addr &= 0xFFFFFFFFUL;
12092+
12093+ regs->tpc = addr;
12094+ regs->tnpc = addr+4;
12095+ return 2;
12096+ }
12097+ } while (0);
12098+
12099+#endif
12100+
12101+ return 1;
12102+}
12103+
12104+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12105+{
12106+ unsigned long i;
12107+
12108+ printk(KERN_ERR "PAX: bytes at PC: ");
12109+ for (i = 0; i < 8; i++) {
12110+ unsigned int c;
12111+ if (get_user(c, (unsigned int *)pc+i))
12112+ printk(KERN_CONT "???????? ");
12113+ else
12114+ printk(KERN_CONT "%08x ", c);
12115+ }
12116+ printk("\n");
12117+}
12118+#endif
12119+
12120 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12121 {
12122 enum ctx_state prev_state = exception_enter();
12123@@ -353,6 +816,29 @@ retry:
12124 if (!vma)
12125 goto bad_area;
12126
12127+#ifdef CONFIG_PAX_PAGEEXEC
12128+ /* PaX: detect ITLB misses on non-exec pages */
12129+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12130+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12131+ {
12132+ if (address != regs->tpc)
12133+ goto good_area;
12134+
12135+ up_read(&mm->mmap_sem);
12136+ switch (pax_handle_fetch_fault(regs)) {
12137+
12138+#ifdef CONFIG_PAX_EMUPLT
12139+ case 2:
12140+ case 3:
12141+ return;
12142+#endif
12143+
12144+ }
12145+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12146+ do_group_exit(SIGKILL);
12147+ }
12148+#endif
12149+
12150 /* Pure DTLB misses do not tell us whether the fault causing
12151 * load/store/atomic was a write or not, it only says that there
12152 * was no match. So in such a case we (carefully) read the
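In both fault handlers the CONFIG_PAX_DLRESOLVE branch is a textbook double-checked initialization around mmap_sem: probe mm->call_dl_resolve under the read lock, and only on a miss take the write lock, re-check, and publish the trampoline address (the kernel version also preallocates the vma before the write lock and frees it on the raced path; that detail is elided here). The same shape, reduced to a hedged userspace sketch with pthreads standing in for the kernel rwsem (names and the returned address are made up):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mm_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long call_dl_resolve;	/* 0 = not installed yet */

/* Placeholder for the expensive one-time setup (mapping a page and
 * inserting the vma in the kernel); the address is invented. */
static unsigned long install_trampoline(void)
{
	return 0x100000UL;
}

static unsigned long get_trampoline(void)
{
	unsigned long r;

	pthread_rwlock_rdlock(&mm_lock);	/* fast path */
	r = call_dl_resolve;
	pthread_rwlock_unlock(&mm_lock);
	if (r)
		return r;

	pthread_rwlock_wrlock(&mm_lock);	/* slow path: re-check, */
	if (!call_dl_resolve)			/* then publish once    */
		call_dl_resolve = install_trampoline();
	r = call_dl_resolve;
	pthread_rwlock_unlock(&mm_lock);
	return r;
}

int main(void)
{
	printf("%#lx\n", get_trampoline());
	return 0;
}

The re-check under the write lock is what makes it safe to drop the read lock before doing the expensive setup.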
12153diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12154index 4242eab..9ae6360 100644
12155--- a/arch/sparc/mm/hugetlbpage.c
12156+++ b/arch/sparc/mm/hugetlbpage.c
12157@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12158 unsigned long addr,
12159 unsigned long len,
12160 unsigned long pgoff,
12161- unsigned long flags)
12162+ unsigned long flags,
12163+ unsigned long offset)
12164 {
12165+ struct mm_struct *mm = current->mm;
12166 unsigned long task_size = TASK_SIZE;
12167 struct vm_unmapped_area_info info;
12168
12169@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12170
12171 info.flags = 0;
12172 info.length = len;
12173- info.low_limit = TASK_UNMAPPED_BASE;
12174+ info.low_limit = mm->mmap_base;
12175 info.high_limit = min(task_size, VA_EXCLUDE_START);
12176 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12177 info.align_offset = 0;
12178+ info.threadstack_offset = offset;
12179 addr = vm_unmapped_area(&info);
12180
12181 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12182 VM_BUG_ON(addr != -ENOMEM);
12183 info.low_limit = VA_EXCLUDE_END;
12184+
12185+#ifdef CONFIG_PAX_RANDMMAP
12186+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12187+ info.low_limit += mm->delta_mmap;
12188+#endif
12189+
12190 info.high_limit = task_size;
12191 addr = vm_unmapped_area(&info);
12192 }
12193@@ -55,7 +64,8 @@ static unsigned long
12194 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12195 const unsigned long len,
12196 const unsigned long pgoff,
12197- const unsigned long flags)
12198+ const unsigned long flags,
12199+ const unsigned long offset)
12200 {
12201 struct mm_struct *mm = current->mm;
12202 unsigned long addr = addr0;
12203@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12204 info.high_limit = mm->mmap_base;
12205 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12206 info.align_offset = 0;
12207+ info.threadstack_offset = offset;
12208 addr = vm_unmapped_area(&info);
12209
12210 /*
12211@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12212 VM_BUG_ON(addr != -ENOMEM);
12213 info.flags = 0;
12214 info.low_limit = TASK_UNMAPPED_BASE;
12215+
12216+#ifdef CONFIG_PAX_RANDMMAP
12217+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12218+ info.low_limit += mm->delta_mmap;
12219+#endif
12220+
12221 info.high_limit = STACK_TOP32;
12222 addr = vm_unmapped_area(&info);
12223 }
12224@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12225 struct mm_struct *mm = current->mm;
12226 struct vm_area_struct *vma;
12227 unsigned long task_size = TASK_SIZE;
12228+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12229
12230 if (test_thread_flag(TIF_32BIT))
12231 task_size = STACK_TOP32;
12232@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12233 return addr;
12234 }
12235
12236+#ifdef CONFIG_PAX_RANDMMAP
12237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12238+#endif
12239+
12240 if (addr) {
12241 addr = ALIGN(addr, HPAGE_SIZE);
12242 vma = find_vma(mm, addr);
12243- if (task_size - len >= addr &&
12244- (!vma || addr + len <= vma->vm_start))
12245+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12246 return addr;
12247 }
12248 if (mm->get_unmapped_area == arch_get_unmapped_area)
12249 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12250- pgoff, flags);
12251+ pgoff, flags, offset);
12252 else
12253 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12254- pgoff, flags);
12255+ pgoff, flags, offset);
12256 }
12257
12258 pte_t *huge_pte_alloc(struct mm_struct *mm,
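check_heap_stack_gap() and gr_rand_threadstack_offset() are grsecurity helpers whose bodies are not in this excerpt; judging purely from the call site, the former replaces the open-coded "!vma || addr + len <= vma->vm_start" test while additionally reserving a randomized gap below thread stacks. A speculative reconstruction of the semantics only (signature inferred from the caller, overflow handling omitted, not the real implementation):

#include <stdbool.h>

struct vm_area_struct { unsigned long vm_start; };

/* Hypothetical reconstruction: the candidate range [addr, addr+len)
 * must end at least 'gap' bytes below the next mapping, if any. */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len,
				 unsigned long gap)
{
	if (!vma)
		return true;		/* no mapping above the range */
	return addr + len + gap <= vma->vm_start;
}

With gap == 0 this degenerates to the original test, which matches how the non-RANDMMAP paths behave.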
12259diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12260index 4ca0d6b..e89bca1 100644
12261--- a/arch/sparc/mm/init_64.c
12262+++ b/arch/sparc/mm/init_64.c
12263@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12264 int num_kernel_image_mappings;
12265
12266 #ifdef CONFIG_DEBUG_DCFLUSH
12267-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12268+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12269 #ifdef CONFIG_SMP
12270-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12271+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12272 #endif
12273 #endif
12274
12275@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12276 {
12277 BUG_ON(tlb_type == hypervisor);
12278 #ifdef CONFIG_DEBUG_DCFLUSH
12279- atomic_inc(&dcpage_flushes);
12280+ atomic_inc_unchecked(&dcpage_flushes);
12281 #endif
12282
12283 #ifdef DCACHE_ALIASING_POSSIBLE
12284@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12285
12286 #ifdef CONFIG_DEBUG_DCFLUSH
12287 seq_printf(m, "DCPageFlushes\t: %d\n",
12288- atomic_read(&dcpage_flushes));
12289+ atomic_read_unchecked(&dcpage_flushes));
12290 #ifdef CONFIG_SMP
12291 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12292- atomic_read(&dcpage_flushes_xcall));
12293+ atomic_read_unchecked(&dcpage_flushes_xcall));
12294 #endif /* CONFIG_SMP */
12295 #endif /* CONFIG_DEBUG_DCFLUSH */
12296 }
12297diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12298index 7cca418..53fc030 100644
12299--- a/arch/tile/Kconfig
12300+++ b/arch/tile/Kconfig
12301@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12302
12303 config KEXEC
12304 bool "kexec system call"
12305+ depends on !GRKERNSEC_KMEM
12306 ---help---
12307 kexec is a system call that implements the ability to shutdown your
12308 current kernel, and to start another kernel. It is like a reboot
12309diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12310index 7b11c5f..755a026 100644
12311--- a/arch/tile/include/asm/atomic_64.h
12312+++ b/arch/tile/include/asm/atomic_64.h
12313@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12314
12315 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12316
12317+#define atomic64_read_unchecked(v) atomic64_read(v)
12318+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12319+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12320+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12321+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12322+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12323+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12324+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12325+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12326+
12327 /* Define this to indicate that cmpxchg is an efficient operation. */
12328 #define __HAVE_ARCH_CMPXCHG
12329
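This hunk makes the relationship explicit: on an architecture without REFCOUNT instrumentation, the *_unchecked operations are plain aliases of the normal atomics. On instrumented architectures the checked operations trap on overflow, and the unchecked spellings are the deliberate opt-out for counters that are allowed to wrap, such as the dcpage_flushes statistics converted in the sparc init_64.c hunk above. A userspace illustration of the intended split (illustrative only; these names are not the kernel API):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* checked_inc() models a REFCOUNT-style counter that traps on
 * overflow; unchecked_inc() models the *_unchecked variants,
 * which are allowed to wrap. */
static void checked_inc(int *v)
{
	if (*v == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	++*v;
}

static void unchecked_inc(unsigned int *v)
{
	++*v;				/* wrapping is fine for stats */
}

int main(void)
{
	int ref = 0;
	unsigned int stats = ~0U;

	checked_inc(&ref);		/* fine: 0 -> 1 */
	unchecked_inc(&stats);		/* wraps to 0 without complaint */
	printf("ref=%d stats=%u\n", ref, stats);
	return 0;
}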
12330diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12331index 6160761..00cac88 100644
12332--- a/arch/tile/include/asm/cache.h
12333+++ b/arch/tile/include/asm/cache.h
12334@@ -15,11 +15,12 @@
12335 #ifndef _ASM_TILE_CACHE_H
12336 #define _ASM_TILE_CACHE_H
12337
12338+#include <linux/const.h>
12339 #include <arch/chip.h>
12340
12341 /* bytes per L1 data cache line */
12342 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12345
12346 /* bytes per L2 cache line */
12347 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
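_AC(1,UL) comes from <linux/const.h>: it expands to 1UL when compiled as C and to a bare 1 in assembly, so the same header serves both contexts. The practical effect of the cache.h changes is that L1_CACHE_BYTES becomes an unsigned long rather than a signed int, which matters once the constant participates in mask arithmetic near the top bits. A small demonstration of the divergence (userspace C on LP64; the int shift at bit 31 is, strictly, undefined behaviour, which is part of the point):

#include <stdio.h>

int main(void)
{
	/* The int form overflows into the sign bit at shift 31 and
	 * then sign-extends on conversion; the UL form, which is what
	 * _AC(1,UL) produces, keeps a clean 64-bit mask. */
	unsigned long from_int = ~(1 << 31);	/* undefined behaviour */
	unsigned long from_ul = ~(1UL << 31);

	printf("int: %016lx\nUL:  %016lx\n", from_int, from_ul);
	return 0;
}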
12348diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12349index f41cb53..31d3ab4 100644
12350--- a/arch/tile/include/asm/uaccess.h
12351+++ b/arch/tile/include/asm/uaccess.h
12352@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12353 const void __user *from,
12354 unsigned long n)
12355 {
12356- int sz = __compiletime_object_size(to);
12357+ size_t sz = __compiletime_object_size(to);
12358
12359- if (likely(sz == -1 || sz >= n))
12360+ if (likely(sz == (size_t)-1 || sz >= n))
12361 n = _copy_from_user(to, from, n);
12362 else
12363 copy_from_user_overflow();
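The uaccess.h change is about signedness: __compiletime_object_size() reports "unknown" as (size_t)-1, and squeezing that through an int before comparing against an unsigned length invites the usual arithmetic-conversion traps. Keeping the value in size_t and spelling the sentinel as (size_t)-1 makes the comparison honest. A standalone illustration of the conversion hazard (userspace C):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	int sz = -1;		/* "size unknown" sentinel stored as int */
	size_t n = 32;

	/* The usual arithmetic conversions turn sz into a size_t here,
	 * so -1 silently compares as SIZE_MAX: correct by accident for
	 * this test, and fragile under any refactoring. */
	if (sz >= n)
		printf("int sentinel swallowed by conversion\n");

	/* The patched form keeps the sentinel unsigned and explicit. */
	size_t sz2 = (size_t)-1;
	if (sz2 == (size_t)-1 || sz2 >= n)
		printf("explicit size_t sentinel, no hidden conversion\n");
	return 0;
}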
12364diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12365index 8416240..a012fb7 100644
12366--- a/arch/tile/mm/hugetlbpage.c
12367+++ b/arch/tile/mm/hugetlbpage.c
12368@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12369 info.high_limit = TASK_SIZE;
12370 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12371 info.align_offset = 0;
12372+ info.threadstack_offset = 0;
12373 return vm_unmapped_area(&info);
12374 }
12375
12376@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12377 info.high_limit = current->mm->mmap_base;
12378 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12379 info.align_offset = 0;
12380+ info.threadstack_offset = 0;
12381 addr = vm_unmapped_area(&info);
12382
12383 /*
12384diff --git a/arch/um/Makefile b/arch/um/Makefile
12385index e4b1a96..16162f8 100644
12386--- a/arch/um/Makefile
12387+++ b/arch/um/Makefile
12388@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12389 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12390 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12391
12392+ifdef CONSTIFY_PLUGIN
12393+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12394+endif
12395+
12396 #This will adjust *FLAGS accordingly to the platform.
12397 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12398
12399diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12400index 19e1bdd..3665b77 100644
12401--- a/arch/um/include/asm/cache.h
12402+++ b/arch/um/include/asm/cache.h
12403@@ -1,6 +1,7 @@
12404 #ifndef __UM_CACHE_H
12405 #define __UM_CACHE_H
12406
12407+#include <linux/const.h>
12408
12409 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12410 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12411@@ -12,6 +13,6 @@
12412 # define L1_CACHE_SHIFT 5
12413 #endif
12414
12415-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12416+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12417
12418 #endif
12419diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12420index 2e0a6b1..a64d0f5 100644
12421--- a/arch/um/include/asm/kmap_types.h
12422+++ b/arch/um/include/asm/kmap_types.h
12423@@ -8,6 +8,6 @@
12424
12425 /* No more #include "asm/arch/kmap_types.h" ! */
12426
12427-#define KM_TYPE_NR 14
12428+#define KM_TYPE_NR 15
12429
12430 #endif
12431diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12432index 71c5d13..4c7b9f1 100644
12433--- a/arch/um/include/asm/page.h
12434+++ b/arch/um/include/asm/page.h
12435@@ -14,6 +14,9 @@
12436 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12437 #define PAGE_MASK (~(PAGE_SIZE-1))
12438
12439+#define ktla_ktva(addr) (addr)
12440+#define ktva_ktla(addr) (addr)
12441+
12442 #ifndef __ASSEMBLY__
12443
12444 struct page;
12445diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12446index 2b4274e..754fe06 100644
12447--- a/arch/um/include/asm/pgtable-3level.h
12448+++ b/arch/um/include/asm/pgtable-3level.h
12449@@ -58,6 +58,7 @@
12450 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12451 #define pud_populate(mm, pud, pmd) \
12452 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12453+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12454
12455 #ifdef CONFIG_64BIT
12456 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12457diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12458index f17bca8..48adb87 100644
12459--- a/arch/um/kernel/process.c
12460+++ b/arch/um/kernel/process.c
12461@@ -356,22 +356,6 @@ int singlestepping(void * t)
12462 return 2;
12463 }
12464
12465-/*
12466- * Only x86 and x86_64 have an arch_align_stack().
12467- * All other arches have "#define arch_align_stack(x) (x)"
12468- * in their asm/exec.h
12469- * As this is included in UML from asm-um/system-generic.h,
12470- * we can use it to behave as the subarch does.
12471- */
12472-#ifndef arch_align_stack
12473-unsigned long arch_align_stack(unsigned long sp)
12474-{
12475- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12476- sp -= get_random_int() % 8192;
12477- return sp & ~0xf;
12478-}
12479-#endif
12480-
12481 unsigned long get_wchan(struct task_struct *p)
12482 {
12483 unsigned long stack_page, sp, ip;
12484diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12485index ad8f795..2c7eec6 100644
12486--- a/arch/unicore32/include/asm/cache.h
12487+++ b/arch/unicore32/include/asm/cache.h
12488@@ -12,8 +12,10 @@
12489 #ifndef __UNICORE_CACHE_H__
12490 #define __UNICORE_CACHE_H__
12491
12492-#define L1_CACHE_SHIFT (5)
12493-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12494+#include <linux/const.h>
12495+
12496+#define L1_CACHE_SHIFT 5
12497+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12498
12499 /*
12500 * Memory returned by kmalloc() may be used for DMA, so we must make
12501diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12502index b7d31ca..9481ec5 100644
12503--- a/arch/x86/Kconfig
12504+++ b/arch/x86/Kconfig
12505@@ -132,7 +132,7 @@ config X86
12506 select RTC_LIB
12507 select HAVE_DEBUG_STACKOVERFLOW
12508 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12509- select HAVE_CC_STACKPROTECTOR
12510+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12511 select GENERIC_CPU_AUTOPROBE
12512 select HAVE_ARCH_AUDITSYSCALL
12513 select ARCH_SUPPORTS_ATOMIC_RMW
12514@@ -266,7 +266,7 @@ config X86_HT
12515
12516 config X86_32_LAZY_GS
12517 def_bool y
12518- depends on X86_32 && !CC_STACKPROTECTOR
12519+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12520
12521 config ARCH_HWEIGHT_CFLAGS
12522 string
12523@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
12524
12525 menuconfig HYPERVISOR_GUEST
12526 bool "Linux guest support"
12527+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12528 ---help---
12529 Say Y here to enable options for running Linux under various hyper-
12530 visors. This option enables basic hypervisor detection and platform
12531@@ -1013,6 +1014,7 @@ config VM86
12532
12533 config X86_16BIT
12534 bool "Enable support for 16-bit segments" if EXPERT
12535+ depends on !GRKERNSEC
12536 default y
12537 ---help---
12538 This option is required by programs like Wine to run 16-bit
12539@@ -1186,6 +1188,7 @@ choice
12540
12541 config NOHIGHMEM
12542 bool "off"
12543+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12544 ---help---
12545 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12546 However, the address space of 32-bit x86 processors is only 4
12547@@ -1222,6 +1225,7 @@ config NOHIGHMEM
12548
12549 config HIGHMEM4G
12550 bool "4GB"
12551+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12552 ---help---
12553 Select this if you have a 32-bit processor and between 1 and 4
12554 gigabytes of physical RAM.
12555@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
12556 hex
12557 default 0xB0000000 if VMSPLIT_3G_OPT
12558 default 0x80000000 if VMSPLIT_2G
12559- default 0x78000000 if VMSPLIT_2G_OPT
12560+ default 0x70000000 if VMSPLIT_2G_OPT
12561 default 0x40000000 if VMSPLIT_1G
12562 default 0xC0000000
12563 depends on X86_32
12564@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
12565
12566 config KEXEC
12567 bool "kexec system call"
12568+ depends on !GRKERNSEC_KMEM
12569 ---help---
12570 kexec is a system call that implements the ability to shutdown your
12571 current kernel, and to start another kernel. It is like a reboot
12572@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
12573
12574 config PHYSICAL_ALIGN
12575 hex "Alignment value to which kernel should be aligned"
12576- default "0x200000"
12577+ default "0x1000000"
12578+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12579+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12580 range 0x2000 0x1000000 if X86_32
12581 range 0x200000 0x1000000 if X86_64
12582 ---help---
12583@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
12584 def_bool n
12585 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12586 depends on X86_32 || IA32_EMULATION
12587+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12588 ---help---
12589 Certain buggy versions of glibc will crash if they are
12590 presented with a 32-bit vDSO that is not mapped at the address
12591diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12592index 6983314..54ad7e8 100644
12593--- a/arch/x86/Kconfig.cpu
12594+++ b/arch/x86/Kconfig.cpu
12595@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12596
12597 config X86_F00F_BUG
12598 def_bool y
12599- depends on M586MMX || M586TSC || M586 || M486
12600+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12601
12602 config X86_INVD_BUG
12603 def_bool y
12604@@ -327,7 +327,7 @@ config X86_INVD_BUG
12605
12606 config X86_ALIGNMENT_16
12607 def_bool y
12608- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12609+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12610
12611 config X86_INTEL_USERCOPY
12612 def_bool y
12613@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12614 # generates cmov.
12615 config X86_CMOV
12616 def_bool y
12617- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12618+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12619
12620 config X86_MINIMUM_CPU_FAMILY
12621 int
12622diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12623index 20028da..88d5946 100644
12624--- a/arch/x86/Kconfig.debug
12625+++ b/arch/x86/Kconfig.debug
12626@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12627 config DEBUG_RODATA
12628 bool "Write protect kernel read-only data structures"
12629 default y
12630- depends on DEBUG_KERNEL
12631+ depends on DEBUG_KERNEL && BROKEN
12632 ---help---
12633 Mark the kernel read-only data as write-protected in the pagetables,
12634 in order to catch accidental (and incorrect) writes to such const
12635@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12636
12637 config DEBUG_SET_MODULE_RONX
12638 bool "Set loadable kernel module data as NX and text as RO"
12639- depends on MODULES
12640+ depends on MODULES && BROKEN
12641 ---help---
12642 This option helps catch unintended modifications to loadable
12643 kernel module's text and read-only data. It also prevents execution
12644diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12645index 5ba2d9c..41e5bb6 100644
12646--- a/arch/x86/Makefile
12647+++ b/arch/x86/Makefile
12648@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12649 # CPU-specific tuning. Anything which can be shared with UML should go here.
12650 include $(srctree)/arch/x86/Makefile_32.cpu
12651 KBUILD_CFLAGS += $(cflags-y)
12652-
12653- # temporary until string.h is fixed
12654- KBUILD_CFLAGS += -ffreestanding
12655 else
12656 BITS := 64
12657 UTS_MACHINE := x86_64
12658@@ -107,6 +104,9 @@ else
12659 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12660 endif
12661
12662+# temporary until string.h is fixed
12663+KBUILD_CFLAGS += -ffreestanding
12664+
12665 # Make sure compiler does not have buggy stack-protector support.
12666 ifdef CONFIG_CC_STACKPROTECTOR
12667 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12668@@ -181,6 +181,7 @@ archheaders:
12669 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12670
12671 archprepare:
12672+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12673 ifeq ($(CONFIG_KEXEC_FILE),y)
12674 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12675 endif
12676@@ -264,3 +265,9 @@ define archhelp
12677 echo ' FDARGS="..." arguments for the booted kernel'
12678 echo ' FDINITRD=file initrd for the booted kernel'
12679 endef
12680+
12681+define OLD_LD
12682+
12683+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12684+*** Please upgrade your binutils to 2.18 or newer.
12685+endef
12686diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12687index 57bbf2f..b100fce 100644
12688--- a/arch/x86/boot/Makefile
12689+++ b/arch/x86/boot/Makefile
12690@@ -58,6 +58,9 @@ clean-files += cpustr.h
12691 # ---------------------------------------------------------------------------
12692
12693 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12694+ifdef CONSTIFY_PLUGIN
12695+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12696+endif
12697 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12698 GCOV_PROFILE := n
12699
12700diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12701index 878e4b9..20537ab 100644
12702--- a/arch/x86/boot/bitops.h
12703+++ b/arch/x86/boot/bitops.h
12704@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12705 u8 v;
12706 const u32 *p = (const u32 *)addr;
12707
12708- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12709+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12710 return v;
12711 }
12712
12713@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12714
12715 static inline void set_bit(int nr, void *addr)
12716 {
12717- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12718+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12719 }
12720
12721 #endif /* BOOT_BITOPS_H */
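The asm to asm volatile conversions in this file, in boot.h, and in cpucheck.c further down all address the same optimizer freedom: GCC treats a non-volatile asm with outputs as a pure function of its inputs, so identical statements may be merged, hoisted, or deleted when the result is unused, none of which is acceptable for MSR accesses or flag-reading bit tests. A compile-and-inspect sketch of the difference (GCC on x86-64 assumed; rdtsc stands in as the side-effecting instruction):

/* Build with: gcc -O2 -S asmvol.c and compare the emitted code. */
static unsigned long tsc_plain(void)
{
	unsigned long lo, hi;
	asm("rdtsc" : "=a"(lo), "=d"(hi));	/* may be CSEd or moved */
	return (hi << 32) | lo;
}

static unsigned long tsc_volatile(void)
{
	unsigned long lo, hi;
	asm volatile("rdtsc" : "=a"(lo), "=d"(hi));	/* emitted each call */
	return (hi << 32) | lo;
}

int main(void)
{
	/* After inlining, two plain reads can be folded into one read;
	 * the volatile reads are always performed twice. */
	return (tsc_plain() == tsc_plain()) ^
	       (tsc_volatile() == tsc_volatile());
}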
12722diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12723index bd49ec6..94c7f58 100644
12724--- a/arch/x86/boot/boot.h
12725+++ b/arch/x86/boot/boot.h
12726@@ -84,7 +84,7 @@ static inline void io_delay(void)
12727 static inline u16 ds(void)
12728 {
12729 u16 seg;
12730- asm("movw %%ds,%0" : "=rm" (seg));
12731+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12732 return seg;
12733 }
12734
12735diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12736index 0a291cd..9686efc 100644
12737--- a/arch/x86/boot/compressed/Makefile
12738+++ b/arch/x86/boot/compressed/Makefile
12739@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12740 KBUILD_CFLAGS += -mno-mmx -mno-sse
12741 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12742 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12743+ifdef CONSTIFY_PLUGIN
12744+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12745+endif
12746
12747 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12748 GCOV_PROFILE := n
12749diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12750index a53440e..c3dbf1e 100644
12751--- a/arch/x86/boot/compressed/efi_stub_32.S
12752+++ b/arch/x86/boot/compressed/efi_stub_32.S
12753@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12754 * parameter 2, ..., param n. To make things easy, we save the return
12755 * address of efi_call_phys in a global variable.
12756 */
12757- popl %ecx
12758- movl %ecx, saved_return_addr(%edx)
12759- /* get the function pointer into ECX*/
12760- popl %ecx
12761- movl %ecx, efi_rt_function_ptr(%edx)
12762+ popl saved_return_addr(%edx)
12763+ popl efi_rt_function_ptr(%edx)
12764
12765 /*
12766 * 3. Call the physical function.
12767 */
12768- call *%ecx
12769+ call *efi_rt_function_ptr(%edx)
12770
12771 /*
12772 * 4. Balance the stack. And because EAX contains the return value,
12773@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12774 1: popl %edx
12775 subl $1b, %edx
12776
12777- movl efi_rt_function_ptr(%edx), %ecx
12778- pushl %ecx
12779+ pushl efi_rt_function_ptr(%edx)
12780
12781 /*
12782 * 10. Push the saved return address onto the stack and return.
12783 */
12784- movl saved_return_addr(%edx), %ecx
12785- pushl %ecx
12786- ret
12787+ jmpl *saved_return_addr(%edx)
12788 ENDPROC(efi_call_phys)
12789 .previous
12790
12791diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12792index 630384a..278e788 100644
12793--- a/arch/x86/boot/compressed/efi_thunk_64.S
12794+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12795@@ -189,8 +189,8 @@ efi_gdt64:
12796 .long 0 /* Filled out by user */
12797 .word 0
12798 .quad 0x0000000000000000 /* NULL descriptor */
12799- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12800- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12801+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12802+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12803 .quad 0x0080890000000000 /* TS descriptor */
12804 .quad 0x0000000000000000 /* TS continued */
12805 efi_gdt64_end:
12806diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12807index 1d7fbbc..36ecd58 100644
12808--- a/arch/x86/boot/compressed/head_32.S
12809+++ b/arch/x86/boot/compressed/head_32.S
12810@@ -140,10 +140,10 @@ preferred_addr:
12811 addl %eax, %ebx
12812 notl %eax
12813 andl %eax, %ebx
12814- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12815+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12816 jge 1f
12817 #endif
12818- movl $LOAD_PHYSICAL_ADDR, %ebx
12819+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12820 1:
12821
12822 /* Target address to relocate to for decompression */
12823diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12824index 6b1766c..ad465c9 100644
12825--- a/arch/x86/boot/compressed/head_64.S
12826+++ b/arch/x86/boot/compressed/head_64.S
12827@@ -94,10 +94,10 @@ ENTRY(startup_32)
12828 addl %eax, %ebx
12829 notl %eax
12830 andl %eax, %ebx
12831- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12832+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12833 jge 1f
12834 #endif
12835- movl $LOAD_PHYSICAL_ADDR, %ebx
12836+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12837 1:
12838
12839 /* Target address to relocate to for decompression */
12840@@ -322,10 +322,10 @@ preferred_addr:
12841 addq %rax, %rbp
12842 notq %rax
12843 andq %rax, %rbp
12844- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12845+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12846 jge 1f
12847 #endif
12848- movq $LOAD_PHYSICAL_ADDR, %rbp
12849+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12850 1:
12851
12852 /* Target address to relocate to for decompression */
12853@@ -434,8 +434,8 @@ gdt:
12854 .long gdt
12855 .word 0
12856 .quad 0x0000000000000000 /* NULL descriptor */
12857- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12858- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12859+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12860+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12861 .quad 0x0080890000000000 /* TS descriptor */
12862 .quad 0x0000000000000000 /* TS continued */
12863 gdt_end:
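Both GDT edits (here and in efi_thunk_64.S above) flip the same bit: 0x9a to 0x9b and 0x92 to 0x93 pre-set the "accessed" bit in the descriptor type field. If that bit is clear, the CPU sets it itself the first time the segment is loaded, which is a hardware write into the GDT; pre-setting it is what allows KERNEXEC to keep the GDT in read-only memory. A small decoder for the two code-segment quadwords (field offsets per the Intel SDM; illustrative userspace C):

#include <stdio.h>
#include <stdint.h>

static void decode(uint64_t d)
{
	unsigned int type = (unsigned int)((d >> 40) & 0xF);

	printf("%016llx: type=%x accessed=%u dpl=%u present=%u\n",
	       (unsigned long long)d, type, type & 1U,
	       (unsigned int)((d >> 45) & 3), (unsigned int)((d >> 47) & 1));
}

int main(void)
{
	decode(0x00af9a000000ffffULL);	/* __KERNEL_CS, accessed clear */
	decode(0x00af9b000000ffffULL);	/* __KERNEL_CS, accessed set */
	return 0;
}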
12864diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12865index a950864..c710239 100644
12866--- a/arch/x86/boot/compressed/misc.c
12867+++ b/arch/x86/boot/compressed/misc.c
12868@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12869 * Calculate the delta between where vmlinux was linked to load
12870 * and where it was actually loaded.
12871 */
12872- delta = min_addr - LOAD_PHYSICAL_ADDR;
12873+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12874 if (!delta) {
12875 debug_putstr("No relocation needed... ");
12876 return;
12877@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12878 Elf32_Ehdr ehdr;
12879 Elf32_Phdr *phdrs, *phdr;
12880 #endif
12881- void *dest;
12882+ void *dest, *prev;
12883 int i;
12884
12885 memcpy(&ehdr, output, sizeof(ehdr));
12886@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12887 case PT_LOAD:
12888 #ifdef CONFIG_RELOCATABLE
12889 dest = output;
12890- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12891+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12892 #else
12893 dest = (void *)(phdr->p_paddr);
12894 #endif
12895 memcpy(dest,
12896 output + phdr->p_offset,
12897 phdr->p_filesz);
12898+ if (i)
12899+ memset(prev, 0xff, dest - prev);
12900+ prev = dest + phdr->p_filesz;
12901 break;
12902 default: /* Ignore other PT_* */ break;
12903 }
12904@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12905 error("Destination address too large");
12906 #endif
12907 #ifndef CONFIG_RELOCATABLE
12908- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12909+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12910 error("Wrong destination address");
12911 #endif
12912
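The parse_elf() change poisons the padding between consecutive PT_LOAD segments with 0xff instead of leaving stale decompressor memory there; 0xff decodes to nothing useful, unlike benign-looking zero bytes. Note that prev is only assigned in the PT_LOAD case while the "if (i)" guard tests the overall header index, so the code appears to assume the first program header is a PT_LOAD. A reduced userspace model of the loop, keyed off prev instead to sidestep that assumption (illustrative; assumes segments in ascending address order):

#include <string.h>
#include <stddef.h>

struct seg { size_t paddr, off, filesz; };

static void copy_segments(char *out, const char *in,
			  const struct seg *s, int n)
{
	char *prev = NULL;
	int i;

	for (i = 0; i < n; i++) {
		char *dest = out + s[i].paddr;

		if (prev)	/* poison the gap below this segment */
			memset(prev, 0xff, (size_t)(dest - prev));
		memcpy(dest, in + s[i].off, s[i].filesz);
		prev = dest + s[i].filesz;
	}
}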
12913diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12914index 1fd7d57..0f7d096 100644
12915--- a/arch/x86/boot/cpucheck.c
12916+++ b/arch/x86/boot/cpucheck.c
12917@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12918 u32 ecx = MSR_K7_HWCR;
12919 u32 eax, edx;
12920
12921- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12922+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12923 eax &= ~(1 << 15);
12924- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12925+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12926
12927 get_cpuflags(); /* Make sure it really did something */
12928 err = check_cpuflags();
12929@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12930 u32 ecx = MSR_VIA_FCR;
12931 u32 eax, edx;
12932
12933- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12934+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12935 eax |= (1<<1)|(1<<7);
12936- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12937+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12938
12939 set_bit(X86_FEATURE_CX8, cpu.flags);
12940 err = check_cpuflags();
12941@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12942 u32 eax, edx;
12943 u32 level = 1;
12944
12945- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12946- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12947- asm("cpuid"
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12950+ asm volatile("cpuid"
12951 : "+a" (level), "=d" (cpu.flags[0])
12952 : : "ecx", "ebx");
12953- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12954+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12955
12956 err = check_cpuflags();
12957 } else if (err == 0x01 &&
12958diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12959index 16ef025..91e033b 100644
12960--- a/arch/x86/boot/header.S
12961+++ b/arch/x86/boot/header.S
12962@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12963 # single linked list of
12964 # struct setup_data
12965
12966-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12967+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12968
12969 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12970+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12971+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12972+#else
12973 #define VO_INIT_SIZE (VO__end - VO__text)
12974+#endif
12975 #if ZO_INIT_SIZE > VO_INIT_SIZE
12976 #define INIT_SIZE ZO_INIT_SIZE
12977 #else
12978diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12979index db75d07..8e6d0af 100644
12980--- a/arch/x86/boot/memory.c
12981+++ b/arch/x86/boot/memory.c
12982@@ -19,7 +19,7 @@
12983
12984 static int detect_memory_e820(void)
12985 {
12986- int count = 0;
12987+ unsigned int count = 0;
12988 struct biosregs ireg, oreg;
12989 struct e820entry *desc = boot_params.e820_map;
12990 static struct e820entry buf; /* static so it is zeroed */
12991diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12992index ba3e100..6501b8f 100644
12993--- a/arch/x86/boot/video-vesa.c
12994+++ b/arch/x86/boot/video-vesa.c
12995@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
12996
12997 boot_params.screen_info.vesapm_seg = oreg.es;
12998 boot_params.screen_info.vesapm_off = oreg.di;
12999+ boot_params.screen_info.vesapm_size = oreg.cx;
13000 }
13001
13002 /*
13003diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13004index 43eda28..5ab5fdb 100644
13005--- a/arch/x86/boot/video.c
13006+++ b/arch/x86/boot/video.c
13007@@ -96,7 +96,7 @@ static void store_mode_params(void)
13008 static unsigned int get_entry(void)
13009 {
13010 char entry_buf[4];
13011- int i, len = 0;
13012+ unsigned int i, len = 0;
13013 int key;
13014 unsigned int v;
13015
13016diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13017index 9105655..41779c1 100644
13018--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13019+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13020@@ -8,6 +8,8 @@
13021 * including this sentence is retained in full.
13022 */
13023
13024+#include <asm/alternative-asm.h>
13025+
13026 .extern crypto_ft_tab
13027 .extern crypto_it_tab
13028 .extern crypto_fl_tab
13029@@ -70,6 +72,8 @@
13030 je B192; \
13031 leaq 32(r9),r9;
13032
13033+#define ret pax_force_retaddr; ret
13034+
13035 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13036 movq r1,r2; \
13037 movq r3,r4; \
13038diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13039index 6bd2c6c..368c93e 100644
13040--- a/arch/x86/crypto/aesni-intel_asm.S
13041+++ b/arch/x86/crypto/aesni-intel_asm.S
13042@@ -31,6 +31,7 @@
13043
13044 #include <linux/linkage.h>
13045 #include <asm/inst.h>
13046+#include <asm/alternative-asm.h>
13047
13048 /*
13049 * The following macros are used to move an (un)aligned 16 byte value to/from
13050@@ -217,7 +218,7 @@ enc: .octa 0x2
13051 * num_initial_blocks = b mod 4
13052 * encrypt the initial num_initial_blocks blocks and apply ghash on
13053 * the ciphertext
13054-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13055+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13056 * are clobbered
13057 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13058 */
13059@@ -227,8 +228,8 @@ enc: .octa 0x2
13060 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13061 MOVADQ SHUF_MASK(%rip), %xmm14
13062 mov arg7, %r10 # %r10 = AAD
13063- mov arg8, %r12 # %r12 = aadLen
13064- mov %r12, %r11
13065+ mov arg8, %r15 # %r15 = aadLen
13066+ mov %r15, %r11
13067 pxor %xmm\i, %xmm\i
13068
13069 _get_AAD_loop\num_initial_blocks\operation:
13070@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
13071 psrldq $4, %xmm\i
13072 pxor \TMP1, %xmm\i
13073 add $4, %r10
13074- sub $4, %r12
13075+ sub $4, %r15
13076 jne _get_AAD_loop\num_initial_blocks\operation
13077
13078 cmp $16, %r11
13079 je _get_AAD_loop2_done\num_initial_blocks\operation
13080
13081- mov $16, %r12
13082+ mov $16, %r15
13083 _get_AAD_loop2\num_initial_blocks\operation:
13084 psrldq $4, %xmm\i
13085- sub $4, %r12
13086- cmp %r11, %r12
13087+ sub $4, %r15
13088+ cmp %r11, %r15
13089 jne _get_AAD_loop2\num_initial_blocks\operation
13090
13091 _get_AAD_loop2_done\num_initial_blocks\operation:
13092@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13093 * num_initial_blocks = b mod 4
13094 * encrypt the initial num_initial_blocks blocks and apply ghash on
13095 * the ciphertext
13096-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13097+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13098 * are clobbered
13099 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13100 */
13101@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13102 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13103 MOVADQ SHUF_MASK(%rip), %xmm14
13104 mov arg7, %r10 # %r10 = AAD
13105- mov arg8, %r12 # %r12 = aadLen
13106- mov %r12, %r11
13107+ mov arg8, %r15 # %r15 = aadLen
13108+ mov %r15, %r11
13109 pxor %xmm\i, %xmm\i
13110 _get_AAD_loop\num_initial_blocks\operation:
13111 movd (%r10), \TMP1
13112@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13113 psrldq $4, %xmm\i
13114 pxor \TMP1, %xmm\i
13115 add $4, %r10
13116- sub $4, %r12
13117+ sub $4, %r15
13118 jne _get_AAD_loop\num_initial_blocks\operation
13119 cmp $16, %r11
13120 je _get_AAD_loop2_done\num_initial_blocks\operation
13121- mov $16, %r12
13122+ mov $16, %r15
13123 _get_AAD_loop2\num_initial_blocks\operation:
13124 psrldq $4, %xmm\i
13125- sub $4, %r12
13126- cmp %r11, %r12
13127+ sub $4, %r15
13128+ cmp %r11, %r15
13129 jne _get_AAD_loop2\num_initial_blocks\operation
13130 _get_AAD_loop2_done\num_initial_blocks\operation:
13131 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
13132@@ -1280,7 +1281,7 @@ _esb_loop_\@:
13133 *
13134 *****************************************************************************/
13135 ENTRY(aesni_gcm_dec)
13136- push %r12
13137+ push %r15
13138 push %r13
13139 push %r14
13140 mov %rsp, %r14
13141@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
13142 */
13143 sub $VARIABLE_OFFSET, %rsp
13144 and $~63, %rsp # align rsp to 64 bytes
13145- mov %arg6, %r12
13146- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13147+ mov %arg6, %r15
13148+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13149 movdqa SHUF_MASK(%rip), %xmm2
13150 PSHUFB_XMM %xmm2, %xmm13
13151
13152@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
13153 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13154 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13155 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13156- mov %r13, %r12
13157- and $(3<<4), %r12
13158+ mov %r13, %r15
13159+ and $(3<<4), %r15
13160 jz _initial_num_blocks_is_0_decrypt
13161- cmp $(2<<4), %r12
13162+ cmp $(2<<4), %r15
13163 jb _initial_num_blocks_is_1_decrypt
13164 je _initial_num_blocks_is_2_decrypt
13165 _initial_num_blocks_is_3_decrypt:
13166@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
13167 sub $16, %r11
13168 add %r13, %r11
13169 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13170- lea SHIFT_MASK+16(%rip), %r12
13171- sub %r13, %r12
13172+ lea SHIFT_MASK+16(%rip), %r15
13173+ sub %r13, %r15
13174 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13175 # (%r13 is the number of bytes in plaintext mod 16)
13176- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13177+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13178 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13179
13180 movdqa %xmm1, %xmm2
13181 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13182- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13183+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13184 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13185 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13186 pand %xmm1, %xmm2
13187@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
13188 sub $1, %r13
13189 jne _less_than_8_bytes_left_decrypt
13190 _multiple_of_16_bytes_decrypt:
13191- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13192- shl $3, %r12 # convert into number of bits
13193- movd %r12d, %xmm15 # len(A) in %xmm15
13194+	mov	   arg8, %r15		# %r15 = aadLen (number of bytes)

13195+ shl $3, %r15 # convert into number of bits
13196+ movd %r15d, %xmm15 # len(A) in %xmm15
13197 shl $3, %arg4 # len(C) in bits (*128)
13198 MOVQ_R64_XMM %arg4, %xmm1
13199 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13200@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
13201 mov %r14, %rsp
13202 pop %r14
13203 pop %r13
13204- pop %r12
13205+ pop %r15
13206+ pax_force_retaddr
13207 ret
13208 ENDPROC(aesni_gcm_dec)
13209
13210@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
13211 * poly = x^128 + x^127 + x^126 + x^121 + 1
13212 ***************************************************************************/
13213 ENTRY(aesni_gcm_enc)
13214- push %r12
13215+ push %r15
13216 push %r13
13217 push %r14
13218 mov %rsp, %r14
13219@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
13220 #
13221 sub $VARIABLE_OFFSET, %rsp
13222 and $~63, %rsp
13223- mov %arg6, %r12
13224- movdqu (%r12), %xmm13
13225+ mov %arg6, %r15
13226+ movdqu (%r15), %xmm13
13227 movdqa SHUF_MASK(%rip), %xmm2
13228 PSHUFB_XMM %xmm2, %xmm13
13229
13230@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
13231 movdqa %xmm13, HashKey(%rsp)
13232 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13233 and $-16, %r13
13234- mov %r13, %r12
13235+ mov %r13, %r15
13236
13237 # Encrypt first few blocks
13238
13239- and $(3<<4), %r12
13240+ and $(3<<4), %r15
13241 jz _initial_num_blocks_is_0_encrypt
13242- cmp $(2<<4), %r12
13243+ cmp $(2<<4), %r15
13244 jb _initial_num_blocks_is_1_encrypt
13245 je _initial_num_blocks_is_2_encrypt
13246 _initial_num_blocks_is_3_encrypt:
13247@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
13248 sub $16, %r11
13249 add %r13, %r11
13250 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13251- lea SHIFT_MASK+16(%rip), %r12
13252- sub %r13, %r12
13253+ lea SHIFT_MASK+16(%rip), %r15
13254+ sub %r13, %r15
13255 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13256 # (%r13 is the number of bytes in plaintext mod 16)
13257- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13258+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13259 	PSHUFB_XMM %xmm2, %xmm1		# shift right 16-r13 bytes
13260 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13261- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13262+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13263 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13264 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13265 movdqa SHUF_MASK(%rip), %xmm10
13266@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
13267 sub $1, %r13
13268 jne _less_than_8_bytes_left_encrypt
13269 _multiple_of_16_bytes_encrypt:
13270- mov arg8, %r12 # %r12 = addLen (number of bytes)
13271- shl $3, %r12
13272- movd %r12d, %xmm15 # len(A) in %xmm15
13273+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13274+ shl $3, %r15
13275+ movd %r15d, %xmm15 # len(A) in %xmm15
13276 shl $3, %arg4 # len(C) in bits (*128)
13277 MOVQ_R64_XMM %arg4, %xmm1
13278 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13279@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
13280 mov %r14, %rsp
13281 pop %r14
13282 pop %r13
13283- pop %r12
13284+ pop %r15
13285+ pax_force_retaddr
13286 ret
13287 ENDPROC(aesni_gcm_enc)
13288
13289@@ -1733,6 +1736,7 @@ _key_expansion_256a:
13290 pxor %xmm1, %xmm0
13291 movaps %xmm0, (TKEYP)
13292 add $0x10, TKEYP
13293+ pax_force_retaddr
13294 ret
13295 ENDPROC(_key_expansion_128)
13296 ENDPROC(_key_expansion_256a)
13297@@ -1759,6 +1763,7 @@ _key_expansion_192a:
13298 shufps $0b01001110, %xmm2, %xmm1
13299 movaps %xmm1, 0x10(TKEYP)
13300 add $0x20, TKEYP
13301+ pax_force_retaddr
13302 ret
13303 ENDPROC(_key_expansion_192a)
13304
13305@@ -1779,6 +1784,7 @@ _key_expansion_192b:
13306
13307 movaps %xmm0, (TKEYP)
13308 add $0x10, TKEYP
13309+ pax_force_retaddr
13310 ret
13311 ENDPROC(_key_expansion_192b)
13312
13313@@ -1792,6 +1798,7 @@ _key_expansion_256b:
13314 pxor %xmm1, %xmm2
13315 movaps %xmm2, (TKEYP)
13316 add $0x10, TKEYP
13317+ pax_force_retaddr
13318 ret
13319 ENDPROC(_key_expansion_256b)
13320
13321@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
13322 #ifndef __x86_64__
13323 popl KEYP
13324 #endif
13325+ pax_force_retaddr
13326 ret
13327 ENDPROC(aesni_set_key)
13328
13329@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
13330 popl KLEN
13331 popl KEYP
13332 #endif
13333+ pax_force_retaddr
13334 ret
13335 ENDPROC(aesni_enc)
13336
13337@@ -1985,6 +1994,7 @@ _aesni_enc1:
13338 AESENC KEY STATE
13339 movaps 0x70(TKEYP), KEY
13340 AESENCLAST KEY STATE
13341+ pax_force_retaddr
13342 ret
13343 ENDPROC(_aesni_enc1)
13344
13345@@ -2094,6 +2104,7 @@ _aesni_enc4:
13346 AESENCLAST KEY STATE2
13347 AESENCLAST KEY STATE3
13348 AESENCLAST KEY STATE4
13349+ pax_force_retaddr
13350 ret
13351 ENDPROC(_aesni_enc4)
13352
13353@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
13354 popl KLEN
13355 popl KEYP
13356 #endif
13357+ pax_force_retaddr
13358 ret
13359 ENDPROC(aesni_dec)
13360
13361@@ -2175,6 +2187,7 @@ _aesni_dec1:
13362 AESDEC KEY STATE
13363 movaps 0x70(TKEYP), KEY
13364 AESDECLAST KEY STATE
13365+ pax_force_retaddr
13366 ret
13367 ENDPROC(_aesni_dec1)
13368
13369@@ -2284,6 +2297,7 @@ _aesni_dec4:
13370 AESDECLAST KEY STATE2
13371 AESDECLAST KEY STATE3
13372 AESDECLAST KEY STATE4
13373+ pax_force_retaddr
13374 ret
13375 ENDPROC(_aesni_dec4)
13376
13377@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
13378 popl KEYP
13379 popl LEN
13380 #endif
13381+ pax_force_retaddr
13382 ret
13383 ENDPROC(aesni_ecb_enc)
13384
13385@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
13386 popl KEYP
13387 popl LEN
13388 #endif
13389+ pax_force_retaddr
13390 ret
13391 ENDPROC(aesni_ecb_dec)
13392
13393@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
13394 popl LEN
13395 popl IVP
13396 #endif
13397+ pax_force_retaddr
13398 ret
13399 ENDPROC(aesni_cbc_enc)
13400
13401@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
13402 popl LEN
13403 popl IVP
13404 #endif
13405+ pax_force_retaddr
13406 ret
13407 ENDPROC(aesni_cbc_dec)
13408
13409@@ -2561,6 +2579,7 @@ _aesni_inc_init:
13410 mov $1, TCTR_LOW
13411 MOVQ_R64_XMM TCTR_LOW INC
13412 MOVQ_R64_XMM CTR TCTR_LOW
13413+ pax_force_retaddr
13414 ret
13415 ENDPROC(_aesni_inc_init)
13416
13417@@ -2590,6 +2609,7 @@ _aesni_inc:
13418 .Linc_low:
13419 movaps CTR, IV
13420 PSHUFB_XMM BSWAP_MASK IV
13421+ pax_force_retaddr
13422 ret
13423 ENDPROC(_aesni_inc)
13424
13425@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
13426 .Lctr_enc_ret:
13427 movups IV, (IVP)
13428 .Lctr_enc_just_ret:
13429+ pax_force_retaddr
13430 ret
13431 ENDPROC(aesni_ctr_enc)
13432
13433@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
13434 pxor INC, STATE4
13435 movdqu STATE4, 0x70(OUTP)
13436
13437+ pax_force_retaddr
13438 ret
13439 ENDPROC(aesni_xts_crypt8)
13440
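
The %r12-to-%r15 renames above (and the matching %r12-to-%r14 renames in the cast and twofish files that follow) are most plausibly driven by the OR variant of pax_force_retaddr defined near the end of this patch: pax_set_fptr_mask parks the constant 0x8000000000000000 in %r12, so any assembly routine that clobbered %r12 would corrupt the mask. A minimal C sketch of the reservation idea, using GCC's global register variable extension (illustrative only, not the patch's actual mechanism):

    /* mask.c - %r12 is pinned for this translation unit, so the
     * compiler never allocates it for anything else; hand-written
     * assembly must honour the same rule, hence the renames. */
    register unsigned long pax_retaddr_mask __asm__("r12");

    unsigned long harden(unsigned long ret_addr)
    {
        return ret_addr | pax_retaddr_mask;  /* cf. orq %r12,\rip(%rsp) */
    }
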
13441diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13442index 246c670..466e2d6 100644
13443--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13444+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13445@@ -21,6 +21,7 @@
13446 */
13447
13448 #include <linux/linkage.h>
13449+#include <asm/alternative-asm.h>
13450
13451 .file "blowfish-x86_64-asm.S"
13452 .text
13453@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13454 jnz .L__enc_xor;
13455
13456 write_block();
13457+ pax_force_retaddr
13458 ret;
13459 .L__enc_xor:
13460 xor_block();
13461+ pax_force_retaddr
13462 ret;
13463 ENDPROC(__blowfish_enc_blk)
13464
13465@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13466
13467 movq %r11, %rbp;
13468
13469+ pax_force_retaddr
13470 ret;
13471 ENDPROC(blowfish_dec_blk)
13472
13473@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13474
13475 popq %rbx;
13476 popq %rbp;
13477+ pax_force_retaddr
13478 ret;
13479
13480 .L__enc_xor4:
13481@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13482
13483 popq %rbx;
13484 popq %rbp;
13485+ pax_force_retaddr
13486 ret;
13487 ENDPROC(__blowfish_enc_blk_4way)
13488
13489@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13490 popq %rbx;
13491 popq %rbp;
13492
13493+ pax_force_retaddr
13494 ret;
13495 ENDPROC(blowfish_dec_blk_4way)
13496diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13497index ce71f92..1dce7ec 100644
13498--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13499+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13500@@ -16,6 +16,7 @@
13501 */
13502
13503 #include <linux/linkage.h>
13504+#include <asm/alternative-asm.h>
13505
13506 #define CAMELLIA_TABLE_BYTE_LEN 272
13507
13508@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13509 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13510 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13511 %rcx, (%r9));
13512+ pax_force_retaddr
13513 ret;
13514 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13515
13516@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13517 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13518 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13519 %rax, (%r9));
13520+ pax_force_retaddr
13521 ret;
13522 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13523
13524@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13525 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13526 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13527
13528+ pax_force_retaddr
13529 ret;
13530
13531 .align 8
13532@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13533 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13534 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13535
13536+ pax_force_retaddr
13537 ret;
13538
13539 .align 8
13540@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13541 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13542 %xmm8, %rsi);
13543
13544+ pax_force_retaddr
13545 ret;
13546 ENDPROC(camellia_ecb_enc_16way)
13547
13548@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13549 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13550 %xmm8, %rsi);
13551
13552+ pax_force_retaddr
13553 ret;
13554 ENDPROC(camellia_ecb_dec_16way)
13555
13556@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13557 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13558 %xmm8, %rsi);
13559
13560+ pax_force_retaddr
13561 ret;
13562 ENDPROC(camellia_cbc_dec_16way)
13563
13564@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13565 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13566 %xmm8, %rsi);
13567
13568+ pax_force_retaddr
13569 ret;
13570 ENDPROC(camellia_ctr_16way)
13571
13572@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13573 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13574 %xmm8, %rsi);
13575
13576+ pax_force_retaddr
13577 ret;
13578 ENDPROC(camellia_xts_crypt_16way)
13579
13580diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13581index 0e0b886..5a3123c 100644
13582--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13583+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13584@@ -11,6 +11,7 @@
13585 */
13586
13587 #include <linux/linkage.h>
13588+#include <asm/alternative-asm.h>
13589
13590 #define CAMELLIA_TABLE_BYTE_LEN 272
13591
13592@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13593 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13594 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13595 %rcx, (%r9));
13596+ pax_force_retaddr
13597 ret;
13598 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13599
13600@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13601 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13602 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13603 %rax, (%r9));
13604+ pax_force_retaddr
13605 ret;
13606 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13607
13608@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13609 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13610 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13611
13612+ pax_force_retaddr
13613 ret;
13614
13615 .align 8
13616@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13617 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13618 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13619
13620+ pax_force_retaddr
13621 ret;
13622
13623 .align 8
13624@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13625
13626 vzeroupper;
13627
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(camellia_ecb_enc_32way)
13631
13632@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13633
13634 vzeroupper;
13635
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(camellia_ecb_dec_32way)
13639
13640@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13641
13642 vzeroupper;
13643
13644+ pax_force_retaddr
13645 ret;
13646 ENDPROC(camellia_cbc_dec_32way)
13647
13648@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13649
13650 vzeroupper;
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(camellia_ctr_32way)
13655
13656@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13657
13658 vzeroupper;
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_xts_crypt_32way)
13663
13664diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13665index 310319c..db3d7b5 100644
13666--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13667+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13668@@ -21,6 +21,7 @@
13669 */
13670
13671 #include <linux/linkage.h>
13672+#include <asm/alternative-asm.h>
13673
13674 .file "camellia-x86_64-asm_64.S"
13675 .text
13676@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13677 enc_outunpack(mov, RT1);
13678
13679 movq RRBP, %rbp;
13680+ pax_force_retaddr
13681 ret;
13682
13683 .L__enc_xor:
13684 enc_outunpack(xor, RT1);
13685
13686 movq RRBP, %rbp;
13687+ pax_force_retaddr
13688 ret;
13689 ENDPROC(__camellia_enc_blk)
13690
13691@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13692 dec_outunpack();
13693
13694 movq RRBP, %rbp;
13695+ pax_force_retaddr
13696 ret;
13697 ENDPROC(camellia_dec_blk)
13698
13699@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13700
13701 movq RRBP, %rbp;
13702 popq %rbx;
13703+ pax_force_retaddr
13704 ret;
13705
13706 .L__enc2_xor:
13707@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13708
13709 movq RRBP, %rbp;
13710 popq %rbx;
13711+ pax_force_retaddr
13712 ret;
13713 ENDPROC(__camellia_enc_blk_2way)
13714
13715@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13716
13717 movq RRBP, %rbp;
13718 movq RXOR, %rbx;
13719+ pax_force_retaddr
13720 ret;
13721 ENDPROC(camellia_dec_blk_2way)
13722diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13723index c35fd5d..2d8c7db 100644
13724--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13725+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13726@@ -24,6 +24,7 @@
13727 */
13728
13729 #include <linux/linkage.h>
13730+#include <asm/alternative-asm.h>
13731
13732 .file "cast5-avx-x86_64-asm_64.S"
13733
13734@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13735 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13736 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13737
13738+ pax_force_retaddr
13739 ret;
13740 ENDPROC(__cast5_enc_blk16)
13741
13742@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13743 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13744 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13745
13746+ pax_force_retaddr
13747 ret;
13748
13749 .L__skip_dec:
13750@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13751 vmovdqu RR4, (6*4*4)(%r11);
13752 vmovdqu RL4, (7*4*4)(%r11);
13753
13754+ pax_force_retaddr
13755 ret;
13756 ENDPROC(cast5_ecb_enc_16way)
13757
13758@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13759 vmovdqu RR4, (6*4*4)(%r11);
13760 vmovdqu RL4, (7*4*4)(%r11);
13761
13762+ pax_force_retaddr
13763 ret;
13764 ENDPROC(cast5_ecb_dec_16way)
13765
13766@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13767 * %rdx: src
13768 */
13769
13770- pushq %r12;
13771+ pushq %r14;
13772
13773 movq %rsi, %r11;
13774- movq %rdx, %r12;
13775+ movq %rdx, %r14;
13776
13777 vmovdqu (0*16)(%rdx), RL1;
13778 vmovdqu (1*16)(%rdx), RR1;
13779@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13780 call __cast5_dec_blk16;
13781
13782 /* xor with src */
13783- vmovq (%r12), RX;
13784+ vmovq (%r14), RX;
13785 vpshufd $0x4f, RX, RX;
13786 vpxor RX, RR1, RR1;
13787- vpxor 0*16+8(%r12), RL1, RL1;
13788- vpxor 1*16+8(%r12), RR2, RR2;
13789- vpxor 2*16+8(%r12), RL2, RL2;
13790- vpxor 3*16+8(%r12), RR3, RR3;
13791- vpxor 4*16+8(%r12), RL3, RL3;
13792- vpxor 5*16+8(%r12), RR4, RR4;
13793- vpxor 6*16+8(%r12), RL4, RL4;
13794+ vpxor 0*16+8(%r14), RL1, RL1;
13795+ vpxor 1*16+8(%r14), RR2, RR2;
13796+ vpxor 2*16+8(%r14), RL2, RL2;
13797+ vpxor 3*16+8(%r14), RR3, RR3;
13798+ vpxor 4*16+8(%r14), RL3, RL3;
13799+ vpxor 5*16+8(%r14), RR4, RR4;
13800+ vpxor 6*16+8(%r14), RL4, RL4;
13801
13802 vmovdqu RR1, (0*16)(%r11);
13803 vmovdqu RL1, (1*16)(%r11);
13804@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13805 vmovdqu RR4, (6*16)(%r11);
13806 vmovdqu RL4, (7*16)(%r11);
13807
13808- popq %r12;
13809+ popq %r14;
13810
13811+ pax_force_retaddr
13812 ret;
13813 ENDPROC(cast5_cbc_dec_16way)
13814
13815@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13816 * %rcx: iv (big endian, 64bit)
13817 */
13818
13819- pushq %r12;
13820+ pushq %r14;
13821
13822 movq %rsi, %r11;
13823- movq %rdx, %r12;
13824+ movq %rdx, %r14;
13825
13826 vpcmpeqd RTMP, RTMP, RTMP;
13827 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13828@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13829 call __cast5_enc_blk16;
13830
13831 /* dst = src ^ iv */
13832- vpxor (0*16)(%r12), RR1, RR1;
13833- vpxor (1*16)(%r12), RL1, RL1;
13834- vpxor (2*16)(%r12), RR2, RR2;
13835- vpxor (3*16)(%r12), RL2, RL2;
13836- vpxor (4*16)(%r12), RR3, RR3;
13837- vpxor (5*16)(%r12), RL3, RL3;
13838- vpxor (6*16)(%r12), RR4, RR4;
13839- vpxor (7*16)(%r12), RL4, RL4;
13840+ vpxor (0*16)(%r14), RR1, RR1;
13841+ vpxor (1*16)(%r14), RL1, RL1;
13842+ vpxor (2*16)(%r14), RR2, RR2;
13843+ vpxor (3*16)(%r14), RL2, RL2;
13844+ vpxor (4*16)(%r14), RR3, RR3;
13845+ vpxor (5*16)(%r14), RL3, RL3;
13846+ vpxor (6*16)(%r14), RR4, RR4;
13847+ vpxor (7*16)(%r14), RL4, RL4;
13848 vmovdqu RR1, (0*16)(%r11);
13849 vmovdqu RL1, (1*16)(%r11);
13850 vmovdqu RR2, (2*16)(%r11);
13851@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13852 vmovdqu RR4, (6*16)(%r11);
13853 vmovdqu RL4, (7*16)(%r11);
13854
13855- popq %r12;
13856+ popq %r14;
13857
13858+ pax_force_retaddr
13859 ret;
13860 ENDPROC(cast5_ctr_16way)
13861diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13862index e3531f8..e123f35 100644
13863--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13864+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13865@@ -24,6 +24,7 @@
13866 */
13867
13868 #include <linux/linkage.h>
13869+#include <asm/alternative-asm.h>
13870 #include "glue_helper-asm-avx.S"
13871
13872 .file "cast6-avx-x86_64-asm_64.S"
13873@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13874 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13875 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13876
13877+ pax_force_retaddr
13878 ret;
13879 ENDPROC(__cast6_enc_blk8)
13880
13881@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13882 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13883 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13884
13885+ pax_force_retaddr
13886 ret;
13887 ENDPROC(__cast6_dec_blk8)
13888
13889@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13890
13891 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13892
13893+ pax_force_retaddr
13894 ret;
13895 ENDPROC(cast6_ecb_enc_8way)
13896
13897@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13898
13899 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13900
13901+ pax_force_retaddr
13902 ret;
13903 ENDPROC(cast6_ecb_dec_8way)
13904
13905@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13906 * %rdx: src
13907 */
13908
13909- pushq %r12;
13910+ pushq %r14;
13911
13912 movq %rsi, %r11;
13913- movq %rdx, %r12;
13914+ movq %rdx, %r14;
13915
13916 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13917
13918 call __cast6_dec_blk8;
13919
13920- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13921+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13922
13923- popq %r12;
13924+ popq %r14;
13925
13926+ pax_force_retaddr
13927 ret;
13928 ENDPROC(cast6_cbc_dec_8way)
13929
13930@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13931 * %rcx: iv (little endian, 128bit)
13932 */
13933
13934- pushq %r12;
13935+ pushq %r14;
13936
13937 movq %rsi, %r11;
13938- movq %rdx, %r12;
13939+ movq %rdx, %r14;
13940
13941 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13942 RD2, RX, RKR, RKM);
13943
13944 call __cast6_enc_blk8;
13945
13946- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13947+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13948
13949- popq %r12;
13950+ popq %r14;
13951
13952+ pax_force_retaddr
13953 ret;
13954 ENDPROC(cast6_ctr_8way)
13955
13956@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13957 /* dst <= regs xor IVs(in dst) */
13958 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13959
13960+ pax_force_retaddr
13961 ret;
13962 ENDPROC(cast6_xts_enc_8way)
13963
13964@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13965 /* dst <= regs xor IVs(in dst) */
13966 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13967
13968+ pax_force_retaddr
13969 ret;
13970 ENDPROC(cast6_xts_dec_8way)
13971diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13972index 26d49eb..c0a8c84 100644
13973--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13974+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13975@@ -45,6 +45,7 @@
13976
13977 #include <asm/inst.h>
13978 #include <linux/linkage.h>
13979+#include <asm/alternative-asm.h>
13980
13981 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13982
13983@@ -309,6 +310,7 @@ do_return:
13984 popq %rsi
13985 popq %rdi
13986 popq %rbx
13987+ pax_force_retaddr
13988 ret
13989
13990 ################################################################
13991diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13992index 5d1e007..098cb4f 100644
13993--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13994+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13995@@ -18,6 +18,7 @@
13996
13997 #include <linux/linkage.h>
13998 #include <asm/inst.h>
13999+#include <asm/alternative-asm.h>
14000
14001 .data
14002
14003@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14004 psrlq $1, T2
14005 pxor T2, T1
14006 pxor T1, DATA
14007+ pax_force_retaddr
14008 ret
14009 ENDPROC(__clmul_gf128mul_ble)
14010
14011@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14012 call __clmul_gf128mul_ble
14013 PSHUFB_XMM BSWAP DATA
14014 movups DATA, (%rdi)
14015+ pax_force_retaddr
14016 ret
14017 ENDPROC(clmul_ghash_mul)
14018
14019@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14020 PSHUFB_XMM BSWAP DATA
14021 movups DATA, (%rdi)
14022 .Lupdate_just_ret:
14023+ pax_force_retaddr
14024 ret
14025 ENDPROC(clmul_ghash_update)
14026diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14027index 9279e0b..c4b3d2c 100644
14028--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14029+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14030@@ -1,4 +1,5 @@
14031 #include <linux/linkage.h>
14032+#include <asm/alternative-asm.h>
14033
14034 # enter salsa20_encrypt_bytes
14035 ENTRY(salsa20_encrypt_bytes)
14036@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14037 add %r11,%rsp
14038 mov %rdi,%rax
14039 mov %rsi,%rdx
14040+ pax_force_retaddr
14041 ret
14042 # bytesatleast65:
14043 ._bytesatleast65:
14044@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14045 add %r11,%rsp
14046 mov %rdi,%rax
14047 mov %rsi,%rdx
14048+ pax_force_retaddr
14049 ret
14050 ENDPROC(salsa20_keysetup)
14051
14052@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14053 add %r11,%rsp
14054 mov %rdi,%rax
14055 mov %rsi,%rdx
14056+ pax_force_retaddr
14057 ret
14058 ENDPROC(salsa20_ivsetup)
14059diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14060index 2f202f4..d9164d6 100644
14061--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14062+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14063@@ -24,6 +24,7 @@
14064 */
14065
14066 #include <linux/linkage.h>
14067+#include <asm/alternative-asm.h>
14068 #include "glue_helper-asm-avx.S"
14069
14070 .file "serpent-avx-x86_64-asm_64.S"
14071@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14072 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14073 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14074
14075+ pax_force_retaddr
14076 ret;
14077 ENDPROC(__serpent_enc_blk8_avx)
14078
14079@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14080 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14081 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14082
14083+ pax_force_retaddr
14084 ret;
14085 ENDPROC(__serpent_dec_blk8_avx)
14086
14087@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14088
14089 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14090
14091+ pax_force_retaddr
14092 ret;
14093 ENDPROC(serpent_ecb_enc_8way_avx)
14094
14095@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14096
14097 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14098
14099+ pax_force_retaddr
14100 ret;
14101 ENDPROC(serpent_ecb_dec_8way_avx)
14102
14103@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14104
14105 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14106
14107+ pax_force_retaddr
14108 ret;
14109 ENDPROC(serpent_cbc_dec_8way_avx)
14110
14111@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14112
14113 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14114
14115+ pax_force_retaddr
14116 ret;
14117 ENDPROC(serpent_ctr_8way_avx)
14118
14119@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14120 /* dst <= regs xor IVs(in dst) */
14121 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14122
14123+ pax_force_retaddr
14124 ret;
14125 ENDPROC(serpent_xts_enc_8way_avx)
14126
14127@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14128 /* dst <= regs xor IVs(in dst) */
14129 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14130
14131+ pax_force_retaddr
14132 ret;
14133 ENDPROC(serpent_xts_dec_8way_avx)
14134diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14135index b222085..abd483c 100644
14136--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14137+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14138@@ -15,6 +15,7 @@
14139 */
14140
14141 #include <linux/linkage.h>
14142+#include <asm/alternative-asm.h>
14143 #include "glue_helper-asm-avx2.S"
14144
14145 .file "serpent-avx2-asm_64.S"
14146@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14147 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14148 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14149
14150+ pax_force_retaddr
14151 ret;
14152 ENDPROC(__serpent_enc_blk16)
14153
14154@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14155 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14156 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14157
14158+ pax_force_retaddr
14159 ret;
14160 ENDPROC(__serpent_dec_blk16)
14161
14162@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14163
14164 vzeroupper;
14165
14166+ pax_force_retaddr
14167 ret;
14168 ENDPROC(serpent_ecb_enc_16way)
14169
14170@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14171
14172 vzeroupper;
14173
14174+ pax_force_retaddr
14175 ret;
14176 ENDPROC(serpent_ecb_dec_16way)
14177
14178@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14179
14180 vzeroupper;
14181
14182+ pax_force_retaddr
14183 ret;
14184 ENDPROC(serpent_cbc_dec_16way)
14185
14186@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14187
14188 vzeroupper;
14189
14190+ pax_force_retaddr
14191 ret;
14192 ENDPROC(serpent_ctr_16way)
14193
14194@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14195
14196 vzeroupper;
14197
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(serpent_xts_enc_16way)
14201
14202@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14203
14204 vzeroupper;
14205
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(serpent_xts_dec_16way)
14209diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14210index acc066c..1559cc4 100644
14211--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14212+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14213@@ -25,6 +25,7 @@
14214 */
14215
14216 #include <linux/linkage.h>
14217+#include <asm/alternative-asm.h>
14218
14219 .file "serpent-sse2-x86_64-asm_64.S"
14220 .text
14221@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14222 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14223 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14224
14225+ pax_force_retaddr
14226 ret;
14227
14228 .L__enc_xor8:
14229 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14230 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14231
14232+ pax_force_retaddr
14233 ret;
14234 ENDPROC(__serpent_enc_blk_8way)
14235
14236@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14237 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14238 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14239
14240+ pax_force_retaddr
14241 ret;
14242 ENDPROC(serpent_dec_blk_8way)
14243diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14244index a410950..9dfe7ad 100644
14245--- a/arch/x86/crypto/sha1_ssse3_asm.S
14246+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14247@@ -29,6 +29,7 @@
14248 */
14249
14250 #include <linux/linkage.h>
14251+#include <asm/alternative-asm.h>
14252
14253 #define CTX %rdi // arg1
14254 #define BUF %rsi // arg2
14255@@ -75,9 +76,9 @@
14256
14257 push %rbx
14258 push %rbp
14259- push %r12
14260+ push %r14
14261
14262- mov %rsp, %r12
14263+ mov %rsp, %r14
14264 sub $64, %rsp # allocate workspace
14265 and $~15, %rsp # align stack
14266
14267@@ -99,11 +100,12 @@
14268 xor %rax, %rax
14269 rep stosq
14270
14271- mov %r12, %rsp # deallocate workspace
14272+ mov %r14, %rsp # deallocate workspace
14273
14274- pop %r12
14275+ pop %r14
14276 pop %rbp
14277 pop %rbx
14278+ pax_force_retaddr
14279 ret
14280
14281 ENDPROC(\name)
14282diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14283index 642f156..51a513c 100644
14284--- a/arch/x86/crypto/sha256-avx-asm.S
14285+++ b/arch/x86/crypto/sha256-avx-asm.S
14286@@ -49,6 +49,7 @@
14287
14288 #ifdef CONFIG_AS_AVX
14289 #include <linux/linkage.h>
14290+#include <asm/alternative-asm.h>
14291
14292 ## assume buffers not aligned
14293 #define VMOVDQ vmovdqu
14294@@ -460,6 +461,7 @@ done_hash:
14295 popq %r13
14296 popq %rbp
14297 popq %rbx
14298+ pax_force_retaddr
14299 ret
14300 ENDPROC(sha256_transform_avx)
14301
14302diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14303index 9e86944..3795e6a 100644
14304--- a/arch/x86/crypto/sha256-avx2-asm.S
14305+++ b/arch/x86/crypto/sha256-avx2-asm.S
14306@@ -50,6 +50,7 @@
14307
14308 #ifdef CONFIG_AS_AVX2
14309 #include <linux/linkage.h>
14310+#include <asm/alternative-asm.h>
14311
14312 ## assume buffers not aligned
14313 #define VMOVDQ vmovdqu
14314@@ -720,6 +721,7 @@ done_hash:
14315 popq %r12
14316 popq %rbp
14317 popq %rbx
14318+ pax_force_retaddr
14319 ret
14320 ENDPROC(sha256_transform_rorx)
14321
14322diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14323index f833b74..8c62a9e 100644
14324--- a/arch/x86/crypto/sha256-ssse3-asm.S
14325+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14326@@ -47,6 +47,7 @@
14327 ########################################################################
14328
14329 #include <linux/linkage.h>
14330+#include <asm/alternative-asm.h>
14331
14332 ## assume buffers not aligned
14333 #define MOVDQ movdqu
14334@@ -471,6 +472,7 @@ done_hash:
14335 popq %rbp
14336 popq %rbx
14337
14338+ pax_force_retaddr
14339 ret
14340 ENDPROC(sha256_transform_ssse3)
14341
14342diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14343index 974dde9..a823ff9 100644
14344--- a/arch/x86/crypto/sha512-avx-asm.S
14345+++ b/arch/x86/crypto/sha512-avx-asm.S
14346@@ -49,6 +49,7 @@
14347
14348 #ifdef CONFIG_AS_AVX
14349 #include <linux/linkage.h>
14350+#include <asm/alternative-asm.h>
14351
14352 .text
14353
14354@@ -364,6 +365,7 @@ updateblock:
14355 mov frame_RSPSAVE(%rsp), %rsp
14356
14357 nowork:
14358+ pax_force_retaddr
14359 ret
14360 ENDPROC(sha512_transform_avx)
14361
14362diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14363index 568b961..ed20c37 100644
14364--- a/arch/x86/crypto/sha512-avx2-asm.S
14365+++ b/arch/x86/crypto/sha512-avx2-asm.S
14366@@ -51,6 +51,7 @@
14367
14368 #ifdef CONFIG_AS_AVX2
14369 #include <linux/linkage.h>
14370+#include <asm/alternative-asm.h>
14371
14372 .text
14373
14374@@ -678,6 +679,7 @@ done_hash:
14375
14376 # Restore Stack Pointer
14377 mov frame_RSPSAVE(%rsp), %rsp
14378+ pax_force_retaddr
14379 ret
14380 ENDPROC(sha512_transform_rorx)
14381
14382diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14383index fb56855..6edd768 100644
14384--- a/arch/x86/crypto/sha512-ssse3-asm.S
14385+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14386@@ -48,6 +48,7 @@
14387 ########################################################################
14388
14389 #include <linux/linkage.h>
14390+#include <asm/alternative-asm.h>
14391
14392 .text
14393
14394@@ -363,6 +364,7 @@ updateblock:
14395 mov frame_RSPSAVE(%rsp), %rsp
14396
14397 nowork:
14398+ pax_force_retaddr
14399 ret
14400 ENDPROC(sha512_transform_ssse3)
14401
14402diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14403index 0505813..b067311 100644
14404--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14405+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14406@@ -24,6 +24,7 @@
14407 */
14408
14409 #include <linux/linkage.h>
14410+#include <asm/alternative-asm.h>
14411 #include "glue_helper-asm-avx.S"
14412
14413 .file "twofish-avx-x86_64-asm_64.S"
14414@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14415 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14416 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14417
14418+ pax_force_retaddr
14419 ret;
14420 ENDPROC(__twofish_enc_blk8)
14421
14422@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14423 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14424 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14425
14426+ pax_force_retaddr
14427 ret;
14428 ENDPROC(__twofish_dec_blk8)
14429
14430@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14431
14432 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14433
14434+ pax_force_retaddr
14435 ret;
14436 ENDPROC(twofish_ecb_enc_8way)
14437
14438@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14439
14440 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14441
14442+ pax_force_retaddr
14443 ret;
14444 ENDPROC(twofish_ecb_dec_8way)
14445
14446@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14447 * %rdx: src
14448 */
14449
14450- pushq %r12;
14451+ pushq %r14;
14452
14453 movq %rsi, %r11;
14454- movq %rdx, %r12;
14455+ movq %rdx, %r14;
14456
14457 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14458
14459 call __twofish_dec_blk8;
14460
14461- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14462+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14463
14464- popq %r12;
14465+ popq %r14;
14466
14467+ pax_force_retaddr
14468 ret;
14469 ENDPROC(twofish_cbc_dec_8way)
14470
14471@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14472 * %rcx: iv (little endian, 128bit)
14473 */
14474
14475- pushq %r12;
14476+ pushq %r14;
14477
14478 movq %rsi, %r11;
14479- movq %rdx, %r12;
14480+ movq %rdx, %r14;
14481
14482 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14483 RD2, RX0, RX1, RY0);
14484
14485 call __twofish_enc_blk8;
14486
14487- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14488+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14489
14490- popq %r12;
14491+ popq %r14;
14492
14493+ pax_force_retaddr
14494 ret;
14495 ENDPROC(twofish_ctr_8way)
14496
14497@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14498 /* dst <= regs xor IVs(in dst) */
14499 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500
14501+ pax_force_retaddr
14502 ret;
14503 ENDPROC(twofish_xts_enc_8way)
14504
14505@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14506 /* dst <= regs xor IVs(in dst) */
14507 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14508
14509+ pax_force_retaddr
14510 ret;
14511 ENDPROC(twofish_xts_dec_8way)
14512diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14513index 1c3b7ce..02f578d 100644
14514--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14515+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14516@@ -21,6 +21,7 @@
14517 */
14518
14519 #include <linux/linkage.h>
14520+#include <asm/alternative-asm.h>
14521
14522 .file "twofish-x86_64-asm-3way.S"
14523 .text
14524@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14525 popq %r13;
14526 popq %r14;
14527 popq %r15;
14528+ pax_force_retaddr
14529 ret;
14530
14531 .L__enc_xor3:
14532@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14533 popq %r13;
14534 popq %r14;
14535 popq %r15;
14536+ pax_force_retaddr
14537 ret;
14538 ENDPROC(__twofish_enc_blk_3way)
14539
14540@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14541 popq %r13;
14542 popq %r14;
14543 popq %r15;
14544+ pax_force_retaddr
14545 ret;
14546 ENDPROC(twofish_dec_blk_3way)
14547diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14548index a039d21..524b8b2 100644
14549--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14550+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14551@@ -22,6 +22,7 @@
14552
14553 #include <linux/linkage.h>
14554 #include <asm/asm-offsets.h>
14555+#include <asm/alternative-asm.h>
14556
14557 #define a_offset 0
14558 #define b_offset 4
14559@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14560
14561 popq R1
14562 movq $1,%rax
14563+ pax_force_retaddr
14564 ret
14565 ENDPROC(twofish_enc_blk)
14566
14567@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14568
14569 popq R1
14570 movq $1,%rax
14571+ pax_force_retaddr
14572 ret
14573 ENDPROC(twofish_dec_blk)
14574diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14575index ae6aad1..719d6d9 100644
14576--- a/arch/x86/ia32/ia32_aout.c
14577+++ b/arch/x86/ia32/ia32_aout.c
14578@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14579 unsigned long dump_start, dump_size;
14580 struct user32 dump;
14581
14582+ memset(&dump, 0, sizeof(dump));
14583+
14584 fs = get_fs();
14585 set_fs(KERNEL_DS);
14586 has_dumped = 1;
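
The added memset is an information-leak fix: dump is a large on-stack struct that aout_core_dump later writes out wholesale, so any field or padding byte the dump path never assigns would otherwise carry stale kernel stack contents into the core file. A minimal sketch of the bug class (the struct is a stand-in, not the real user32):

    #include <string.h>

    struct dump_demo {
        int  pid;        /* assigned below */
        char pad[60];    /* never assigned: would hold stale bytes */
    };

    void fill_dump(struct dump_demo *d)
    {
        memset(d, 0, sizeof(*d));  /* the fix: zero before partial init */
        d->pid = 42;               /* only some fields get real values */
    }
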
14587diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14588index d0165c9..0d5639b 100644
14589--- a/arch/x86/ia32/ia32_signal.c
14590+++ b/arch/x86/ia32/ia32_signal.c
14591@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14592 if (__get_user(set.sig[0], &frame->sc.oldmask)
14593 || (_COMPAT_NSIG_WORDS > 1
14594 && __copy_from_user((((char *) &set.sig) + 4),
14595- &frame->extramask,
14596+ frame->extramask,
14597 sizeof(frame->extramask))))
14598 goto badframe;
14599
14600@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14601 sp -= frame_size;
14602 /* Align the stack pointer according to the i386 ABI,
14603 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14604- sp = ((sp + 4) & -16ul) - 4;
14605+ sp = ((sp - 12) & -16ul) - 4;
14606 return (void __user *) sp;
14607 }
14608
14609@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14610 } else {
14611 /* Return stub is in 32bit vsyscall page */
14612 if (current->mm->context.vdso)
14613- restorer = current->mm->context.vdso +
14614- selected_vdso32->sym___kernel_sigreturn;
14615+ restorer = (void __force_user *)(current->mm->context.vdso +
14616+ selected_vdso32->sym___kernel_sigreturn);
14617 else
14618- restorer = &frame->retcode;
14619+ restorer = frame->retcode;
14620 }
14621
14622 put_user_try {
14623@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14624 * These are actually not used anymore, but left because some
14625 * gdb versions depend on them as a marker.
14626 */
14627- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14628+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14629 } put_user_catch(err);
14630
14631 if (err)
14632@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14633 0xb8,
14634 __NR_ia32_rt_sigreturn,
14635 0x80cd,
14636- 0,
14637+ 0
14638 };
14639
14640 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14641@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14642
14643 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14644 restorer = ksig->ka.sa.sa_restorer;
14645+ else if (current->mm->context.vdso)
14646+ /* Return stub is in 32bit vsyscall page */
14647+ restorer = (void __force_user *)(current->mm->context.vdso +
14648+ selected_vdso32->sym___kernel_rt_sigreturn);
14649 else
14650- restorer = current->mm->context.vdso +
14651- selected_vdso32->sym___kernel_rt_sigreturn;
14652+ restorer = frame->retcode;
14653 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14654
14655 /*
14656 * Not actually used anymore, but left because some gdb
14657 * versions need it.
14658 */
14659- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14660+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14661 } put_user_catch(err);
14662
14663 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
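
Both get_sigframe formulas above satisfy the i386 ABI rule quoted in the comment, ((sp + 4) & 15) == 0 on function entry. The difference is that the old form can hand back sp unchanged when sp is already congruent to 12 mod 16, while the patched (sp - 12) form always lands strictly below sp, presumably to guarantee headroom for the frame. A quick self-check of both properties:

    #include <assert.h>

    int main(void)
    {
        for (unsigned long sp = 0x1000; sp < 0x1020; sp++) {
            unsigned long old_sp = ((sp + 4) & -16ul) - 4;
            unsigned long new_sp = ((sp - 12) & -16ul) - 4;

            assert(((old_sp + 4) & 15) == 0);  /* ABI holds either way */
            assert(((new_sp + 4) & 15) == 0);
            assert(old_sp <= sp);              /* can equal sp */
            assert(new_sp <  sp);              /* never equals sp */
        }
        return 0;
    }
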
14664diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14665index 156ebca..9591cf0 100644
14666--- a/arch/x86/ia32/ia32entry.S
14667+++ b/arch/x86/ia32/ia32entry.S
14668@@ -15,8 +15,10 @@
14669 #include <asm/irqflags.h>
14670 #include <asm/asm.h>
14671 #include <asm/smap.h>
14672+#include <asm/pgtable.h>
14673 #include <linux/linkage.h>
14674 #include <linux/err.h>
14675+#include <asm/alternative-asm.h>
14676
14677 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14678 #include <linux/elf-em.h>
14679@@ -62,12 +64,12 @@
14680 */
14681 .macro LOAD_ARGS32 offset, _r9=0
14682 .if \_r9
14683- movl \offset+16(%rsp),%r9d
14684+ movl \offset+R9(%rsp),%r9d
14685 .endif
14686- movl \offset+40(%rsp),%ecx
14687- movl \offset+48(%rsp),%edx
14688- movl \offset+56(%rsp),%esi
14689- movl \offset+64(%rsp),%edi
14690+ movl \offset+RCX(%rsp),%ecx
14691+ movl \offset+RDX(%rsp),%edx
14692+ movl \offset+RSI(%rsp),%esi
14693+ movl \offset+RDI(%rsp),%edi
14694 movl %eax,%eax /* zero extension */
14695 .endm
14696
14697@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14698 ENDPROC(native_irq_enable_sysexit)
14699 #endif
14700
14701+ .macro pax_enter_kernel_user
14702+ pax_set_fptr_mask
14703+#ifdef CONFIG_PAX_MEMORY_UDEREF
14704+ call pax_enter_kernel_user
14705+#endif
14706+ .endm
14707+
14708+ .macro pax_exit_kernel_user
14709+#ifdef CONFIG_PAX_MEMORY_UDEREF
14710+ call pax_exit_kernel_user
14711+#endif
14712+#ifdef CONFIG_PAX_RANDKSTACK
14713+ pushq %rax
14714+ pushq %r11
14715+ call pax_randomize_kstack
14716+ popq %r11
14717+ popq %rax
14718+#endif
14719+ .endm
14720+
14721+ .macro pax_erase_kstack
14722+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14723+ call pax_erase_kstack
14724+#endif
14725+ .endm
14726+
14727 /*
14728 * 32bit SYSENTER instruction entry.
14729 *
14730@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14731 CFI_REGISTER rsp,rbp
14732 SWAPGS_UNSAFE_STACK
14733 movq PER_CPU_VAR(kernel_stack), %rsp
14734- addq $(KERNEL_STACK_OFFSET),%rsp
14735- /*
14736- * No need to follow this irqs on/off section: the syscall
14737- * disabled irqs, here we enable it straight after entry:
14738- */
14739- ENABLE_INTERRUPTS(CLBR_NONE)
14740 movl %ebp,%ebp /* zero extension */
14741 pushq_cfi $__USER32_DS
14742 /*CFI_REL_OFFSET ss,0*/
14743@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14744 CFI_REL_OFFSET rsp,0
14745 pushfq_cfi
14746 /*CFI_REL_OFFSET rflags,0*/
14747- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14748- CFI_REGISTER rip,r10
14749+ orl $X86_EFLAGS_IF,(%rsp)
14750+ GET_THREAD_INFO(%r11)
14751+ movl TI_sysenter_return(%r11), %r11d
14752+ CFI_REGISTER rip,r11
14753 pushq_cfi $__USER32_CS
14754 /*CFI_REL_OFFSET cs,0*/
14755 movl %eax, %eax
14756- pushq_cfi %r10
14757+ pushq_cfi %r11
14758 CFI_REL_OFFSET rip,0
14759 pushq_cfi %rax
14760 cld
14761 SAVE_ARGS 0,1,0
14762+ pax_enter_kernel_user
14763+
14764+#ifdef CONFIG_PAX_RANDKSTACK
14765+ pax_erase_kstack
14766+#endif
14767+
14768+ /*
14769+ * No need to follow this irqs on/off section: the syscall
14770+ * disabled irqs, here we enable it straight after entry:
14771+ */
14772+ ENABLE_INTERRUPTS(CLBR_NONE)
14773 /* no need to do an access_ok check here because rbp has been
14774 32bit zero extended */
14775+
14776+#ifdef CONFIG_PAX_MEMORY_UDEREF
14777+ addq pax_user_shadow_base,%rbp
14778+ ASM_PAX_OPEN_USERLAND
14779+#endif
14780+
14781 ASM_STAC
14782 1: movl (%rbp),%ebp
14783 _ASM_EXTABLE(1b,ia32_badarg)
14784 ASM_CLAC
14785
14786+#ifdef CONFIG_PAX_MEMORY_UDEREF
14787+ ASM_PAX_CLOSE_USERLAND
14788+#endif
14789+
14790 /*
14791 * Sysenter doesn't filter flags, so we need to clear NT
14792 * ourselves. To save a few cycles, we can check whether
14793@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14794 jnz sysenter_fix_flags
14795 sysenter_flags_fixed:
14796
14797- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14798- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14799+ GET_THREAD_INFO(%r11)
14800+ orl $TS_COMPAT,TI_status(%r11)
14801+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14802 CFI_REMEMBER_STATE
14803 jnz sysenter_tracesys
14804 cmpq $(IA32_NR_syscalls-1),%rax
14805@@ -172,14 +218,17 @@ sysenter_do_call:
14806 sysenter_dispatch:
14807 call *ia32_sys_call_table(,%rax,8)
14808 movq %rax,RAX-ARGOFFSET(%rsp)
14809+ GET_THREAD_INFO(%r11)
14810 DISABLE_INTERRUPTS(CLBR_NONE)
14811 TRACE_IRQS_OFF
14812- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14813+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14814 jnz sysexit_audit
14815 sysexit_from_sys_call:
14816- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14817+ pax_exit_kernel_user
14818+ pax_erase_kstack
14819+ andl $~TS_COMPAT,TI_status(%r11)
14820 /* clear IF, that popfq doesn't enable interrupts early */
14821- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
14822+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
14823 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
14824 CFI_REGISTER rip,rdx
14825 RESTORE_ARGS 0,24,0,0,0,0
14826@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14827 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14828 movl %eax,%edi /* 1st arg: syscall number */
14829 call __audit_syscall_entry
14830+
14831+ pax_erase_kstack
14832+
14833 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14834 cmpq $(IA32_NR_syscalls-1),%rax
14835 ja ia32_badsys
14836@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14837 .endm
14838
14839 .macro auditsys_exit exit
14840- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14841+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14842 jnz ia32_ret_from_sys_call
14843 TRACE_IRQS_ON
14844 ENABLE_INTERRUPTS(CLBR_NONE)
14845@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14846 1: setbe %al /* 1 if error, 0 if not */
14847 movzbl %al,%edi /* zero-extend that into %edi */
14848 call __audit_syscall_exit
14849+ GET_THREAD_INFO(%r11)
14850 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14851 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14852 DISABLE_INTERRUPTS(CLBR_NONE)
14853 TRACE_IRQS_OFF
14854- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14855+ testl %edi,TI_flags(%r11)
14856 jz \exit
14857 CLEAR_RREGS -ARGOFFSET
14858 jmp int_with_check
14859@@ -253,7 +306,7 @@ sysenter_fix_flags:
14860
14861 sysenter_tracesys:
14862 #ifdef CONFIG_AUDITSYSCALL
14863- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14864+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14865 jz sysenter_auditsys
14866 #endif
14867 SAVE_REST
14868@@ -265,6 +318,9 @@ sysenter_tracesys:
14869 RESTORE_REST
14870 cmpq $(IA32_NR_syscalls-1),%rax
14871 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14872+
14873+ pax_erase_kstack
14874+
14875 jmp sysenter_do_call
14876 CFI_ENDPROC
14877 ENDPROC(ia32_sysenter_target)
14878@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14879 ENTRY(ia32_cstar_target)
14880 CFI_STARTPROC32 simple
14881 CFI_SIGNAL_FRAME
14882- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14883+ CFI_DEF_CFA rsp,0
14884 CFI_REGISTER rip,rcx
14885 /*CFI_REGISTER rflags,r11*/
14886 SWAPGS_UNSAFE_STACK
14887 movl %esp,%r8d
14888 CFI_REGISTER rsp,r8
14889 movq PER_CPU_VAR(kernel_stack),%rsp
14890+ SAVE_ARGS 8*6,0,0
14891+ pax_enter_kernel_user
14892+
14893+#ifdef CONFIG_PAX_RANDKSTACK
14894+ pax_erase_kstack
14895+#endif
14896+
14897 /*
14898 * No need to follow this irqs on/off section: the syscall
14899 * disabled irqs and here we enable it straight after entry:
14900 */
14901 ENABLE_INTERRUPTS(CLBR_NONE)
14902- SAVE_ARGS 8,0,0
14903 movl %eax,%eax /* zero extension */
14904 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14905 movq %rcx,RIP-ARGOFFSET(%rsp)
14906@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14907 /* no need to do an access_ok check here because r8 has been
14908 32bit zero extended */
14909 /* hardware stack frame is complete now */
14910+
14911+#ifdef CONFIG_PAX_MEMORY_UDEREF
14912+ ASM_PAX_OPEN_USERLAND
14913+ movq pax_user_shadow_base,%r8
14914+ addq RSP-ARGOFFSET(%rsp),%r8
14915+#endif
14916+
14917 ASM_STAC
14918 1: movl (%r8),%r9d
14919 _ASM_EXTABLE(1b,ia32_badarg)
14920 ASM_CLAC
14921- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14922- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14923+
14924+#ifdef CONFIG_PAX_MEMORY_UDEREF
14925+ ASM_PAX_CLOSE_USERLAND
14926+#endif
14927+
14928+ GET_THREAD_INFO(%r11)
14929+ orl $TS_COMPAT,TI_status(%r11)
14930+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14931 CFI_REMEMBER_STATE
14932 jnz cstar_tracesys
14933 cmpq $IA32_NR_syscalls-1,%rax
14934@@ -335,13 +410,16 @@ cstar_do_call:
14935 cstar_dispatch:
14936 call *ia32_sys_call_table(,%rax,8)
14937 movq %rax,RAX-ARGOFFSET(%rsp)
14938+ GET_THREAD_INFO(%r11)
14939 DISABLE_INTERRUPTS(CLBR_NONE)
14940 TRACE_IRQS_OFF
14941- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14942+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14943 jnz sysretl_audit
14944 sysretl_from_sys_call:
14945- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14946- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14947+ pax_exit_kernel_user
14948+ pax_erase_kstack
14949+ andl $~TS_COMPAT,TI_status(%r11)
14950+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14951 movl RIP-ARGOFFSET(%rsp),%ecx
14952 CFI_REGISTER rip,rcx
14953 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14954@@ -368,7 +446,7 @@ sysretl_audit:
14955
14956 cstar_tracesys:
14957 #ifdef CONFIG_AUDITSYSCALL
14958- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14959+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14960 jz cstar_auditsys
14961 #endif
14962 xchgl %r9d,%ebp
14963@@ -382,11 +460,19 @@ cstar_tracesys:
14964 xchgl %ebp,%r9d
14965 cmpq $(IA32_NR_syscalls-1),%rax
14966 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14967+
14968+ pax_erase_kstack
14969+
14970 jmp cstar_do_call
14971 END(ia32_cstar_target)
14972
14973 ia32_badarg:
14974 ASM_CLAC
14975+
14976+#ifdef CONFIG_PAX_MEMORY_UDEREF
14977+ ASM_PAX_CLOSE_USERLAND
14978+#endif
14979+
14980 movq $-EFAULT,%rax
14981 jmp ia32_sysret
14982 CFI_ENDPROC
14983@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14984 CFI_REL_OFFSET rip,RIP-RIP
14985 PARAVIRT_ADJUST_EXCEPTION_FRAME
14986 SWAPGS
14987- /*
14988- * No need to follow this irqs on/off section: the syscall
14989- * disabled irqs and here we enable it straight after entry:
14990- */
14991- ENABLE_INTERRUPTS(CLBR_NONE)
14992 movl %eax,%eax
14993 pushq_cfi %rax
14994 cld
14995 /* note the registers are not zero extended to the sf.
14996 this could be a problem. */
14997 SAVE_ARGS 0,1,0
14998- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14999- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15000+ pax_enter_kernel_user
15001+
15002+#ifdef CONFIG_PAX_RANDKSTACK
15003+ pax_erase_kstack
15004+#endif
15005+
15006+ /*
15007+ * No need to follow this irqs on/off section: the syscall
15008+ * disabled irqs and here we enable it straight after entry:
15009+ */
15010+ ENABLE_INTERRUPTS(CLBR_NONE)
15011+ GET_THREAD_INFO(%r11)
15012+ orl $TS_COMPAT,TI_status(%r11)
15013+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15014 jnz ia32_tracesys
15015 cmpq $(IA32_NR_syscalls-1),%rax
15016 ja ia32_badsys
15017@@ -458,6 +551,9 @@ ia32_tracesys:
15018 RESTORE_REST
15019 cmpq $(IA32_NR_syscalls-1),%rax
15020 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15021+
15022+ pax_erase_kstack
15023+
15024 jmp ia32_do_call
15025 END(ia32_syscall)
15026
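The UDEREF block in the cstar path above cannot dereference the 32-bit user stack pointer in %r8 directly: under PAX_MEMORY_UDEREF userland is unmapped from the kernel's normal view and is reachable only through a shadow mapping, so the code rebases the pointer by pax_user_shadow_base before the "movl (%r8),%r9d" load. A minimal userspace model of the rebasing arithmetic (the constant and names here are illustrative, not what PaX actually picks at boot):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constant: the real base is chosen by the PaX boot code. */
    static const uintptr_t pax_user_shadow_base = (uintptr_t)1 << 47;

    /* Mirrors "movq pax_user_shadow_base,%r8; addq RSP-ARGOFFSET(%rsp),%r8":
     * the user pointer is rebased into the shadow range before the load. */
    static uintptr_t uderef_shadow(uintptr_t user_ptr)
    {
        return user_ptr + pax_user_shadow_base;
    }

    int main(void)
    {
        uintptr_t user_sp = 0x7ffffffde000u;

        printf("user %#" PRIxPTR " -> shadow %#" PRIxPTR "\n",
               user_sp, uderef_shadow(user_sp));
        return 0;
    }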
15027diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15028index 8e0ceec..af13504 100644
15029--- a/arch/x86/ia32/sys_ia32.c
15030+++ b/arch/x86/ia32/sys_ia32.c
15031@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15032 */
15033 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15034 {
15035- typeof(ubuf->st_uid) uid = 0;
15036- typeof(ubuf->st_gid) gid = 0;
15037+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15038+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15039 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15040 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15041 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
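The cp_stat64 change swaps typeof(ubuf->st_uid) for typeof(((struct stat64 *)0)->st_uid): ubuf is a __user pointer, and presumably the PaX instrumentation objects to forming even a typeof-level member access through it, so the member's type is taken from a null struct pointer instead, which typeof never evaluates. A standalone sketch of the idiom (GNU C):

    #include <stdio.h>

    struct stat64_like { unsigned int st_uid, st_gid; };

    int main(void)
    {
        /* typeof on (((T *)0)->member) yields the member's type without ever
         * evaluating the expression, so the null pointer is never followed. */
        typeof(((struct stat64_like *)0)->st_uid) uid = 0;

        printf("sizeof uid = %zu\n", sizeof uid);
        return (int)uid;
    }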
15042diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15043index 372231c..51b537d 100644
15044--- a/arch/x86/include/asm/alternative-asm.h
15045+++ b/arch/x86/include/asm/alternative-asm.h
15046@@ -18,6 +18,45 @@
15047 .endm
15048 #endif
15049
15050+#ifdef KERNEXEC_PLUGIN
15051+ .macro pax_force_retaddr_bts rip=0
15052+ btsq $63,\rip(%rsp)
15053+ .endm
15054+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15055+ .macro pax_force_retaddr rip=0, reload=0
15056+ btsq $63,\rip(%rsp)
15057+ .endm
15058+ .macro pax_force_fptr ptr
15059+ btsq $63,\ptr
15060+ .endm
15061+ .macro pax_set_fptr_mask
15062+ .endm
15063+#endif
15064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15065+ .macro pax_force_retaddr rip=0, reload=0
15066+ .if \reload
15067+ pax_set_fptr_mask
15068+ .endif
15069+ orq %r12,\rip(%rsp)
15070+ .endm
15071+ .macro pax_force_fptr ptr
15072+ orq %r12,\ptr
15073+ .endm
15074+ .macro pax_set_fptr_mask
15075+ movabs $0x8000000000000000,%r12
15076+ .endm
15077+#endif
15078+#else
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .endm
15081+ .macro pax_force_fptr ptr
15082+ .endm
15083+ .macro pax_force_retaddr_bts rip=0
15084+ .endm
15085+ .macro pax_set_fptr_mask
15086+ .endm
15087+#endif
15088+
15089 .macro altinstruction_entry orig alt feature orig_len alt_len
15090 .long \orig - .
15091 .long \alt - .
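These macros are the runtime half of the KERNEXEC plugin: pax_force_retaddr/pax_force_fptr force bit 63 on in saved return addresses and function pointers, either with "btsq $63" (BTS method) or by OR-ing a mask kept in %r12 (OR method, loaded by pax_set_fptr_mask). A pointer diverted to a userland address thereby becomes non-canonical and faults instead of executing. A userspace model of the masking:

    #include <stdint.h>
    #include <stdio.h>

    #define KERNEL_BIT (1ULL << 63)   /* what "btsq $63,\rip(%rsp)" sets */

    /* METHOD_BTS flavor: unconditionally force the top bit on. */
    static uint64_t pax_force_ptr(uint64_t ptr)
    {
        return ptr | KERNEL_BIT;
    }

    int main(void)
    {
        uint64_t diverted = 0x0000000000401000ull;  /* userland target */

        /* 0x8000000000401000 is non-canonical on x86-64: a control transfer
         * there faults instead of running attacker-chosen code. */
        printf("%#llx -> %#llx\n", (unsigned long long)diverted,
               (unsigned long long)pax_force_ptr(diverted));
        return 0;
    }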
15092diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15093index 473bdbe..b1e3377 100644
15094--- a/arch/x86/include/asm/alternative.h
15095+++ b/arch/x86/include/asm/alternative.h
15096@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15097 ".pushsection .discard,\"aw\",@progbits\n" \
15098 DISCARD_ENTRY(1) \
15099 ".popsection\n" \
15100- ".pushsection .altinstr_replacement, \"ax\"\n" \
15101+ ".pushsection .altinstr_replacement, \"a\"\n" \
15102 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15103 ".popsection"
15104
15105@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15106 DISCARD_ENTRY(1) \
15107 DISCARD_ENTRY(2) \
15108 ".popsection\n" \
15109- ".pushsection .altinstr_replacement, \"ax\"\n" \
15110+ ".pushsection .altinstr_replacement, \"a\"\n" \
15111 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15112 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15113 ".popsection"
15114diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15115index efc3b22..85c4f3a 100644
15116--- a/arch/x86/include/asm/apic.h
15117+++ b/arch/x86/include/asm/apic.h
15118@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15119
15120 #ifdef CONFIG_X86_LOCAL_APIC
15121
15122-extern unsigned int apic_verbosity;
15123+extern int apic_verbosity;
15124 extern int local_apic_timer_c2_ok;
15125
15126 extern int disable_apic;
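Dropping "unsigned" from apic_verbosity is a signedness fix: the variable is presumably decremented or compared against zero elsewhere in the patched code, and with an unsigned type such below-zero checks are vacuously false while decrements silently wrap. A two-line demonstration of the pitfall:

    #include <stdio.h>

    int main(void)
    {
        unsigned int u = 0;
        int s = 0;

        printf("unsigned: %u\n", u - 1);  /* wraps to UINT_MAX; a "< 0" test
                                           * on it can never fire */
        printf("signed:   %d\n", s - 1);  /* -1; "< 0" tests behave */
        return 0;
    }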
15127diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15128index 20370c6..a2eb9b0 100644
15129--- a/arch/x86/include/asm/apm.h
15130+++ b/arch/x86/include/asm/apm.h
15131@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15132 __asm__ __volatile__(APM_DO_ZERO_SEGS
15133 "pushl %%edi\n\t"
15134 "pushl %%ebp\n\t"
15135- "lcall *%%cs:apm_bios_entry\n\t"
15136+ "lcall *%%ss:apm_bios_entry\n\t"
15137 "setc %%al\n\t"
15138 "popl %%ebp\n\t"
15139 "popl %%edi\n\t"
15140@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15141 __asm__ __volatile__(APM_DO_ZERO_SEGS
15142 "pushl %%edi\n\t"
15143 "pushl %%ebp\n\t"
15144- "lcall *%%cs:apm_bios_entry\n\t"
15145+ "lcall *%%ss:apm_bios_entry\n\t"
15146 "setc %%bl\n\t"
15147 "popl %%ebp\n\t"
15148 "popl %%edi\n\t"
15149diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15150index 5e5cd12..51cdc93 100644
15151--- a/arch/x86/include/asm/atomic.h
15152+++ b/arch/x86/include/asm/atomic.h
15153@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15154 }
15155
15156 /**
15157+ * atomic_read_unchecked - read atomic variable
15158+ * @v: pointer of type atomic_unchecked_t
15159+ *
15160+ * Atomically reads the value of @v.
15161+ */
15162+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15163+{
15164+ return ACCESS_ONCE((v)->counter);
15165+}
15166+
15167+/**
15168 * atomic_set - set atomic variable
15169 * @v: pointer of type atomic_t
15170 * @i: required value
15171@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15172 }
15173
15174 /**
15175+ * atomic_set_unchecked - set atomic variable
15176+ * @v: pointer of type atomic_unchecked_t
15177+ * @i: required value
15178+ *
15179+ * Atomically sets the value of @v to @i.
15180+ */
15181+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15182+{
15183+ v->counter = i;
15184+}
15185+
15186+/**
15187 * atomic_add - add integer to atomic variable
15188 * @i: integer value to add
15189 * @v: pointer of type atomic_t
15190@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15191 */
15192 static inline void atomic_add(int i, atomic_t *v)
15193 {
15194- asm volatile(LOCK_PREFIX "addl %1,%0"
15195+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15196+
15197+#ifdef CONFIG_PAX_REFCOUNT
15198+ "jno 0f\n"
15199+ LOCK_PREFIX "subl %1,%0\n"
15200+ "int $4\n0:\n"
15201+ _ASM_EXTABLE(0b, 0b)
15202+#endif
15203+
15204+ : "+m" (v->counter)
15205+ : "ir" (i));
15206+}
15207+
15208+/**
15209+ * atomic_add_unchecked - add integer to atomic variable
15210+ * @i: integer value to add
15211+ * @v: pointer of type atomic_unchecked_t
15212+ *
15213+ * Atomically adds @i to @v.
15214+ */
15215+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15216+{
15217+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15218 : "+m" (v->counter)
15219 : "ir" (i));
15220 }
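This hunk shows the PAX_REFCOUNT pattern in its canonical form: do the locked add, then "jno" falls through when the signed-overflow flag is clear; otherwise the compensating "subl" undoes the add and "int $4" raises the overflow exception, which PaX turns into a refcount report, leaving the counter saturated rather than wrapped. A portable C model of the detect-and-undo logic (the asm adds first and compensates; this model simply refuses to apply the add, with the same saturated outcome):

    #include <limits.h>
    #include <stdio.h>

    static int refcount_add(int i, int *counter)
    {
        long long sum = (long long)*counter + i;  /* widen: portable OF check */

        if (sum > INT_MAX || sum < INT_MIN)
            return -1;                            /* stands in for "int $4" */
        *counter = (int)sum;
        return 0;
    }

    int main(void)
    {
        int c = INT_MAX;

        if (refcount_add(1, &c) < 0)
            printf("overflow blocked, counter held at %d\n", c);
        return 0;
    }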
15221@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15222 */
15223 static inline void atomic_sub(int i, atomic_t *v)
15224 {
15225- asm volatile(LOCK_PREFIX "subl %1,%0"
15226+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15227+
15228+#ifdef CONFIG_PAX_REFCOUNT
15229+ "jno 0f\n"
15230+ LOCK_PREFIX "addl %1,%0\n"
15231+ "int $4\n0:\n"
15232+ _ASM_EXTABLE(0b, 0b)
15233+#endif
15234+
15235+ : "+m" (v->counter)
15236+ : "ir" (i));
15237+}
15238+
15239+/**
15240+ * atomic_sub_unchecked - subtract integer from atomic variable
15241+ * @i: integer value to subtract
15242+ * @v: pointer of type atomic_unchecked_t
15243+ *
15244+ * Atomically subtracts @i from @v.
15245+ */
15246+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15247+{
15248+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15249 : "+m" (v->counter)
15250 : "ir" (i));
15251 }
15252@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15253 */
15254 static inline int atomic_sub_and_test(int i, atomic_t *v)
15255 {
15256- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15257+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15258 }
15259
15260 /**
15261@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15262 */
15263 static inline void atomic_inc(atomic_t *v)
15264 {
15265- asm volatile(LOCK_PREFIX "incl %0"
15266+ asm volatile(LOCK_PREFIX "incl %0\n"
15267+
15268+#ifdef CONFIG_PAX_REFCOUNT
15269+ "jno 0f\n"
15270+ LOCK_PREFIX "decl %0\n"
15271+ "int $4\n0:\n"
15272+ _ASM_EXTABLE(0b, 0b)
15273+#endif
15274+
15275+ : "+m" (v->counter));
15276+}
15277+
15278+/**
15279+ * atomic_inc_unchecked - increment atomic variable
15280+ * @v: pointer of type atomic_unchecked_t
15281+ *
15282+ * Atomically increments @v by 1.
15283+ */
15284+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15285+{
15286+ asm volatile(LOCK_PREFIX "incl %0\n"
15287 : "+m" (v->counter));
15288 }
15289
15290@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15291 */
15292 static inline void atomic_dec(atomic_t *v)
15293 {
15294- asm volatile(LOCK_PREFIX "decl %0"
15295+ asm volatile(LOCK_PREFIX "decl %0\n"
15296+
15297+#ifdef CONFIG_PAX_REFCOUNT
15298+ "jno 0f\n"
15299+ LOCK_PREFIX "incl %0\n"
15300+ "int $4\n0:\n"
15301+ _ASM_EXTABLE(0b, 0b)
15302+#endif
15303+
15304+ : "+m" (v->counter));
15305+}
15306+
15307+/**
15308+ * atomic_dec_unchecked - decrement atomic variable
15309+ * @v: pointer of type atomic_unchecked_t
15310+ *
15311+ * Atomically decrements @v by 1.
15312+ */
15313+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15314+{
15315+ asm volatile(LOCK_PREFIX "decl %0\n"
15316 : "+m" (v->counter));
15317 }
15318
15319@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15320 */
15321 static inline int atomic_dec_and_test(atomic_t *v)
15322 {
15323- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15324+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15325 }
15326
15327 /**
15328@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15329 */
15330 static inline int atomic_inc_and_test(atomic_t *v)
15331 {
15332- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15333+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15334+}
15335+
15336+/**
15337+ * atomic_inc_and_test_unchecked - increment and test
15338+ * @v: pointer of type atomic_unchecked_t
15339+ *
15340+ * Atomically increments @v by 1
15341+ * and returns true if the result is zero, or false for all
15342+ * other cases.
15343+ */
15344+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15345+{
15346+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347 }
15348
15349 /**
15350@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15351 */
15352 static inline int atomic_add_negative(int i, atomic_t *v)
15353 {
15354- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15355+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15356 }
15357
15358 /**
15359@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15360 *
15361 * Atomically adds @i to @v and returns @i + @v
15362 */
15363-static inline int atomic_add_return(int i, atomic_t *v)
15364+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15365+{
15366+ return i + xadd_check_overflow(&v->counter, i);
15367+}
15368+
15369+/**
15370+ * atomic_add_return_unchecked - add integer and return
15371+ * @i: integer value to add
15372+ * @v: pointer of type atomic_unchecked_t
15373+ *
15374+ * Atomically adds @i to @v and returns @i + @v
15375+ */
15376+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15377 {
15378 return i + xadd(&v->counter, i);
15379 }
15380@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15381 *
15382 * Atomically subtracts @i from @v and returns @v - @i
15383 */
15384-static inline int atomic_sub_return(int i, atomic_t *v)
15385+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15386 {
15387 return atomic_add_return(-i, v);
15388 }
15389
15390 #define atomic_inc_return(v) (atomic_add_return(1, v))
15391+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15392+{
15393+ return atomic_add_return_unchecked(1, v);
15394+}
15395 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15396
15397-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15398+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15399+{
15400+ return cmpxchg(&v->counter, old, new);
15401+}
15402+
15403+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15404 {
15405 return cmpxchg(&v->counter, old, new);
15406 }
15407@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15408 return xchg(&v->counter, new);
15409 }
15410
15411+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15412+{
15413+ return xchg(&v->counter, new);
15414+}
15415+
15416 /**
15417 * __atomic_add_unless - add unless the number is already a given value
15418 * @v: pointer of type atomic_t
15419@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15420 */
15421 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15422 {
15423- int c, old;
15424+ int c, old, new;
15425 c = atomic_read(v);
15426 for (;;) {
15427- if (unlikely(c == (u)))
15428+ if (unlikely(c == u))
15429 break;
15430- old = atomic_cmpxchg((v), c, c + (a));
15431+
15432+ asm volatile("addl %2,%0\n"
15433+
15434+#ifdef CONFIG_PAX_REFCOUNT
15435+ "jno 0f\n"
15436+ "subl %2,%0\n"
15437+ "int $4\n0:\n"
15438+ _ASM_EXTABLE(0b, 0b)
15439+#endif
15440+
15441+ : "=r" (new)
15442+ : "0" (c), "ir" (a));
15443+
15444+ old = atomic_cmpxchg(v, c, new);
15445 if (likely(old == c))
15446 break;
15447 c = old;
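The rewritten loop keeps __atomic_add_unless's standard compare-and-swap retry shape; the only change is that "c + a" is computed through the instrumented asm so the addition itself is overflow-checked before the cmpxchg publishes it. The loop's shape, modeled with GCC's __atomic builtins:

    #include <stdbool.h>
    #include <stdio.h>

    /* Add a to *v unless *v == u, returning the old value. */
    static int add_unless(int *v, int a, int u)
    {
        int c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
            if (c == u)
                break;
            /* On failure the builtin reloads c, just like "c = old;" above. */
            if (__atomic_compare_exchange_n(v, &c, c + a, false,
                                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                break;
        }
        return c;
    }

    int main(void)
    {
        int v = 3;

        printf("old=%d new=%d\n", add_unless(&v, 2, 0), v);  /* old=3 new=5 */
        return 0;
    }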
15448@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15449 }
15450
15451 /**
15452+ * atomic_inc_not_zero_hint - increment if not null
15453+ * @v: pointer of type atomic_t
15454+ * @hint: probable value of the atomic before the increment
15455+ *
15456+ * This version of atomic_inc_not_zero() gives a hint of probable
15457+ * value of the atomic. This helps processor to not read the memory
15458+ * before doing the atomic read/modify/write cycle, lowering
15459+ * number of bus transactions on some arches.
15460+ *
15461+ * Returns: 0 if increment was not done, 1 otherwise.
15462+ */
15463+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15464+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15465+{
15466+ int val, c = hint, new;
15467+
15468+ /* sanity test, should be removed by compiler if hint is a constant */
15469+ if (!hint)
15470+ return __atomic_add_unless(v, 1, 0);
15471+
15472+ do {
15473+ asm volatile("incl %0\n"
15474+
15475+#ifdef CONFIG_PAX_REFCOUNT
15476+ "jno 0f\n"
15477+ "decl %0\n"
15478+ "int $4\n0:\n"
15479+ _ASM_EXTABLE(0b, 0b)
15480+#endif
15481+
15482+ : "=r" (new)
15483+ : "0" (c));
15484+
15485+ val = atomic_cmpxchg(v, c, new);
15486+ if (val == c)
15487+ return 1;
15488+ c = val;
15489+ } while (c);
15490+
15491+ return 0;
15492+}
15493+
15494+/**
15495 * atomic_inc_short - increment of a short integer
15496 * @v: pointer to type int
15497 *
15498@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15499 }
15500
15501 /* These are x86-specific, used by some header files */
15502-#define atomic_clear_mask(mask, addr) \
15503- asm volatile(LOCK_PREFIX "andl %0,%1" \
15504- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15505+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15506+{
15507+ asm volatile(LOCK_PREFIX "andl %1,%0"
15508+ : "+m" (v->counter)
15509+ : "r" (~(mask))
15510+ : "memory");
15511+}
15512
15513-#define atomic_set_mask(mask, addr) \
15514- asm volatile(LOCK_PREFIX "orl %0,%1" \
15515- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15516- : "memory")
15517+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15518+{
15519+ asm volatile(LOCK_PREFIX "andl %1,%0"
15520+ : "+m" (v->counter)
15521+ : "r" (~(mask))
15522+ : "memory");
15523+}
15524+
15525+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15526+{
15527+ asm volatile(LOCK_PREFIX "orl %1,%0"
15528+ : "+m" (v->counter)
15529+ : "r" (mask)
15530+ : "memory");
15531+}
15532+
15533+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15534+{
15535+ asm volatile(LOCK_PREFIX "orl %1,%0"
15536+ : "+m" (v->counter)
15537+ : "r" (mask)
15538+ : "memory");
15539+}
15540
15541 #ifdef CONFIG_X86_32
15542 # include <asm/atomic64_32.h>
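Taken together, the atomic.h changes split the API in two: atomic_t operations trap on signed overflow, while atomic_unchecked_t is the explicit opt-out for counters that may legitimately wrap (statistics, cookies, sequence numbers). Because the two are distinct struct types, mixing the families is a compile error, which is what makes the opt-out auditable. A toy version of the split:

    #include <stdio.h>

    typedef struct { int counter; } atomic_t;
    typedef struct { int counter; } atomic_unchecked_t;  /* distinct on purpose */

    static inline void atomic_inc(atomic_t *v)  { v->counter++; /* + OF trap */ }
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        v->counter++;                            /* may wrap, by design */
    }

    int main(void)
    {
        atomic_unchecked_t stats = { 0 };

        atomic_inc_unchecked(&stats);
        /* atomic_inc(&stats);   <- would not compile: every wrap-allowed
         *                          counter is an explicit, greppable opt-out */
        printf("%d\n", stats.counter);
        return 0;
    }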
15543diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15544index b154de7..bf18a5a 100644
15545--- a/arch/x86/include/asm/atomic64_32.h
15546+++ b/arch/x86/include/asm/atomic64_32.h
15547@@ -12,6 +12,14 @@ typedef struct {
15548 u64 __aligned(8) counter;
15549 } atomic64_t;
15550
15551+#ifdef CONFIG_PAX_REFCOUNT
15552+typedef struct {
15553+ u64 __aligned(8) counter;
15554+} atomic64_unchecked_t;
15555+#else
15556+typedef atomic64_t atomic64_unchecked_t;
15557+#endif
15558+
15559 #define ATOMIC64_INIT(val) { (val) }
15560
15561 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15562@@ -37,21 +45,31 @@ typedef struct {
15563 ATOMIC64_DECL_ONE(sym##_386)
15564
15565 ATOMIC64_DECL_ONE(add_386);
15566+ATOMIC64_DECL_ONE(add_unchecked_386);
15567 ATOMIC64_DECL_ONE(sub_386);
15568+ATOMIC64_DECL_ONE(sub_unchecked_386);
15569 ATOMIC64_DECL_ONE(inc_386);
15570+ATOMIC64_DECL_ONE(inc_unchecked_386);
15571 ATOMIC64_DECL_ONE(dec_386);
15572+ATOMIC64_DECL_ONE(dec_unchecked_386);
15573 #endif
15574
15575 #define alternative_atomic64(f, out, in...) \
15576 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15577
15578 ATOMIC64_DECL(read);
15579+ATOMIC64_DECL(read_unchecked);
15580 ATOMIC64_DECL(set);
15581+ATOMIC64_DECL(set_unchecked);
15582 ATOMIC64_DECL(xchg);
15583 ATOMIC64_DECL(add_return);
15584+ATOMIC64_DECL(add_return_unchecked);
15585 ATOMIC64_DECL(sub_return);
15586+ATOMIC64_DECL(sub_return_unchecked);
15587 ATOMIC64_DECL(inc_return);
15588+ATOMIC64_DECL(inc_return_unchecked);
15589 ATOMIC64_DECL(dec_return);
15590+ATOMIC64_DECL(dec_return_unchecked);
15591 ATOMIC64_DECL(dec_if_positive);
15592 ATOMIC64_DECL(inc_not_zero);
15593 ATOMIC64_DECL(add_unless);
15594@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15595 }
15596
15597 /**
15598+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15599+ * @v: pointer to type atomic64_unchecked_t
15600+ * @o: expected value
15601+ * @n: new value
15602+ *
15603+ * Atomically sets @v to @n if it was equal to @o and returns
15604+ * the old value.
15605+ */
15606+
15607+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15608+{
15609+ return cmpxchg64(&v->counter, o, n);
15610+}
15611+
15612+/**
15613 * atomic64_xchg - xchg atomic64 variable
15614 * @v: pointer to type atomic64_t
15615 * @n: value to assign
15616@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15617 }
15618
15619 /**
15620+ * atomic64_set_unchecked - set atomic64 variable
15621+ * @v: pointer to type atomic64_unchecked_t
15622+ * @n: value to assign
15623+ *
15624+ * Atomically sets the value of @v to @n.
15625+ */
15626+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15627+{
15628+ unsigned high = (unsigned)(i >> 32);
15629+ unsigned low = (unsigned)i;
15630+ alternative_atomic64(set, /* no output */,
15631+ "S" (v), "b" (low), "c" (high)
15632+ : "eax", "edx", "memory");
15633+}
15634+
15635+/**
15636 * atomic64_read - read atomic64 variable
15637 * @v: pointer to type atomic64_t
15638 *
15639@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15640 }
15641
15642 /**
15643+ * atomic64_read_unchecked - read atomic64 variable
15644+ * @v: pointer to type atomic64_unchecked_t
15645+ *
15646+ * Atomically reads the value of @v and returns it.
15647+ */
15648+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15649+{
15650+ long long r;
15651+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15652+ return r;
15653+}
15654+
15655+/**
15656 * atomic64_add_return - add and return
15657 * @i: integer value to add
15658 * @v: pointer to type atomic64_t
15659@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15660 return i;
15661 }
15662
15663+/**
15664+ * atomic64_add_return_unchecked - add and return
15665+ * @i: integer value to add
15666+ * @v: pointer to type atomic64_unchecked_t
15667+ *
15668+ * Atomically adds @i to @v and returns @i + *@v
15669+ */
15670+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15671+{
15672+ alternative_atomic64(add_return_unchecked,
15673+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15674+ ASM_NO_INPUT_CLOBBER("memory"));
15675+ return i;
15676+}
15677+
15678 /*
15679 * Other variants with different arithmetic operators:
15680 */
15681@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15682 return a;
15683 }
15684
15685+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15686+{
15687+ long long a;
15688+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15689+ "S" (v) : "memory", "ecx");
15690+ return a;
15691+}
15692+
15693 static inline long long atomic64_dec_return(atomic64_t *v)
15694 {
15695 long long a;
15696@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15697 }
15698
15699 /**
15700+ * atomic64_add_unchecked - add integer to atomic64 variable
15701+ * @i: integer value to add
15702+ * @v: pointer to type atomic64_unchecked_t
15703+ *
15704+ * Atomically adds @i to @v.
15705+ */
15706+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15707+{
15708+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15709+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15710+ ASM_NO_INPUT_CLOBBER("memory"));
15711+ return i;
15712+}
15713+
15714+/**
15715 * atomic64_sub - subtract the atomic64 variable
15716 * @i: integer value to subtract
15717 * @v: pointer to type atomic64_t
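All 32-bit atomic64 traffic goes through out-of-line helpers selected by the alternative_atomic64 machinery (cmpxchg8b where available, call-out fallbacks on 386/486), and the hunk above simply clones each entry point into an _unchecked twin. The helpers exist because a plain 64-bit load on i386 is two 32-bit loads and can observe a torn value:

    #include <stdint.h>
    #include <stdio.h>

    /* A concurrent writer can be observed half-updated between the two
     * 32-bit loads; hence atomic64_read on i386 uses cmpxchg8b helpers. */
    static uint64_t torn_read(const volatile uint32_t half[2])
    {
        uint32_t lo = half[0];   /* a writer may update both halves here... */
        uint32_t hi = half[1];   /* ...between these two loads */

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        volatile uint32_t v[2] = { 0xffffffffu, 0u };

        printf("%#llx\n", (unsigned long long)torn_read(v));
        return 0;
    }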
15718diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15719index f8d273e..02f39f3 100644
15720--- a/arch/x86/include/asm/atomic64_64.h
15721+++ b/arch/x86/include/asm/atomic64_64.h
15722@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15723 }
15724
15725 /**
15726+ * atomic64_read_unchecked - read atomic64 variable
15727+ * @v: pointer of type atomic64_unchecked_t
15728+ *
15729+ * Atomically reads the value of @v.
15730+ * Doesn't imply a read memory barrier.
15731+ */
15732+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15733+{
15734+ return ACCESS_ONCE((v)->counter);
15735+}
15736+
15737+/**
15738 * atomic64_set - set atomic64 variable
15739 * @v: pointer to type atomic64_t
15740 * @i: required value
15741@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15742 }
15743
15744 /**
15745+ * atomic64_set_unchecked - set atomic64 variable
15746+ * @v: pointer to type atomic64_unchecked_t
15747+ * @i: required value
15748+ *
15749+ * Atomically sets the value of @v to @i.
15750+ */
15751+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15752+{
15753+ v->counter = i;
15754+}
15755+
15756+/**
15757 * atomic64_add - add integer to atomic64 variable
15758 * @i: integer value to add
15759 * @v: pointer to type atomic64_t
15760@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15761 */
15762 static inline void atomic64_add(long i, atomic64_t *v)
15763 {
15764+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15765+
15766+#ifdef CONFIG_PAX_REFCOUNT
15767+ "jno 0f\n"
15768+ LOCK_PREFIX "subq %1,%0\n"
15769+ "int $4\n0:\n"
15770+ _ASM_EXTABLE(0b, 0b)
15771+#endif
15772+
15773+ : "=m" (v->counter)
15774+ : "er" (i), "m" (v->counter));
15775+}
15776+
15777+/**
15778+ * atomic64_add_unchecked - add integer to atomic64 variable
15779+ * @i: integer value to add
15780+ * @v: pointer to type atomic64_unchecked_t
15781+ *
15782+ * Atomically adds @i to @v.
15783+ */
15784+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15785+{
15786 asm volatile(LOCK_PREFIX "addq %1,%0"
15787 : "=m" (v->counter)
15788 : "er" (i), "m" (v->counter));
15789@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15790 */
15791 static inline void atomic64_sub(long i, atomic64_t *v)
15792 {
15793- asm volatile(LOCK_PREFIX "subq %1,%0"
15794+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15795+
15796+#ifdef CONFIG_PAX_REFCOUNT
15797+ "jno 0f\n"
15798+ LOCK_PREFIX "addq %1,%0\n"
15799+ "int $4\n0:\n"
15800+ _ASM_EXTABLE(0b, 0b)
15801+#endif
15802+
15803+ : "=m" (v->counter)
15804+ : "er" (i), "m" (v->counter));
15805+}
15806+
15807+/**
15808+ * atomic64_sub_unchecked - subtract the atomic64 variable
15809+ * @i: integer value to subtract
15810+ * @v: pointer to type atomic64_unchecked_t
15811+ *
15812+ * Atomically subtracts @i from @v.
15813+ */
15814+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15815+{
15816+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15817 : "=m" (v->counter)
15818 : "er" (i), "m" (v->counter));
15819 }
15820@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15821 */
15822 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15823 {
15824- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15825+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15826 }
15827
15828 /**
15829@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15830 */
15831 static inline void atomic64_inc(atomic64_t *v)
15832 {
15833+ asm volatile(LOCK_PREFIX "incq %0\n"
15834+
15835+#ifdef CONFIG_PAX_REFCOUNT
15836+ "jno 0f\n"
15837+ LOCK_PREFIX "decq %0\n"
15838+ "int $4\n0:\n"
15839+ _ASM_EXTABLE(0b, 0b)
15840+#endif
15841+
15842+ : "=m" (v->counter)
15843+ : "m" (v->counter));
15844+}
15845+
15846+/**
15847+ * atomic64_inc_unchecked - increment atomic64 variable
15848+ * @v: pointer to type atomic64_unchecked_t
15849+ *
15850+ * Atomically increments @v by 1.
15851+ */
15852+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15853+{
15854 asm volatile(LOCK_PREFIX "incq %0"
15855 : "=m" (v->counter)
15856 : "m" (v->counter));
15857@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15858 */
15859 static inline void atomic64_dec(atomic64_t *v)
15860 {
15861- asm volatile(LOCK_PREFIX "decq %0"
15862+ asm volatile(LOCK_PREFIX "decq %0\n"
15863+
15864+#ifdef CONFIG_PAX_REFCOUNT
15865+ "jno 0f\n"
15866+ LOCK_PREFIX "incq %0\n"
15867+ "int $4\n0:\n"
15868+ _ASM_EXTABLE(0b, 0b)
15869+#endif
15870+
15871+ : "=m" (v->counter)
15872+ : "m" (v->counter));
15873+}
15874+
15875+/**
15876+ * atomic64_dec_unchecked - decrement atomic64 variable
15877+ * @v: pointer to type atomic64_unchecked_t
15878+ *
15879+ * Atomically decrements @v by 1.
15880+ */
15881+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15882+{
15883+ asm volatile(LOCK_PREFIX "decq %0\n"
15884 : "=m" (v->counter)
15885 : "m" (v->counter));
15886 }
15887@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15888 */
15889 static inline int atomic64_dec_and_test(atomic64_t *v)
15890 {
15891- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15892+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15893 }
15894
15895 /**
15896@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15897 */
15898 static inline int atomic64_inc_and_test(atomic64_t *v)
15899 {
15900- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15901+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15902 }
15903
15904 /**
15905@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15906 */
15907 static inline int atomic64_add_negative(long i, atomic64_t *v)
15908 {
15909- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15910+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15911 }
15912
15913 /**
15914@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15915 */
15916 static inline long atomic64_add_return(long i, atomic64_t *v)
15917 {
15918+ return i + xadd_check_overflow(&v->counter, i);
15919+}
15920+
15921+/**
15922+ * atomic64_add_return_unchecked - add and return
15923+ * @i: integer value to add
15924+ * @v: pointer to type atomic64_unchecked_t
15925+ *
15926+ * Atomically adds @i to @v and returns @i + @v
15927+ */
15928+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15929+{
15930 return i + xadd(&v->counter, i);
15931 }
15932
15933@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15934 }
15935
15936 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15937+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15938+{
15939+ return atomic64_add_return_unchecked(1, v);
15940+}
15941 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15942
15943 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15944@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15945 return cmpxchg(&v->counter, old, new);
15946 }
15947
15948+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15949+{
15950+ return cmpxchg(&v->counter, old, new);
15951+}
15952+
15953 static inline long atomic64_xchg(atomic64_t *v, long new)
15954 {
15955 return xchg(&v->counter, new);
15956@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15957 */
15958 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15959 {
15960- long c, old;
15961+ long c, old, new;
15962 c = atomic64_read(v);
15963 for (;;) {
15964- if (unlikely(c == (u)))
15965+ if (unlikely(c == u))
15966 break;
15967- old = atomic64_cmpxchg((v), c, c + (a));
15968+
15969+ asm volatile("add %2,%0\n"
15970+
15971+#ifdef CONFIG_PAX_REFCOUNT
15972+ "jno 0f\n"
15973+ "sub %2,%0\n"
15974+ "int $4\n0:\n"
15975+ _ASM_EXTABLE(0b, 0b)
15976+#endif
15977+
15978+ : "=r" (new)
15979+ : "0" (c), "ir" (a));
15980+
15981+ old = atomic64_cmpxchg(v, c, new);
15982 if (likely(old == c))
15983 break;
15984 c = old;
15985 }
15986- return c != (u);
15987+ return c != u;
15988 }
15989
15990 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
15991diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
15992index 2ab1eb3..1e8cc5d 100644
15993--- a/arch/x86/include/asm/barrier.h
15994+++ b/arch/x86/include/asm/barrier.h
15995@@ -57,7 +57,7 @@
15996 do { \
15997 compiletime_assert_atomic_type(*p); \
15998 smp_mb(); \
15999- ACCESS_ONCE(*p) = (v); \
16000+ ACCESS_ONCE_RW(*p) = (v); \
16001 } while (0)
16002
16003 #define smp_load_acquire(p) \
16004@@ -74,7 +74,7 @@ do { \
16005 do { \
16006 compiletime_assert_atomic_type(*p); \
16007 barrier(); \
16008- ACCESS_ONCE(*p) = (v); \
16009+ ACCESS_ONCE_RW(*p) = (v); \
16010 } while (0)
16011
16012 #define smp_load_acquire(p) \
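The smp_store_release conversions rely on PaX's split of ACCESS_ONCE: the plain form is (assumed here) redefined with a const-qualified cast so it can only read, and stores must name ACCESS_ONCE_RW explicitly, which keeps constified data from being written through the generic macro. A sketch of the two forms (GNU C):

    #include <stdio.h>

    /* Sketch of the PaX split (stock kernels of this era had only the
     * writable form). */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))  /* read-only  */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* read/write */

    int main(void)
    {
        int flag = 0;

        ACCESS_ONCE_RW(flag) = 1;           /* stores must say _RW explicitly */
        /* ACCESS_ONCE(flag) = 1; */        /* rejected: const lvalue */
        printf("%d\n", ACCESS_ONCE(flag));  /* reads work through either form */
        return 0;
    }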
16013diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16014index cfe3b95..d01b118 100644
16015--- a/arch/x86/include/asm/bitops.h
16016+++ b/arch/x86/include/asm/bitops.h
16017@@ -50,7 +50,7 @@
16018 * a mask operation on a byte.
16019 */
16020 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16021-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16022+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16023 #define CONST_MASK(nr) (1 << ((nr) & 7))
16024
16025 /**
16026@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16027 */
16028 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16029 {
16030- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16031+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16032 }
16033
16034 /**
16035@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16036 */
16037 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16038 {
16039- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16040+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16041 }
16042
16043 /**
16044@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16045 */
16046 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16047 {
16048- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16049+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16050 }
16051
16052 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16053@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16054 *
16055 * Undefined if no bit exists, so code should check against 0 first.
16056 */
16057-static inline unsigned long __ffs(unsigned long word)
16058+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16059 {
16060 asm("rep; bsf %1,%0"
16061 : "=r" (word)
16062@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16063 *
16064 * Undefined if no zero exists, so code should check against ~0UL first.
16065 */
16066-static inline unsigned long ffz(unsigned long word)
16067+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16068 {
16069 asm("rep; bsf %1,%0"
16070 : "=r" (word)
16071@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16072 *
16073 * Undefined if no set bit exists, so code should check against 0 first.
16074 */
16075-static inline unsigned long __fls(unsigned long word)
16076+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16077 {
16078 asm("bsr %1,%0"
16079 : "=r" (word)
16080@@ -434,7 +434,7 @@ static inline int ffs(int x)
16081 * set bit if value is nonzero. The last (most significant) bit is
16082 * at position 32.
16083 */
16084-static inline int fls(int x)
16085+static inline int __intentional_overflow(-1) fls(int x)
16086 {
16087 int r;
16088
16089@@ -476,7 +476,7 @@ static inline int fls(int x)
16090 * at position 64.
16091 */
16092 #ifdef CONFIG_X86_64
16093-static __always_inline int fls64(__u64 x)
16094+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16095 {
16096 int bitpos = -1;
16097 /*
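__intentional_overflow(-1) is the size_overflow plugin's escape hatch: it marks functions such as the bit-scan helpers above whose arithmetic may wrap by design, so the plugin skips instrumenting them. Presumably it is plumbed in roughly like this (the real definition lives in the patched compiler headers, and the attribute is consumed only by the plugin):

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)   /* no-op without the plugin */
    #endif

    /* Bit-scan results are not sizes; wrapping arithmetic here is expected,
     * so the annotation tells the plugin to leave the function alone. */
    static inline unsigned long __intentional_overflow(-1)
    bit_scan_forward(unsigned long word)
    {
        return (unsigned long)__builtin_ctzl(word);
    }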
16098diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16099index 4fa687a..60f2d39 100644
16100--- a/arch/x86/include/asm/boot.h
16101+++ b/arch/x86/include/asm/boot.h
16102@@ -6,10 +6,15 @@
16103 #include <uapi/asm/boot.h>
16104
16105 /* Physical address where kernel should be loaded. */
16106-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16107+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16108 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16109 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16110
16111+#ifndef __ASSEMBLY__
16112+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16113+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16114+#endif
16115+
16116 /* Minimum kernel alignment, as a power of two */
16117 #ifdef CONFIG_X86_64
16118 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16119diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16120index 48f99f1..d78ebf9 100644
16121--- a/arch/x86/include/asm/cache.h
16122+++ b/arch/x86/include/asm/cache.h
16123@@ -5,12 +5,13 @@
16124
16125 /* L1 cache line size */
16126 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16127-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16128+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16129
16130 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16131+#define __read_only __attribute__((__section__(".data..read_only")))
16132
16133 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16134-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16135+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16136
16137 #ifdef CONFIG_X86_VSMP
16138 #ifdef CONFIG_SMP
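__read_only drops a variable into .data..read_only, which KERNEXEC maps without write permission once boot finishes; later writes must be bracketed with pax_open_kernel()/pax_close_kernel(), the same pairing visible throughout the desc.h hunks below. A kernel-style usage sketch (not standalone; the variable name is made up):

    static int sysctl_hardening_enabled __read_only = 1;

    static void __init set_policy(int on)
    {
        pax_open_kernel();               /* briefly permit the write */
        sysctl_hardening_enabled = on;
        pax_close_kernel();              /* back to read-only */
    }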
16139diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16140index 1f1297b..72b8439 100644
16141--- a/arch/x86/include/asm/calling.h
16142+++ b/arch/x86/include/asm/calling.h
16143@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16144 #define RSP 152
16145 #define SS 160
16146
16147-#define ARGOFFSET R11
16148+#define ARGOFFSET R15
16149
16150 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16151- subq $9*8+\addskip, %rsp
16152- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16153- movq_cfi rdi, 8*8
16154- movq_cfi rsi, 7*8
16155- movq_cfi rdx, 6*8
16156+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16157+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16158+ movq_cfi rdi, RDI
16159+ movq_cfi rsi, RSI
16160+ movq_cfi rdx, RDX
16161
16162 .if \save_rcx
16163- movq_cfi rcx, 5*8
16164+ movq_cfi rcx, RCX
16165 .endif
16166
16167 .if \rax_enosys
16168- movq $-ENOSYS, 4*8(%rsp)
16169+ movq $-ENOSYS, RAX(%rsp)
16170 .else
16171- movq_cfi rax, 4*8
16172+ movq_cfi rax, RAX
16173 .endif
16174
16175 .if \save_r891011
16176- movq_cfi r8, 3*8
16177- movq_cfi r9, 2*8
16178- movq_cfi r10, 1*8
16179- movq_cfi r11, 0*8
16180+ movq_cfi r8, R8
16181+ movq_cfi r9, R9
16182+ movq_cfi r10, R10
16183+ movq_cfi r11, R11
16184 .endif
16185
16186+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16187+ movq_cfi r12, R12
16188+#endif
16189+
16190 .endm
16191
16192-#define ARG_SKIP (9*8)
16193+#define ARG_SKIP ORIG_RAX
16194
16195 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16196 rstor_r8910=1, rstor_rdx=1
16197+
16198+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16199+ movq_cfi_restore R12, r12
16200+#endif
16201+
16202 .if \rstor_r11
16203- movq_cfi_restore 0*8, r11
16204+ movq_cfi_restore R11, r11
16205 .endif
16206
16207 .if \rstor_r8910
16208- movq_cfi_restore 1*8, r10
16209- movq_cfi_restore 2*8, r9
16210- movq_cfi_restore 3*8, r8
16211+ movq_cfi_restore R10, r10
16212+ movq_cfi_restore R9, r9
16213+ movq_cfi_restore R8, r8
16214 .endif
16215
16216 .if \rstor_rax
16217- movq_cfi_restore 4*8, rax
16218+ movq_cfi_restore RAX, rax
16219 .endif
16220
16221 .if \rstor_rcx
16222- movq_cfi_restore 5*8, rcx
16223+ movq_cfi_restore RCX, rcx
16224 .endif
16225
16226 .if \rstor_rdx
16227- movq_cfi_restore 6*8, rdx
16228+ movq_cfi_restore RDX, rdx
16229 .endif
16230
16231- movq_cfi_restore 7*8, rsi
16232- movq_cfi_restore 8*8, rdi
16233+ movq_cfi_restore RSI, rsi
16234+ movq_cfi_restore RDI, rdi
16235
16236- .if ARG_SKIP+\addskip > 0
16237- addq $ARG_SKIP+\addskip, %rsp
16238- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16239+ .if ORIG_RAX+\addskip > 0
16240+ addq $ORIG_RAX+\addskip, %rsp
16241+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16242 .endif
16243 .endm
16244
16245- .macro LOAD_ARGS offset, skiprax=0
16246- movq \offset(%rsp), %r11
16247- movq \offset+8(%rsp), %r10
16248- movq \offset+16(%rsp), %r9
16249- movq \offset+24(%rsp), %r8
16250- movq \offset+40(%rsp), %rcx
16251- movq \offset+48(%rsp), %rdx
16252- movq \offset+56(%rsp), %rsi
16253- movq \offset+64(%rsp), %rdi
16254+ .macro LOAD_ARGS skiprax=0
16255+ movq R11(%rsp), %r11
16256+ movq R10(%rsp), %r10
16257+ movq R9(%rsp), %r9
16258+ movq R8(%rsp), %r8
16259+ movq RCX(%rsp), %rcx
16260+ movq RDX(%rsp), %rdx
16261+ movq RSI(%rsp), %rsi
16262+ movq RDI(%rsp), %rdi
16263 .if \skiprax
16264 .else
16265- movq \offset+72(%rsp), %rax
16266+ movq ORIG_RAX(%rsp), %rax
16267 .endif
16268 .endm
16269
16270-#define REST_SKIP (6*8)
16271-
16272 .macro SAVE_REST
16273- subq $REST_SKIP, %rsp
16274- CFI_ADJUST_CFA_OFFSET REST_SKIP
16275- movq_cfi rbx, 5*8
16276- movq_cfi rbp, 4*8
16277- movq_cfi r12, 3*8
16278- movq_cfi r13, 2*8
16279- movq_cfi r14, 1*8
16280- movq_cfi r15, 0*8
16281+ movq_cfi rbx, RBX
16282+ movq_cfi rbp, RBP
16283+
16284+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16285+ movq_cfi r12, R12
16286+#endif
16287+
16288+ movq_cfi r13, R13
16289+ movq_cfi r14, R14
16290+ movq_cfi r15, R15
16291 .endm
16292
16293 .macro RESTORE_REST
16294- movq_cfi_restore 0*8, r15
16295- movq_cfi_restore 1*8, r14
16296- movq_cfi_restore 2*8, r13
16297- movq_cfi_restore 3*8, r12
16298- movq_cfi_restore 4*8, rbp
16299- movq_cfi_restore 5*8, rbx
16300- addq $REST_SKIP, %rsp
16301- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16302+ movq_cfi_restore R15, r15
16303+ movq_cfi_restore R14, r14
16304+ movq_cfi_restore R13, r13
16305+
16306+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16307+ movq_cfi_restore R12, r12
16308+#endif
16309+
16310+ movq_cfi_restore RBP, rbp
16311+ movq_cfi_restore RBX, rbx
16312 .endm
16313
16314 .macro SAVE_ALL
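Two things happen in this calling.h rewrite: the magic n*8 offsets become the symbolic R15..SS constants defined at the top of the file, so SAVE_ARGS always carves out a full pt_regs frame, and under the KERNEXEC OR method %r12 is treated as a reserved mask register, saved and restored with the argument registers instead of the callee-saved block. The frame those offsets index into (x86-64 pt_regs of this era):

    #include <stddef.h>
    #include <stdio.h>

    /* One 8-byte slot per field, R15 = 0 up through SS = 160. */
    struct pt_regs_sketch {
        unsigned long r15, r14, r13, r12, rbp, rbx;              /* SAVE_REST  */
        unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi; /* SAVE_ARGS  */
        unsigned long orig_rax;                                  /* syscall nr */
        unsigned long rip, cs, eflags, rsp, ss;                  /* hw frame   */
    };

    int main(void)
    {
        printf("R12=%zu ORIG_RAX=%zu SS=%zu\n",
               offsetof(struct pt_regs_sketch, r12),       /* 24  */
               offsetof(struct pt_regs_sketch, orig_rax),  /* 120 */
               offsetof(struct pt_regs_sketch, ss));       /* 160 */
        return 0;
    }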
16315diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16316index f50de69..2b0a458 100644
16317--- a/arch/x86/include/asm/checksum_32.h
16318+++ b/arch/x86/include/asm/checksum_32.h
16319@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16320 int len, __wsum sum,
16321 int *src_err_ptr, int *dst_err_ptr);
16322
16323+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16324+ int len, __wsum sum,
16325+ int *src_err_ptr, int *dst_err_ptr);
16326+
16327+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16328+ int len, __wsum sum,
16329+ int *src_err_ptr, int *dst_err_ptr);
16330+
16331 /*
16332 * Note: when you get a NULL pointer exception here this means someone
16333 * passed in an incorrect kernel address to one of these functions.
16334@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16335
16336 might_sleep();
16337 stac();
16338- ret = csum_partial_copy_generic((__force void *)src, dst,
16339+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16340 len, sum, err_ptr, NULL);
16341 clac();
16342
16343@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16344 might_sleep();
16345 if (access_ok(VERIFY_WRITE, dst, len)) {
16346 stac();
16347- ret = csum_partial_copy_generic(src, (__force void *)dst,
16348+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16349 len, sum, NULL, err_ptr);
16350 clac();
16351 return ret;
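csum_partial_copy_generic is split into _from_user/_to_user entry points because under UDEREF the user-side pointer of the copy must be rebased into the shadow area and bracketed by userland open/close, which one direction-agnostic routine cannot do. Condensing the to-user call site above into a single kernel-style wrapper (not standalone):

    /* The _to_user asm additionally rebases dst through the UDEREF shadow. */
    static __wsum checked_csum_copy_to_user(const void *src, void __user *dst,
                                            int len, __wsum sum, int *err)
    {
        if (!access_ok(VERIFY_WRITE, dst, len)) {  /* range-check first */
            *err = -EFAULT;
            return sum;
        }
        stac();                                    /* open user access (SMAP) */
        sum = csum_partial_copy_generic_to_user(src, (__force void *)dst,
                                                len, sum, NULL, err);
        clac();                                    /* close user access */
        return sum;
    }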
16352diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16353index 99c105d7..2f667ac 100644
16354--- a/arch/x86/include/asm/cmpxchg.h
16355+++ b/arch/x86/include/asm/cmpxchg.h
16356@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16357 __compiletime_error("Bad argument size for cmpxchg");
16358 extern void __xadd_wrong_size(void)
16359 __compiletime_error("Bad argument size for xadd");
16360+extern void __xadd_check_overflow_wrong_size(void)
16361+ __compiletime_error("Bad argument size for xadd_check_overflow");
16362 extern void __add_wrong_size(void)
16363 __compiletime_error("Bad argument size for add");
16364+extern void __add_check_overflow_wrong_size(void)
16365+ __compiletime_error("Bad argument size for add_check_overflow");
16366
16367 /*
16368 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16369@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16370 __ret; \
16371 })
16372
16373+#ifdef CONFIG_PAX_REFCOUNT
16374+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16375+ ({ \
16376+ __typeof__ (*(ptr)) __ret = (arg); \
16377+ switch (sizeof(*(ptr))) { \
16378+ case __X86_CASE_L: \
16379+ asm volatile (lock #op "l %0, %1\n" \
16380+ "jno 0f\n" \
16381+ "mov %0,%1\n" \
16382+ "int $4\n0:\n" \
16383+ _ASM_EXTABLE(0b, 0b) \
16384+ : "+r" (__ret), "+m" (*(ptr)) \
16385+ : : "memory", "cc"); \
16386+ break; \
16387+ case __X86_CASE_Q: \
16388+ asm volatile (lock #op "q %q0, %1\n" \
16389+ "jno 0f\n" \
16390+ "mov %0,%1\n" \
16391+ "int $4\n0:\n" \
16392+ _ASM_EXTABLE(0b, 0b) \
16393+ : "+r" (__ret), "+m" (*(ptr)) \
16394+ : : "memory", "cc"); \
16395+ break; \
16396+ default: \
16397+ __ ## op ## _check_overflow_wrong_size(); \
16398+ } \
16399+ __ret; \
16400+ })
16401+#else
16402+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16403+#endif
16404+
16405 /*
16406 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16407 * Since this is generally used to protect other memory information, we
16408@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16409 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16410 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16411
16412+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16413+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16414+
16415 #define __add(ptr, inc, lock) \
16416 ({ \
16417 __typeof__ (*(ptr)) __ret = (inc); \
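xadd_check_overflow is the exchange-and-add member of the checked family: "lock xadd" runs, and if the signed result overflowed, the "mov %0,%1" writes the pre-add value back before "int $4" traps, so callers such as atomic_add_return observe a saturated counter. A portable model of the semantics:

    #include <limits.h>
    #include <stdio.h>

    /* Return the old value; refuse an add whose result would overflow. */
    static int xadd_checked(int *ptr, int inc)
    {
        int old = *ptr;
        long long sum = (long long)old + inc;

        if (sum > INT_MAX || sum < INT_MIN) {
            fprintf(stderr, "refcount overflow blocked\n");
            return old;                    /* counter saturates at old value */
        }
        *ptr = (int)sum;
        return old;
    }

    int main(void)
    {
        int counter = INT_MAX;
        int before = xadd_checked(&counter, 1);

        printf("before=%d after=%d\n", before, counter);
        return 0;
    }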
16418diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16419index 59c6c40..5e0b22c 100644
16420--- a/arch/x86/include/asm/compat.h
16421+++ b/arch/x86/include/asm/compat.h
16422@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16423 typedef u32 compat_uint_t;
16424 typedef u32 compat_ulong_t;
16425 typedef u64 __attribute__((aligned(4))) compat_u64;
16426-typedef u32 compat_uptr_t;
16427+typedef u32 __user compat_uptr_t;
16428
16429 struct compat_timespec {
16430 compat_time_t tv_sec;
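Annotating compat_uptr_t with __user lets sparse and the PaX checker treat the 32-bit handle as a userland pointer end-to-end, so conversions have to round-trip through the conversion helpers rather than bare casts. The two stock helpers, shown here for context:

    static inline void __user *compat_ptr(compat_uptr_t uptr)
    {
        return (void __user *)(unsigned long)uptr;
    }

    static inline compat_uptr_t ptr_to_compat(void __user *uptr)
    {
        return (u32)(unsigned long)uptr;
    }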
16431diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16432index 90a5485..43b6211 100644
16433--- a/arch/x86/include/asm/cpufeature.h
16434+++ b/arch/x86/include/asm/cpufeature.h
16435@@ -213,7 +213,7 @@
16436 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16437 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16438 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16439-
16440+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16441
16442 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16443 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16444@@ -221,7 +221,7 @@
16445 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16446 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16447 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16448-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16449+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16450 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16451 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16452 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16453@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16454 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16455 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16456 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16457+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16458
16459 #if __GNUC__ >= 4
16460 extern void warn_pre_alternatives(void);
16461@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16462
16463 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16464 t_warn:
16465- warn_pre_alternatives();
16466+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16467+ warn_pre_alternatives();
16468 return false;
16469 #endif
16470
16471@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16472 ".section .discard,\"aw\",@progbits\n"
16473 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16474 ".previous\n"
16475- ".section .altinstr_replacement,\"ax\"\n"
16476+ ".section .altinstr_replacement,\"a\"\n"
16477 "3: movb $1,%0\n"
16478 "4:\n"
16479 ".previous\n"
16480@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16481 " .byte 2b - 1b\n" /* src len */
16482 " .byte 4f - 3f\n" /* repl len */
16483 ".previous\n"
16484- ".section .altinstr_replacement,\"ax\"\n"
16485+ ".section .altinstr_replacement,\"a\"\n"
16486 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16487 "4:\n"
16488 ".previous\n"
16489@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16490 ".section .discard,\"aw\",@progbits\n"
16491 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16492 ".previous\n"
16493- ".section .altinstr_replacement,\"ax\"\n"
16494+ ".section .altinstr_replacement,\"a\"\n"
16495 "3: movb $0,%0\n"
16496 "4:\n"
16497 ".previous\n"
16498@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16499 ".section .discard,\"aw\",@progbits\n"
16500 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16501 ".previous\n"
16502- ".section .altinstr_replacement,\"ax\"\n"
16503+ ".section .altinstr_replacement,\"a\"\n"
16504 "5: movb $1,%0\n"
16505 "6:\n"
16506 ".previous\n"
16507diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16508index a94b82e..59ecefa 100644
16509--- a/arch/x86/include/asm/desc.h
16510+++ b/arch/x86/include/asm/desc.h
16511@@ -4,6 +4,7 @@
16512 #include <asm/desc_defs.h>
16513 #include <asm/ldt.h>
16514 #include <asm/mmu.h>
16515+#include <asm/pgtable.h>
16516
16517 #include <linux/smp.h>
16518 #include <linux/percpu.h>
16519@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16520
16521 desc->type = (info->read_exec_only ^ 1) << 1;
16522 desc->type |= info->contents << 2;
16523+ desc->type |= info->seg_not_present ^ 1;
16524
16525 desc->s = 1;
16526 desc->dpl = 0x3;
16527@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16528 }
16529
16530 extern struct desc_ptr idt_descr;
16531-extern gate_desc idt_table[];
16532-extern struct desc_ptr debug_idt_descr;
16533-extern gate_desc debug_idt_table[];
16534-
16535-struct gdt_page {
16536- struct desc_struct gdt[GDT_ENTRIES];
16537-} __attribute__((aligned(PAGE_SIZE)));
16538-
16539-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16540+extern gate_desc idt_table[IDT_ENTRIES];
16541+extern const struct desc_ptr debug_idt_descr;
16542+extern gate_desc debug_idt_table[IDT_ENTRIES];
16543
16544+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16545 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16546 {
16547- return per_cpu(gdt_page, cpu).gdt;
16548+ return cpu_gdt_table[cpu];
16549 }
16550
16551 #ifdef CONFIG_X86_64
16552@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16553 unsigned long base, unsigned dpl, unsigned flags,
16554 unsigned short seg)
16555 {
16556- gate->a = (seg << 16) | (base & 0xffff);
16557- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16558+ gate->gate.offset_low = base;
16559+ gate->gate.seg = seg;
16560+ gate->gate.reserved = 0;
16561+ gate->gate.type = type;
16562+ gate->gate.s = 0;
16563+ gate->gate.dpl = dpl;
16564+ gate->gate.p = 1;
16565+ gate->gate.offset_high = base >> 16;
16566 }
16567
16568 #endif
16569@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16570
16571 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16572 {
16573+ pax_open_kernel();
16574 memcpy(&idt[entry], gate, sizeof(*gate));
16575+ pax_close_kernel();
16576 }
16577
16578 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16579 {
16580+ pax_open_kernel();
16581 memcpy(&ldt[entry], desc, 8);
16582+ pax_close_kernel();
16583 }
16584
16585 static inline void
16586@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16587 default: size = sizeof(*gdt); break;
16588 }
16589
16590+ pax_open_kernel();
16591 memcpy(&gdt[entry], desc, size);
16592+ pax_close_kernel();
16593 }
16594
16595 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16596@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16597
16598 static inline void native_load_tr_desc(void)
16599 {
16600+ pax_open_kernel();
16601 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16602+ pax_close_kernel();
16603 }
16604
16605 static inline void native_load_gdt(const struct desc_ptr *dtr)
16606@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16607 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16608 unsigned int i;
16609
16610+ pax_open_kernel();
16611 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16612 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16613+ pax_close_kernel();
16614 }
16615
16616 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16617@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16618 preempt_enable();
16619 }
16620
16621-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16622+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16623 {
16624 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16625 }
16626@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16627 }
16628
16629 #ifdef CONFIG_X86_64
16630-static inline void set_nmi_gate(int gate, void *addr)
16631+static inline void set_nmi_gate(int gate, const void *addr)
16632 {
16633 gate_desc s;
16634
16635@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16636 #endif
16637
16638 #ifdef CONFIG_TRACING
16639-extern struct desc_ptr trace_idt_descr;
16640-extern gate_desc trace_idt_table[];
16641+extern const struct desc_ptr trace_idt_descr;
16642+extern gate_desc trace_idt_table[IDT_ENTRIES];
16643 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16644 {
16645 write_idt_entry(trace_idt_table, entry, gate);
16646 }
16647
16648-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16649+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16650 unsigned dpl, unsigned ist, unsigned seg)
16651 {
16652 gate_desc s;
16653@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16654 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16655 #endif
16656
16657-static inline void _set_gate(int gate, unsigned type, void *addr,
16658+static inline void _set_gate(int gate, unsigned type, const void *addr,
16659 unsigned dpl, unsigned ist, unsigned seg)
16660 {
16661 gate_desc s;
16662@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16663 #define set_intr_gate(n, addr) \
16664 do { \
16665 BUG_ON((unsigned)n > 0xFF); \
16666- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16667+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16668 __KERNEL_CS); \
16669- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16670+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16671 0, 0, __KERNEL_CS); \
16672 } while (0)
16673
16674@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16675 /*
16676 * This routine sets up an interrupt gate at directory privilege level 3.
16677 */
16678-static inline void set_system_intr_gate(unsigned int n, void *addr)
16679+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16680 {
16681 BUG_ON((unsigned)n > 0xFF);
16682 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16683 }
16684
16685-static inline void set_system_trap_gate(unsigned int n, void *addr)
16686+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16687 {
16688 BUG_ON((unsigned)n > 0xFF);
16689 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16690 }
16691
16692-static inline void set_trap_gate(unsigned int n, void *addr)
16693+static inline void set_trap_gate(unsigned int n, const void *addr)
16694 {
16695 BUG_ON((unsigned)n > 0xFF);
16696 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16697@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16698 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16699 {
16700 BUG_ON((unsigned)n > 0xFF);
16701- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16702+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16703 }
16704
16705-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16706+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16707 {
16708 BUG_ON((unsigned)n > 0xFF);
16709 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16710 }
16711
16712-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16713+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16717@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16718 else
16719 load_idt((const struct desc_ptr *)&idt_descr);
16720 }
16721+
16722+#ifdef CONFIG_X86_32
16723+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16724+{
16725+ struct desc_struct d;
16726+
16727+ if (likely(limit))
16728+ limit = (limit - 1UL) >> PAGE_SHIFT;
16729+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16730+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16731+}
16732+#endif
16733+
16734 #endif /* _ASM_X86_DESC_H */
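
The set_user_cs() helper added above repacks the default user code segment with a per-mm base and a page-granular limit; type 0xFB encodes an accessed, readable, DPL-3 code segment and flags 0xC select 32-bit operation with 4K granularity. A minimal userspace sketch of the bit layout pack_descriptor() produces follows; the pack_descriptor_u64() helper and the main() harness are illustrative, not part of the patch (the kernel's version also forces the present bit, which type 0xFB already carries):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's pack_descriptor() bit layout for a GDT entry. */
static uint64_t pack_descriptor_u64(uint32_t base, uint32_t limit,
				    uint8_t type, uint8_t flags)
{
	uint32_t lo = ((base & 0xffff) << 16) | (limit & 0xffff);
	uint32_t hi = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
		      (limit & 0x000f0000) | ((uint32_t)type << 8) |
		      ((uint32_t)(flags & 0xf) << 20);
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t base = 0, limit = 0xc0000000;	/* e.g. a 3 GiB user segment */

	if (limit)
		limit = (limit - 1) >> 12;	/* byte limit -> pages, as in set_user_cs() */
	printf("user CS descriptor: %#018llx\n",
	       (unsigned long long)pack_descriptor_u64(base, limit, 0xFB, 0xC));
	return 0;
}
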
16735diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16736index 278441f..b95a174 100644
16737--- a/arch/x86/include/asm/desc_defs.h
16738+++ b/arch/x86/include/asm/desc_defs.h
16739@@ -31,6 +31,12 @@ struct desc_struct {
16740 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16741 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16742 };
16743+ struct {
16744+ u16 offset_low;
16745+ u16 seg;
16746+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16747+ unsigned offset_high: 16;
16748+ } gate;
16749 };
16750 } __attribute__((packed));
16751
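
The anonymous gate member added to desc_struct gives the same union a second view, so a 32-bit IDT gate can be inspected through the generic descriptor type. A standalone sketch of reading a gate's handler offset back out of such a layout (struct and field values here are illustrative):

#include <stdint.h>
#include <stdio.h>

struct gate32 {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
	unsigned offset_high: 16;
} __attribute__((packed));

int main(void)
{
	struct gate32 g = {
		.offset_low  = 0x5678,
		.seg         = 0x10,	/* e.g. __KERNEL_CS */
		.type        = 0xE,	/* 32-bit interrupt gate */
		.p           = 1,
		.offset_high = 0x1234,
	};
	uint32_t handler = ((uint32_t)g.offset_high << 16) | g.offset_low;

	printf("gate -> seg %#x, handler %#x, dpl %u\n",
	       (unsigned)g.seg, (unsigned)handler, (unsigned)g.dpl);
	return 0;
}
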
16752diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16753index ced283a..ffe04cc 100644
16754--- a/arch/x86/include/asm/div64.h
16755+++ b/arch/x86/include/asm/div64.h
16756@@ -39,7 +39,7 @@
16757 __mod; \
16758 })
16759
16760-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16761+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16762 {
16763 union {
16764 u64 v64;
16765diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16766index ca3347a..1a5082a 100644
16767--- a/arch/x86/include/asm/elf.h
16768+++ b/arch/x86/include/asm/elf.h
16769@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16770
16771 #include <asm/vdso.h>
16772
16773-#ifdef CONFIG_X86_64
16774-extern unsigned int vdso64_enabled;
16775-#endif
16776 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16777 extern unsigned int vdso32_enabled;
16778 #endif
16779@@ -249,7 +246,25 @@ extern int force_personality32;
16780 the loader. We need to make sure that it is out of the way of the program
16781 that it will "exec", and that there is sufficient room for the brk. */
16782
16783+#ifdef CONFIG_PAX_SEGMEXEC
16784+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16785+#else
16786 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16787+#endif
16788+
16789+#ifdef CONFIG_PAX_ASLR
16790+#ifdef CONFIG_X86_32
16791+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16792+
16793+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16794+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16795+#else
16796+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16797+
16798+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16799+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16800+#endif
16801+#endif
16802
16803 /* This yields a mask that user programs can use to figure out what
16804 instruction set this CPU supports. This could be done in user space,
16805@@ -298,17 +313,13 @@ do { \
16806
16807 #define ARCH_DLINFO \
16808 do { \
16809- if (vdso64_enabled) \
16810- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16811- (unsigned long __force)current->mm->context.vdso); \
16812+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16813 } while (0)
16814
16815 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16816 #define ARCH_DLINFO_X32 \
16817 do { \
16818- if (vdso64_enabled) \
16819- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16820- (unsigned long __force)current->mm->context.vdso); \
16821+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16822 } while (0)
16823
16824 #define AT_SYSINFO 32
16825@@ -323,10 +334,10 @@ else \
16826
16827 #endif /* !CONFIG_X86_32 */
16828
16829-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16830+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16831
16832 #define VDSO_ENTRY \
16833- ((unsigned long)current->mm->context.vdso + \
16834+ (current->mm->context.vdso + \
16835 selected_vdso32->sym___kernel_vsyscall)
16836
16837 struct linux_binprm;
16838@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16839 int uses_interp);
16840 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16841
16842-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16843-#define arch_randomize_brk arch_randomize_brk
16844-
16845 /*
16846 * True on X86_32 or when emulating IA32 on X86_64
16847 */
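
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX ASLR inserts into the mmap and stack bases; with the usual 47-bit TASK_SIZE_MAX_SHIFT and 4K pages, native 64-bit tasks get 47 - 12 - 3 = 32 bits. A quick sketch of the resulting randomization spans, with those common defaults assumed:

#include <stdio.h>

#define PAGE_SHIFT		12
#define TASK_SIZE_MAX_SHIFT	47	/* common x86_64 default, assumed */

int main(void)
{
	unsigned delta_bits[] = {
		16,					/* 32-bit, no SEGMEXEC */
		15,					/* 32-bit with SEGMEXEC */
		TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3,	/* native 64-bit */
	};
	for (unsigned i = 0; i < 3; i++)
		printf("%u random bits -> base varies over %llu MiB\n",
		       delta_bits[i],
		       ((1ULL << delta_bits[i]) << PAGE_SHIFT) >> 20);
	return 0;
}
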
16848diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16849index 77a99ac..39ff7f5 100644
16850--- a/arch/x86/include/asm/emergency-restart.h
16851+++ b/arch/x86/include/asm/emergency-restart.h
16852@@ -1,6 +1,6 @@
16853 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16854 #define _ASM_X86_EMERGENCY_RESTART_H
16855
16856-extern void machine_emergency_restart(void);
16857+extern void machine_emergency_restart(void) __noreturn;
16858
16859 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16860diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16861index 1c7eefe..d0e4702 100644
16862--- a/arch/x86/include/asm/floppy.h
16863+++ b/arch/x86/include/asm/floppy.h
16864@@ -229,18 +229,18 @@ static struct fd_routine_l {
16865 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16866 } fd_routine[] = {
16867 {
16868- request_dma,
16869- free_dma,
16870- get_dma_residue,
16871- dma_mem_alloc,
16872- hard_dma_setup
16873+ ._request_dma = request_dma,
16874+ ._free_dma = free_dma,
16875+ ._get_dma_residue = get_dma_residue,
16876+ ._dma_mem_alloc = dma_mem_alloc,
16877+ ._dma_setup = hard_dma_setup
16878 },
16879 {
16880- vdma_request_dma,
16881- vdma_nop,
16882- vdma_get_dma_residue,
16883- vdma_mem_alloc,
16884- vdma_dma_setup
16885+ ._request_dma = vdma_request_dma,
16886+ ._free_dma = vdma_nop,
16887+ ._get_dma_residue = vdma_get_dma_residue,
16888+ ._dma_mem_alloc = vdma_mem_alloc,
16889+ ._dma_setup = vdma_dma_setup
16890 }
16891 };
16892
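
Rewriting fd_routine[] with designated initializers is purely defensive: each function pointer is now tied to its field by name, so a later reordering of struct fd_routine_l can no longer silently wire handlers into the wrong slots. A tiny illustration of the difference, with a hypothetical struct and functions:

#include <stdio.h>

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return puts("open"); }
static int my_close(void) { return puts("close"); }

/* Positional: breaks silently if 'open' and 'close' are ever swapped
 * in the struct definition. */
static struct ops positional = { my_open, my_close };

/* Designated: stays correct regardless of field order. */
static struct ops designated = { .open = my_open, .close = my_close };

int main(void)
{
	positional.open();
	designated.open();
	return 0;
}
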
16893diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16894index 72ba21a..79f3f66 100644
16895--- a/arch/x86/include/asm/fpu-internal.h
16896+++ b/arch/x86/include/asm/fpu-internal.h
16897@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16898 #define user_insn(insn, output, input...) \
16899 ({ \
16900 int err; \
16901+ pax_open_userland(); \
16902 asm volatile(ASM_STAC "\n" \
16903- "1:" #insn "\n\t" \
16904+ "1:" \
16905+ __copyuser_seg \
16906+ #insn "\n\t" \
16907 "2: " ASM_CLAC "\n" \
16908 ".section .fixup,\"ax\"\n" \
16909 "3: movl $-1,%[err]\n" \
16910@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16911 _ASM_EXTABLE(1b, 3b) \
16912 : [err] "=r" (err), output \
16913 : "0"(0), input); \
16914+ pax_close_userland(); \
16915 err; \
16916 })
16917
16918@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16919 "fnclex\n\t"
16920 "emms\n\t"
16921 "fildl %P[addr]" /* set F?P to defined value */
16922- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16923+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16924 }
16925
16926 return fpu_restore_checking(&tsk->thread.fpu);
16927diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16928index b4c1f54..e290c08 100644
16929--- a/arch/x86/include/asm/futex.h
16930+++ b/arch/x86/include/asm/futex.h
16931@@ -12,6 +12,7 @@
16932 #include <asm/smap.h>
16933
16934 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16935+ typecheck(u32 __user *, uaddr); \
16936 asm volatile("\t" ASM_STAC "\n" \
16937 "1:\t" insn "\n" \
16938 "2:\t" ASM_CLAC "\n" \
16939@@ -20,15 +21,16 @@
16940 "\tjmp\t2b\n" \
16941 "\t.previous\n" \
16942 _ASM_EXTABLE(1b, 3b) \
16943- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16944+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16945 : "i" (-EFAULT), "0" (oparg), "1" (0))
16946
16947 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16948+ typecheck(u32 __user *, uaddr); \
16949 asm volatile("\t" ASM_STAC "\n" \
16950 "1:\tmovl %2, %0\n" \
16951 "\tmovl\t%0, %3\n" \
16952 "\t" insn "\n" \
16953- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16954+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16955 "\tjnz\t1b\n" \
16956 "3:\t" ASM_CLAC "\n" \
16957 "\t.section .fixup,\"ax\"\n" \
16958@@ -38,7 +40,7 @@
16959 _ASM_EXTABLE(1b, 4b) \
16960 _ASM_EXTABLE(2b, 4b) \
16961 : "=&a" (oldval), "=&r" (ret), \
16962- "+m" (*uaddr), "=&r" (tem) \
16963+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16964 : "r" (oparg), "i" (-EFAULT), "1" (0))
16965
16966 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16967@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16968
16969 pagefault_disable();
16970
16971+ pax_open_userland();
16972 switch (op) {
16973 case FUTEX_OP_SET:
16974- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16975+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16976 break;
16977 case FUTEX_OP_ADD:
16978- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16979+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16980 uaddr, oparg);
16981 break;
16982 case FUTEX_OP_OR:
16983@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16984 default:
16985 ret = -ENOSYS;
16986 }
16987+ pax_close_userland();
16988
16989 pagefault_enable();
16990
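
The typecheck(u32 __user *, uaddr) lines make these macros fail to compile when handed anything but a u32 __user pointer, complementing the ____m() re-cast in the output operands. The kernel's typecheck() (from linux/typecheck.h) boils down to a pointer-comparison trick; a userspace rendition:

#include <stdio.h>

#define typecheck(type, x) \
({	type __dummy; \
	__typeof__(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

int main(void)
{
	unsigned int val = 42;
	unsigned int *p = &val;

	typecheck(unsigned int *, p);	/* fine */
	/* typecheck(unsigned long *, p);   with -Werror: comparison of
	 *                                  distinct pointer types */
	printf("%u\n", *p);
	return 0;
}
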
16991diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16992index 9662290..49ca5e5 100644
16993--- a/arch/x86/include/asm/hw_irq.h
16994+++ b/arch/x86/include/asm/hw_irq.h
16995@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
16996 #endif /* CONFIG_X86_LOCAL_APIC */
16997
16998 /* Statistics */
16999-extern atomic_t irq_err_count;
17000-extern atomic_t irq_mis_count;
17001+extern atomic_unchecked_t irq_err_count;
17002+extern atomic_unchecked_t irq_mis_count;
17003
17004 /* EISA */
17005 extern void eisa_set_level_irq(unsigned int irq);
17006diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17007index ccffa53..3c90c87 100644
17008--- a/arch/x86/include/asm/i8259.h
17009+++ b/arch/x86/include/asm/i8259.h
17010@@ -62,7 +62,7 @@ struct legacy_pic {
17011 void (*init)(int auto_eoi);
17012 int (*irq_pending)(unsigned int irq);
17013 void (*make_irq)(unsigned int irq);
17014-};
17015+} __do_const;
17016
17017 extern struct legacy_pic *legacy_pic;
17018 extern struct legacy_pic null_legacy_pic;
17019diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17020index 34a5b93..27e40a6 100644
17021--- a/arch/x86/include/asm/io.h
17022+++ b/arch/x86/include/asm/io.h
17023@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17024 "m" (*(volatile type __force *)addr) barrier); }
17025
17026 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17027-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17028-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17029+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17030+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17031
17032 build_mmio_read(__readb, "b", unsigned char, "=q", )
17033-build_mmio_read(__readw, "w", unsigned short, "=r", )
17034-build_mmio_read(__readl, "l", unsigned int, "=r", )
17035+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17036+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17037
17038 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17039 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17040@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17041 * this function
17042 */
17043
17044-static inline phys_addr_t virt_to_phys(volatile void *address)
17045+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17046 {
17047 return __pa(address);
17048 }
17049@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17050 return ioremap_nocache(offset, size);
17051 }
17052
17053-extern void iounmap(volatile void __iomem *addr);
17054+extern void iounmap(const volatile void __iomem *addr);
17055
17056 extern void set_iounmap_nonlazy(void);
17057
17058@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17059
17060 #include <linux/vmalloc.h>
17061
17062+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17063+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17064+{
17065+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17066+}
17067+
17068+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17069+{
17070+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17071+}
17072+
17073 /*
17074 * Convert a virtual cached pointer to an uncached pointer
17075 */
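
The new valid_phys_addr_range()/valid_mmap_phys_addr_range() reject /dev/mem accesses beyond what the CPU can physically address: the request is rounded up to whole pages and compared against 2^(x86_phys_bits - PAGE_SHIFT) page frames. A standalone sketch of the same bound check, with phys_bits hard-coded where the kernel would read it from CPUID:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int phys_range_valid(unsigned long long addr, size_t count,
			    unsigned phys_bits)
{
	unsigned long long max_pfn = 1ULL << (phys_bits - PAGE_SHIFT);

	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < max_pfn;
}

int main(void)
{
	/* 36 physical bits (classic PAE): 64 GiB addressable */
	printf("%d\n", phys_range_valid(0xffffffffULL, 4096, 36));	/* 1 */
	printf("%d\n", phys_range_valid(1ULL << 36, 4096, 36));		/* 0 */
	return 0;
}
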
17076diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17077index 0a8b519..80e7d5b 100644
17078--- a/arch/x86/include/asm/irqflags.h
17079+++ b/arch/x86/include/asm/irqflags.h
17080@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17081 sti; \
17082 sysexit
17083
17084+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17085+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17086+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17087+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17088+
17089 #else
17090 #define INTERRUPT_RETURN iret
17091 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17092diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17093index 4421b5d..8543006 100644
17094--- a/arch/x86/include/asm/kprobes.h
17095+++ b/arch/x86/include/asm/kprobes.h
17096@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17097 #define RELATIVEJUMP_SIZE 5
17098 #define RELATIVECALL_OPCODE 0xe8
17099 #define RELATIVE_ADDR_SIZE 4
17100-#define MAX_STACK_SIZE 64
17101-#define MIN_STACK_SIZE(ADDR) \
17102- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17103- THREAD_SIZE - (unsigned long)(ADDR))) \
17104- ? (MAX_STACK_SIZE) \
17105- : (((unsigned long)current_thread_info()) + \
17106- THREAD_SIZE - (unsigned long)(ADDR)))
17107+#define MAX_STACK_SIZE 64UL
17108+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17109
17110 #define flush_insn_slot(p) do { } while (0)
17111
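
The rewritten MIN_STACK_SIZE bounds a stack copy by the distance from ADDR to the top of the kernel stack (thread.sp0) rather than reconstructing it from current_thread_info(), and the 64UL literal keeps the min() comparison in one unsigned type. The shape of the computation, stand-alone with illustrative values:

#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))
#define MAX_STACK_SIZE	64UL

/* How many bytes may safely be copied starting at 'addr' without
 * running past the stack top 'sp0'. */
static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
	return min(MAX_STACK_SIZE, sp0 - addr);
}

int main(void)
{
	unsigned long sp0 = 0xc0001000UL;

	printf("%lu\n", min_stack_size(sp0, sp0 - 200));	/* 64 */
	printf("%lu\n", min_stack_size(sp0, sp0 - 24));		/* 24 */
	return 0;
}
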
17112diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17113index 4ad6560..75c7bdd 100644
17114--- a/arch/x86/include/asm/local.h
17115+++ b/arch/x86/include/asm/local.h
17116@@ -10,33 +10,97 @@ typedef struct {
17117 atomic_long_t a;
17118 } local_t;
17119
17120+typedef struct {
17121+ atomic_long_unchecked_t a;
17122+} local_unchecked_t;
17123+
17124 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17125
17126 #define local_read(l) atomic_long_read(&(l)->a)
17127+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17128 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17129+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17130
17131 static inline void local_inc(local_t *l)
17132 {
17133- asm volatile(_ASM_INC "%0"
17134+ asm volatile(_ASM_INC "%0\n"
17135+
17136+#ifdef CONFIG_PAX_REFCOUNT
17137+ "jno 0f\n"
17138+ _ASM_DEC "%0\n"
17139+ "int $4\n0:\n"
17140+ _ASM_EXTABLE(0b, 0b)
17141+#endif
17142+
17143+ : "+m" (l->a.counter));
17144+}
17145+
17146+static inline void local_inc_unchecked(local_unchecked_t *l)
17147+{
17148+ asm volatile(_ASM_INC "%0\n"
17149 : "+m" (l->a.counter));
17150 }
17151
17152 static inline void local_dec(local_t *l)
17153 {
17154- asm volatile(_ASM_DEC "%0"
17155+ asm volatile(_ASM_DEC "%0\n"
17156+
17157+#ifdef CONFIG_PAX_REFCOUNT
17158+ "jno 0f\n"
17159+ _ASM_INC "%0\n"
17160+ "int $4\n0:\n"
17161+ _ASM_EXTABLE(0b, 0b)
17162+#endif
17163+
17164+ : "+m" (l->a.counter));
17165+}
17166+
17167+static inline void local_dec_unchecked(local_unchecked_t *l)
17168+{
17169+ asm volatile(_ASM_DEC "%0\n"
17170 : "+m" (l->a.counter));
17171 }
17172
17173 static inline void local_add(long i, local_t *l)
17174 {
17175- asm volatile(_ASM_ADD "%1,%0"
17176+ asm volatile(_ASM_ADD "%1,%0\n"
17177+
17178+#ifdef CONFIG_PAX_REFCOUNT
17179+ "jno 0f\n"
17180+ _ASM_SUB "%1,%0\n"
17181+ "int $4\n0:\n"
17182+ _ASM_EXTABLE(0b, 0b)
17183+#endif
17184+
17185+ : "+m" (l->a.counter)
17186+ : "ir" (i));
17187+}
17188+
17189+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17190+{
17191+ asm volatile(_ASM_ADD "%1,%0\n"
17192 : "+m" (l->a.counter)
17193 : "ir" (i));
17194 }
17195
17196 static inline void local_sub(long i, local_t *l)
17197 {
17198- asm volatile(_ASM_SUB "%1,%0"
17199+ asm volatile(_ASM_SUB "%1,%0\n"
17200+
17201+#ifdef CONFIG_PAX_REFCOUNT
17202+ "jno 0f\n"
17203+ _ASM_ADD "%1,%0\n"
17204+ "int $4\n0:\n"
17205+ _ASM_EXTABLE(0b, 0b)
17206+#endif
17207+
17208+ : "+m" (l->a.counter)
17209+ : "ir" (i));
17210+}
17211+
17212+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17213+{
17214+ asm volatile(_ASM_SUB "%1,%0\n"
17215 : "+m" (l->a.counter)
17216 : "ir" (i));
17217 }
17218@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17219 */
17220 static inline int local_sub_and_test(long i, local_t *l)
17221 {
17222- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17223+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17224 }
17225
17226 /**
17227@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17228 */
17229 static inline int local_dec_and_test(local_t *l)
17230 {
17231- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17232+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17233 }
17234
17235 /**
17236@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17237 */
17238 static inline int local_inc_and_test(local_t *l)
17239 {
17240- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17241+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17242 }
17243
17244 /**
17245@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17246 */
17247 static inline int local_add_negative(long i, local_t *l)
17248 {
17249- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17250+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17251 }
17252
17253 /**
17254@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17255 static inline long local_add_return(long i, local_t *l)
17256 {
17257 long __i = i;
17258+ asm volatile(_ASM_XADD "%0, %1\n"
17259+
17260+#ifdef CONFIG_PAX_REFCOUNT
17261+ "jno 0f\n"
17262+ _ASM_MOV "%0,%1\n"
17263+ "int $4\n0:\n"
17264+ _ASM_EXTABLE(0b, 0b)
17265+#endif
17266+
17267+ : "+r" (i), "+m" (l->a.counter)
17268+ : : "memory");
17269+ return i + __i;
17270+}
17271+
17272+/**
17273+ * local_add_return_unchecked - add and return
17274+ * @i: integer value to add
17275+ * @l: pointer to type local_unchecked_t
17276+ *
17277+ * Atomically adds @i to @l and returns @i + @l
17278+ */
17279+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17280+{
17281+ long __i = i;
17282 asm volatile(_ASM_XADD "%0, %1;"
17283 : "+r" (i), "+m" (l->a.counter)
17284 : : "memory");
17285@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17286
17287 #define local_cmpxchg(l, o, n) \
17288 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17289+#define local_cmpxchg_unchecked(l, o, n) \
17290+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17291 /* Always has a lock prefix */
17292 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17293
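
Under PAX_REFCOUNT each local_inc/dec/add/sub above gains a signed-overflow check: if the arithmetic sets the overflow flag, "jno 0f" falls through, the operation is undone and "int $4" raises the overflow trap, which PaX turns into a refcount-saturation report. A userspace analog of the same detect-undo-report policy, built on __builtin_add_overflow instead of the trap (names here are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Counterpart of the patched local_inc(): on overflow the counter keeps
 * its old value and the event is reported, instead of silently wrapping
 * to LONG_MIN. */
static void checked_inc(long *counter)
{
	long next;

	if (__builtin_add_overflow(*counter, 1L, &next)) {
		fprintf(stderr, "refcount overflow caught\n");
		abort();	/* stands in for the 'int $4' trap */
	}
	*counter = next;
}

int main(void)
{
	long c = LONG_MAX - 1;

	checked_inc(&c);	/* ok: reaches LONG_MAX */
	checked_inc(&c);	/* aborts instead of wrapping */
	return 0;
}
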
17294diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17295new file mode 100644
17296index 0000000..2bfd3ba
17297--- /dev/null
17298+++ b/arch/x86/include/asm/mman.h
17299@@ -0,0 +1,15 @@
17300+#ifndef _X86_MMAN_H
17301+#define _X86_MMAN_H
17302+
17303+#include <uapi/asm/mman.h>
17304+
17305+#ifdef __KERNEL__
17306+#ifndef __ASSEMBLY__
17307+#ifdef CONFIG_X86_32
17308+#define arch_mmap_check i386_mmap_check
17309+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17310+#endif
17311+#endif
17312+#endif
17313+
17314+#endif /* X86_MMAN_H */
17315diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17316index 09b9620..923aecd 100644
17317--- a/arch/x86/include/asm/mmu.h
17318+++ b/arch/x86/include/asm/mmu.h
17319@@ -9,7 +9,7 @@
17320 * we put the segment information here.
17321 */
17322 typedef struct {
17323- void *ldt;
17324+ struct desc_struct *ldt;
17325 int size;
17326
17327 #ifdef CONFIG_X86_64
17328@@ -18,7 +18,19 @@ typedef struct {
17329 #endif
17330
17331 struct mutex lock;
17332- void __user *vdso;
17333+ unsigned long vdso;
17334+
17335+#ifdef CONFIG_X86_32
17336+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17337+ unsigned long user_cs_base;
17338+ unsigned long user_cs_limit;
17339+
17340+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17341+ cpumask_t cpu_user_cs_mask;
17342+#endif
17343+
17344+#endif
17345+#endif
17346
17347 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17348 } mm_context_t;
17349diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17350index 883f6b93..6869d96 100644
17351--- a/arch/x86/include/asm/mmu_context.h
17352+++ b/arch/x86/include/asm/mmu_context.h
17353@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17354
17355 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17356 {
17357+
17358+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17359+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17360+ unsigned int i;
17361+ pgd_t *pgd;
17362+
17363+ pax_open_kernel();
17364+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17365+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17366+ set_pgd_batched(pgd+i, native_make_pgd(0));
17367+ pax_close_kernel();
17368+ }
17369+#endif
17370+
17371 #ifdef CONFIG_SMP
17372 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17373 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17374@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17375 struct task_struct *tsk)
17376 {
17377 unsigned cpu = smp_processor_id();
17378+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17379+ int tlbstate = TLBSTATE_OK;
17380+#endif
17381
17382 if (likely(prev != next)) {
17383 #ifdef CONFIG_SMP
17384+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17385+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17386+#endif
17387 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17388 this_cpu_write(cpu_tlbstate.active_mm, next);
17389 #endif
17390 cpumask_set_cpu(cpu, mm_cpumask(next));
17391
17392 /* Re-load page tables */
17393+#ifdef CONFIG_PAX_PER_CPU_PGD
17394+ pax_open_kernel();
17395+
17396+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17397+ if (static_cpu_has(X86_FEATURE_PCID))
17398+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17399+ else
17400+#endif
17401+
17402+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17403+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17404+ pax_close_kernel();
17405+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17406+
17407+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17408+ if (static_cpu_has(X86_FEATURE_PCID)) {
17409+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17410+ u64 descriptor[2];
17411+ descriptor[0] = PCID_USER;
17412+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17413+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17414+ descriptor[0] = PCID_KERNEL;
17415+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17416+ }
17417+ } else {
17418+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17419+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17420+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17421+ else
17422+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17423+ }
17424+ } else
17425+#endif
17426+
17427+ load_cr3(get_cpu_pgd(cpu, kernel));
17428+#else
17429 load_cr3(next->pgd);
17430+#endif
17431 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17432
17433 /* Stop flush ipis for the previous mm */
17434@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17435 */
17436 if (unlikely(prev->context.ldt != next->context.ldt))
17437 load_LDT_nolock(&next->context);
17438+
17439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17440+ if (!(__supported_pte_mask & _PAGE_NX)) {
17441+ smp_mb__before_atomic();
17442+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17443+ smp_mb__after_atomic();
17444+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17445+ }
17446+#endif
17447+
17448+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17449+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17450+ prev->context.user_cs_limit != next->context.user_cs_limit))
17451+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17452+#ifdef CONFIG_SMP
17453+ else if (unlikely(tlbstate != TLBSTATE_OK))
17454+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17455+#endif
17456+#endif
17457+
17458 }
17459+ else {
17460+
17461+#ifdef CONFIG_PAX_PER_CPU_PGD
17462+ pax_open_kernel();
17463+
17464+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17465+ if (static_cpu_has(X86_FEATURE_PCID))
17466+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17467+ else
17468+#endif
17469+
17470+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17471+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17472+ pax_close_kernel();
17473+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17474+
17475+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17476+ if (static_cpu_has(X86_FEATURE_PCID)) {
17477+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17478+ u64 descriptor[2];
17479+ descriptor[0] = PCID_USER;
17480+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17481+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17482+ descriptor[0] = PCID_KERNEL;
17483+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17484+ }
17485+ } else {
17486+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17487+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17488+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17489+ else
17490+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17491+ }
17492+ } else
17493+#endif
17494+
17495+ load_cr3(get_cpu_pgd(cpu, kernel));
17496+#endif
17497+
17498 #ifdef CONFIG_SMP
17499- else {
17500 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17501 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17502
17503@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17504 * tlb flush IPI delivery. We must reload CR3
17505 * to make sure to use no freed page tables.
17506 */
17507+
17508+#ifndef CONFIG_PAX_PER_CPU_PGD
17509 load_cr3(next->pgd);
17510 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17511+#endif
17512+
17513 load_mm_cr4(next);
17514 load_LDT_nolock(&next->context);
17515+
17516+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17517+ if (!(__supported_pte_mask & _PAGE_NX))
17518+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17519+#endif
17520+
17521+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17522+#ifdef CONFIG_PAX_PAGEEXEC
17523+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17524+#endif
17525+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17526+#endif
17527+
17528 }
17529+#endif
17530 }
17531-#endif
17532 }
17533
17534 #define activate_mm(prev, next) \
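
On PCID-capable CPUs the switch_mm() path above invalidates only the user PCID instead of flushing everything; with INVPCID the request is a 16-byte descriptor whose first quadword holds the PCID and whose second holds a linear address (unused for a single-context flush). A kernel-context sketch of the pattern as the patch uses it, assuming the usual encoding of single-context as type 1 (INVPCID is privileged, so this cannot run in userspace):

/* Flush all TLB entries tagged with 'pcid' (single-context flush).
 * Mirrors the patch's operand placement: %rax holds the request type,
 * %rdx points at the 16-byte descriptor. */
#define INVPCID_SINGLE_CONTEXT	1UL	/* assumed type-1 encoding */

static inline void invpcid_flush_single_context(unsigned long pcid)
{
	unsigned long descriptor[2] = { pcid, 0 };

	asm volatile(".byte 0x66, 0x0f, 0x38, 0x82, 0x02" /* invpcid (%rdx), %rax */
		     : : "d" (descriptor), "a" (INVPCID_SINGLE_CONTEXT)
		     : "memory");
}
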
17535diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17536index e3b7819..b257c64 100644
17537--- a/arch/x86/include/asm/module.h
17538+++ b/arch/x86/include/asm/module.h
17539@@ -5,6 +5,7 @@
17540
17541 #ifdef CONFIG_X86_64
17542 /* X86_64 does not define MODULE_PROC_FAMILY */
17543+#define MODULE_PROC_FAMILY ""
17544 #elif defined CONFIG_M486
17545 #define MODULE_PROC_FAMILY "486 "
17546 #elif defined CONFIG_M586
17547@@ -57,8 +58,20 @@
17548 #error unknown processor family
17549 #endif
17550
17551-#ifdef CONFIG_X86_32
17552-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17553+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17554+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17555+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17556+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17557+#else
17558+#define MODULE_PAX_KERNEXEC ""
17559 #endif
17560
17561+#ifdef CONFIG_PAX_MEMORY_UDEREF
17562+#define MODULE_PAX_UDEREF "UDEREF "
17563+#else
17564+#define MODULE_PAX_UDEREF ""
17565+#endif
17566+
17567+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17568+
17569 #endif /* _ASM_X86_MODULE_H */
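
MODULE_ARCH_VERMAGIC now also encodes which PaX features the kernel was built with, so a module compiled without KERNEXEC/UDEREF refuses to load into a kernel that has them, and vice versa. A quick illustration of the string that results on a 64-bit KERNEXEC_OR + UDEREF build (adjacent string literals concatenate):

#include <stdio.h>

#define MODULE_PROC_FAMILY	""		/* x86_64 */
#define MODULE_PAX_KERNEXEC	"KERNEXEC_OR "
#define MODULE_PAX_UDEREF	"UDEREF "
#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	printf("vermagic arch suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	/* -> "KERNEXEC_OR UDEREF " */
	return 0;
}
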
17570diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17571index 5f2fc44..106caa6 100644
17572--- a/arch/x86/include/asm/nmi.h
17573+++ b/arch/x86/include/asm/nmi.h
17574@@ -36,26 +36,35 @@ enum {
17575
17576 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17577
17578+struct nmiaction;
17579+
17580+struct nmiwork {
17581+ const struct nmiaction *action;
17582+ u64 max_duration;
17583+ struct irq_work irq_work;
17584+};
17585+
17586 struct nmiaction {
17587 struct list_head list;
17588 nmi_handler_t handler;
17589- u64 max_duration;
17590- struct irq_work irq_work;
17591 unsigned long flags;
17592 const char *name;
17593-};
17594+ struct nmiwork *work;
17595+} __do_const;
17596
17597 #define register_nmi_handler(t, fn, fg, n, init...) \
17598 ({ \
17599- static struct nmiaction init fn##_na = { \
17600+ static struct nmiwork fn##_nw; \
17601+ static const struct nmiaction init fn##_na = { \
17602 .handler = (fn), \
17603 .name = (n), \
17604 .flags = (fg), \
17605+ .work = &fn##_nw, \
17606 }; \
17607 __register_nmi_handler((t), &fn##_na); \
17608 })
17609
17610-int __register_nmi_handler(unsigned int, struct nmiaction *);
17611+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17612
17613 void unregister_nmi_handler(unsigned int, const char *);
17614
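
Splitting nmiaction into a const registration record and a separate nmiwork for the mutable bookkeeping lets the action itself live in read-only memory (__do_const) while max_duration and the irq_work stay writable. The same pattern reduced to standalone C, with hypothetical names:

#include <stdio.h>

struct work {			/* mutable bookkeeping, writable section */
	unsigned long long max_duration;
};

struct action {			/* registration record, can be read-only */
	int (*handler)(void);
	const char *name;
	struct work *work;	/* indirection to the mutable part */
};

static int my_handler(void) { return 0; }

static struct work my_work;
static const struct action my_action = {
	.handler = my_handler,
	.name    = "my_nmi",
	.work    = &my_work,	/* a const struct may point at RW data */
};

int main(void)
{
	my_action.work->max_duration = 123;	/* fine: only *work is written */
	printf("%s: %llu\n", my_action.name, my_action.work->max_duration);
	return 0;
}
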
17615diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17616index 802dde3..9183e68 100644
17617--- a/arch/x86/include/asm/page.h
17618+++ b/arch/x86/include/asm/page.h
17619@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17620 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17621
17622 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17623+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17624
17625 #define __boot_va(x) __va(x)
17626 #define __boot_pa(x) __pa(x)
17627@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17628 * virt_to_page(kaddr) returns a valid pointer if and only if
17629 * virt_addr_valid(kaddr) returns true.
17630 */
17631-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17632 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17633 extern bool __virt_addr_valid(unsigned long kaddr);
17634 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17635
17636+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17637+#define virt_to_page(kaddr) \
17638+ ({ \
17639+ const void *__kaddr = (const void *)(kaddr); \
17640+ BUG_ON(!virt_addr_valid(__kaddr)); \
17641+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17642+ })
17643+#else
17644+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17645+#endif
17646+
17647 #endif /* __ASSEMBLY__ */
17648
17649 #include <asm-generic/memory_model.h>
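
Under GRKERNSEC_KSTACKOVERFLOW, virt_to_page() grows a BUG_ON(!virt_addr_valid(...)) so a pointer that is not a directly-mapped kernel address (for instance a vmap'd stack address) is caught at the conversion instead of yielding a bogus struct page. A loose userspace analog of the checked-macro shape, using a GNU statement expression and assert() (names illustrative):

#include <assert.h>
#include <stdio.h>

static int addr_valid(const void *p)	/* stand-in for virt_addr_valid() */
{
	return p != NULL;
}

/* Checked conversion: evaluate the argument once, validate, then use it. */
#define checked_deref(ptr)				\
	({						\
		const int *__p = (ptr);		\
		assert(addr_valid(__p));		\
		*__p;					\
	})

int main(void)
{
	int x = 7;

	printf("%d\n", checked_deref(&x));
	return 0;
}
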
17650diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17651index b3bebf9..13ac22e 100644
17652--- a/arch/x86/include/asm/page_64.h
17653+++ b/arch/x86/include/asm/page_64.h
17654@@ -7,9 +7,9 @@
17655
17656 /* duplicated to the one in bootmem.h */
17657 extern unsigned long max_pfn;
17658-extern unsigned long phys_base;
17659+extern const unsigned long phys_base;
17660
17661-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17662+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17663 {
17664 unsigned long y = x - __START_KERNEL_map;
17665
17666@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17667 }
17668
17669 #ifdef CONFIG_DEBUG_VIRTUAL
17670-extern unsigned long __phys_addr(unsigned long);
17671-extern unsigned long __phys_addr_symbol(unsigned long);
17672+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17673+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17674 #else
17675 #define __phys_addr(x) __phys_addr_nodebug(x)
17676 #define __phys_addr_symbol(x) \
17677diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17678index 965c47d..ffe0af8 100644
17679--- a/arch/x86/include/asm/paravirt.h
17680+++ b/arch/x86/include/asm/paravirt.h
17681@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17682 return (pmd_t) { ret };
17683 }
17684
17685-static inline pmdval_t pmd_val(pmd_t pmd)
17686+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17687 {
17688 pmdval_t ret;
17689
17690@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17691 val);
17692 }
17693
17694+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17695+{
17696+ pgdval_t val = native_pgd_val(pgd);
17697+
17698+ if (sizeof(pgdval_t) > sizeof(long))
17699+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17700+ val, (u64)val >> 32);
17701+ else
17702+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17703+ val);
17704+}
17705+
17706 static inline void pgd_clear(pgd_t *pgdp)
17707 {
17708 set_pgd(pgdp, __pgd(0));
17709@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17710 pv_mmu_ops.set_fixmap(idx, phys, flags);
17711 }
17712
17713+#ifdef CONFIG_PAX_KERNEXEC
17714+static inline unsigned long pax_open_kernel(void)
17715+{
17716+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17717+}
17718+
17719+static inline unsigned long pax_close_kernel(void)
17720+{
17721+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17722+}
17723+#else
17724+static inline unsigned long pax_open_kernel(void) { return 0; }
17725+static inline unsigned long pax_close_kernel(void) { return 0; }
17726+#endif
17727+
17728 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17729
17730 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17731@@ -906,7 +933,7 @@ extern void default_banner(void);
17732
17733 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17734 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17735-#define PARA_INDIRECT(addr) *%cs:addr
17736+#define PARA_INDIRECT(addr) *%ss:addr
17737 #endif
17738
17739 #define INTERRUPT_RETURN \
17740@@ -981,6 +1008,21 @@ extern void default_banner(void);
17741 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17742 CLBR_NONE, \
17743 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17744+
17745+#define GET_CR0_INTO_RDI \
17746+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17747+ mov %rax,%rdi
17748+
17749+#define SET_RDI_INTO_CR0 \
17750+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17751+
17752+#define GET_CR3_INTO_RDI \
17753+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17754+ mov %rax,%rdi
17755+
17756+#define SET_RDI_INTO_CR3 \
17757+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17758+
17759 #endif /* CONFIG_X86_32 */
17760
17761 #endif /* __ASSEMBLY__ */
17762diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17763index 7549b8b..f0edfda 100644
17764--- a/arch/x86/include/asm/paravirt_types.h
17765+++ b/arch/x86/include/asm/paravirt_types.h
17766@@ -84,7 +84,7 @@ struct pv_init_ops {
17767 */
17768 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17769 unsigned long addr, unsigned len);
17770-};
17771+} __no_const __no_randomize_layout;
17772
17773
17774 struct pv_lazy_ops {
17775@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17776 void (*enter)(void);
17777 void (*leave)(void);
17778 void (*flush)(void);
17779-};
17780+} __no_randomize_layout;
17781
17782 struct pv_time_ops {
17783 unsigned long long (*sched_clock)(void);
17784 unsigned long long (*steal_clock)(int cpu);
17785 unsigned long (*get_tsc_khz)(void);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789 struct pv_cpu_ops {
17790 /* hooks for various privileged instructions */
17791@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17792
17793 void (*start_context_switch)(struct task_struct *prev);
17794 void (*end_context_switch)(struct task_struct *next);
17795-};
17796+} __no_const __no_randomize_layout;
17797
17798 struct pv_irq_ops {
17799 /*
17800@@ -215,7 +215,7 @@ struct pv_irq_ops {
17801 #ifdef CONFIG_X86_64
17802 void (*adjust_exception_frame)(void);
17803 #endif
17804-};
17805+} __no_randomize_layout;
17806
17807 struct pv_apic_ops {
17808 #ifdef CONFIG_X86_LOCAL_APIC
17809@@ -223,7 +223,7 @@ struct pv_apic_ops {
17810 unsigned long start_eip,
17811 unsigned long start_esp);
17812 #endif
17813-};
17814+} __no_const __no_randomize_layout;
17815
17816 struct pv_mmu_ops {
17817 unsigned long (*read_cr2)(void);
17818@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17819 struct paravirt_callee_save make_pud;
17820
17821 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17822+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17823 #endif /* PAGETABLE_LEVELS == 4 */
17824 #endif /* PAGETABLE_LEVELS >= 3 */
17825
17826@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17827 an mfn. We can tell which is which from the index. */
17828 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17829 phys_addr_t phys, pgprot_t flags);
17830-};
17831+
17832+#ifdef CONFIG_PAX_KERNEXEC
17833+ unsigned long (*pax_open_kernel)(void);
17834+ unsigned long (*pax_close_kernel)(void);
17835+#endif
17836+
17837+} __no_randomize_layout;
17838
17839 struct arch_spinlock;
17840 #ifdef CONFIG_SMP
17841@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17842 struct pv_lock_ops {
17843 struct paravirt_callee_save lock_spinning;
17844 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17845-};
17846+} __no_randomize_layout;
17847
17848 /* This contains all the paravirt structures: we get a convenient
17849 * number for each function using the offset which we use to indicate
17850- * what to patch. */
17851+ * what to patch.
17852+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17853+ */
17854+
17855 struct paravirt_patch_template {
17856 struct pv_init_ops pv_init_ops;
17857 struct pv_time_ops pv_time_ops;
17858@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17859 struct pv_apic_ops pv_apic_ops;
17860 struct pv_mmu_ops pv_mmu_ops;
17861 struct pv_lock_ops pv_lock_ops;
17862-};
17863+} __no_randomize_layout;
17864
17865 extern struct pv_info pv_info;
17866 extern struct pv_init_ops pv_init_ops;
17867diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17868index c4412e9..90e88c5 100644
17869--- a/arch/x86/include/asm/pgalloc.h
17870+++ b/arch/x86/include/asm/pgalloc.h
17871@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17872 pmd_t *pmd, pte_t *pte)
17873 {
17874 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17875+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17876+}
17877+
17878+static inline void pmd_populate_user(struct mm_struct *mm,
17879+ pmd_t *pmd, pte_t *pte)
17880+{
17881+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17882 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17883 }
17884
17885@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17886
17887 #ifdef CONFIG_X86_PAE
17888 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17889+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17890+{
17891+ pud_populate(mm, pudp, pmd);
17892+}
17893 #else /* !CONFIG_X86_PAE */
17894 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17895 {
17896 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17897 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17898 }
17899+
17900+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17901+{
17902+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17903+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17904+}
17905 #endif /* CONFIG_X86_PAE */
17906
17907 #if PAGETABLE_LEVELS > 3
17908@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17909 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17910 }
17911
17912+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17913+{
17914+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17915+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17916+}
17917+
17918 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17919 {
17920 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
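
The new *_populate_kernel() variants differ from their user counterparts in exactly one bit: _PAGE_TABLE carries _PAGE_USER while _KERNPG_TABLE does not, so kernel page tables are never reachable from user mode. With the standard x86 flag values the difference is 0x067 versus 0x063:

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL
#define _PAGE_USER	0x004UL
#define _PAGE_ACCESSED	0x020UL
#define _PAGE_DIRTY	0x040UL

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
			 _PAGE_ACCESSED | _PAGE_DIRTY)		/* 0x067 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | \
			 _PAGE_ACCESSED | _PAGE_DIRTY)		/* 0x063 */

int main(void)
{
	printf("_PAGE_TABLE   = %#05lx\n", _PAGE_TABLE);
	printf("_KERNPG_TABLE = %#05lx\n", _KERNPG_TABLE);
	printf("difference    = %#05lx (_PAGE_USER)\n",
	       _PAGE_TABLE ^ _KERNPG_TABLE);
	return 0;
}
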
17921diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17922index fd74a11..35fd5af 100644
17923--- a/arch/x86/include/asm/pgtable-2level.h
17924+++ b/arch/x86/include/asm/pgtable-2level.h
17925@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17926
17927 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17928 {
17929+ pax_open_kernel();
17930 *pmdp = pmd;
17931+ pax_close_kernel();
17932 }
17933
17934 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17935diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17936index cdaa58c..e61122b 100644
17937--- a/arch/x86/include/asm/pgtable-3level.h
17938+++ b/arch/x86/include/asm/pgtable-3level.h
17939@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17940
17941 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17942 {
17943+ pax_open_kernel();
17944 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17945+ pax_close_kernel();
17946 }
17947
17948 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17949 {
17950+ pax_open_kernel();
17951 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17952+ pax_close_kernel();
17953 }
17954
17955 /*
17956diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17957index a0c35bf..3647d79 100644
17958--- a/arch/x86/include/asm/pgtable.h
17959+++ b/arch/x86/include/asm/pgtable.h
17960@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17961
17962 #ifndef __PAGETABLE_PUD_FOLDED
17963 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17964+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17965 #define pgd_clear(pgd) native_pgd_clear(pgd)
17966 #endif
17967
17968@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17969
17970 #define arch_end_context_switch(prev) do {} while(0)
17971
17972+#define pax_open_kernel() native_pax_open_kernel()
17973+#define pax_close_kernel() native_pax_close_kernel()
17974 #endif /* CONFIG_PARAVIRT */
17975
17976+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17977+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17978+
17979+#ifdef CONFIG_PAX_KERNEXEC
17980+static inline unsigned long native_pax_open_kernel(void)
17981+{
17982+ unsigned long cr0;
17983+
17984+ preempt_disable();
17985+ barrier();
17986+ cr0 = read_cr0() ^ X86_CR0_WP;
17987+ BUG_ON(cr0 & X86_CR0_WP);
17988+ write_cr0(cr0);
17989+ barrier();
17990+ return cr0 ^ X86_CR0_WP;
17991+}
17992+
17993+static inline unsigned long native_pax_close_kernel(void)
17994+{
17995+ unsigned long cr0;
17996+
17997+ barrier();
17998+ cr0 = read_cr0() ^ X86_CR0_WP;
17999+ BUG_ON(!(cr0 & X86_CR0_WP));
18000+ write_cr0(cr0);
18001+ barrier();
18002+ preempt_enable_no_resched();
18003+ return cr0 ^ X86_CR0_WP;
18004+}
18005+#else
18006+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18007+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18008+#endif
18009+
18010 /*
18011 * The following only work if pte_present() is true.
18012 * Undefined behaviour if not..
18013 */
18014+static inline int pte_user(pte_t pte)
18015+{
18016+ return pte_val(pte) & _PAGE_USER;
18017+}
18018+
18019 static inline int pte_dirty(pte_t pte)
18020 {
18021 return pte_flags(pte) & _PAGE_DIRTY;
18022@@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18023 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18024 }
18025
18026+static inline unsigned long pgd_pfn(pgd_t pgd)
18027+{
18028+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18029+}
18030+
18031 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18032
18033 static inline int pmd_large(pmd_t pte)
18034@@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18035 return pte_clear_flags(pte, _PAGE_RW);
18036 }
18037
18038+static inline pte_t pte_mkread(pte_t pte)
18039+{
18040+ return __pte(pte_val(pte) | _PAGE_USER);
18041+}
18042+
18043 static inline pte_t pte_mkexec(pte_t pte)
18044 {
18045- return pte_clear_flags(pte, _PAGE_NX);
18046+#ifdef CONFIG_X86_PAE
18047+ if (__supported_pte_mask & _PAGE_NX)
18048+ return pte_clear_flags(pte, _PAGE_NX);
18049+ else
18050+#endif
18051+ return pte_set_flags(pte, _PAGE_USER);
18052+}
18053+
18054+static inline pte_t pte_exprotect(pte_t pte)
18055+{
18056+#ifdef CONFIG_X86_PAE
18057+ if (__supported_pte_mask & _PAGE_NX)
18058+ return pte_set_flags(pte, _PAGE_NX);
18059+ else
18060+#endif
18061+ return pte_clear_flags(pte, _PAGE_USER);
18062 }
18063
18064 static inline pte_t pte_mkdirty(pte_t pte)
18065@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18066 #endif
18067
18068 #ifndef __ASSEMBLY__
18069+
18070+#ifdef CONFIG_PAX_PER_CPU_PGD
18071+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18072+enum cpu_pgd_type {kernel = 0, user = 1};
18073+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18074+{
18075+ return cpu_pgd[cpu][type];
18076+}
18077+#endif
18078+
18079 #include <linux/mm_types.h>
18080 #include <linux/mmdebug.h>
18081 #include <linux/log2.h>
18082@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18083 * Currently stuck as a macro due to indirect forward reference to
18084 * linux/mmzone.h's __section_mem_map_addr() definition:
18085 */
18086-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18087+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18088
18089 /* Find an entry in the second-level page table.. */
18090 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18091@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18092 * Currently stuck as a macro due to indirect forward reference to
18093 * linux/mmzone.h's __section_mem_map_addr() definition:
18094 */
18095-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18096+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18097
18098 /* to find an entry in a page-table-directory. */
18099 static inline unsigned long pud_index(unsigned long address)
18100@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18101
18102 static inline int pgd_bad(pgd_t pgd)
18103 {
18104- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18105+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18106 }
18107
18108 static inline int pgd_none(pgd_t pgd)
18109@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18110 * pgd_offset() returns a (pgd_t *)
18111 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18112 */
18113-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18114+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18115+
18116+#ifdef CONFIG_PAX_PER_CPU_PGD
18117+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18118+#endif
18119+
18120 /*
18121 * a shortcut which implies the use of the kernel's pgd, instead
18122 * of a process's
18123@@ -660,6 +742,23 @@ static inline int pgd_none(pgd_t pgd)
18124 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18125 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18126
18127+#ifdef CONFIG_X86_32
18128+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18129+#else
18130+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18131+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18132+
18133+#ifdef CONFIG_PAX_MEMORY_UDEREF
18134+#ifdef __ASSEMBLY__
18135+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18136+#else
18137+extern unsigned long pax_user_shadow_base;
18138+extern pgdval_t clone_pgd_mask;
18139+#endif
18140+#endif
18141+
18142+#endif
18143+
18144 #ifndef __ASSEMBLY__
18145
18146 extern int direct_gbpages;
18147@@ -826,11 +925,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18148 * dst and src can be on the same page, but the range must not overlap,
18149 * and must not cross a page boundary.
18150 */
18151-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18152+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18153 {
18154- memcpy(dst, src, count * sizeof(pgd_t));
18155+ pax_open_kernel();
18156+ while (count--)
18157+ *dst++ = *src++;
18158+ pax_close_kernel();
18159 }
18160
18161+#ifdef CONFIG_PAX_PER_CPU_PGD
18162+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18163+#endif
18164+
18165+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18166+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18167+#else
18168+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18169+#endif
18170+
18171 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18172 static inline int page_level_shift(enum pg_level level)
18173 {
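
native_pax_open_kernel()/native_pax_close_kernel() above flip CR0.WP (bit 16) with an XOR, making kernel read-only memory writable only inside a preemption-disabled window, and the BUG_ONs catch unbalanced nesting. The XOR returns the pre-toggle value for free; a harmless demonstration of just that arithmetic, with no privileged register access:

#include <stdio.h>

#define X86_CR0_WP	(1UL << 16)

int main(void)
{
	unsigned long cr0 = 0x80050033UL;	/* typical CR0 value, WP set */

	/* open: cr0 ^ WP clears the bit; XOR-ing again recovers the old value */
	unsigned long opened = cr0 ^ X86_CR0_WP;

	printf("before %#lx  after %#lx  recovered %#lx\n",
	       cr0, opened, opened ^ X86_CR0_WP);
	return 0;
}
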
18174diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18175index b6c0b40..3535d47 100644
18176--- a/arch/x86/include/asm/pgtable_32.h
18177+++ b/arch/x86/include/asm/pgtable_32.h
18178@@ -25,9 +25,6 @@
18179 struct mm_struct;
18180 struct vm_area_struct;
18181
18182-extern pgd_t swapper_pg_dir[1024];
18183-extern pgd_t initial_page_table[1024];
18184-
18185 static inline void pgtable_cache_init(void) { }
18186 static inline void check_pgt_cache(void) { }
18187 void paging_init(void);
18188@@ -45,6 +42,12 @@ void paging_init(void);
18189 # include <asm/pgtable-2level.h>
18190 #endif
18191
18192+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18193+extern pgd_t initial_page_table[PTRS_PER_PGD];
18194+#ifdef CONFIG_X86_PAE
18195+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18196+#endif
18197+
18198 #if defined(CONFIG_HIGHPTE)
18199 #define pte_offset_map(dir, address) \
18200 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18201@@ -59,12 +62,17 @@ void paging_init(void);
18202 /* Clear a kernel PTE and flush it from the TLB */
18203 #define kpte_clear_flush(ptep, vaddr) \
18204 do { \
18205+ pax_open_kernel(); \
18206 pte_clear(&init_mm, (vaddr), (ptep)); \
18207+ pax_close_kernel(); \
18208 __flush_tlb_one((vaddr)); \
18209 } while (0)
18210
18211 #endif /* !__ASSEMBLY__ */
18212
18213+#define HAVE_ARCH_UNMAPPED_AREA
18214+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18215+
18216 /*
18217 * kern_addr_valid() is (1) for FLATMEM and (0) for
18218 * SPARSEMEM and DISCONTIGMEM
18219diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18220index 9fb2f2b..b04b4bf 100644
18221--- a/arch/x86/include/asm/pgtable_32_types.h
18222+++ b/arch/x86/include/asm/pgtable_32_types.h
18223@@ -8,7 +8,7 @@
18224 */
18225 #ifdef CONFIG_X86_PAE
18226 # include <asm/pgtable-3level_types.h>
18227-# define PMD_SIZE (1UL << PMD_SHIFT)
18228+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18229 # define PMD_MASK (~(PMD_SIZE - 1))
18230 #else
18231 # include <asm/pgtable-2level_types.h>
18232@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18233 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18234 #endif
18235
18236+#ifdef CONFIG_PAX_KERNEXEC
18237+#ifndef __ASSEMBLY__
18238+extern unsigned char MODULES_EXEC_VADDR[];
18239+extern unsigned char MODULES_EXEC_END[];
18240+#endif
18241+#include <asm/boot.h>
18242+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18243+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18244+#else
18245+#define ktla_ktva(addr) (addr)
18246+#define ktva_ktla(addr) (addr)
18247+#endif
18248+
18249 #define MODULES_VADDR VMALLOC_START
18250 #define MODULES_END VMALLOC_END
18251 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
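
Under KERNEXEC on 32-bit the kernel text is mapped at a different linear address than the usual virtual one, and ktla_ktva()/ktva_ktla() convert between the two by a constant offset; the 64-bit variants later in this patch are the identity. A round-trip sketch, with the common i386 defaults assumed (PAGE_OFFSET 0xC0000000, LOAD_PHYSICAL_ADDR 0x1000000):

#include <stdio.h>

#define PAGE_OFFSET		0xC0000000UL	/* assumed i386 default */
#define LOAD_PHYSICAL_ADDR	0x1000000UL	/* assumed (16 MiB) */

#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long ktla = 0x00100000UL;	/* illustrative text address */
	unsigned long ktva = ktla_ktva(ktla);

	printf("ktla %#lx -> ktva %#lx -> back %#lx\n",
	       ktla, ktva, ktva_ktla(ktva));
	return 0;
}
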
18252diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18253index 2ee7811..db41d8c 100644
18254--- a/arch/x86/include/asm/pgtable_64.h
18255+++ b/arch/x86/include/asm/pgtable_64.h
18256@@ -16,11 +16,16 @@
18257
18258 extern pud_t level3_kernel_pgt[512];
18259 extern pud_t level3_ident_pgt[512];
18260+extern pud_t level3_vmalloc_start_pgt[512];
18261+extern pud_t level3_vmalloc_end_pgt[512];
18262+extern pud_t level3_vmemmap_pgt[512];
18263+extern pud_t level2_vmemmap_pgt[512];
18264 extern pmd_t level2_kernel_pgt[512];
18265 extern pmd_t level2_fixmap_pgt[512];
18266-extern pmd_t level2_ident_pgt[512];
18267+extern pmd_t level2_ident_pgt[512*2];
18268 extern pte_t level1_fixmap_pgt[512];
18269-extern pgd_t init_level4_pgt[];
18270+extern pte_t level1_vsyscall_pgt[512];
18271+extern pgd_t init_level4_pgt[512];
18272
18273 #define swapper_pg_dir init_level4_pgt
18274
18275@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18276
18277 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18278 {
18279+ pax_open_kernel();
18280 *pmdp = pmd;
18281+ pax_close_kernel();
18282 }
18283
18284 static inline void native_pmd_clear(pmd_t *pmd)
18285@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18286
18287 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18288 {
18289+ pax_open_kernel();
18290 *pudp = pud;
18291+ pax_close_kernel();
18292 }
18293
18294 static inline void native_pud_clear(pud_t *pud)
18295@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18296
18297 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18298 {
18299+ pax_open_kernel();
18300+ *pgdp = pgd;
18301+ pax_close_kernel();
18302+}
18303+
18304+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18305+{
18306 *pgdp = pgd;
18307 }
18308
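
native_set_pgd() above gains a pax_open_kernel()/pax_close_kernel() bracket while the new native_set_pgd_batched() deliberately omits it — the apparent intent being that a caller updating many entries opens the kernel once around the whole loop instead of paying for a bracket per store. A sketch of that calling convention (the loop itself is an assumption, not code from the patch):

	static void set_pgd_range(pgd_t *dst, const pgd_t *src, unsigned int n)
	{
		unsigned int i;

		pax_open_kernel();		/* one bracket for the whole batch */
		for (i = 0; i < n; i++)
			native_set_pgd_batched(&dst[i], src[i]);
		pax_close_kernel();
	}
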
18309diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18310index 602b602..acb53ed 100644
18311--- a/arch/x86/include/asm/pgtable_64_types.h
18312+++ b/arch/x86/include/asm/pgtable_64_types.h
18313@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18314 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18315 #define MODULES_END _AC(0xffffffffff000000, UL)
18316 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18317+#define MODULES_EXEC_VADDR MODULES_VADDR
18318+#define MODULES_EXEC_END MODULES_END
18319 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18320 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18321 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18322 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18323
18324+#define ktla_ktva(addr) (addr)
18325+#define ktva_ktla(addr) (addr)
18326+
18327 #define EARLY_DYNAMIC_PAGE_TABLES 64
18328
18329 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18330diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18331index 8c7c108..1c1b77f 100644
18332--- a/arch/x86/include/asm/pgtable_types.h
18333+++ b/arch/x86/include/asm/pgtable_types.h
18334@@ -85,8 +85,10 @@
18335
18336 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18337 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18338-#else
18339+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18340 #define _PAGE_NX (_AT(pteval_t, 0))
18341+#else
18342+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18343 #endif
18344
18345 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18346@@ -141,6 +143,9 @@ enum page_cache_mode {
18347 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18348 _PAGE_ACCESSED)
18349
18350+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18351+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18352+
18353 #define __PAGE_KERNEL_EXEC \
18354 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18355 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18356@@ -148,7 +153,7 @@ enum page_cache_mode {
18357 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18358 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18359 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18360-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18361+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18362 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18363 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18364 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18365@@ -194,7 +199,7 @@ enum page_cache_mode {
18366 #ifdef CONFIG_X86_64
18367 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18368 #else
18369-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18370+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18371 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18372 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18373 #endif
18374@@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18375 {
18376 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18377 }
18378+#endif
18379
18380+#if PAGETABLE_LEVELS == 3
18381+#include <asm-generic/pgtable-nopud.h>
18382+#endif
18383+
18384+#if PAGETABLE_LEVELS == 2
18385+#include <asm-generic/pgtable-nopmd.h>
18386+#endif
18387+
18388+#ifndef __ASSEMBLY__
18389 #if PAGETABLE_LEVELS > 3
18390 typedef struct { pudval_t pud; } pud_t;
18391
18392@@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18393 return pud.pud;
18394 }
18395 #else
18396-#include <asm-generic/pgtable-nopud.h>
18397-
18398 static inline pudval_t native_pud_val(pud_t pud)
18399 {
18400 return native_pgd_val(pud.pgd);
18401@@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18402 return pmd.pmd;
18403 }
18404 #else
18405-#include <asm-generic/pgtable-nopmd.h>
18406-
18407 static inline pmdval_t native_pmd_val(pmd_t pmd)
18408 {
18409 return native_pgd_val(pmd.pud.pgd);
18410@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18411
18412 extern pteval_t __supported_pte_mask;
18413 extern void set_nx(void);
18414-extern int nx_enabled;
18415
18416 #define pgprot_writecombine pgprot_writecombine
18417 extern pgprot_t pgprot_writecombine(pgprot_t prot);
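
The _PAGE_NX change above is the interesting part of this hunk: on 32-bit non-PAE there is no hardware NX bit, so the patch repurposes a software PTE bit (_PAGE_BIT_HIDDEN) to at least record non-executability, leaving enforcement to SEGMEXEC/KERNEXEC; relatedly, PTE_IDENT_ATTR gains DIRTY+ACCESSED so the CPU never needs to write A/D bits back into now read-only identity-map entries. A sketch of the software-NX idea, assuming _PAGE_BIT_HIDDEN is one of the MMU-ignored software bits of a 32-bit PTE:

	/* Sketch: bits 9-11 of a 32-bit PTE are ignored by the MMU and
	 * free for software use; the exact bit chosen for
	 * _PAGE_BIT_HIDDEN is an assumption here. */
	#define _PAGE_BIT_HIDDEN_SKETCH	11
	#define _PAGE_NX_SOFT		(1UL << _PAGE_BIT_HIDDEN_SKETCH)

	static inline int pte_soft_exec_ok(unsigned long pteval)
	{
		/* records intent only; no hardware fault without real NX */
		return !(pteval & _PAGE_NX_SOFT);
	}
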
18418diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18419index 8f327184..368fb29 100644
18420--- a/arch/x86/include/asm/preempt.h
18421+++ b/arch/x86/include/asm/preempt.h
18422@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18423 */
18424 static __always_inline bool __preempt_count_dec_and_test(void)
18425 {
18426- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18427+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18428 }
18429
18430 /*
18431diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18432index ec1c935..5cc6023 100644
18433--- a/arch/x86/include/asm/processor.h
18434+++ b/arch/x86/include/asm/processor.h
18435@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18436 /* Index into per_cpu list: */
18437 u16 cpu_index;
18438 u32 microcode;
18439-};
18440+} __randomize_layout;
18441
18442 #define X86_VENDOR_INTEL 0
18443 #define X86_VENDOR_CYRIX 1
18444@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18445 : "memory");
18446 }
18447
18448+/* invpcid (%rdx),%rax */
18449+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18450+
18451+#define INVPCID_SINGLE_ADDRESS 0UL
18452+#define INVPCID_SINGLE_CONTEXT 1UL
18453+#define INVPCID_ALL_GLOBAL 2UL
18454+#define INVPCID_ALL_NONGLOBAL 3UL
18455+
18456+#define PCID_KERNEL 0UL
18457+#define PCID_USER 1UL
18458+#define PCID_NOFLUSH (1UL << 63)
18459+
18460 static inline void load_cr3(pgd_t *pgdir)
18461 {
18462- write_cr3(__pa(pgdir));
18463+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18464 }
18465
18466 #ifdef CONFIG_X86_32
18467@@ -282,7 +294,7 @@ struct tss_struct {
18468
18469 } ____cacheline_aligned;
18470
18471-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18472+extern struct tss_struct init_tss[NR_CPUS];
18473
18474 /*
18475 * Save the original ist values for checking stack pointers during debugging
18476@@ -479,6 +491,7 @@ struct thread_struct {
18477 unsigned short ds;
18478 unsigned short fsindex;
18479 unsigned short gsindex;
18480+ unsigned short ss;
18481 #endif
18482 #ifdef CONFIG_X86_32
18483 unsigned long ip;
18484@@ -805,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
18485 */
18486 #define TASK_SIZE PAGE_OFFSET
18487 #define TASK_SIZE_MAX TASK_SIZE
18488+
18489+#ifdef CONFIG_PAX_SEGMEXEC
18490+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18491+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18492+#else
18493 #define STACK_TOP TASK_SIZE
18494-#define STACK_TOP_MAX STACK_TOP
18495+#endif
18496+
18497+#define STACK_TOP_MAX TASK_SIZE
18498
18499 #define INIT_THREAD { \
18500- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18501+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18502 .vm86_info = NULL, \
18503 .sysenter_cs = __KERNEL_CS, \
18504 .io_bitmap_ptr = NULL, \
18505@@ -823,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
18506 */
18507 #define INIT_TSS { \
18508 .x86_tss = { \
18509- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18510+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18511 .ss0 = __KERNEL_DS, \
18512 .ss1 = __KERNEL_CS, \
18513 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18514@@ -834,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
18515 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18516
18517 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18518-#define KSTK_TOP(info) \
18519-({ \
18520- unsigned long *__ptr = (unsigned long *)(info); \
18521- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18522-})
18523+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18524
18525 /*
18526 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18527@@ -853,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18528 #define task_pt_regs(task) \
18529 ({ \
18530 struct pt_regs *__regs__; \
18531- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18532+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18533 __regs__ - 1; \
18534 })
18535
18536@@ -869,13 +885,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18537 * particular problem by preventing anything from being mapped
18538 * at the maximum canonical address.
18539 */
18540-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18541+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18542
18543 /* This decides where the kernel will search for a free chunk of vm
18544 * space during mmap's.
18545 */
18546 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18547- 0xc0000000 : 0xFFFFe000)
18548+ 0xc0000000 : 0xFFFFf000)
18549
18550 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18551 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18552@@ -886,11 +902,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18553 #define STACK_TOP_MAX TASK_SIZE_MAX
18554
18555 #define INIT_THREAD { \
18556- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18557+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18558 }
18559
18560 #define INIT_TSS { \
18561- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18562+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18563 }
18564
18565 /*
18566@@ -918,6 +934,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18567 */
18568 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18569
18570+#ifdef CONFIG_PAX_SEGMEXEC
18571+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18572+#endif
18573+
18574 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18575
18576 /* Get/set a process' ability to use the timestamp counter instruction */
18577@@ -962,7 +982,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18578 return 0;
18579 }
18580
18581-extern unsigned long arch_align_stack(unsigned long sp);
18582+#define arch_align_stack(x) ((x) & ~0xfUL)
18583 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18584
18585 void default_idle(void);
18586@@ -972,6 +992,6 @@ bool xen_set_default_idle(void);
18587 #define xen_set_default_idle 0
18588 #endif
18589
18590-void stop_this_cpu(void *dummy);
18591+void stop_this_cpu(void *dummy) __noreturn;
18592 void df_debug(struct pt_regs *regs, long error_code);
18593 #endif /* _ASM_X86_PROCESSOR_H */
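
processor.h now carries the PCID plumbing used throughout the rest of the patch: load_cr3() tags the kernel page tables with PCID_KERNEL, and PCID_NOFLUSH (bit 63 of CR3) asks the CPU to keep the cached translations of the PCID being switched to. A runnable sketch of the CR3 layout these constants imply (the page-directory address below is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	#define PCID_KERNEL	0ULL
	#define PCID_USER	1ULL
	#define PCID_NOFLUSH	(1ULL << 63)	/* don't flush the target PCID */

	/* CR3 with CR4.PCIDE=1: bits 11:0 = PCID, bits 51:12 = pgd PA. */
	static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
	{
		return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)
		       make_cr3(0x1a2b3000ULL, PCID_USER, 1));	/* hypothetical pgd */
		return 0;
	}
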
18594diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18595index 86fc2bb..bd5049a 100644
18596--- a/arch/x86/include/asm/ptrace.h
18597+++ b/arch/x86/include/asm/ptrace.h
18598@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18599 }
18600
18601 /*
18602- * user_mode_vm(regs) determines whether a register set came from user mode.
18603+ * user_mode(regs) determines whether a register set came from user mode.
18604 * This is true if V8086 mode was enabled OR if the register set was from
18605 * protected mode with RPL-3 CS value. This tricky test checks that with
18606 * one comparison. Many places in the kernel can bypass this full check
18607- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18608+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18609+ * be used.
18610 */
18611-static inline int user_mode(struct pt_regs *regs)
18612+static inline int user_mode_novm(struct pt_regs *regs)
18613 {
18614 #ifdef CONFIG_X86_32
18615 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18616 #else
18617- return !!(regs->cs & 3);
18618+ return !!(regs->cs & SEGMENT_RPL_MASK);
18619 #endif
18620 }
18621
18622-static inline int user_mode_vm(struct pt_regs *regs)
18623+static inline int user_mode(struct pt_regs *regs)
18624 {
18625 #ifdef CONFIG_X86_32
18626 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18627 USER_RPL;
18628 #else
18629- return user_mode(regs);
18630+ return user_mode_novm(regs);
18631 #endif
18632 }
18633
18634@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18635 #ifdef CONFIG_X86_64
18636 static inline bool user_64bit_mode(struct pt_regs *regs)
18637 {
18638+ unsigned long cs = regs->cs & 0xffff;
18639 #ifndef CONFIG_PARAVIRT
18640 /*
18641 * On non-paravirt systems, this is the only long mode CPL 3
18642 * selector. We do not allow long mode selectors in the LDT.
18643 */
18644- return regs->cs == __USER_CS;
18645+ return cs == __USER_CS;
18646 #else
18647 /* Headers are too twisted for this to go in paravirt.h. */
18648- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18649+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18650 #endif
18651 }
18652
18653@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18654 * Traps from the kernel do not save sp and ss.
18655 * Use the helper function to retrieve sp.
18656 */
18657- if (offset == offsetof(struct pt_regs, sp) &&
18658- regs->cs == __KERNEL_CS)
18659- return kernel_stack_pointer(regs);
18660+ if (offset == offsetof(struct pt_regs, sp)) {
18661+ unsigned long cs = regs->cs & 0xffff;
18662+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18663+ return kernel_stack_pointer(regs);
18664+ }
18665 #endif
18666 return *(unsigned long *)((unsigned long)regs + offset);
18667 }
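
The ptrace.h hunks swap the meanings of user_mode() and the old user_mode_vm() so the safe, VM86-aware test becomes the default, and they mask regs->cs down to 16 bits before comparing, since the 64-bit pt_regs slot can carry junk above the selector. The underlying RPL test is compact enough to state directly (SEGMENT_RPL_MASK is 0x3):

	/* Sketch: the low two selector bits are the requested privilege
	 * level; RPL 3 means the register set came from user mode. */
	static inline int cs_is_user(unsigned long cs)
	{
		return ((cs & 0xffff) & 0x3) == 0x3;	/* drop padding, test RPL */
	}
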
18668diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18669index ae0e241..e80b10b 100644
18670--- a/arch/x86/include/asm/qrwlock.h
18671+++ b/arch/x86/include/asm/qrwlock.h
18672@@ -7,8 +7,8 @@
18673 #define queue_write_unlock queue_write_unlock
18674 static inline void queue_write_unlock(struct qrwlock *lock)
18675 {
18676- barrier();
18677- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18678+ barrier();
18679+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18680 }
18681 #endif
18682
18683diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18684index 9c6b890..5305f53 100644
18685--- a/arch/x86/include/asm/realmode.h
18686+++ b/arch/x86/include/asm/realmode.h
18687@@ -22,16 +22,14 @@ struct real_mode_header {
18688 #endif
18689 /* APM/BIOS reboot */
18690 u32 machine_real_restart_asm;
18691-#ifdef CONFIG_X86_64
18692 u32 machine_real_restart_seg;
18693-#endif
18694 };
18695
18696 /* This must match data at trampoline_32/64.S */
18697 struct trampoline_header {
18698 #ifdef CONFIG_X86_32
18699 u32 start;
18700- u16 gdt_pad;
18701+ u16 boot_cs;
18702 u16 gdt_limit;
18703 u32 gdt_base;
18704 #else
18705diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18706index a82c4f1..ac45053 100644
18707--- a/arch/x86/include/asm/reboot.h
18708+++ b/arch/x86/include/asm/reboot.h
18709@@ -6,13 +6,13 @@
18710 struct pt_regs;
18711
18712 struct machine_ops {
18713- void (*restart)(char *cmd);
18714- void (*halt)(void);
18715- void (*power_off)(void);
18716+ void (* __noreturn restart)(char *cmd);
18717+ void (* __noreturn halt)(void);
18718+ void (* __noreturn power_off)(void);
18719 void (*shutdown)(void);
18720 void (*crash_shutdown)(struct pt_regs *);
18721- void (*emergency_restart)(void);
18722-};
18723+ void (* __noreturn emergency_restart)(void);
18724+} __no_const;
18725
18726 extern struct machine_ops machine_ops;
18727
18728diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18729index 8f7866a..e442f20 100644
18730--- a/arch/x86/include/asm/rmwcc.h
18731+++ b/arch/x86/include/asm/rmwcc.h
18732@@ -3,7 +3,34 @@
18733
18734 #ifdef CC_HAVE_ASM_GOTO
18735
18736-#define __GEN_RMWcc(fullop, var, cc, ...) \
18737+#ifdef CONFIG_PAX_REFCOUNT
18738+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18739+do { \
18740+ asm_volatile_goto (fullop \
18741+ ";jno 0f\n" \
18742+ fullantiop \
18743+ ";int $4\n0:\n" \
18744+ _ASM_EXTABLE(0b, 0b) \
18745+ ";j" cc " %l[cc_label]" \
18746+ : : "m" (var), ## __VA_ARGS__ \
18747+ : "memory" : cc_label); \
18748+ return 0; \
18749+cc_label: \
18750+ return 1; \
18751+} while (0)
18752+#else
18753+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18754+do { \
18755+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18756+ : : "m" (var), ## __VA_ARGS__ \
18757+ : "memory" : cc_label); \
18758+ return 0; \
18759+cc_label: \
18760+ return 1; \
18761+} while (0)
18762+#endif
18763+
18764+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18765 do { \
18766 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18767 : : "m" (var), ## __VA_ARGS__ \
18768@@ -13,15 +40,46 @@ cc_label: \
18769 return 1; \
18770 } while (0)
18771
18772-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18773- __GEN_RMWcc(op " " arg0, var, cc)
18774+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18775+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18776
18777-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18778- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18779+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18780+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18781+
18782+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18783+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18784+
18785+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18786+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18787
18788 #else /* !CC_HAVE_ASM_GOTO */
18789
18790-#define __GEN_RMWcc(fullop, var, cc, ...) \
18791+#ifdef CONFIG_PAX_REFCOUNT
18792+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18793+do { \
18794+ char c; \
18795+ asm volatile (fullop \
18796+ ";jno 0f\n" \
18797+ fullantiop \
18798+ ";int $4\n0:\n" \
18799+ _ASM_EXTABLE(0b, 0b) \
18800+ "; set" cc " %1" \
18801+ : "+m" (var), "=qm" (c) \
18802+ : __VA_ARGS__ : "memory"); \
18803+ return c != 0; \
18804+} while (0)
18805+#else
18806+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18807+do { \
18808+ char c; \
18809+ asm volatile (fullop "; set" cc " %1" \
18810+ : "+m" (var), "=qm" (c) \
18811+ : __VA_ARGS__ : "memory"); \
18812+ return c != 0; \
18813+} while (0)
18814+#endif
18815+
18816+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18817 do { \
18818 char c; \
18819 asm volatile (fullop "; set" cc " %1" \
18820@@ -30,11 +88,17 @@ do { \
18821 return c != 0; \
18822 } while (0)
18823
18824-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18825- __GEN_RMWcc(op " " arg0, var, cc)
18826+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18827+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18828+
18829+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18830+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18831+
18832+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18833+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18834
18835-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18836- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18837+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18838+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18839
18840 #endif /* CC_HAVE_ASM_GOTO */
18841
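
This rmwcc.h rewrite is the core of the PAX_REFCOUNT pattern repeated all over the patch: every checked read-modify-write takes both an op and an anti-op, and on signed overflow the anti-op undoes the operation before int $4 raises #OF for the PaX handler (the _ASM_EXTABLE entry resumes execution after the trap). A standalone sketch of what the asm-goto variant expands to for a checked decrement — an illustration only, since in userspace the int $4 simply kills the process:

	/* Sketch of GEN_UNARY_RMWcc("decl", "incl", v, ..., "e") under
	 * PAX_REFCOUNT, written out by hand. Requires GCC asm goto. */
	static inline int checked_dec_and_test(int *v)
	{
		asm volatile goto("lock decl %0\n\t"
				  "jno 0f\n\t"
				  "lock incl %0\n\t"	/* undo the overflowing op */
				  "int $4\n"		/* raise #OF */
				  "0:\n\t"
				  "je %l[became_zero]"
				  : : "m" (*v) : "memory", "cc" : became_zero);
		return 0;
	became_zero:
		return 1;
	}

The rwsem.h hunks that follow apply the same jno/undo/int $4 sequence to their hand-written xadd and add/sub sequences.
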
18842diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18843index cad82c9..2e5c5c1 100644
18844--- a/arch/x86/include/asm/rwsem.h
18845+++ b/arch/x86/include/asm/rwsem.h
18846@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18847 {
18848 asm volatile("# beginning down_read\n\t"
18849 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18850+
18851+#ifdef CONFIG_PAX_REFCOUNT
18852+ "jno 0f\n"
18853+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18854+ "int $4\n0:\n"
18855+ _ASM_EXTABLE(0b, 0b)
18856+#endif
18857+
18858 /* adds 0x00000001 */
18859 " jns 1f\n"
18860 " call call_rwsem_down_read_failed\n"
18861@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18862 "1:\n\t"
18863 " mov %1,%2\n\t"
18864 " add %3,%2\n\t"
18865+
18866+#ifdef CONFIG_PAX_REFCOUNT
18867+ "jno 0f\n"
18868+ "sub %3,%2\n"
18869+ "int $4\n0:\n"
18870+ _ASM_EXTABLE(0b, 0b)
18871+#endif
18872+
18873 " jle 2f\n\t"
18874 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18875 " jnz 1b\n\t"
18876@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18877 long tmp;
18878 asm volatile("# beginning down_write\n\t"
18879 LOCK_PREFIX " xadd %1,(%2)\n\t"
18880+
18881+#ifdef CONFIG_PAX_REFCOUNT
18882+ "jno 0f\n"
18883+ "mov %1,(%2)\n"
18884+ "int $4\n0:\n"
18885+ _ASM_EXTABLE(0b, 0b)
18886+#endif
18887+
18888 /* adds 0xffff0001, returns the old value */
18889 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18890 /* was the active mask 0 before? */
18891@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18892 long tmp;
18893 asm volatile("# beginning __up_read\n\t"
18894 LOCK_PREFIX " xadd %1,(%2)\n\t"
18895+
18896+#ifdef CONFIG_PAX_REFCOUNT
18897+ "jno 0f\n"
18898+ "mov %1,(%2)\n"
18899+ "int $4\n0:\n"
18900+ _ASM_EXTABLE(0b, 0b)
18901+#endif
18902+
18903 /* subtracts 1, returns the old value */
18904 " jns 1f\n\t"
18905 " call call_rwsem_wake\n" /* expects old value in %edx */
18906@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18907 long tmp;
18908 asm volatile("# beginning __up_write\n\t"
18909 LOCK_PREFIX " xadd %1,(%2)\n\t"
18910+
18911+#ifdef CONFIG_PAX_REFCOUNT
18912+ "jno 0f\n"
18913+ "mov %1,(%2)\n"
18914+ "int $4\n0:\n"
18915+ _ASM_EXTABLE(0b, 0b)
18916+#endif
18917+
18918 /* subtracts 0xffff0001, returns the old value */
18919 " jns 1f\n\t"
18920 " call call_rwsem_wake\n" /* expects old value in %edx */
18921@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18922 {
18923 asm volatile("# beginning __downgrade_write\n\t"
18924 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18925+
18926+#ifdef CONFIG_PAX_REFCOUNT
18927+ "jno 0f\n"
18928+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18929+ "int $4\n0:\n"
18930+ _ASM_EXTABLE(0b, 0b)
18931+#endif
18932+
18933 /*
18934 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18935 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18936@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18937 */
18938 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18939 {
18940- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18941+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18942+
18943+#ifdef CONFIG_PAX_REFCOUNT
18944+ "jno 0f\n"
18945+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18946+ "int $4\n0:\n"
18947+ _ASM_EXTABLE(0b, 0b)
18948+#endif
18949+
18950 : "+m" (sem->count)
18951 : "er" (delta));
18952 }
18953@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18954 */
18955 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18956 {
18957- return delta + xadd(&sem->count, delta);
18958+ return delta + xadd_check_overflow(&sem->count, delta);
18959 }
18960
18961 #endif /* __KERNEL__ */
18962diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18963index db257a5..b91bc77 100644
18964--- a/arch/x86/include/asm/segment.h
18965+++ b/arch/x86/include/asm/segment.h
18966@@ -73,10 +73,15 @@
18967 * 26 - ESPFIX small SS
18968 * 27 - per-cpu [ offset to per-cpu data area ]
18969 * 28 - stack_canary-20 [ for stack protector ]
18970- * 29 - unused
18971- * 30 - unused
18972+ * 29 - PCI BIOS CS
18973+ * 30 - PCI BIOS DS
18974 * 31 - TSS for double fault handler
18975 */
18976+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18977+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18978+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18979+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18980+
18981 #define GDT_ENTRY_TLS_MIN 6
18982 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18983
18984@@ -88,6 +93,8 @@
18985
18986 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18987
18988+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18989+
18990 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18991
18992 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18993@@ -113,6 +120,12 @@
18994 #define __KERNEL_STACK_CANARY 0
18995 #endif
18996
18997+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
18998+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
18999+
19000+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19001+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19002+
19003 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19004
19005 /*
19006@@ -140,7 +153,7 @@
19007 */
19008
19009 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19010-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19011+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19012
19013
19014 #else
19015@@ -164,6 +177,8 @@
19016 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19017 #define __USER32_DS __USER_DS
19018
19019+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19020+
19021 #define GDT_ENTRY_TSS 8 /* needs two entries */
19022 #define GDT_ENTRY_LDT 10 /* needs two entries */
19023 #define GDT_ENTRY_TLS_MIN 12
19024@@ -172,6 +187,8 @@
19025 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19026 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19027
19028+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19029+
19030 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19031 #define FS_TLS 0
19032 #define GS_TLS 1
19033@@ -179,12 +196,14 @@
19034 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19035 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19036
19037-#define GDT_ENTRIES 16
19038+#define GDT_ENTRIES 17
19039
19040 #endif
19041
19042 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19043+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19044 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19045+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19046 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19047 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19048 #ifndef CONFIG_PARAVIRT
19049@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19050 {
19051 unsigned long __limit;
19052 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19053- return __limit + 1;
19054+ return __limit;
19055 }
19056
19057 #endif /* !__ASSEMBLY__ */
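
segment.h grows several dedicated descriptors: an alternate kernel CS for KERNEXEC, a kernel DS for UDEREF, and real PCI BIOS code/data segments. The selector values follow from index*8 (RPL 0 for kernel segments); a quick check, assuming the mainline i386 GDT_ENTRY_KERNEL_BASE of 12:

	#include <stdio.h>

	#define GDT_ENTRY_KERNEL_BASE		12	/* assumption: i386 value of the era */
	#define GDT_ENTRY_KERNEXEC_KERNEL_CS	4
	#define GDT_ENTRY_PCIBIOS_CS		(GDT_ENTRY_KERNEL_BASE + 17)

	int main(void)
	{
		/* selector = index * 8; kernel segments use RPL 0 */
		printf("__KERNEXEC_KERNEL_CS = %#x\n", GDT_ENTRY_KERNEXEC_KERNEL_CS * 8);
		printf("__PCIBIOS_CS         = %#x\n", GDT_ENTRY_PCIBIOS_CS * 8);
		return 0;
	}
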
19058diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19059index 8d3120f..352b440 100644
19060--- a/arch/x86/include/asm/smap.h
19061+++ b/arch/x86/include/asm/smap.h
19062@@ -25,11 +25,40 @@
19063
19064 #include <asm/alternative-asm.h>
19065
19066+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19067+#define ASM_PAX_OPEN_USERLAND \
19068+ 661: jmp 663f; \
19069+ .pushsection .altinstr_replacement, "a" ; \
19070+ 662: pushq %rax; nop; \
19071+ .popsection ; \
19072+ .pushsection .altinstructions, "a" ; \
19073+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19074+ .popsection ; \
19075+ call __pax_open_userland; \
19076+ popq %rax; \
19077+ 663:
19078+
19079+#define ASM_PAX_CLOSE_USERLAND \
19080+ 661: jmp 663f; \
19081+ .pushsection .altinstr_replacement, "a" ; \
19082+ 662: pushq %rax; nop; \
19083+ .popsection; \
19084+ .pushsection .altinstructions, "a" ; \
19085+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19086+ .popsection; \
19087+ call __pax_close_userland; \
19088+ popq %rax; \
19089+ 663:
19090+#else
19091+#define ASM_PAX_OPEN_USERLAND
19092+#define ASM_PAX_CLOSE_USERLAND
19093+#endif
19094+
19095 #ifdef CONFIG_X86_SMAP
19096
19097 #define ASM_CLAC \
19098 661: ASM_NOP3 ; \
19099- .pushsection .altinstr_replacement, "ax" ; \
19100+ .pushsection .altinstr_replacement, "a" ; \
19101 662: __ASM_CLAC ; \
19102 .popsection ; \
19103 .pushsection .altinstructions, "a" ; \
19104@@ -38,7 +67,7 @@
19105
19106 #define ASM_STAC \
19107 661: ASM_NOP3 ; \
19108- .pushsection .altinstr_replacement, "ax" ; \
19109+ .pushsection .altinstr_replacement, "a" ; \
19110 662: __ASM_STAC ; \
19111 .popsection ; \
19112 .pushsection .altinstructions, "a" ; \
19113@@ -56,6 +85,37 @@
19114
19115 #include <asm/alternative.h>
19116
19117+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19118+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19119+
19120+extern void __pax_open_userland(void);
19121+static __always_inline unsigned long pax_open_userland(void)
19122+{
19123+
19124+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19125+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19126+ :
19127+ : [open] "i" (__pax_open_userland)
19128+ : "memory", "rax");
19129+#endif
19130+
19131+ return 0;
19132+}
19133+
19134+extern void __pax_close_userland(void);
19135+static __always_inline unsigned long pax_close_userland(void)
19136+{
19137+
19138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19139+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19140+ :
19141+ : [close] "i" (__pax_close_userland)
19142+ : "memory", "rax");
19143+#endif
19144+
19145+ return 0;
19146+}
19147+
19148 #ifdef CONFIG_X86_SMAP
19149
19150 static __always_inline void clac(void)
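
The C-side pax_open_userland()/pax_close_userland() above patch in a call via ALTERNATIVE only on CPUs with the PaX-defined X86_FEATURE_STRONGUDEREF bit, so the common case costs five NOPs. Conceptually every direct userland dereference ends up bracketed like the sketch below — in the patch itself the bracket is folded into the uaccess macros (see the uaccess.h hunks further down) rather than left to callers:

	/* Sketch of the bracketing pattern the helpers enable. */
	static inline int read_user_byte(const char __user *p, char *out)
	{
		int err;

		pax_open_userland();	/* NOPs unless STRONGUDEREF */
		err = __get_user(*out, p);
		pax_close_userland();
		return err;
	}
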
19151diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19152index 8cd1cc3..827e09e 100644
19153--- a/arch/x86/include/asm/smp.h
19154+++ b/arch/x86/include/asm/smp.h
19155@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19156 /* cpus sharing the last level cache: */
19157 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19158 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19159-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19160+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19161
19162 static inline struct cpumask *cpu_sibling_mask(int cpu)
19163 {
19164@@ -78,7 +78,7 @@ struct smp_ops {
19165
19166 void (*send_call_func_ipi)(const struct cpumask *mask);
19167 void (*send_call_func_single_ipi)(int cpu);
19168-};
19169+} __no_const;
19170
19171 /* Globals due to paravirt */
19172 extern void set_cpu_sibling_map(int cpu);
19173@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19174 extern int safe_smp_processor_id(void);
19175
19176 #elif defined(CONFIG_X86_64_SMP)
19177-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19178-
19179-#define stack_smp_processor_id() \
19180-({ \
19181- struct thread_info *ti; \
19182- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19183- ti->cpu; \
19184-})
19185+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19186+#define stack_smp_processor_id() raw_smp_processor_id()
19187 #define safe_smp_processor_id() smp_processor_id()
19188
19189 #endif
19190diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19191index 6a99859..03cb807 100644
19192--- a/arch/x86/include/asm/stackprotector.h
19193+++ b/arch/x86/include/asm/stackprotector.h
19194@@ -47,7 +47,7 @@
19195 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19196 */
19197 #define GDT_STACK_CANARY_INIT \
19198- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19199+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19200
19201 /*
19202 * Initialize the stackprotector canary value.
19203@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19204
19205 static inline void load_stack_canary_segment(void)
19206 {
19207-#ifdef CONFIG_X86_32
19208+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19209 asm volatile ("mov %0, %%gs" : : "r" (0));
19210 #endif
19211 }
19212diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19213index 70bbe39..4ae2bd4 100644
19214--- a/arch/x86/include/asm/stacktrace.h
19215+++ b/arch/x86/include/asm/stacktrace.h
19216@@ -11,28 +11,20 @@
19217
19218 extern int kstack_depth_to_print;
19219
19220-struct thread_info;
19221+struct task_struct;
19222 struct stacktrace_ops;
19223
19224-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19225- unsigned long *stack,
19226- unsigned long bp,
19227- const struct stacktrace_ops *ops,
19228- void *data,
19229- unsigned long *end,
19230- int *graph);
19231+typedef unsigned long walk_stack_t(struct task_struct *task,
19232+ void *stack_start,
19233+ unsigned long *stack,
19234+ unsigned long bp,
19235+ const struct stacktrace_ops *ops,
19236+ void *data,
19237+ unsigned long *end,
19238+ int *graph);
19239
19240-extern unsigned long
19241-print_context_stack(struct thread_info *tinfo,
19242- unsigned long *stack, unsigned long bp,
19243- const struct stacktrace_ops *ops, void *data,
19244- unsigned long *end, int *graph);
19245-
19246-extern unsigned long
19247-print_context_stack_bp(struct thread_info *tinfo,
19248- unsigned long *stack, unsigned long bp,
19249- const struct stacktrace_ops *ops, void *data,
19250- unsigned long *end, int *graph);
19251+extern walk_stack_t print_context_stack;
19252+extern walk_stack_t print_context_stack_bp;
19253
19254 /* Generic stack tracer with callbacks */
19255
19256@@ -40,7 +32,7 @@ struct stacktrace_ops {
19257 void (*address)(void *data, unsigned long address, int reliable);
19258 /* On negative return stop dumping */
19259 int (*stack)(void *data, char *name);
19260- walk_stack_t walk_stack;
19261+ walk_stack_t *walk_stack;
19262 };
19263
19264 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19265diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19266index 751bf4b..a1278b5 100644
19267--- a/arch/x86/include/asm/switch_to.h
19268+++ b/arch/x86/include/asm/switch_to.h
19269@@ -112,7 +112,7 @@ do { \
19270 "call __switch_to\n\t" \
19271 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19272 __switch_canary \
19273- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19274+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19275 "movq %%rax,%%rdi\n\t" \
19276 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19277 "jnz ret_from_fork\n\t" \
19278@@ -123,7 +123,7 @@ do { \
19279 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19280 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19281 [_tif_fork] "i" (_TIF_FORK), \
19282- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19283+ [thread_info] "m" (current_tinfo), \
19284 [current_task] "m" (current_task) \
19285 __switch_canary_iparam \
19286 : "memory", "cc" __EXTRA_CLOBBER)
19287diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19288index 1d4e4f2..506db18 100644
19289--- a/arch/x86/include/asm/thread_info.h
19290+++ b/arch/x86/include/asm/thread_info.h
19291@@ -24,7 +24,6 @@ struct exec_domain;
19292 #include <linux/atomic.h>
19293
19294 struct thread_info {
19295- struct task_struct *task; /* main task structure */
19296 struct exec_domain *exec_domain; /* execution domain */
19297 __u32 flags; /* low level flags */
19298 __u32 status; /* thread synchronous flags */
19299@@ -32,13 +31,13 @@ struct thread_info {
19300 int saved_preempt_count;
19301 mm_segment_t addr_limit;
19302 void __user *sysenter_return;
19303+ unsigned long lowest_stack;
19304 unsigned int sig_on_uaccess_error:1;
19305 unsigned int uaccess_err:1; /* uaccess failed */
19306 };
19307
19308-#define INIT_THREAD_INFO(tsk) \
19309+#define INIT_THREAD_INFO \
19310 { \
19311- .task = &tsk, \
19312 .exec_domain = &default_exec_domain, \
19313 .flags = 0, \
19314 .cpu = 0, \
19315@@ -46,7 +45,7 @@ struct thread_info {
19316 .addr_limit = KERNEL_DS, \
19317 }
19318
19319-#define init_thread_info (init_thread_union.thread_info)
19320+#define init_thread_info (init_thread_union.stack)
19321 #define init_stack (init_thread_union.stack)
19322
19323 #else /* !__ASSEMBLY__ */
19324@@ -86,6 +85,7 @@ struct thread_info {
19325 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19326 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19327 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19328+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19329
19330 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19331 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19332@@ -109,17 +109,18 @@ struct thread_info {
19333 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19334 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19335 #define _TIF_X32 (1 << TIF_X32)
19336+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19337
19338 /* work to do in syscall_trace_enter() */
19339 #define _TIF_WORK_SYSCALL_ENTRY \
19340 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19341 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19342- _TIF_NOHZ)
19343+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19344
19345 /* work to do in syscall_trace_leave() */
19346 #define _TIF_WORK_SYSCALL_EXIT \
19347 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19348- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19349+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19350
19351 /* work to do on interrupt/exception return */
19352 #define _TIF_WORK_MASK \
19353@@ -130,7 +131,7 @@ struct thread_info {
19354 /* work to do on any return to user space */
19355 #define _TIF_ALLWORK_MASK \
19356 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19357- _TIF_NOHZ)
19358+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19359
19360 /* Only used for 64 bit */
19361 #define _TIF_DO_NOTIFY_MASK \
19362@@ -145,7 +146,6 @@ struct thread_info {
19363 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19364
19365 #define STACK_WARN (THREAD_SIZE/8)
19366-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19367
19368 /*
19369 * macros/functions for gaining access to the thread information structure
19370@@ -156,12 +156,11 @@ struct thread_info {
19371
19372 DECLARE_PER_CPU(unsigned long, kernel_stack);
19373
19374+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19375+
19376 static inline struct thread_info *current_thread_info(void)
19377 {
19378- struct thread_info *ti;
19379- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19380- KERNEL_STACK_OFFSET - THREAD_SIZE);
19381- return ti;
19382+ return this_cpu_read_stable(current_tinfo);
19383 }
19384
19385 static inline unsigned long current_stack_pointer(void)
19386@@ -179,14 +178,7 @@ static inline unsigned long current_stack_pointer(void)
19387
19388 /* how to get the thread information struct from ASM */
19389 #define GET_THREAD_INFO(reg) \
19390- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19391- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19392-
19393-/*
19394- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19395- * a certain register (to be used in assembler memory operands).
19396- */
19397-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19398+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19399
19400 #endif
19401
19402@@ -242,5 +234,12 @@ static inline bool is_ia32_task(void)
19403 extern void arch_task_cache_init(void);
19404 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19405 extern void arch_release_task_struct(struct task_struct *tsk);
19406+
19407+#define __HAVE_THREAD_FUNCTIONS
19408+#define task_thread_info(task) (&(task)->tinfo)
19409+#define task_stack_page(task) ((task)->stack)
19410+#define setup_thread_stack(p, org) do {} while (0)
19411+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19412+
19413 #endif
19414 #endif /* _ASM_X86_THREAD_INFO_H */
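
The thread_info.h hunks relocate thread_info off the kernel stack and into task_struct (the new __HAVE_THREAD_FUNCTIONS block), so current_thread_info() becomes a single per-CPU load of current_tinfo instead of being derived from kernel_stack, and KERNEL_STACK_OFFSET disappears entirely. The hardening payoff, sketched:

	/*
	 * Layout change. Old: thread_info sat at the base of the stack
	 * allocation, so a deep overrun (stack grows down) could reach
	 * flags/addr_limit:
	 *
	 *	[ thread_info | kernel stack ...   <- sp grows down ]
	 *	^ task_stack_page(task)
	 *
	 * New (per the hunk above): the allocation holds only stack,
	 * thread_info is a task_struct member, and end_of_stack()
	 * degenerates to the base of the allocation:
	 *
	 *	[ kernel stack ...                 <- sp grows down ]
	 *	^ task_stack_page(task), end_of_stack(p) == base + 1
	 */
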
19415diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19416index cd79194..e7a9491 100644
19417--- a/arch/x86/include/asm/tlbflush.h
19418+++ b/arch/x86/include/asm/tlbflush.h
19419@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19420
19421 static inline void __native_flush_tlb(void)
19422 {
19423+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19424+ u64 descriptor[2];
19425+
19426+ descriptor[0] = PCID_KERNEL;
19427+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19428+ return;
19429+ }
19430+
19431+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19432+ if (static_cpu_has(X86_FEATURE_PCID)) {
19433+ unsigned int cpu = raw_get_cpu();
19434+
19435+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19436+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19437+ raw_put_cpu_no_resched();
19438+ return;
19439+ }
19440+#endif
19441+
19442 native_write_cr3(native_read_cr3());
19443 }
19444
19445 static inline void __native_flush_tlb_global_irq_disabled(void)
19446 {
19447- unsigned long cr4;
19448+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19449+ u64 descriptor[2];
19450
19451- cr4 = this_cpu_read(cpu_tlbstate.cr4);
19452- /* clear PGE */
19453- native_write_cr4(cr4 & ~X86_CR4_PGE);
19454- /* write old PGE again and flush TLBs */
19455- native_write_cr4(cr4);
19456+ descriptor[0] = PCID_KERNEL;
19457+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19458+ } else {
19459+ unsigned long cr4;
19460+
19461+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
19462+ /* clear PGE */
19463+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19464+ /* write old PGE again and flush TLBs */
19465+ native_write_cr4(cr4);
19466+ }
19467 }
19468
19469 static inline void __native_flush_tlb_global(void)
19470@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19471
19472 static inline void __native_flush_tlb_single(unsigned long addr)
19473 {
19474+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19475+ u64 descriptor[2];
19476+
19477+ descriptor[0] = PCID_KERNEL;
19478+ descriptor[1] = addr;
19479+
19480+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19481+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19482+ if (addr < TASK_SIZE_MAX)
19483+ descriptor[1] += pax_user_shadow_base;
19484+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19485+ }
19486+
19487+ descriptor[0] = PCID_USER;
19488+ descriptor[1] = addr;
19489+#endif
19490+
19491+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19492+ return;
19493+ }
19494+
19495+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19496+ if (static_cpu_has(X86_FEATURE_PCID)) {
19497+ unsigned int cpu = raw_get_cpu();
19498+
19499+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19500+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19501+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19502+ raw_put_cpu_no_resched();
19503+
19504+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19505+ addr += pax_user_shadow_base;
19506+ }
19507+#endif
19508+
19509 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19510 }
19511
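
The tlbflush.h rewrite that follows prefers INVPCID when available and falls back to PCID-tagged CR3 reloads. INVPCID takes its parameters from a 16-byte in-memory descriptor plus a register holding the invalidation type, which is exactly what the descriptor[2] arrays in those hunks build. A self-contained sketch of the instruction's interface, matching the .byte encoding defined in processor.h above (invpcid (%rdx),%rax):

	#include <stdint.h>

	struct invpcid_desc {
		uint64_t pcid;	/* bits 11:0 used; rest must be zero */
		uint64_t addr;	/* linear address, for type 0 only */
	};

	/* type: 0 = one address, 1 = one PCID, 2 = all incl. global,
	 * 3 = all non-global -- the INVPCID_* constants above. Executes
	 * only at CPL 0 on INVPCID-capable CPUs. */
	static inline void invpcid(uint64_t type, uint64_t pcid, uint64_t addr)
	{
		struct invpcid_desc desc = { .pcid = pcid, .addr = addr };

		asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"	/* invpcid (%rdx),%rax */
			     : : "d" (&desc), "a" (type) : "memory");
	}
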
19512diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19513index ace9dec..3f9e253 100644
19514--- a/arch/x86/include/asm/uaccess.h
19515+++ b/arch/x86/include/asm/uaccess.h
19516@@ -7,6 +7,7 @@
19517 #include <linux/compiler.h>
19518 #include <linux/thread_info.h>
19519 #include <linux/string.h>
19520+#include <linux/spinlock.h>
19521 #include <asm/asm.h>
19522 #include <asm/page.h>
19523 #include <asm/smap.h>
19524@@ -29,7 +30,12 @@
19525
19526 #define get_ds() (KERNEL_DS)
19527 #define get_fs() (current_thread_info()->addr_limit)
19528+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19529+void __set_fs(mm_segment_t x);
19530+void set_fs(mm_segment_t x);
19531+#else
19532 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19533+#endif
19534
19535 #define segment_eq(a, b) ((a).seg == (b).seg)
19536
19537@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19538 * checks that the pointer is in the user space range - after calling
19539 * this function, memory access functions may still return -EFAULT.
19540 */
19541-#define access_ok(type, addr, size) \
19542- likely(!__range_not_ok(addr, size, user_addr_max()))
19543+extern int _cond_resched(void);
19544+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19545+#define access_ok(type, addr, size) \
19546+({ \
19547+ unsigned long __size = size; \
19548+ unsigned long __addr = (unsigned long)addr; \
19549+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19550+ if (__ret_ao && __size) { \
19551+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19552+ unsigned long __end_ao = __addr + __size - 1; \
19553+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19554+ while (__addr_ao <= __end_ao) { \
19555+ char __c_ao; \
19556+ __addr_ao += PAGE_SIZE; \
19557+ if (__size > PAGE_SIZE) \
19558+ _cond_resched(); \
19559+ if (__get_user(__c_ao, (char __user *)__addr)) \
19560+ break; \
19561+ if (type != VERIFY_WRITE) { \
19562+ __addr = __addr_ao; \
19563+ continue; \
19564+ } \
19565+ if (__put_user(__c_ao, (char __user *)__addr)) \
19566+ break; \
19567+ __addr = __addr_ao; \
19568+ } \
19569+ } \
19570+ } \
19571+ __ret_ao; \
19572+})
19573
19574 /*
19575 * The exception table consists of pairs of addresses relative to the
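
The access_ok() rewrite just above stops being a pure range check: for ranges that cross a page boundary it walks the span one page at a time and touches a byte per page (__get_user, plus a __put_user of the same byte for VERIFY_WRITE), taking any faults eagerly and offering to reschedule between pages. Restated as a function for readability — a sketch, not the macro's literal code:

	/* Sketch of the prefault walk inside the access_ok() macro above. */
	static void prefault_user_range(int type, unsigned long addr,
					unsigned long size)
	{
		unsigned long page = addr & PAGE_MASK;
		unsigned long end  = addr + size - 1;
		char c;

		if (!((end ^ page) & PAGE_MASK))
			return;			/* single page: nothing to walk */

		while (page <= end) {
			page += PAGE_SIZE;
			if (size > PAGE_SIZE)
				_cond_resched();	/* long walks may reschedule */
			if (__get_user(c, (char __user *)addr))
				break;			/* unresolvable fault */
			if (type == VERIFY_WRITE &&
			    __put_user(c, (char __user *)addr))
				break;
			addr = page;		/* next byte to touch */
		}
	}
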
19576@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19577 extern int __get_user_bad(void);
19578
19579 /*
19580- * This is a type: either unsigned long, if the argument fits into
19581- * that type, or otherwise unsigned long long.
19582+ * This is a type: either (un)signed int, if the argument fits into
19583+ * that type, or otherwise (un)signed long long.
19584 */
19585 #define __inttype(x) \
19586-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19587+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19588+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19589+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19590
19591 /**
19592 * get_user: - Get a simple variable from user space.
19593@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19594 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19595 __chk_user_ptr(ptr); \
19596 might_fault(); \
19597+ pax_open_userland(); \
19598 asm volatile("call __get_user_%P3" \
19599 : "=a" (__ret_gu), "=r" (__val_gu) \
19600 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19601 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19602+ pax_close_userland(); \
19603 __ret_gu; \
19604 })
19605
19606@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19607 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19608 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19609
19610-
19611+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19612+#define __copyuser_seg "gs;"
19613+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19614+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19615+#else
19616+#define __copyuser_seg
19617+#define __COPYUSER_SET_ES
19618+#define __COPYUSER_RESTORE_ES
19619+#endif
19620
19621 #ifdef CONFIG_X86_32
19622 #define __put_user_asm_u64(x, addr, err, errret) \
19623 asm volatile(ASM_STAC "\n" \
19624- "1: movl %%eax,0(%2)\n" \
19625- "2: movl %%edx,4(%2)\n" \
19626+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19627+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19628 "3: " ASM_CLAC "\n" \
19629 ".section .fixup,\"ax\"\n" \
19630 "4: movl %3,%0\n" \
19631@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19632
19633 #define __put_user_asm_ex_u64(x, addr) \
19634 asm volatile(ASM_STAC "\n" \
19635- "1: movl %%eax,0(%1)\n" \
19636- "2: movl %%edx,4(%1)\n" \
19637+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19638+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19639 "3: " ASM_CLAC "\n" \
19640 _ASM_EXTABLE_EX(1b, 2b) \
19641 _ASM_EXTABLE_EX(2b, 3b) \
19642@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19643 __typeof__(*(ptr)) __pu_val; \
19644 __chk_user_ptr(ptr); \
19645 might_fault(); \
19646- __pu_val = x; \
19647+ __pu_val = (x); \
19648+ pax_open_userland(); \
19649 switch (sizeof(*(ptr))) { \
19650 case 1: \
19651 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19652@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19653 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19654 break; \
19655 } \
19656+ pax_close_userland(); \
19657 __ret_pu; \
19658 })
19659
19660@@ -355,8 +403,10 @@ do { \
19661 } while (0)
19662
19663 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19664+do { \
19665+ pax_open_userland(); \
19666 asm volatile(ASM_STAC "\n" \
19667- "1: mov"itype" %2,%"rtype"1\n" \
19668+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19669 "2: " ASM_CLAC "\n" \
19670 ".section .fixup,\"ax\"\n" \
19671 "3: mov %3,%0\n" \
19672@@ -364,8 +414,10 @@ do { \
19673 " jmp 2b\n" \
19674 ".previous\n" \
19675 _ASM_EXTABLE(1b, 3b) \
19676- : "=r" (err), ltype(x) \
19677- : "m" (__m(addr)), "i" (errret), "0" (err))
19678+ : "=r" (err), ltype (x) \
19679+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19680+ pax_close_userland(); \
19681+} while (0)
19682
19683 #define __get_user_size_ex(x, ptr, size) \
19684 do { \
19685@@ -389,7 +441,7 @@ do { \
19686 } while (0)
19687
19688 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19689- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19690+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19691 "2:\n" \
19692 _ASM_EXTABLE_EX(1b, 2b) \
19693 : ltype(x) : "m" (__m(addr)))
19694@@ -406,13 +458,24 @@ do { \
19695 int __gu_err; \
19696 unsigned long __gu_val; \
19697 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19698- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19699+ (x) = (__typeof__(*(ptr)))__gu_val; \
19700 __gu_err; \
19701 })
19702
19703 /* FIXME: this hack is definitely wrong -AK */
19704 struct __large_struct { unsigned long buf[100]; };
19705-#define __m(x) (*(struct __large_struct __user *)(x))
19706+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19707+#define ____m(x) \
19708+({ \
19709+ unsigned long ____x = (unsigned long)(x); \
19710+ if (____x < pax_user_shadow_base) \
19711+ ____x += pax_user_shadow_base; \
19712+ (typeof(x))____x; \
19713+})
19714+#else
19715+#define ____m(x) (x)
19716+#endif
19717+#define __m(x) (*(struct __large_struct __user *)____m(x))
19718
19719 /*
19720 * Tell gcc we read from memory instead of writing: this is because
19721@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19722 * aliasing issues.
19723 */
19724 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19725+do { \
19726+ pax_open_userland(); \
19727 asm volatile(ASM_STAC "\n" \
19728- "1: mov"itype" %"rtype"1,%2\n" \
19729+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19730 "2: " ASM_CLAC "\n" \
19731 ".section .fixup,\"ax\"\n" \
19732 "3: mov %3,%0\n" \
19733@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19734 ".previous\n" \
19735 _ASM_EXTABLE(1b, 3b) \
19736 : "=r"(err) \
19737- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19738+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19739+ pax_close_userland(); \
19740+} while (0)
19741
19742 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19743- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19744+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19745 "2:\n" \
19746 _ASM_EXTABLE_EX(1b, 2b) \
19747 : : ltype(x), "m" (__m(addr)))
19748@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19749 */
19750 #define uaccess_try do { \
19751 current_thread_info()->uaccess_err = 0; \
19752+ pax_open_userland(); \
19753 stac(); \
19754 barrier();
19755
19756 #define uaccess_catch(err) \
19757 clac(); \
19758+ pax_close_userland(); \
19759 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19760 } while (0)
19761
19762@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19763 * On error, the variable @x is set to zero.
19764 */
19765
19766+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19767+#define __get_user(x, ptr) get_user((x), (ptr))
19768+#else
19769 #define __get_user(x, ptr) \
19770 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19771+#endif
19772
19773 /**
19774 * __put_user: - Write a simple value into user space, with less checking.
19775@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19776 * Returns zero on success, or -EFAULT on error.
19777 */
19778
19779+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19780+#define __put_user(x, ptr) put_user((x), (ptr))
19781+#else
19782 #define __put_user(x, ptr) \
19783 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19784+#endif
19785
19786 #define __get_user_unaligned __get_user
19787 #define __put_user_unaligned __put_user
19788@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19789 #define get_user_ex(x, ptr) do { \
19790 unsigned long __gue_val; \
19791 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19792- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19793+ (x) = (__typeof__(*(ptr)))__gue_val; \
19794 } while (0)
19795
19796 #define put_user_try uaccess_try
19797@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19798 extern __must_check long strnlen_user(const char __user *str, long n);
19799
19800 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19801-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19802+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19803
19804 extern void __cmpxchg_wrong_size(void)
19805 __compiletime_error("Bad argument size for cmpxchg");
19806@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19807 __typeof__(ptr) __uval = (uval); \
19808 __typeof__(*(ptr)) __old = (old); \
19809 __typeof__(*(ptr)) __new = (new); \
19810+ pax_open_userland(); \
19811 switch (size) { \
19812 case 1: \
19813 { \
19814 asm volatile("\t" ASM_STAC "\n" \
19815- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19816+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19817 "2:\t" ASM_CLAC "\n" \
19818 "\t.section .fixup, \"ax\"\n" \
19819 "3:\tmov %3, %0\n" \
19820 "\tjmp 2b\n" \
19821 "\t.previous\n" \
19822 _ASM_EXTABLE(1b, 3b) \
19823- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19824+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19825 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19826 : "memory" \
19827 ); \
19828@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19829 case 2: \
19830 { \
19831 asm volatile("\t" ASM_STAC "\n" \
19832- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19833+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19834 "2:\t" ASM_CLAC "\n" \
19835 "\t.section .fixup, \"ax\"\n" \
19836 "3:\tmov %3, %0\n" \
19837 "\tjmp 2b\n" \
19838 "\t.previous\n" \
19839 _ASM_EXTABLE(1b, 3b) \
19840- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19841+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19842 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19843 : "memory" \
19844 ); \
19845@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19846 case 4: \
19847 { \
19848 asm volatile("\t" ASM_STAC "\n" \
19849- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19850+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19851 "2:\t" ASM_CLAC "\n" \
19852 "\t.section .fixup, \"ax\"\n" \
19853 "3:\tmov %3, %0\n" \
19854 "\tjmp 2b\n" \
19855 "\t.previous\n" \
19856 _ASM_EXTABLE(1b, 3b) \
19857- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19858+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19859 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19860 : "memory" \
19861 ); \
19862@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19863 __cmpxchg_wrong_size(); \
19864 \
19865 asm volatile("\t" ASM_STAC "\n" \
19866- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19867+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19868 "2:\t" ASM_CLAC "\n" \
19869 "\t.section .fixup, \"ax\"\n" \
19870 "3:\tmov %3, %0\n" \
19871 "\tjmp 2b\n" \
19872 "\t.previous\n" \
19873 _ASM_EXTABLE(1b, 3b) \
19874- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19875+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19876 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19877 : "memory" \
19878 ); \
19879@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19880 default: \
19881 __cmpxchg_wrong_size(); \
19882 } \
19883+ pax_close_userland(); \
19884 *__uval = __old; \
19885 __ret; \
19886 })
19887@@ -636,17 +715,6 @@ extern struct movsl_mask {
19888
19889 #define ARCH_HAS_NOCACHE_UACCESS 1
19890
19891-#ifdef CONFIG_X86_32
19892-# include <asm/uaccess_32.h>
19893-#else
19894-# include <asm/uaccess_64.h>
19895-#endif
19896-
19897-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19898- unsigned n);
19899-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19900- unsigned n);
19901-
19902 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19903 # define copy_user_diag __compiletime_error
19904 #else
19905@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19906 extern void copy_user_diag("copy_from_user() buffer size is too small")
19907 copy_from_user_overflow(void);
19908 extern void copy_user_diag("copy_to_user() buffer size is too small")
19909-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19910+copy_to_user_overflow(void);
19911
19912 #undef copy_user_diag
19913
19914@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19915
19916 extern void
19917 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19918-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19919+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19920 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19921
19922 #else
19923@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19924
19925 #endif
19926
19927+#ifdef CONFIG_X86_32
19928+# include <asm/uaccess_32.h>
19929+#else
19930+# include <asm/uaccess_64.h>
19931+#endif
19932+
19933 static inline unsigned long __must_check
19934 copy_from_user(void *to, const void __user *from, unsigned long n)
19935 {
19936- int sz = __compiletime_object_size(to);
19937+ size_t sz = __compiletime_object_size(to);
19938
19939 might_fault();
19940
19941@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19942 * case, and do only runtime checking for non-constant sizes.
19943 */
19944
19945- if (likely(sz < 0 || sz >= n))
19946- n = _copy_from_user(to, from, n);
19947- else if(__builtin_constant_p(n))
19948- copy_from_user_overflow();
19949- else
19950- __copy_from_user_overflow(sz, n);
19951+ if (likely(sz != (size_t)-1 && sz < n)) {
19952+ if(__builtin_constant_p(n))
19953+ copy_from_user_overflow();
19954+ else
19955+ __copy_from_user_overflow(sz, n);
19956+ } else if (access_ok(VERIFY_READ, from, n))
19957+ n = __copy_from_user(to, from, n);
19958+ else if ((long)n > 0)
19959+ memset(to, 0, n);
19960
19961 return n;
19962 }
19963@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19964 static inline unsigned long __must_check
19965 copy_to_user(void __user *to, const void *from, unsigned long n)
19966 {
19967- int sz = __compiletime_object_size(from);
19968+ size_t sz = __compiletime_object_size(from);
19969
19970 might_fault();
19971
19972 /* See the comment in copy_from_user() above. */
19973- if (likely(sz < 0 || sz >= n))
19974- n = _copy_to_user(to, from, n);
19975- else if(__builtin_constant_p(n))
19976- copy_to_user_overflow();
19977- else
19978- __copy_to_user_overflow(sz, n);
19979+ if (likely(sz != (size_t)-1 && sz < n)) {
19980+ if(__builtin_constant_p(n))
19981+ copy_to_user_overflow();
19982+ else
19983+ __copy_to_user_overflow(sz, n);
19984+ } else if (access_ok(VERIFY_WRITE, to, n))
19985+ n = __copy_to_user(to, from, n);
19986
19987 return n;
19988 }
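
The reworked copy_from_user()/copy_to_user() above inverts the old flow: a provably undersized destination now aborts the copy outright, and a failed access_ok() leaves the caller with a zeroed buffer instead of stale stack contents. A minimal user-space sketch of that decision tree; access_ok_stub() and raw_copy_stub() are hypothetical stand-ins for the kernel's access_ok()/__copy_from_user():

#include <stdio.h>
#include <string.h>
#include <stddef.h>

static int access_ok_stub(const void *p, size_t n)
{
        (void)n;
        return p != NULL;               /* stands in for access_ok() */
}

static size_t raw_copy_stub(void *to, const void *from, size_t n)
{
        memcpy(to, from, n);            /* pretend the user copy succeeded */
        return 0;                       /* 0 bytes left uncopied */
}

static size_t checked_copy(void *to, size_t objsz, const void *from, size_t n)
{
        if (objsz != (size_t)-1 && objsz < n)
                return n;               /* provably undersized target: refuse */
        if (access_ok_stub(from, n))
                return raw_copy_stub(to, from, n);
        if ((long)n > 0)                /* bad source range: scrub the target */
                memset(to, 0, n);
        return n;
}

int main(void)
{
        char dst[8];

        printf("left over: %zu\n", checked_copy(dst, sizeof(dst), "hi", 3));
        printf("refused:   %zu\n", checked_copy(dst, sizeof(dst), "hi", 64));
        return 0;
}
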
19989diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19990index 3c03a5d..edb68ae 100644
19991--- a/arch/x86/include/asm/uaccess_32.h
19992+++ b/arch/x86/include/asm/uaccess_32.h
19993@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19994 * anything, so this is accurate.
19995 */
19996
19997-static __always_inline unsigned long __must_check
19998+static __always_inline __size_overflow(3) unsigned long __must_check
19999 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20000 {
20001+ if ((long)n < 0)
20002+ return n;
20003+
20004+ check_object_size(from, n, true);
20005+
20006 if (__builtin_constant_p(n)) {
20007 unsigned long ret;
20008
20009@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20010 __copy_to_user(void __user *to, const void *from, unsigned long n)
20011 {
20012 might_fault();
20013+
20014 return __copy_to_user_inatomic(to, from, n);
20015 }
20016
20017-static __always_inline unsigned long
20018+static __always_inline __size_overflow(3) unsigned long
20019 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20020 {
20021+ if ((long)n < 0)
20022+ return n;
20023+
20024 /* Avoid zeroing the tail if the copy fails..
20025 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20026 * but as the zeroing behaviour is only significant when n is not
20027@@ -137,6 +146,12 @@ static __always_inline unsigned long
20028 __copy_from_user(void *to, const void __user *from, unsigned long n)
20029 {
20030 might_fault();
20031+
20032+ if ((long)n < 0)
20033+ return n;
20034+
20035+ check_object_size(to, n, false);
20036+
20037 if (__builtin_constant_p(n)) {
20038 unsigned long ret;
20039
20040@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20041 const void __user *from, unsigned long n)
20042 {
20043 might_fault();
20044+
20045+ if ((long)n < 0)
20046+ return n;
20047+
20048 if (__builtin_constant_p(n)) {
20049 unsigned long ret;
20050
20051@@ -181,7 +200,10 @@ static __always_inline unsigned long
20052 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20053 unsigned long n)
20054 {
20055- return __copy_from_user_ll_nocache_nozero(to, from, n);
20056+ if ((long)n < 0)
20057+ return n;
20058+
20059+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20060 }
20061
20062 #endif /* _ASM_X86_UACCESS_32_H */
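
The recurring `(long)n < 0` guard added throughout uaccess_32.h treats any length with the top bit set as an error value rather than a plausible copy size, bouncing it back to the caller unmodified. A two-line sketch of the idea:

#include <stdio.h>

int main(void)
{
        unsigned long n = (unsigned long)-4;    /* a length that underflowed */

        if ((long)n < 0)        /* top bit set: cannot be a real copy size */
                printf("rejected bogus length %lu\n", n);
        return 0;
}
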
20063diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20064index f2f9b39..2ae1bf8 100644
20065--- a/arch/x86/include/asm/uaccess_64.h
20066+++ b/arch/x86/include/asm/uaccess_64.h
20067@@ -10,6 +10,9 @@
20068 #include <asm/alternative.h>
20069 #include <asm/cpufeature.h>
20070 #include <asm/page.h>
20071+#include <asm/pgtable.h>
20072+
20073+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20074
20075 /*
20076 * Copy To/From Userspace
20077@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20078 __must_check unsigned long
20079 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20080
20081-static __always_inline __must_check unsigned long
20082-copy_user_generic(void *to, const void *from, unsigned len)
20083+static __always_inline __must_check __size_overflow(3) unsigned long
20084+copy_user_generic(void *to, const void *from, unsigned long len)
20085 {
20086 unsigned ret;
20087
20088@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20089 }
20090
20091 __must_check unsigned long
20092-copy_in_user(void __user *to, const void __user *from, unsigned len);
20093+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20094
20095 static __always_inline __must_check
20096-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20097+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20098 {
20099- int ret = 0;
20100+ size_t sz = __compiletime_object_size(dst);
20101+ unsigned ret = 0;
20102+
20103+ if (size > INT_MAX)
20104+ return size;
20105+
20106+ check_object_size(dst, size, false);
20107+
20108+#ifdef CONFIG_PAX_MEMORY_UDEREF
20109+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20110+ return size;
20111+#endif
20112+
20113+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20114+ if(__builtin_constant_p(size))
20115+ copy_from_user_overflow();
20116+ else
20117+ __copy_from_user_overflow(sz, size);
20118+ return size;
20119+ }
20120
20121 if (!__builtin_constant_p(size))
20122- return copy_user_generic(dst, (__force void *)src, size);
20123+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20124 switch (size) {
20125- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20126+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20127 ret, "b", "b", "=q", 1);
20128 return ret;
20129- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20130+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20131 ret, "w", "w", "=r", 2);
20132 return ret;
20133- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20134+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20135 ret, "l", "k", "=r", 4);
20136 return ret;
20137- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20138+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20139 ret, "q", "", "=r", 8);
20140 return ret;
20141 case 10:
20142- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20143+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20144 ret, "q", "", "=r", 10);
20145 if (unlikely(ret))
20146 return ret;
20147 __get_user_asm(*(u16 *)(8 + (char *)dst),
20148- (u16 __user *)(8 + (char __user *)src),
20149+ (const u16 __user *)(8 + (const char __user *)src),
20150 ret, "w", "w", "=r", 2);
20151 return ret;
20152 case 16:
20153- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20154+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20155 ret, "q", "", "=r", 16);
20156 if (unlikely(ret))
20157 return ret;
20158 __get_user_asm(*(u64 *)(8 + (char *)dst),
20159- (u64 __user *)(8 + (char __user *)src),
20160+ (const u64 __user *)(8 + (const char __user *)src),
20161 ret, "q", "", "=r", 8);
20162 return ret;
20163 default:
20164- return copy_user_generic(dst, (__force void *)src, size);
20165+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20166 }
20167 }
20168
20169 static __always_inline __must_check
20170-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20171+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20172 {
20173 might_fault();
20174 return __copy_from_user_nocheck(dst, src, size);
20175 }
20176
20177 static __always_inline __must_check
20178-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20179+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20180 {
20181- int ret = 0;
20182+ size_t sz = __compiletime_object_size(src);
20183+ unsigned ret = 0;
20184+
20185+ if (size > INT_MAX)
20186+ return size;
20187+
20188+ check_object_size(src, size, true);
20189+
20190+#ifdef CONFIG_PAX_MEMORY_UDEREF
20191+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20192+ return size;
20193+#endif
20194+
20195+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20196+ if(__builtin_constant_p(size))
20197+ copy_to_user_overflow();
20198+ else
20199+ __copy_to_user_overflow(sz, size);
20200+ return size;
20201+ }
20202
20203 if (!__builtin_constant_p(size))
20204- return copy_user_generic((__force void *)dst, src, size);
20205+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20206 switch (size) {
20207- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20208+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20209 ret, "b", "b", "iq", 1);
20210 return ret;
20211- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20212+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20213 ret, "w", "w", "ir", 2);
20214 return ret;
20215- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20216+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20217 ret, "l", "k", "ir", 4);
20218 return ret;
20219- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20220+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20221 ret, "q", "", "er", 8);
20222 return ret;
20223 case 10:
20224- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20225+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20226 ret, "q", "", "er", 10);
20227 if (unlikely(ret))
20228 return ret;
20229 asm("":::"memory");
20230- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20231+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20232 ret, "w", "w", "ir", 2);
20233 return ret;
20234 case 16:
20235- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20236+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20237 ret, "q", "", "er", 16);
20238 if (unlikely(ret))
20239 return ret;
20240 asm("":::"memory");
20241- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20242+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20243 ret, "q", "", "er", 8);
20244 return ret;
20245 default:
20246- return copy_user_generic((__force void *)dst, src, size);
20247+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20248 }
20249 }
20250
20251 static __always_inline __must_check
20252-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20253+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20254 {
20255 might_fault();
20256 return __copy_to_user_nocheck(dst, src, size);
20257 }
20258
20259 static __always_inline __must_check
20260-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20261+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20262 {
20263- int ret = 0;
20264+ unsigned ret = 0;
20265
20266 might_fault();
20267+
20268+ if (size > INT_MAX)
20269+ return size;
20270+
20271+#ifdef CONFIG_PAX_MEMORY_UDEREF
20272+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20273+ return size;
20274+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20275+ return size;
20276+#endif
20277+
20278 if (!__builtin_constant_p(size))
20279- return copy_user_generic((__force void *)dst,
20280- (__force void *)src, size);
20281+ return copy_user_generic((__force_kernel void *)____m(dst),
20282+ (__force_kernel const void *)____m(src), size);
20283 switch (size) {
20284 case 1: {
20285 u8 tmp;
20286- __get_user_asm(tmp, (u8 __user *)src,
20287+ __get_user_asm(tmp, (const u8 __user *)src,
20288 ret, "b", "b", "=q", 1);
20289 if (likely(!ret))
20290 __put_user_asm(tmp, (u8 __user *)dst,
20291@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20292 }
20293 case 2: {
20294 u16 tmp;
20295- __get_user_asm(tmp, (u16 __user *)src,
20296+ __get_user_asm(tmp, (const u16 __user *)src,
20297 ret, "w", "w", "=r", 2);
20298 if (likely(!ret))
20299 __put_user_asm(tmp, (u16 __user *)dst,
20300@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20301
20302 case 4: {
20303 u32 tmp;
20304- __get_user_asm(tmp, (u32 __user *)src,
20305+ __get_user_asm(tmp, (const u32 __user *)src,
20306 ret, "l", "k", "=r", 4);
20307 if (likely(!ret))
20308 __put_user_asm(tmp, (u32 __user *)dst,
20309@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20310 }
20311 case 8: {
20312 u64 tmp;
20313- __get_user_asm(tmp, (u64 __user *)src,
20314+ __get_user_asm(tmp, (const u64 __user *)src,
20315 ret, "q", "", "=r", 8);
20316 if (likely(!ret))
20317 __put_user_asm(tmp, (u64 __user *)dst,
20318@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20319 return ret;
20320 }
20321 default:
20322- return copy_user_generic((__force void *)dst,
20323- (__force void *)src, size);
20324+ return copy_user_generic((__force_kernel void *)____m(dst),
20325+ (__force_kernel const void *)____m(src), size);
20326 }
20327 }
20328
20329-static __must_check __always_inline int
20330-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20331+static __must_check __always_inline unsigned long
20332+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20333 {
20334 return __copy_from_user_nocheck(dst, src, size);
20335 }
20336
20337-static __must_check __always_inline int
20338-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20339+static __must_check __always_inline unsigned long
20340+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20341 {
20342 return __copy_to_user_nocheck(dst, src, size);
20343 }
20344
20345-extern long __copy_user_nocache(void *dst, const void __user *src,
20346- unsigned size, int zerorest);
20347+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20348+ unsigned long size, int zerorest);
20349
20350-static inline int
20351-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20352+static inline unsigned long
20353+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20354 {
20355 might_fault();
20356+
20357+ if (size > INT_MAX)
20358+ return size;
20359+
20360+#ifdef CONFIG_PAX_MEMORY_UDEREF
20361+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20362+ return size;
20363+#endif
20364+
20365 return __copy_user_nocache(dst, src, size, 1);
20366 }
20367
20368-static inline int
20369+static inline unsigned long
20370 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20371- unsigned size)
20372+ unsigned long size)
20373 {
20374+ if (size > INT_MAX)
20375+ return size;
20376+
20377+#ifdef CONFIG_PAX_MEMORY_UDEREF
20378+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20379+ return size;
20380+#endif
20381+
20382 return __copy_user_nocache(dst, src, size, 0);
20383 }
20384
20385 unsigned long
20386-copy_user_handle_tail(char *to, char *from, unsigned len);
20387+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20388
20389 #endif /* _ASM_X86_UACCESS_64_H */
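
The __compiletime_object_size() checks added above wrap GCC's __builtin_object_size(), whose (size_t)-1 sentinel means "size unknown"; that is why the `sz != (size_t)-1 && sz < size` test only fires for provably undersized objects. A minimal sketch (build with -O1 or higher so the builtin can resolve):

#include <stdio.h>
#include <stddef.h>

__attribute__((noinline))
static size_t objsize_of(char *p)
{
        /* through an opaque pointer the size is unknown: (size_t)-1 */
        return __builtin_object_size(p, 0);
}

int main(void)
{
        char buf[16];

        printf("known:   %zu\n", (size_t)__builtin_object_size(buf, 0));
        printf("unknown: %zu\n", objsize_of(buf));
        return 0;
}
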
20390diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20391index 5b238981..77fdd78 100644
20392--- a/arch/x86/include/asm/word-at-a-time.h
20393+++ b/arch/x86/include/asm/word-at-a-time.h
20394@@ -11,7 +11,7 @@
20395 * and shift, for example.
20396 */
20397 struct word_at_a_time {
20398- const unsigned long one_bits, high_bits;
20399+ unsigned long one_bits, high_bits;
20400 };
20401
20402 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20403diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20404index f58a9c7..dc378042a 100644
20405--- a/arch/x86/include/asm/x86_init.h
20406+++ b/arch/x86/include/asm/x86_init.h
20407@@ -129,7 +129,7 @@ struct x86_init_ops {
20408 struct x86_init_timers timers;
20409 struct x86_init_iommu iommu;
20410 struct x86_init_pci pci;
20411-};
20412+} __no_const;
20413
20414 /**
20415 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20416@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20417 void (*setup_percpu_clockev)(void);
20418 void (*early_percpu_clock_init)(void);
20419 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20420-};
20421+} __no_const;
20422
20423 struct timespec;
20424
20425@@ -168,7 +168,7 @@ struct x86_platform_ops {
20426 void (*save_sched_clock_state)(void);
20427 void (*restore_sched_clock_state)(void);
20428 void (*apic_post_init)(void);
20429-};
20430+} __no_const;
20431
20432 struct pci_dev;
20433 struct msi_msg;
20434@@ -182,7 +182,7 @@ struct x86_msi_ops {
20435 void (*teardown_msi_irqs)(struct pci_dev *dev);
20436 void (*restore_msi_irqs)(struct pci_dev *dev);
20437 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20438-};
20439+} __no_const;
20440
20441 struct IO_APIC_route_entry;
20442 struct io_apic_irq_attr;
20443@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20444 unsigned int destination, int vector,
20445 struct io_apic_irq_attr *attr);
20446 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20447-};
20448+} __no_const;
20449
20450 extern struct x86_init_ops x86_init;
20451 extern struct x86_cpuinit_ops x86_cpuinit;
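
__no_const marks ops tables that the kernel must rewrite at runtime, exempting them from PaX's constify plugin; tables that are never patched can be const and live in read-only memory, where stray writes fault. A minimal sketch of the distinction, with illustrative names:

#include <stdio.h>

struct ops {
        void (*greet)(void);
};

static void hello(void) { puts("hello"); }
static void quiet(void) { }

static const struct ops fixed_ops = { .greet = hello }; /* lands in .rodata */
static struct ops patched_ops = { .greet = hello };     /* must stay writable */

int main(void)
{
        fixed_ops.greet();
        patched_ops.greet = quiet;      /* runtime patching, as x86_init needs */
        patched_ops.greet();
        return 0;
}
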
20452diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20453index 358dcd3..23c0bf1 100644
20454--- a/arch/x86/include/asm/xen/page.h
20455+++ b/arch/x86/include/asm/xen/page.h
20456@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20457 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20458 * cases needing an extended handling.
20459 */
20460-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20461+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20462 {
20463 unsigned long mfn;
20464
20465diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20466index c9a6d68..cb57f42 100644
20467--- a/arch/x86/include/asm/xsave.h
20468+++ b/arch/x86/include/asm/xsave.h
20469@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20470 if (unlikely(err))
20471 return -EFAULT;
20472
20473+ pax_open_userland();
20474 __asm__ __volatile__(ASM_STAC "\n"
20475- "1:"XSAVE"\n"
20476+ "1:"
20477+ __copyuser_seg
20478+ XSAVE"\n"
20479 "2: " ASM_CLAC "\n"
20480 xstate_fault
20481 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20482 : "memory");
20483+ pax_close_userland();
20484 return err;
20485 }
20486
20487@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20488 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20489 {
20490 int err = 0;
20491- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20492+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20493 u32 lmask = mask;
20494 u32 hmask = mask >> 32;
20495
20496+ pax_open_userland();
20497 __asm__ __volatile__(ASM_STAC "\n"
20498- "1:"XRSTOR"\n"
20499+ "1:"
20500+ __copyuser_seg
20501+ XRSTOR"\n"
20502 "2: " ASM_CLAC "\n"
20503 xstate_fault
20504 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20505 : "memory"); /* memory required? */
20506+ pax_close_userland();
20507 return err;
20508 }
20509
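
The xsave_user()/xrestore_user() hunks bracket each user-memory-touching asm block with pax_open_userland()/pax_close_userland(), so the UDEREF access window is only open for the duration of the instruction. A stub sketch of the bracketing pattern; open_userland()/close_userland() are hypothetical stand-ins:

#include <stdio.h>
#include <string.h>

static void open_userland(void)  { puts("userland window opened"); }
static void close_userland(void) { puts("userland window closed"); }

static int save_state(void *ubuf, const void *state, size_t len)
{
        open_userland();                /* pax_open_userland() */
        memcpy(ubuf, state, len);       /* stands in for the XSAVE asm */
        close_userland();               /* pax_close_userland() */
        return 0;
}

int main(void)
{
        char user_buf[8];
        const char state[8] = "xstate";

        return save_state(user_buf, state, sizeof(state));
}
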
20510diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20511index d993e33..8db1b18 100644
20512--- a/arch/x86/include/uapi/asm/e820.h
20513+++ b/arch/x86/include/uapi/asm/e820.h
20514@@ -58,7 +58,7 @@ struct e820map {
20515 #define ISA_START_ADDRESS 0xa0000
20516 #define ISA_END_ADDRESS 0x100000
20517
20518-#define BIOS_BEGIN 0x000a0000
20519+#define BIOS_BEGIN 0x000c0000
20520 #define BIOS_END 0x00100000
20521
20522 #define BIOS_ROM_BASE 0xffe00000
20523diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20524index 7b0a55a..ad115bf 100644
20525--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20526+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20527@@ -49,7 +49,6 @@
20528 #define EFLAGS 144
20529 #define RSP 152
20530 #define SS 160
20531-#define ARGOFFSET R11
20532 #endif /* __ASSEMBLY__ */
20533
20534 /* top of stack page */
20535diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20536index cdb1b70..426434c 100644
20537--- a/arch/x86/kernel/Makefile
20538+++ b/arch/x86/kernel/Makefile
20539@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20540 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20541 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20542 obj-y += probe_roms.o
20543-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20544+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20545 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20546 obj-$(CONFIG_X86_64) += mcount_64.o
20547 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20548diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20549index 803b684..68c64f1 100644
20550--- a/arch/x86/kernel/acpi/boot.c
20551+++ b/arch/x86/kernel/acpi/boot.c
20552@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20553 * If your system is blacklisted here, but you find that acpi=force
20554 * works for you, please contact linux-acpi@vger.kernel.org
20555 */
20556-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20557+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20558 /*
20559 * Boxes that need ACPI disabled
20560 */
20561@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20562 };
20563
20564 /* second table for DMI checks that should run after early-quirks */
20565-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20566+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20567 /*
20568 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20569 * which includes some code which overrides all temperature
20570diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20571index d1daead..acd77e2 100644
20572--- a/arch/x86/kernel/acpi/sleep.c
20573+++ b/arch/x86/kernel/acpi/sleep.c
20574@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20575 #else /* CONFIG_64BIT */
20576 #ifdef CONFIG_SMP
20577 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20578+
20579+ pax_open_kernel();
20580 early_gdt_descr.address =
20581 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20582+ pax_close_kernel();
20583+
20584 initial_gs = per_cpu_offset(smp_processor_id());
20585 #endif
20586 initial_code = (unsigned long)wakeup_long64;
20587diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20588index 665c6b7..eae4d56 100644
20589--- a/arch/x86/kernel/acpi/wakeup_32.S
20590+++ b/arch/x86/kernel/acpi/wakeup_32.S
20591@@ -29,13 +29,11 @@ wakeup_pmode_return:
20592 # and restore the stack ... but you need gdt for this to work
20593 movl saved_context_esp, %esp
20594
20595- movl %cs:saved_magic, %eax
20596- cmpl $0x12345678, %eax
20597+ cmpl $0x12345678, saved_magic
20598 jne bogus_magic
20599
20600 # jump to place where we left off
20601- movl saved_eip, %eax
20602- jmp *%eax
20603+ jmp *(saved_eip)
20604
20605 bogus_magic:
20606 jmp bogus_magic
20607diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20608index 703130f..27a155d 100644
20609--- a/arch/x86/kernel/alternative.c
20610+++ b/arch/x86/kernel/alternative.c
20611@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20612 */
20613 for (a = start; a < end; a++) {
20614 instr = (u8 *)&a->instr_offset + a->instr_offset;
20615+
20616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20617+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20618+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20619+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20620+#endif
20621+
20622 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20623 BUG_ON(a->replacementlen > a->instrlen);
20624 BUG_ON(a->instrlen > sizeof(insnbuf));
20625@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20626 add_nops(insnbuf + a->replacementlen,
20627 a->instrlen - a->replacementlen);
20628
20629+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20630+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20631+ instr = ktva_ktla(instr);
20632+#endif
20633+
20634 text_poke_early(instr, insnbuf, a->instrlen);
20635 }
20636 }
20637@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20638 for (poff = start; poff < end; poff++) {
20639 u8 *ptr = (u8 *)poff + *poff;
20640
20641+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20642+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20643+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20644+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20645+#endif
20646+
20647 if (!*poff || ptr < text || ptr >= text_end)
20648 continue;
20649 /* turn DS segment override prefix into lock prefix */
20650- if (*ptr == 0x3e)
20651+ if (*ktla_ktva(ptr) == 0x3e)
20652 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20653 }
20654 mutex_unlock(&text_mutex);
20655@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20656 for (poff = start; poff < end; poff++) {
20657 u8 *ptr = (u8 *)poff + *poff;
20658
20659+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20660+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20661+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20662+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20663+#endif
20664+
20665 if (!*poff || ptr < text || ptr >= text_end)
20666 continue;
20667 /* turn lock prefix into DS segment override prefix */
20668- if (*ptr == 0xf0)
20669+ if (*ktla_ktva(ptr) == 0xf0)
20670 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20671 }
20672 mutex_unlock(&text_mutex);
20673@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20674
20675 BUG_ON(p->len > MAX_PATCH_LEN);
20676 /* prep the buffer with the original instructions */
20677- memcpy(insnbuf, p->instr, p->len);
20678+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20679 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20680 (unsigned long)p->instr, p->len);
20681
20682@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20683 if (!uniproc_patched || num_possible_cpus() == 1)
20684 free_init_pages("SMP alternatives",
20685 (unsigned long)__smp_locks,
20686- (unsigned long)__smp_locks_end);
20687+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20688 #endif
20689
20690 apply_paravirt(__parainstructions, __parainstructions_end);
20691@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20692 * instructions. And on the local CPU you need to be protected again NMI or MCE
20693 * handlers seeing an inconsistent instruction while you patch.
20694 */
20695-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20696+void *__kprobes text_poke_early(void *addr, const void *opcode,
20697 size_t len)
20698 {
20699 unsigned long flags;
20700 local_irq_save(flags);
20701- memcpy(addr, opcode, len);
20702+
20703+ pax_open_kernel();
20704+ memcpy(ktla_ktva(addr), opcode, len);
20705 sync_core();
20706+ pax_close_kernel();
20707+
20708 local_irq_restore(flags);
20709 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20710 that causes hangs on some VIA CPUs. */
20711@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20712 */
20713 void *text_poke(void *addr, const void *opcode, size_t len)
20714 {
20715- unsigned long flags;
20716- char *vaddr;
20717+ unsigned char *vaddr = ktla_ktva(addr);
20718 struct page *pages[2];
20719- int i;
20720+ size_t i;
20721
20722 if (!core_kernel_text((unsigned long)addr)) {
20723- pages[0] = vmalloc_to_page(addr);
20724- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20725+ pages[0] = vmalloc_to_page(vaddr);
20726+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20727 } else {
20728- pages[0] = virt_to_page(addr);
20729+ pages[0] = virt_to_page(vaddr);
20730 WARN_ON(!PageReserved(pages[0]));
20731- pages[1] = virt_to_page(addr + PAGE_SIZE);
20732+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20733 }
20734 BUG_ON(!pages[0]);
20735- local_irq_save(flags);
20736- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20737- if (pages[1])
20738- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20739- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20740- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20741- clear_fixmap(FIX_TEXT_POKE0);
20742- if (pages[1])
20743- clear_fixmap(FIX_TEXT_POKE1);
20744- local_flush_tlb();
20745- sync_core();
20746- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20747- that causes hangs on some VIA CPUs. */
20748+ text_poke_early(addr, opcode, len);
20749 for (i = 0; i < len; i++)
20750- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20751- local_irq_restore(flags);
20752+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20753 return addr;
20754 }
20755
20756@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20757 if (likely(!bp_patching_in_progress))
20758 return 0;
20759
20760- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20761+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20762 return 0;
20763
20764 /* set up the specified breakpoint handler */
20765@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20766 */
20767 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20768 {
20769- unsigned char int3 = 0xcc;
20770+ const unsigned char int3 = 0xcc;
20771
20772 bp_int3_handler = handler;
20773 bp_int3_addr = (u8 *)addr + sizeof(int3);
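
The reworked text_poke() above routes all writes through text_poke_early() and keeps only a verify-after-patch loop. A minimal sketch of that final check, with plain buffers standing in for kernel text:

#include <assert.h>
#include <stdio.h>
#include <string.h>

static void poke(unsigned char *addr, const unsigned char *opcode, size_t len)
{
        size_t i;

        memcpy(addr, opcode, len);
        for (i = 0; i < len; i++)       /* the kernel uses BUG_ON() here */
                assert(addr[i] == opcode[i]);
}

int main(void)
{
        unsigned char text[4] = { 0x90, 0x90, 0x90, 0x90 };     /* NOPs */
        const unsigned char lock = 0xf0; /* LOCK prefix, per the SMP hunks */

        poke(text, &lock, 1);
        printf("first byte now: %#x\n", text[0]);
        return 0;
}
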
20774diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20775index ad3639a..bd4253c 100644
20776--- a/arch/x86/kernel/apic/apic.c
20777+++ b/arch/x86/kernel/apic/apic.c
20778@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20779 /*
20780 * Debug level, exported for io_apic.c
20781 */
20782-unsigned int apic_verbosity;
20783+int apic_verbosity;
20784
20785 int pic_mode;
20786
20787@@ -1918,7 +1918,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20788 apic_write(APIC_ESR, 0);
20789 v = apic_read(APIC_ESR);
20790 ack_APIC_irq();
20791- atomic_inc(&irq_err_count);
20792+ atomic_inc_unchecked(&irq_err_count);
20793
20794 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20795 smp_processor_id(), v);
20796diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20797index de918c4..32eed23 100644
20798--- a/arch/x86/kernel/apic/apic_flat_64.c
20799+++ b/arch/x86/kernel/apic/apic_flat_64.c
20800@@ -154,7 +154,7 @@ static int flat_probe(void)
20801 return 1;
20802 }
20803
20804-static struct apic apic_flat = {
20805+static struct apic apic_flat __read_only = {
20806 .name = "flat",
20807 .probe = flat_probe,
20808 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20809@@ -260,7 +260,7 @@ static int physflat_probe(void)
20810 return 0;
20811 }
20812
20813-static struct apic apic_physflat = {
20814+static struct apic apic_physflat __read_only = {
20815
20816 .name = "physical flat",
20817 .probe = physflat_probe,
20818diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20819index b205cdb..d8503ff 100644
20820--- a/arch/x86/kernel/apic/apic_noop.c
20821+++ b/arch/x86/kernel/apic/apic_noop.c
20822@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20823 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20824 }
20825
20826-struct apic apic_noop = {
20827+struct apic apic_noop __read_only = {
20828 .name = "noop",
20829 .probe = noop_probe,
20830 .acpi_madt_oem_check = NULL,
20831diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20832index c4a8d63..fe893ac 100644
20833--- a/arch/x86/kernel/apic/bigsmp_32.c
20834+++ b/arch/x86/kernel/apic/bigsmp_32.c
20835@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20836 return dmi_bigsmp;
20837 }
20838
20839-static struct apic apic_bigsmp = {
20840+static struct apic apic_bigsmp __read_only = {
20841
20842 .name = "bigsmp",
20843 .probe = probe_bigsmp,
20844diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20845index f4dc246..fbab133 100644
20846--- a/arch/x86/kernel/apic/io_apic.c
20847+++ b/arch/x86/kernel/apic/io_apic.c
20848@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20849 return ret;
20850 }
20851
20852-atomic_t irq_mis_count;
20853+atomic_unchecked_t irq_mis_count;
20854
20855 #ifdef CONFIG_GENERIC_PENDING_IRQ
20856 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20857@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20858 * at the cpu.
20859 */
20860 if (!(v & (1 << (i & 0x1f)))) {
20861- atomic_inc(&irq_mis_count);
20862+ atomic_inc_unchecked(&irq_mis_count);
20863
20864 eoi_ioapic_irq(irq, cfg);
20865 }
20866@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20867 ioapic_irqd_unmask(data, cfg, masked);
20868 }
20869
20870-static struct irq_chip ioapic_chip __read_mostly = {
20871+static struct irq_chip ioapic_chip = {
20872 .name = "IO-APIC",
20873 .irq_startup = startup_ioapic_irq,
20874 .irq_mask = mask_ioapic_irq,
20875@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20876 ack_APIC_irq();
20877 }
20878
20879-static struct irq_chip lapic_chip __read_mostly = {
20880+static struct irq_chip lapic_chip = {
20881 .name = "local-APIC",
20882 .irq_mask = mask_lapic_irq,
20883 .irq_unmask = unmask_lapic_irq,
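
The atomic_t to atomic_unchecked_t conversions above (irq_err_count, irq_mis_count) opt pure statistics counters out of PaX's REFCOUNT overflow trap, since wrapping is harmless for a counter that guards no object lifetime. A sketch using C11 atomics as a stand-in for the kernel type:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong irq_mis_count;      /* wrap-tolerant statistic */

int main(void)
{
        atomic_fetch_add(&irq_mis_count, 1);    /* atomic_inc_unchecked() */
        printf("misrouted irqs: %lu\n", atomic_load(&irq_mis_count));
        return 0;
}
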
20884diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20885index bda4886..f9c7195 100644
20886--- a/arch/x86/kernel/apic/probe_32.c
20887+++ b/arch/x86/kernel/apic/probe_32.c
20888@@ -72,7 +72,7 @@ static int probe_default(void)
20889 return 1;
20890 }
20891
20892-static struct apic apic_default = {
20893+static struct apic apic_default __read_only = {
20894
20895 .name = "default",
20896 .probe = probe_default,
20897diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20898index 6cedd79..023ff8e 100644
20899--- a/arch/x86/kernel/apic/vector.c
20900+++ b/arch/x86/kernel/apic/vector.c
20901@@ -21,7 +21,7 @@
20902
20903 static DEFINE_RAW_SPINLOCK(vector_lock);
20904
20905-void lock_vector_lock(void)
20906+void lock_vector_lock(void) __acquires(vector_lock)
20907 {
20908 /* Used to the online set of cpus does not change
20909 * during assign_irq_vector.
20910@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20911 raw_spin_lock(&vector_lock);
20912 }
20913
20914-void unlock_vector_lock(void)
20915+void unlock_vector_lock(void) __releases(vector_lock)
20916 {
20917 raw_spin_unlock(&vector_lock);
20918 }
20919diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20920index e658f21..b695a1a 100644
20921--- a/arch/x86/kernel/apic/x2apic_cluster.c
20922+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20923@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20924 return notifier_from_errno(err);
20925 }
20926
20927-static struct notifier_block __refdata x2apic_cpu_notifier = {
20928+static struct notifier_block x2apic_cpu_notifier = {
20929 .notifier_call = update_clusterinfo,
20930 };
20931
20932@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20933 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20934 }
20935
20936-static struct apic apic_x2apic_cluster = {
20937+static struct apic apic_x2apic_cluster __read_only = {
20938
20939 .name = "cluster x2apic",
20940 .probe = x2apic_cluster_probe,
20941diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20942index 6fae733..5ca17af 100644
20943--- a/arch/x86/kernel/apic/x2apic_phys.c
20944+++ b/arch/x86/kernel/apic/x2apic_phys.c
20945@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20946 return apic == &apic_x2apic_phys;
20947 }
20948
20949-static struct apic apic_x2apic_phys = {
20950+static struct apic apic_x2apic_phys __read_only = {
20951
20952 .name = "physical x2apic",
20953 .probe = x2apic_phys_probe,
20954diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20955index 8e9dcfd..c61b3e4 100644
20956--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20957+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20958@@ -348,7 +348,7 @@ static int uv_probe(void)
20959 return apic == &apic_x2apic_uv_x;
20960 }
20961
20962-static struct apic __refdata apic_x2apic_uv_x = {
20963+static struct apic apic_x2apic_uv_x __read_only = {
20964
20965 .name = "UV large system",
20966 .probe = uv_probe,
20967diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20968index 927ec92..de68f32 100644
20969--- a/arch/x86/kernel/apm_32.c
20970+++ b/arch/x86/kernel/apm_32.c
20971@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20972 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20973 * even though they are called in protected mode.
20974 */
20975-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20976+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20977 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20978
20979 static const char driver_version[] = "1.16ac"; /* no spaces */
20980@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20981 BUG_ON(cpu != 0);
20982 gdt = get_cpu_gdt_table(cpu);
20983 save_desc_40 = gdt[0x40 / 8];
20984+
20985+ pax_open_kernel();
20986 gdt[0x40 / 8] = bad_bios_desc;
20987+ pax_close_kernel();
20988
20989 apm_irq_save(flags);
20990 APM_DO_SAVE_SEGS;
20991@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
20992 &call->esi);
20993 APM_DO_RESTORE_SEGS;
20994 apm_irq_restore(flags);
20995+
20996+ pax_open_kernel();
20997 gdt[0x40 / 8] = save_desc_40;
20998+ pax_close_kernel();
20999+
21000 put_cpu();
21001
21002 return call->eax & 0xff;
21003@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21004 BUG_ON(cpu != 0);
21005 gdt = get_cpu_gdt_table(cpu);
21006 save_desc_40 = gdt[0x40 / 8];
21007+
21008+ pax_open_kernel();
21009 gdt[0x40 / 8] = bad_bios_desc;
21010+ pax_close_kernel();
21011
21012 apm_irq_save(flags);
21013 APM_DO_SAVE_SEGS;
21014@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21015 &call->eax);
21016 APM_DO_RESTORE_SEGS;
21017 apm_irq_restore(flags);
21018+
21019+ pax_open_kernel();
21020 gdt[0x40 / 8] = save_desc_40;
21021+ pax_close_kernel();
21022+
21023 put_cpu();
21024 return error;
21025 }
21026@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21027 return 0;
21028 }
21029
21030-static struct dmi_system_id __initdata apm_dmi_table[] = {
21031+static const struct dmi_system_id __initconst apm_dmi_table[] = {
21032 {
21033 print_if_true,
21034 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21035@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21036 * code to that CPU.
21037 */
21038 gdt = get_cpu_gdt_table(0);
21039+
21040+ pax_open_kernel();
21041 set_desc_base(&gdt[APM_CS >> 3],
21042 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21043 set_desc_base(&gdt[APM_CS_16 >> 3],
21044 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21045 set_desc_base(&gdt[APM_DS >> 3],
21046 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21047+ pax_close_kernel();
21048
21049 proc_create("apm", 0, NULL, &apm_file_ops);
21050
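
The APM hunks wrap every write to the 0x40/8 GDT quirk slot in pax_open_kernel()/pax_close_kernel(), because under KERNEXEC the table is read-only between the save/patch/restore steps around each BIOS call. A sketch of that dance, with a plain array standing in for the GDT:

#include <stdio.h>

static unsigned long gdt[16];

static void bios_call(void) { puts("BIOS call with quirk descriptor"); }

int main(void)
{
        unsigned long save_desc_40 = gdt[0x40 / 8];

        /* pax_open_kernel(); */
        gdt[0x40 / 8] = 0x4093;         /* bad_bios_desc stand-in */
        /* pax_close_kernel(); */

        bios_call();

        /* pax_open_kernel(); */
        gdt[0x40 / 8] = save_desc_40;
        /* pax_close_kernel(); */

        printf("slot 0x40/8 restored: %#lx\n", gdt[0x40 / 8]);
        return 0;
}
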
21051diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21052index 9f6b934..cf5ffb3 100644
21053--- a/arch/x86/kernel/asm-offsets.c
21054+++ b/arch/x86/kernel/asm-offsets.c
21055@@ -32,6 +32,8 @@ void common(void) {
21056 OFFSET(TI_flags, thread_info, flags);
21057 OFFSET(TI_status, thread_info, status);
21058 OFFSET(TI_addr_limit, thread_info, addr_limit);
21059+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21060+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21061
21062 BLANK();
21063 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21064@@ -52,8 +54,26 @@ void common(void) {
21065 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21066 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21067 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21068+
21069+#ifdef CONFIG_PAX_KERNEXEC
21070+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21071 #endif
21072
21073+#ifdef CONFIG_PAX_MEMORY_UDEREF
21074+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21075+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21076+#ifdef CONFIG_X86_64
21077+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21078+#endif
21079+#endif
21080+
21081+#endif
21082+
21083+ BLANK();
21084+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21085+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21086+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21087+
21088 #ifdef CONFIG_XEN
21089 BLANK();
21090 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21091diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21092index fdcbb4d..036dd93 100644
21093--- a/arch/x86/kernel/asm-offsets_64.c
21094+++ b/arch/x86/kernel/asm-offsets_64.c
21095@@ -80,6 +80,7 @@ int main(void)
21096 BLANK();
21097 #undef ENTRY
21098
21099+ DEFINE(TSS_size, sizeof(struct tss_struct));
21100 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21101 BLANK();
21102
21103diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21104index 80091ae..0c5184f 100644
21105--- a/arch/x86/kernel/cpu/Makefile
21106+++ b/arch/x86/kernel/cpu/Makefile
21107@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21108 CFLAGS_REMOVE_perf_event.o = -pg
21109 endif
21110
21111-# Make sure load_percpu_segment has no stackprotector
21112-nostackp := $(call cc-option, -fno-stack-protector)
21113-CFLAGS_common.o := $(nostackp)
21114-
21115 obj-y := intel_cacheinfo.o scattered.o topology.o
21116 obj-y += common.o
21117 obj-y += rdrand.o
21118diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21119index a220239..607fc38 100644
21120--- a/arch/x86/kernel/cpu/amd.c
21121+++ b/arch/x86/kernel/cpu/amd.c
21122@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21123 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21124 {
21125 /* AMD errata T13 (order #21922) */
21126- if ((c->x86 == 6)) {
21127+ if (c->x86 == 6) {
21128 /* Duron Rev A0 */
21129 if (c->x86_model == 3 && c->x86_mask == 0)
21130 size = 64;
21131diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21132index 2346c95..c061472 100644
21133--- a/arch/x86/kernel/cpu/common.c
21134+++ b/arch/x86/kernel/cpu/common.c
21135@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21136
21137 static const struct cpu_dev *this_cpu = &default_cpu;
21138
21139-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21140-#ifdef CONFIG_X86_64
21141- /*
21142- * We need valid kernel segments for data and code in long mode too
21143- * IRET will check the segment types kkeil 2000/10/28
21144- * Also sysret mandates a special GDT layout
21145- *
21146- * TLS descriptors are currently at a different place compared to i386.
21147- * Hopefully nobody expects them at a fixed place (Wine?)
21148- */
21149- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21150- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21151- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21152- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21153- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21154- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21155-#else
21156- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21157- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21158- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21159- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21160- /*
21161- * Segments used for calling PnP BIOS have byte granularity.
21162- * They code segments and data segments have fixed 64k limits,
21163- * the transfer segment sizes are set at run time.
21164- */
21165- /* 32-bit code */
21166- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21167- /* 16-bit code */
21168- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21169- /* 16-bit data */
21170- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21171- /* 16-bit data */
21172- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21173- /* 16-bit data */
21174- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21175- /*
21176- * The APM segments have byte granularity and their bases
21177- * are set at run time. All have 64k limits.
21178- */
21179- /* 32-bit code */
21180- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21181- /* 16-bit code */
21182- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21183- /* data */
21184- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21185-
21186- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21187- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21188- GDT_STACK_CANARY_INIT
21189-#endif
21190-} };
21191-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21192-
21193 static int __init x86_xsave_setup(char *s)
21194 {
21195 if (strlen(s))
21196@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21197 }
21198 }
21199
21200+#ifdef CONFIG_X86_64
21201+static __init int setup_disable_pcid(char *arg)
21202+{
21203+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21204+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21205+
21206+#ifdef CONFIG_PAX_MEMORY_UDEREF
21207+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21208+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21209+#endif
21210+
21211+ return 1;
21212+}
21213+__setup("nopcid", setup_disable_pcid);
21214+
21215+static void setup_pcid(struct cpuinfo_x86 *c)
21216+{
21217+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21218+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21219+
21220+#ifdef CONFIG_PAX_MEMORY_UDEREF
21221+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21222+ pax_open_kernel();
21223+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21224+ pax_close_kernel();
21225+ printk("PAX: slow and weak UDEREF enabled\n");
21226+ } else
21227+ printk("PAX: UDEREF disabled\n");
21228+#endif
21229+
21230+ return;
21231+ }
21232+
21233+ printk("PAX: PCID detected\n");
21234+ cr4_set_bits(X86_CR4_PCIDE);
21235+
21236+#ifdef CONFIG_PAX_MEMORY_UDEREF
21237+ pax_open_kernel();
21238+ clone_pgd_mask = ~(pgdval_t)0UL;
21239+ pax_close_kernel();
21240+ if (pax_user_shadow_base)
21241+ printk("PAX: weak UDEREF enabled\n");
21242+ else {
21243+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21244+ printk("PAX: strong UDEREF enabled\n");
21245+ }
21246+#endif
21247+
21248+ if (cpu_has(c, X86_FEATURE_INVPCID))
21249+ printk("PAX: INVPCID detected\n");
21250+}
21251+#endif
21252+
21253 /*
21254 * Some CPU features depend on higher CPUID levels, which may not always
21255 * be available due to CPUID level capping or broken virtualization
21256@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21257 {
21258 struct desc_ptr gdt_descr;
21259
21260- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21261+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21262 gdt_descr.size = GDT_SIZE - 1;
21263 load_gdt(&gdt_descr);
21264 /* Reload the per-cpu base */
21265@@ -897,6 +896,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21266 setup_smep(c);
21267 setup_smap(c);
21268
21269+#ifdef CONFIG_X86_32
21270+#ifdef CONFIG_PAX_PAGEEXEC
21271+ if (!(__supported_pte_mask & _PAGE_NX))
21272+ clear_cpu_cap(c, X86_FEATURE_PSE);
21273+#endif
21274+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21275+ clear_cpu_cap(c, X86_FEATURE_SEP);
21276+#endif
21277+#endif
21278+
21279+#ifdef CONFIG_X86_64
21280+ setup_pcid(c);
21281+#endif
21282+
21283 /*
21284 * The vendor-specific functions might have changed features.
21285 * Now we do "generic changes."
21286@@ -979,7 +992,7 @@ static void syscall32_cpu_init(void)
21287 void enable_sep_cpu(void)
21288 {
21289 int cpu = get_cpu();
21290- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21291+ struct tss_struct *tss = init_tss + cpu;
21292
21293 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21294 put_cpu();
21295@@ -1117,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
21296 }
21297 __setup("clearcpuid=", setup_disablecpuid);
21298
21299+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21300+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21301+
21302 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21303- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21304+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21305 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21306
21307 #ifdef CONFIG_X86_64
21308-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21309-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21310- (unsigned long) debug_idt_table };
21311+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21312+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21313
21314 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21315 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21316@@ -1307,7 +1322,7 @@ void cpu_init(void)
21317 */
21318 load_ucode_ap();
21319
21320- t = &per_cpu(init_tss, cpu);
21321+ t = init_tss + cpu;
21322 oist = &per_cpu(orig_ist, cpu);
21323
21324 #ifdef CONFIG_NUMA
21325@@ -1339,7 +1354,6 @@ void cpu_init(void)
21326 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21327 barrier();
21328
21329- x86_configure_nx();
21330 x2apic_setup();
21331
21332 /*
21333@@ -1391,7 +1405,7 @@ void cpu_init(void)
21334 {
21335 int cpu = smp_processor_id();
21336 struct task_struct *curr = current;
21337- struct tss_struct *t = &per_cpu(init_tss, cpu);
21338+ struct tss_struct *t = init_tss + cpu;
21339 struct thread_struct *thread = &curr->thread;
21340
21341 wait_for_master_cpu(cpu);
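
setup_pcid() above keys off two CPUID feature bits: PCID is advertised in leaf 1, ECX bit 17, and INVPCID in leaf 7 (subleaf 0), EBX bit 10. A minimal x86-only probe sketch using GCC/clang's <cpuid.h>, assuming __get_cpuid_count is available (GCC 7+):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                printf("PCID:    %s\n", (ecx & (1u << 17)) ? "yes" : "no");
        if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                printf("INVPCID: %s\n", (ebx & (1u << 10)) ? "yes" : "no");
        return 0;
}
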
21342diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21343index 6596433..1ad6eaf 100644
21344--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21345+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21346@@ -1024,6 +1024,22 @@ static struct attribute *default_attrs[] = {
21347 };
21348
21349 #ifdef CONFIG_AMD_NB
21350+static struct attribute *default_attrs_amd_nb[] = {
21351+ &type.attr,
21352+ &level.attr,
21353+ &coherency_line_size.attr,
21354+ &physical_line_partition.attr,
21355+ &ways_of_associativity.attr,
21356+ &number_of_sets.attr,
21357+ &size.attr,
21358+ &shared_cpu_map.attr,
21359+ &shared_cpu_list.attr,
21360+ NULL,
21361+ NULL,
21362+ NULL,
21363+ NULL
21364+};
21365+
21366 static struct attribute **amd_l3_attrs(void)
21367 {
21368 static struct attribute **attrs;
21369@@ -1034,18 +1050,7 @@ static struct attribute **amd_l3_attrs(void)
21370
21371 n = ARRAY_SIZE(default_attrs);
21372
21373- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21374- n += 2;
21375-
21376- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21377- n += 1;
21378-
21379- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21380- if (attrs == NULL)
21381- return attrs = default_attrs;
21382-
21383- for (n = 0; default_attrs[n]; n++)
21384- attrs[n] = default_attrs[n];
21385+ attrs = default_attrs_amd_nb;
21386
21387 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21388 attrs[n++] = &cache_disable_0.attr;
21389@@ -1096,6 +1101,13 @@ static struct kobj_type ktype_cache = {
21390 .default_attrs = default_attrs,
21391 };
21392
21393+#ifdef CONFIG_AMD_NB
21394+static struct kobj_type ktype_cache_amd_nb = {
21395+ .sysfs_ops = &sysfs_ops,
21396+ .default_attrs = default_attrs_amd_nb,
21397+};
21398+#endif
21399+
21400 static struct kobj_type ktype_percpu_entry = {
21401 .sysfs_ops = &sysfs_ops,
21402 };
21403@@ -1161,20 +1173,26 @@ static int cache_add_dev(struct device *dev)
21404 return retval;
21405 }
21406
21407+#ifdef CONFIG_AMD_NB
21408+ amd_l3_attrs();
21409+#endif
21410+
21411 for (i = 0; i < num_cache_leaves; i++) {
21412+ struct kobj_type *ktype;
21413+
21414 this_object = INDEX_KOBJECT_PTR(cpu, i);
21415 this_object->cpu = cpu;
21416 this_object->index = i;
21417
21418 this_leaf = CPUID4_INFO_IDX(cpu, i);
21419
21420- ktype_cache.default_attrs = default_attrs;
21421+ ktype = &ktype_cache;
21422 #ifdef CONFIG_AMD_NB
21423 if (this_leaf->base.nb)
21424- ktype_cache.default_attrs = amd_l3_attrs();
21425+ ktype = &ktype_cache_amd_nb;
21426 #endif
21427 retval = kobject_init_and_add(&(this_object->kobj),
21428- &ktype_cache,
21429+ ktype,
21430 per_cpu(ici_cache_kobject, cpu),
21431 "index%1lu", i);
21432 if (unlikely(retval)) {
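
The cacheinfo hunk trades a runtime kzalloc() of the attribute array for a single static array with spare NULL slots reserved up front, filled in only when the AMD northbridge features are present. A minimal sketch of that allocation-free pattern:

#include <stdio.h>
#include <stddef.h>

static const char *attrs[] = {
        "type", "level", "size",
        NULL, NULL, NULL        /* two spare slots plus the terminator */
};

int main(void)
{
        size_t n = 0;
        size_t i;

        while (attrs[n])
                n++;
        attrs[n++] = "cache_disable_0"; /* feature-gated extras, as on AMD NB */
        attrs[n++] = "cache_disable_1";

        for (i = 0; attrs[i]; i++)
                puts(attrs[i]);
        return 0;
}
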
21433diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21434index 3c036cb..3b5677d 100644
21435--- a/arch/x86/kernel/cpu/mcheck/mce.c
21436+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21437@@ -47,6 +47,7 @@
21438 #include <asm/tlbflush.h>
21439 #include <asm/mce.h>
21440 #include <asm/msr.h>
21441+#include <asm/local.h>
21442
21443 #include "mce-internal.h"
21444
21445@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21446 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21447 m->cs, m->ip);
21448
21449- if (m->cs == __KERNEL_CS)
21450+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21451 print_symbol("{%s}", m->ip);
21452 pr_cont("\n");
21453 }
21454@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21455
21456 #define PANIC_TIMEOUT 5 /* 5 seconds */
21457
21458-static atomic_t mce_panicked;
21459+static atomic_unchecked_t mce_panicked;
21460
21461 static int fake_panic;
21462-static atomic_t mce_fake_panicked;
21463+static atomic_unchecked_t mce_fake_panicked;
21464
21465 /* Panic in progress. Enable interrupts and wait for final IPI */
21466 static void wait_for_panic(void)
21467@@ -318,7 +319,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21468 /*
21469 * Make sure only one CPU runs in machine check panic
21470 */
21471- if (atomic_inc_return(&mce_panicked) > 1)
21472+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21473 wait_for_panic();
21474 barrier();
21475
21476@@ -326,7 +327,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21477 console_verbose();
21478 } else {
21479 /* Don't log too much for fake panic */
21480- if (atomic_inc_return(&mce_fake_panicked) > 1)
21481+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21482 return;
21483 }
21484 /* First print corrected ones that are still unlogged */
21485@@ -365,7 +366,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21486 if (!fake_panic) {
21487 if (panic_timeout == 0)
21488 panic_timeout = mca_cfg.panic_timeout;
21489- panic(msg);
21490+ panic("%s", msg);
21491 } else
21492 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21493 }
21494@@ -743,7 +744,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21495 * might have been modified by someone else.
21496 */
21497 rmb();
21498- if (atomic_read(&mce_panicked))
21499+ if (atomic_read_unchecked(&mce_panicked))
21500 wait_for_panic();
21501 if (!mca_cfg.monarch_timeout)
21502 goto out;
21503@@ -1669,7 +1670,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21504 }
21505
21506 /* Call the installed machine check handler for this CPU setup. */
21507-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21508+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21509 unexpected_machine_check;
21510
21511 /*
21512@@ -1692,7 +1693,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21513 return;
21514 }
21515
21516+ pax_open_kernel();
21517 machine_check_vector = do_machine_check;
21518+ pax_close_kernel();
21519
21520 __mcheck_cpu_init_generic();
21521 __mcheck_cpu_init_vendor(c);
21522@@ -1706,7 +1709,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21523 */
21524
21525 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21526-static int mce_chrdev_open_count; /* #times opened */
21527+static local_t mce_chrdev_open_count; /* #times opened */
21528 static int mce_chrdev_open_exclu; /* already open exclusive? */
21529
21530 static int mce_chrdev_open(struct inode *inode, struct file *file)
21531@@ -1714,7 +1717,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21532 spin_lock(&mce_chrdev_state_lock);
21533
21534 if (mce_chrdev_open_exclu ||
21535- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21536+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21537 spin_unlock(&mce_chrdev_state_lock);
21538
21539 return -EBUSY;
21540@@ -1722,7 +1725,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21541
21542 if (file->f_flags & O_EXCL)
21543 mce_chrdev_open_exclu = 1;
21544- mce_chrdev_open_count++;
21545+ local_inc(&mce_chrdev_open_count);
21546
21547 spin_unlock(&mce_chrdev_state_lock);
21548
21549@@ -1733,7 +1736,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21550 {
21551 spin_lock(&mce_chrdev_state_lock);
21552
21553- mce_chrdev_open_count--;
21554+ local_dec(&mce_chrdev_open_count);
21555 mce_chrdev_open_exclu = 0;
21556
21557 spin_unlock(&mce_chrdev_state_lock);
21558@@ -2408,7 +2411,7 @@ static __init void mce_init_banks(void)
21559
21560 for (i = 0; i < mca_cfg.banks; i++) {
21561 struct mce_bank *b = &mce_banks[i];
21562- struct device_attribute *a = &b->attr;
21563+ device_attribute_no_const *a = &b->attr;
21564
21565 sysfs_attr_init(&a->attr);
21566 a->attr.name = b->attrname;
21567@@ -2515,7 +2518,7 @@ struct dentry *mce_get_debugfs_dir(void)
21568 static void mce_reset(void)
21569 {
21570 cpu_missing = 0;
21571- atomic_set(&mce_fake_panicked, 0);
21572+ atomic_set_unchecked(&mce_fake_panicked, 0);
21573 atomic_set(&mce_executing, 0);
21574 atomic_set(&mce_callin, 0);
21575 atomic_set(&global_nwo, 0);
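[Editorial aside: several hunks above convert counters from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, ordinary atomic_t increments trap on overflow to defeat reference-count overflow exploits; the *_unchecked variants opt out for bookkeeping counters, such as the panic counters here, where wraparound is harmless. A user-space analogue of the unchecked flavour, using a real GCC builtin:

typedef struct { volatile int counter; } atomic_unchecked_t;

/* Increment and return the new value; deliberately no overflow trap. */
static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}
]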
21576diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21577index 737b0ad..09ec66e 100644
21578--- a/arch/x86/kernel/cpu/mcheck/p5.c
21579+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21580@@ -12,6 +12,7 @@
21581 #include <asm/tlbflush.h>
21582 #include <asm/mce.h>
21583 #include <asm/msr.h>
21584+#include <asm/pgtable.h>
21585
21586 /* By default disabled */
21587 int mce_p5_enabled __read_mostly;
21588@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21589 if (!cpu_has(c, X86_FEATURE_MCE))
21590 return;
21591
21592+ pax_open_kernel();
21593 machine_check_vector = pentium_machine_check;
21594+ pax_close_kernel();
21595 /* Make sure the vector pointer is visible before we enable MCEs: */
21596 wmb();
21597
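[Editorial aside: machine_check_vector is marked __read_only in the mce.c hunk above, so every assignment to it (in mce.c, here in p5.c, and in winchip.c below) is bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86 by toggling CR0.WP, bit 16, as the entry code later in this patch shows). A hedged sketch of the bracket pattern; mc_vector and the extern declarations are stand-ins:

extern void pax_open_kernel(void);	/* stand-in declarations */
extern void pax_close_kernel(void);

/* Pointer that normally lives in a read-only mapping. */
extern void (*mc_vector)(void);

static void install_mc_handler(void (*fn)(void))
{
	pax_open_kernel();	/* make the page writable */
	mc_vector = fn;		/* the one sanctioned write */
	pax_close_kernel();	/* restore write protection */
}
]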
21598diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21599index 44f1382..315b292 100644
21600--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21601+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21602@@ -11,6 +11,7 @@
21603 #include <asm/tlbflush.h>
21604 #include <asm/mce.h>
21605 #include <asm/msr.h>
21606+#include <asm/pgtable.h>
21607
21608 /* Machine check handler for WinChip C6: */
21609 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21610@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21611 {
21612 u32 lo, hi;
21613
21614+ pax_open_kernel();
21615 machine_check_vector = winchip_machine_check;
21616+ pax_close_kernel();
21617 /* Make sure the vector pointer is visible before we enable MCEs: */
21618 wmb();
21619
21620diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21621index 36a8361..e7058c2 100644
21622--- a/arch/x86/kernel/cpu/microcode/core.c
21623+++ b/arch/x86/kernel/cpu/microcode/core.c
21624@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21625 return NOTIFY_OK;
21626 }
21627
21628-static struct notifier_block __refdata mc_cpu_notifier = {
21629+static struct notifier_block mc_cpu_notifier = {
21630 .notifier_call = mc_cpu_callback,
21631 };
21632
21633diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21634index 746e7fd..8dc677e 100644
21635--- a/arch/x86/kernel/cpu/microcode/intel.c
21636+++ b/arch/x86/kernel/cpu/microcode/intel.c
21637@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21638
21639 static int get_ucode_user(void *to, const void *from, size_t n)
21640 {
21641- return copy_from_user(to, from, n);
21642+ return copy_from_user(to, (const void __force_user *)from, n);
21643 }
21644
21645 static enum ucode_state
21646 request_microcode_user(int cpu, const void __user *buf, size_t size)
21647 {
21648- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21649+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21650 }
21651
21652 static void microcode_fini_cpu(int cpu)
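[Editorial aside: the __force_user/__force_kernel casts above are sparse address-space annotations. get_ucode_user() receives a kernel-typed pointer that is really a user pointer, and the cast documents that crossing for the static checker instead of silently mixing address spaces. A minimal analogue with plain sparse attributes; the combined __force_user spelling itself is grsecurity glue, and this sketch only approximates it:

#include <stddef.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);

static int get_ucode(void *to, const void *from, size_t n)
{
	/* the cast tells the checker this really is a user pointer */
	return copy_from_user(to, (const void __force __user *)from, n);
}
]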
21653diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21654index ea5f363..cb0e905 100644
21655--- a/arch/x86/kernel/cpu/mtrr/main.c
21656+++ b/arch/x86/kernel/cpu/mtrr/main.c
21657@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21658 u64 size_or_mask, size_and_mask;
21659 static bool mtrr_aps_delayed_init;
21660
21661-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21662+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21663
21664 const struct mtrr_ops *mtrr_if;
21665
21666diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21667index df5e41f..816c719 100644
21668--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21669+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21670@@ -25,7 +25,7 @@ struct mtrr_ops {
21671 int (*validate_add_page)(unsigned long base, unsigned long size,
21672 unsigned int type);
21673 int (*have_wrcomb)(void);
21674-};
21675+} __do_const;
21676
21677 extern int generic_get_free_region(unsigned long base, unsigned long size,
21678 int replace_reg);
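[Editorial aside: __do_const, applied to mtrr_ops above and to uncore_event_desc below, is an attribute consumed by grsecurity's constify GCC plugin: every instance of the marked structure is treated as const, so its function pointers land in .rodata and cannot be retargeted at runtime. The plain-C shape of the same idea:

struct my_ops {
	int  (*probe)(void);
	void (*remove)(void);
};

static int  noop_probe(void)  { return 0; }
static void noop_remove(void) { }

/* With const, the pointer table lives in .rodata. */
static const struct my_ops default_ops = {
	.probe  = noop_probe,
	.remove = noop_remove,
};
]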
21679diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21680index b71a7f8..534af0e 100644
21681--- a/arch/x86/kernel/cpu/perf_event.c
21682+++ b/arch/x86/kernel/cpu/perf_event.c
21683@@ -1376,7 +1376,7 @@ static void __init pmu_check_apic(void)
21684
21685 }
21686
21687-static struct attribute_group x86_pmu_format_group = {
21688+static attribute_group_no_const x86_pmu_format_group = {
21689 .name = "format",
21690 .attrs = NULL,
21691 };
21692@@ -1475,7 +1475,7 @@ static struct attribute *events_attr[] = {
21693 NULL,
21694 };
21695
21696-static struct attribute_group x86_pmu_events_group = {
21697+static attribute_group_no_const x86_pmu_events_group = {
21698 .name = "events",
21699 .attrs = events_attr,
21700 };
21701@@ -2037,7 +2037,7 @@ static unsigned long get_segment_base(unsigned int segment)
21702 if (idx > GDT_ENTRIES)
21703 return 0;
21704
21705- desc = raw_cpu_ptr(gdt_page.gdt);
21706+ desc = get_cpu_gdt_table(smp_processor_id());
21707 }
21708
21709 return get_desc_base(desc + idx);
21710@@ -2127,7 +2127,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21711 break;
21712
21713 perf_callchain_store(entry, frame.return_address);
21714- fp = frame.next_frame;
21715+ fp = (const void __force_user *)frame.next_frame;
21716 }
21717 }
21718
21719diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21720index 97242a9..cf9c30e 100644
21721--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21722+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21723@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21724 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21725 {
21726 struct attribute **attrs;
21727- struct attribute_group *attr_group;
21728+ attribute_group_no_const *attr_group;
21729 int i = 0, j;
21730
21731 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21732diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21733index 2589906..1ca1000 100644
21734--- a/arch/x86/kernel/cpu/perf_event_intel.c
21735+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21736@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21737 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21738
21739 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21740- u64 capabilities;
21741+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21742
21743- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21744- x86_pmu.intel_cap.capabilities = capabilities;
21745+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21746+ x86_pmu.intel_cap.capabilities = capabilities;
21747 }
21748
21749 intel_ds_init();
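[Editorial aside: the intel_pmu_init() hunk above swaps rdmsrl() for rdmsrl_safe(). The former oopses if the RDMSR faults; the latter returns non-zero on a #GP so the caller can keep a fallback value. A sketch of the pattern; pmu_caps_default is a hypothetical fallback and the extern declaration is illustrative only:

typedef unsigned long long u64;

extern int rdmsrl_safe(unsigned int msr, u64 *p);
#define MSR_IA32_PERF_CAPABILITIES 0x345

static const u64 pmu_caps_default;	/* hypothetical fallback (zero) */

static u64 read_pmu_caps(void)
{
	u64 caps;

	/* rdmsrl_safe() returns non-zero if the RDMSR faulted */
	if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &caps))
		caps = pmu_caps_default;	/* MSR absent: keep fallback */
	return caps;
}
]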
21750diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21751index c4bb8b8..9f7384d 100644
21752--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21753+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21754@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21755 NULL,
21756 };
21757
21758-static struct attribute_group rapl_pmu_events_group = {
21759+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21760 .name = "events",
21761 .attrs = NULL, /* patched at runtime */
21762 };
21763diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21764index c635b8b..b78835e 100644
21765--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21766+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21767@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21768 static int __init uncore_type_init(struct intel_uncore_type *type)
21769 {
21770 struct intel_uncore_pmu *pmus;
21771- struct attribute_group *attr_group;
21772+ attribute_group_no_const *attr_group;
21773 struct attribute **attrs;
21774 int i, j;
21775
21776diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21777index 6c8c1e7..515b98a 100644
21778--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21779+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21780@@ -114,7 +114,7 @@ struct intel_uncore_box {
21781 struct uncore_event_desc {
21782 struct kobj_attribute attr;
21783 const char *config;
21784-};
21785+} __do_const;
21786
21787 ssize_t uncore_event_show(struct kobject *kobj,
21788 struct kobj_attribute *attr, char *buf);
21789diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21790index 83741a7..bd3507d 100644
21791--- a/arch/x86/kernel/cpuid.c
21792+++ b/arch/x86/kernel/cpuid.c
21793@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21794 return notifier_from_errno(err);
21795 }
21796
21797-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21798+static struct notifier_block cpuid_class_cpu_notifier =
21799 {
21800 .notifier_call = cpuid_class_cpu_callback,
21801 };
21802diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21803index aceb2f9..c76d3e3 100644
21804--- a/arch/x86/kernel/crash.c
21805+++ b/arch/x86/kernel/crash.c
21806@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21807 #ifdef CONFIG_X86_32
21808 struct pt_regs fixed_regs;
21809
21810- if (!user_mode_vm(regs)) {
21811+ if (!user_mode(regs)) {
21812 crash_fixup_ss_esp(&fixed_regs, regs);
21813 regs = &fixed_regs;
21814 }
21815diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21816index afa64ad..dce67dd 100644
21817--- a/arch/x86/kernel/crash_dump_64.c
21818+++ b/arch/x86/kernel/crash_dump_64.c
21819@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21820 return -ENOMEM;
21821
21822 if (userbuf) {
21823- if (copy_to_user(buf, vaddr + offset, csize)) {
21824+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21825 iounmap(vaddr);
21826 return -EFAULT;
21827 }
21828diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21829index f6dfd93..892ade4 100644
21830--- a/arch/x86/kernel/doublefault.c
21831+++ b/arch/x86/kernel/doublefault.c
21832@@ -12,7 +12,7 @@
21833
21834 #define DOUBLEFAULT_STACKSIZE (1024)
21835 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21836-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21837+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21838
21839 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21840
21841@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21842 unsigned long gdt, tss;
21843
21844 native_store_gdt(&gdt_desc);
21845- gdt = gdt_desc.address;
21846+ gdt = (unsigned long)gdt_desc.address;
21847
21848 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21849
21850@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21851 /* 0x2 bit is always set */
21852 .flags = X86_EFLAGS_SF | 0x2,
21853 .sp = STACK_START,
21854- .es = __USER_DS,
21855+ .es = __KERNEL_DS,
21856 .cs = __KERNEL_CS,
21857 .ss = __KERNEL_DS,
21858- .ds = __USER_DS,
21859+ .ds = __KERNEL_DS,
21860 .fs = __KERNEL_PERCPU,
21861
21862 .__cr3 = __pa_nodebug(swapper_pg_dir),
21863diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21864index cf3df1d..b637d9a 100644
21865--- a/arch/x86/kernel/dumpstack.c
21866+++ b/arch/x86/kernel/dumpstack.c
21867@@ -2,6 +2,9 @@
21868 * Copyright (C) 1991, 1992 Linus Torvalds
21869 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21870 */
21871+#ifdef CONFIG_GRKERNSEC_HIDESYM
21872+#define __INCLUDED_BY_HIDESYM 1
21873+#endif
21874 #include <linux/kallsyms.h>
21875 #include <linux/kprobes.h>
21876 #include <linux/uaccess.h>
21877@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21878
21879 void printk_address(unsigned long address)
21880 {
21881- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21882+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21883 }
21884
21885 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21886 static void
21887 print_ftrace_graph_addr(unsigned long addr, void *data,
21888 const struct stacktrace_ops *ops,
21889- struct thread_info *tinfo, int *graph)
21890+ struct task_struct *task, int *graph)
21891 {
21892- struct task_struct *task;
21893 unsigned long ret_addr;
21894 int index;
21895
21896 if (addr != (unsigned long)return_to_handler)
21897 return;
21898
21899- task = tinfo->task;
21900 index = task->curr_ret_stack;
21901
21902 if (!task->ret_stack || index < *graph)
21903@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21904 static inline void
21905 print_ftrace_graph_addr(unsigned long addr, void *data,
21906 const struct stacktrace_ops *ops,
21907- struct thread_info *tinfo, int *graph)
21908+ struct task_struct *task, int *graph)
21909 { }
21910 #endif
21911
21912@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21913 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21914 */
21915
21916-static inline int valid_stack_ptr(struct thread_info *tinfo,
21917- void *p, unsigned int size, void *end)
21918+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21919 {
21920- void *t = tinfo;
21921 if (end) {
21922 if (p < end && p >= (end-THREAD_SIZE))
21923 return 1;
21924@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21925 }
21926
21927 unsigned long
21928-print_context_stack(struct thread_info *tinfo,
21929+print_context_stack(struct task_struct *task, void *stack_start,
21930 unsigned long *stack, unsigned long bp,
21931 const struct stacktrace_ops *ops, void *data,
21932 unsigned long *end, int *graph)
21933 {
21934 struct stack_frame *frame = (struct stack_frame *)bp;
21935
21936- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21937+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21938 unsigned long addr;
21939
21940 addr = *stack;
21941@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21942 } else {
21943 ops->address(data, addr, 0);
21944 }
21945- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21946+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21947 }
21948 stack++;
21949 }
21950@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21951 EXPORT_SYMBOL_GPL(print_context_stack);
21952
21953 unsigned long
21954-print_context_stack_bp(struct thread_info *tinfo,
21955+print_context_stack_bp(struct task_struct *task, void *stack_start,
21956 unsigned long *stack, unsigned long bp,
21957 const struct stacktrace_ops *ops, void *data,
21958 unsigned long *end, int *graph)
21959@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21960 struct stack_frame *frame = (struct stack_frame *)bp;
21961 unsigned long *ret_addr = &frame->return_address;
21962
21963- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21964+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21965 unsigned long addr = *ret_addr;
21966
21967 if (!__kernel_text_address(addr))
21968@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21969 ops->address(data, addr, 1);
21970 frame = frame->next_frame;
21971 ret_addr = &frame->return_address;
21972- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21973+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21974 }
21975
21976 return (unsigned long)frame;
21977@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21978 static void print_trace_address(void *data, unsigned long addr, int reliable)
21979 {
21980 touch_nmi_watchdog();
21981- printk(data);
21982+ printk("%s", (char *)data);
21983 printk_stack_address(addr, reliable);
21984 }
21985
21986@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
21987 EXPORT_SYMBOL_GPL(oops_begin);
21988 NOKPROBE_SYMBOL(oops_begin);
21989
21990+extern void gr_handle_kernel_exploit(void);
21991+
21992 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21993 {
21994 if (regs && kexec_should_crash(current))
21995@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21996 panic("Fatal exception in interrupt");
21997 if (panic_on_oops)
21998 panic("Fatal exception");
21999- do_exit(signr);
22000+
22001+ gr_handle_kernel_exploit();
22002+
22003+ do_group_exit(signr);
22004 }
22005 NOKPROBE_SYMBOL(oops_end);
22006
22007@@ -278,7 +282,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22008 print_modules();
22009 show_regs(regs);
22010 #ifdef CONFIG_X86_32
22011- if (user_mode_vm(regs)) {
22012+ if (user_mode(regs)) {
22013 sp = regs->sp;
22014 ss = regs->ss & 0xffff;
22015 } else {
22016@@ -307,7 +311,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22017 unsigned long flags = oops_begin();
22018 int sig = SIGSEGV;
22019
22020- if (!user_mode_vm(regs))
22021+ if (!user_mode(regs))
22022 report_bug(regs->ip, regs);
22023
22024 if (__die(str, regs, err))
22025diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22026index 5abd4cd..c65733b 100644
22027--- a/arch/x86/kernel/dumpstack_32.c
22028+++ b/arch/x86/kernel/dumpstack_32.c
22029@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22030 bp = stack_frame(task, regs);
22031
22032 for (;;) {
22033- struct thread_info *context;
22034+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22035 void *end_stack;
22036
22037 end_stack = is_hardirq_stack(stack, cpu);
22038 if (!end_stack)
22039 end_stack = is_softirq_stack(stack, cpu);
22040
22041- context = task_thread_info(task);
22042- bp = ops->walk_stack(context, stack, bp, ops, data,
22043+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22044 end_stack, &graph);
22045
22046 /* Stop if not on irq stack */
22047@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22048 int i;
22049
22050 show_regs_print_info(KERN_EMERG);
22051- __show_regs(regs, !user_mode_vm(regs));
22052+ __show_regs(regs, !user_mode(regs));
22053
22054 /*
22055 * When in-kernel, we also print out the stack and code at the
22056 * time of the fault..
22057 */
22058- if (!user_mode_vm(regs)) {
22059+ if (!user_mode(regs)) {
22060 unsigned int code_prologue = code_bytes * 43 / 64;
22061 unsigned int code_len = code_bytes;
22062 unsigned char c;
22063 u8 *ip;
22064+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22065
22066 pr_emerg("Stack:\n");
22067 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22068
22069 pr_emerg("Code:");
22070
22071- ip = (u8 *)regs->ip - code_prologue;
22072+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22073 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22074 /* try starting at IP */
22075- ip = (u8 *)regs->ip;
22076+ ip = (u8 *)regs->ip + cs_base;
22077 code_len = code_len - code_prologue + 1;
22078 }
22079 for (i = 0; i < code_len; i++, ip++) {
22080@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22081 pr_cont(" Bad EIP value.");
22082 break;
22083 }
22084- if (ip == (u8 *)regs->ip)
22085+ if (ip == (u8 *)regs->ip + cs_base)
22086 pr_cont(" <%02x>", c);
22087 else
22088 pr_cont(" %02x", c);
22089@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22090 {
22091 unsigned short ud2;
22092
22093+ ip = ktla_ktva(ip);
22094 if (ip < PAGE_OFFSET)
22095 return 0;
22096 if (probe_kernel_address((unsigned short *)ip, ud2))
22097@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22098
22099 return ud2 == 0x0b0f;
22100 }
22101+
22102+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22103+void pax_check_alloca(unsigned long size)
22104+{
22105+ unsigned long sp = (unsigned long)&sp, stack_left;
22106+
22107+ /* all kernel stacks are of the same size */
22108+ stack_left = sp & (THREAD_SIZE - 1);
22109+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22110+}
22111+EXPORT_SYMBOL(pax_check_alloca);
22112+#endif
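[Editorial aside: pax_check_alloca() above exploits the fact that 32-bit kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned, so masking the stack pointer with THREAD_SIZE-1 yields the bytes remaining below the live frame; the BUG_ON then enforces a 256-byte guard. A stand-alone rendering of the same arithmetic, valid only under that alignment assumption:

#include <assert.h>

#define THREAD_SIZE 8192UL	/* assumed: power of two, stack-aligned */

static void check_alloca(unsigned long size)
{
	unsigned long sp = (unsigned long)&sp;	/* approx. stack pointer */
	unsigned long stack_left = sp & (THREAD_SIZE - 1);

	/* refuse allocations that would eat into the 256-byte guard */
	assert(stack_left >= 256 && size < stack_left - 256);
}
]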
22113diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22114index ff86f19..73eabf4 100644
22115--- a/arch/x86/kernel/dumpstack_64.c
22116+++ b/arch/x86/kernel/dumpstack_64.c
22117@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22118 const struct stacktrace_ops *ops, void *data)
22119 {
22120 const unsigned cpu = get_cpu();
22121- struct thread_info *tinfo;
22122 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22123 unsigned long dummy;
22124 unsigned used = 0;
22125 int graph = 0;
22126 int done = 0;
22127+ void *stack_start;
22128
22129 if (!task)
22130 task = current;
22131@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22132 * current stack address. If the stacks consist of nested
22133 * exceptions
22134 */
22135- tinfo = task_thread_info(task);
22136 while (!done) {
22137 unsigned long *stack_end;
22138 enum stack_type stype;
22139@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22140 if (ops->stack(data, id) < 0)
22141 break;
22142
22143- bp = ops->walk_stack(tinfo, stack, bp, ops,
22144+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22145 data, stack_end, &graph);
22146 ops->stack(data, "<EOE>");
22147 /*
22148@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22149 * second-to-last pointer (index -2 to end) in the
22150 * exception stack:
22151 */
22152+ if ((u16)stack_end[-1] != __KERNEL_DS)
22153+ goto out;
22154 stack = (unsigned long *) stack_end[-2];
22155 done = 0;
22156 break;
22157@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22158
22159 if (ops->stack(data, "IRQ") < 0)
22160 break;
22161- bp = ops->walk_stack(tinfo, stack, bp,
22162+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22163 ops, data, stack_end, &graph);
22164 /*
22165 * We link to the next stack (which would be
22166@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22167 /*
22168 * This handles the process stack:
22169 */
22170- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22171+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22172+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22173+out:
22174 put_cpu();
22175 }
22176 EXPORT_SYMBOL(dump_trace);
22177@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22178 {
22179 unsigned short ud2;
22180
22181- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22182+ if (probe_kernel_address((unsigned short *)ip, ud2))
22183 return 0;
22184
22185 return ud2 == 0x0b0f;
22186 }
22187+
22188+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22189+void pax_check_alloca(unsigned long size)
22190+{
22191+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22192+ unsigned cpu, used;
22193+ char *id;
22194+
22195+ /* check the process stack first */
22196+ stack_start = (unsigned long)task_stack_page(current);
22197+ stack_end = stack_start + THREAD_SIZE;
22198+ if (likely(stack_start <= sp && sp < stack_end)) {
22199+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22200+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22201+ return;
22202+ }
22203+
22204+ cpu = get_cpu();
22205+
22206+ /* check the irq stacks */
22207+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22208+ stack_start = stack_end - IRQ_STACK_SIZE;
22209+ if (stack_start <= sp && sp < stack_end) {
22210+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22211+ put_cpu();
22212+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22213+ return;
22214+ }
22215+
22216+ /* check the exception stacks */
22217+ used = 0;
22218+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22219+ stack_start = stack_end - EXCEPTION_STKSZ;
22220+ if (stack_end && stack_start <= sp && sp < stack_end) {
22221+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22222+ put_cpu();
22223+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22224+ return;
22225+ }
22226+
22227+ put_cpu();
22228+
22229+ /* unknown stack */
22230+ BUG();
22231+}
22232+EXPORT_SYMBOL(pax_check_alloca);
22233+#endif
22234diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22235index 46201de..ebffabf 100644
22236--- a/arch/x86/kernel/e820.c
22237+++ b/arch/x86/kernel/e820.c
22238@@ -794,8 +794,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22239
22240 static void early_panic(char *msg)
22241 {
22242- early_printk(msg);
22243- panic(msg);
22244+ early_printk("%s", msg);
22245+ panic("%s", msg);
22246 }
22247
22248 static int userdef __initdata;
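[Editorial aside: early_panic() above gets the same treatment as mce_panic() and print_trace_address() earlier in this excerpt: a caller-supplied string is moved out of the format-argument slot. If msg ever carried '%' conversions (e.g. from a crafted command line), the printf machinery would interpret them; pinning the format to "%s" makes the text pure data. A user-space illustration:

#include <stdio.h>

static void log_msg(const char *msg)
{
	/* BAD:  printf(msg);  any '%' in msg would be interpreted */
	printf("%s\n", msg);	/* msg is data, never a format */
}

int main(void)
{
	log_msg("100% done");	/* "% d" would otherwise parse as a conversion */
	return 0;
}
]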
22249diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22250index a62536a..8444df4 100644
22251--- a/arch/x86/kernel/early_printk.c
22252+++ b/arch/x86/kernel/early_printk.c
22253@@ -7,6 +7,7 @@
22254 #include <linux/pci_regs.h>
22255 #include <linux/pci_ids.h>
22256 #include <linux/errno.h>
22257+#include <linux/sched.h>
22258 #include <asm/io.h>
22259 #include <asm/processor.h>
22260 #include <asm/fcntl.h>
22261diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22262index 31e2d5b..b31c76d 100644
22263--- a/arch/x86/kernel/entry_32.S
22264+++ b/arch/x86/kernel/entry_32.S
22265@@ -177,13 +177,154 @@
22266 /*CFI_REL_OFFSET gs, PT_GS*/
22267 .endm
22268 .macro SET_KERNEL_GS reg
22269+
22270+#ifdef CONFIG_CC_STACKPROTECTOR
22271 movl $(__KERNEL_STACK_CANARY), \reg
22272+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22273+ movl $(__USER_DS), \reg
22274+#else
22275+ xorl \reg, \reg
22276+#endif
22277+
22278 movl \reg, %gs
22279 .endm
22280
22281 #endif /* CONFIG_X86_32_LAZY_GS */
22282
22283-.macro SAVE_ALL
22284+.macro pax_enter_kernel
22285+#ifdef CONFIG_PAX_KERNEXEC
22286+ call pax_enter_kernel
22287+#endif
22288+.endm
22289+
22290+.macro pax_exit_kernel
22291+#ifdef CONFIG_PAX_KERNEXEC
22292+ call pax_exit_kernel
22293+#endif
22294+.endm
22295+
22296+#ifdef CONFIG_PAX_KERNEXEC
22297+ENTRY(pax_enter_kernel)
22298+#ifdef CONFIG_PARAVIRT
22299+ pushl %eax
22300+ pushl %ecx
22301+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22302+ mov %eax, %esi
22303+#else
22304+ mov %cr0, %esi
22305+#endif
22306+ bts $16, %esi
22307+ jnc 1f
22308+ mov %cs, %esi
22309+ cmp $__KERNEL_CS, %esi
22310+ jz 3f
22311+ ljmp $__KERNEL_CS, $3f
22312+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22313+2:
22314+#ifdef CONFIG_PARAVIRT
22315+ mov %esi, %eax
22316+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22317+#else
22318+ mov %esi, %cr0
22319+#endif
22320+3:
22321+#ifdef CONFIG_PARAVIRT
22322+ popl %ecx
22323+ popl %eax
22324+#endif
22325+ ret
22326+ENDPROC(pax_enter_kernel)
22327+
22328+ENTRY(pax_exit_kernel)
22329+#ifdef CONFIG_PARAVIRT
22330+ pushl %eax
22331+ pushl %ecx
22332+#endif
22333+ mov %cs, %esi
22334+ cmp $__KERNEXEC_KERNEL_CS, %esi
22335+ jnz 2f
22336+#ifdef CONFIG_PARAVIRT
22337+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22338+ mov %eax, %esi
22339+#else
22340+ mov %cr0, %esi
22341+#endif
22342+ btr $16, %esi
22343+ ljmp $__KERNEL_CS, $1f
22344+1:
22345+#ifdef CONFIG_PARAVIRT
22346+ mov %esi, %eax
22347+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22348+#else
22349+ mov %esi, %cr0
22350+#endif
22351+2:
22352+#ifdef CONFIG_PARAVIRT
22353+ popl %ecx
22354+ popl %eax
22355+#endif
22356+ ret
22357+ENDPROC(pax_exit_kernel)
22358+#endif
22359+
22360+ .macro pax_erase_kstack
22361+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22362+ call pax_erase_kstack
22363+#endif
22364+ .endm
22365+
22366+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22367+/*
22368+ * ebp: thread_info
22369+ */
22370+ENTRY(pax_erase_kstack)
22371+ pushl %edi
22372+ pushl %ecx
22373+ pushl %eax
22374+
22375+ mov TI_lowest_stack(%ebp), %edi
22376+ mov $-0xBEEF, %eax
22377+ std
22378+
22379+1: mov %edi, %ecx
22380+ and $THREAD_SIZE_asm - 1, %ecx
22381+ shr $2, %ecx
22382+ repne scasl
22383+ jecxz 2f
22384+
22385+ cmp $2*16, %ecx
22386+ jc 2f
22387+
22388+ mov $2*16, %ecx
22389+ repe scasl
22390+ jecxz 2f
22391+ jne 1b
22392+
22393+2: cld
22394+ or $2*4, %edi
22395+ mov %esp, %ecx
22396+ sub %edi, %ecx
22397+
22398+ cmp $THREAD_SIZE_asm, %ecx
22399+ jb 3f
22400+ ud2
22401+3:
22402+
22403+ shr $2, %ecx
22404+ rep stosl
22405+
22406+ mov TI_task_thread_sp0(%ebp), %edi
22407+ sub $128, %edi
22408+ mov %edi, TI_lowest_stack(%ebp)
22409+
22410+ popl %eax
22411+ popl %ecx
22412+ popl %edi
22413+ ret
22414+ENDPROC(pax_erase_kstack)
22415+#endif
22416+
22417+.macro __SAVE_ALL _DS
22418 cld
22419 PUSH_GS
22420 pushl_cfi %fs
22421@@ -206,7 +347,7 @@
22422 CFI_REL_OFFSET ecx, 0
22423 pushl_cfi %ebx
22424 CFI_REL_OFFSET ebx, 0
22425- movl $(__USER_DS), %edx
22426+ movl $\_DS, %edx
22427 movl %edx, %ds
22428 movl %edx, %es
22429 movl $(__KERNEL_PERCPU), %edx
22430@@ -214,6 +355,15 @@
22431 SET_KERNEL_GS %edx
22432 .endm
22433
22434+.macro SAVE_ALL
22435+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22436+ __SAVE_ALL __KERNEL_DS
22437+ pax_enter_kernel
22438+#else
22439+ __SAVE_ALL __USER_DS
22440+#endif
22441+.endm
22442+
22443 .macro RESTORE_INT_REGS
22444 popl_cfi %ebx
22445 CFI_RESTORE ebx
22446@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22447 popfl_cfi
22448 jmp syscall_exit
22449 CFI_ENDPROC
22450-END(ret_from_fork)
22451+ENDPROC(ret_from_fork)
22452
22453 ENTRY(ret_from_kernel_thread)
22454 CFI_STARTPROC
22455@@ -340,7 +490,15 @@ ret_from_intr:
22456 andl $SEGMENT_RPL_MASK, %eax
22457 #endif
22458 cmpl $USER_RPL, %eax
22459+
22460+#ifdef CONFIG_PAX_KERNEXEC
22461+ jae resume_userspace
22462+
22463+ pax_exit_kernel
22464+ jmp resume_kernel
22465+#else
22466 jb resume_kernel # not returning to v8086 or userspace
22467+#endif
22468
22469 ENTRY(resume_userspace)
22470 LOCKDEP_SYS_EXIT
22471@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22472 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22473 # int/exception return?
22474 jne work_pending
22475- jmp restore_all
22476-END(ret_from_exception)
22477+ jmp restore_all_pax
22478+ENDPROC(ret_from_exception)
22479
22480 #ifdef CONFIG_PREEMPT
22481 ENTRY(resume_kernel)
22482@@ -365,7 +523,7 @@ need_resched:
22483 jz restore_all
22484 call preempt_schedule_irq
22485 jmp need_resched
22486-END(resume_kernel)
22487+ENDPROC(resume_kernel)
22488 #endif
22489 CFI_ENDPROC
22490
22491@@ -395,30 +553,45 @@ sysenter_past_esp:
22492 /*CFI_REL_OFFSET cs, 0*/
22493 /*
22494 * Push current_thread_info()->sysenter_return to the stack.
22495- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22496- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22497 */
22498- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22499+ pushl_cfi $0
22500 CFI_REL_OFFSET eip, 0
22501
22502 pushl_cfi %eax
22503 SAVE_ALL
22504+ GET_THREAD_INFO(%ebp)
22505+ movl TI_sysenter_return(%ebp),%ebp
22506+ movl %ebp,PT_EIP(%esp)
22507 ENABLE_INTERRUPTS(CLBR_NONE)
22508
22509 /*
22510 * Load the potential sixth argument from user stack.
22511 * Careful about security.
22512 */
22513+ movl PT_OLDESP(%esp),%ebp
22514+
22515+#ifdef CONFIG_PAX_MEMORY_UDEREF
22516+ mov PT_OLDSS(%esp),%ds
22517+1: movl %ds:(%ebp),%ebp
22518+ push %ss
22519+ pop %ds
22520+#else
22521 cmpl $__PAGE_OFFSET-3,%ebp
22522 jae syscall_fault
22523 ASM_STAC
22524 1: movl (%ebp),%ebp
22525 ASM_CLAC
22526+#endif
22527+
22528 movl %ebp,PT_EBP(%esp)
22529 _ASM_EXTABLE(1b,syscall_fault)
22530
22531 GET_THREAD_INFO(%ebp)
22532
22533+#ifdef CONFIG_PAX_RANDKSTACK
22534+ pax_erase_kstack
22535+#endif
22536+
22537 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22538 jnz sysenter_audit
22539 sysenter_do_call:
22540@@ -434,12 +607,24 @@ sysenter_after_call:
22541 testl $_TIF_ALLWORK_MASK, %ecx
22542 jne sysexit_audit
22543 sysenter_exit:
22544+
22545+#ifdef CONFIG_PAX_RANDKSTACK
22546+ pushl_cfi %eax
22547+ movl %esp, %eax
22548+ call pax_randomize_kstack
22549+ popl_cfi %eax
22550+#endif
22551+
22552+ pax_erase_kstack
22553+
22554 /* if something modifies registers it must also disable sysexit */
22555 movl PT_EIP(%esp), %edx
22556 movl PT_OLDESP(%esp), %ecx
22557 xorl %ebp,%ebp
22558 TRACE_IRQS_ON
22559 1: mov PT_FS(%esp), %fs
22560+2: mov PT_DS(%esp), %ds
22561+3: mov PT_ES(%esp), %es
22562 PTGS_TO_GS
22563 ENABLE_INTERRUPTS_SYSEXIT
22564
22565@@ -453,6 +638,9 @@ sysenter_audit:
22566 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22567 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22568 call __audit_syscall_entry
22569+
22570+ pax_erase_kstack
22571+
22572 popl_cfi %ecx /* get that remapped edx off the stack */
22573 popl_cfi %ecx /* get that remapped esi off the stack */
22574 movl PT_EAX(%esp),%eax /* reload syscall number */
22575@@ -479,10 +667,16 @@ sysexit_audit:
22576
22577 CFI_ENDPROC
22578 .pushsection .fixup,"ax"
22579-2: movl $0,PT_FS(%esp)
22580+4: movl $0,PT_FS(%esp)
22581+ jmp 1b
22582+5: movl $0,PT_DS(%esp)
22583+ jmp 1b
22584+6: movl $0,PT_ES(%esp)
22585 jmp 1b
22586 .popsection
22587- _ASM_EXTABLE(1b,2b)
22588+ _ASM_EXTABLE(1b,4b)
22589+ _ASM_EXTABLE(2b,5b)
22590+ _ASM_EXTABLE(3b,6b)
22591 PTGS_TO_GS_EX
22592 ENDPROC(ia32_sysenter_target)
22593
22594@@ -493,6 +687,11 @@ ENTRY(system_call)
22595 pushl_cfi %eax # save orig_eax
22596 SAVE_ALL
22597 GET_THREAD_INFO(%ebp)
22598+
22599+#ifdef CONFIG_PAX_RANDKSTACK
22600+ pax_erase_kstack
22601+#endif
22602+
22603 # system call tracing in operation / emulation
22604 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22605 jnz syscall_trace_entry
22606@@ -512,6 +711,15 @@ syscall_exit:
22607 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22608 jne syscall_exit_work
22609
22610+restore_all_pax:
22611+
22612+#ifdef CONFIG_PAX_RANDKSTACK
22613+ movl %esp, %eax
22614+ call pax_randomize_kstack
22615+#endif
22616+
22617+ pax_erase_kstack
22618+
22619 restore_all:
22620 TRACE_IRQS_IRET
22621 restore_all_notrace:
22622@@ -566,14 +774,34 @@ ldt_ss:
22623 * compensating for the offset by changing to the ESPFIX segment with
22624 * a base address that matches for the difference.
22625 */
22626-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22627+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22628 mov %esp, %edx /* load kernel esp */
22629 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22630 mov %dx, %ax /* eax: new kernel esp */
22631 sub %eax, %edx /* offset (low word is 0) */
22632+#ifdef CONFIG_SMP
22633+ movl PER_CPU_VAR(cpu_number), %ebx
22634+ shll $PAGE_SHIFT_asm, %ebx
22635+ addl $cpu_gdt_table, %ebx
22636+#else
22637+ movl $cpu_gdt_table, %ebx
22638+#endif
22639 shr $16, %edx
22640- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22641- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22642+
22643+#ifdef CONFIG_PAX_KERNEXEC
22644+ mov %cr0, %esi
22645+ btr $16, %esi
22646+ mov %esi, %cr0
22647+#endif
22648+
22649+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22650+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22651+
22652+#ifdef CONFIG_PAX_KERNEXEC
22653+ bts $16, %esi
22654+ mov %esi, %cr0
22655+#endif
22656+
22657 pushl_cfi $__ESPFIX_SS
22658 pushl_cfi %eax /* new kernel esp */
22659 /* Disable interrupts, but do not irqtrace this section: we
22660@@ -603,20 +831,18 @@ work_resched:
22661 movl TI_flags(%ebp), %ecx
22662 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22663 # than syscall tracing?
22664- jz restore_all
22665+ jz restore_all_pax
22666 testb $_TIF_NEED_RESCHED, %cl
22667 jnz work_resched
22668
22669 work_notifysig: # deal with pending signals and
22670 # notify-resume requests
22671+ movl %esp, %eax
22672 #ifdef CONFIG_VM86
22673 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22674- movl %esp, %eax
22675 jne work_notifysig_v86 # returning to kernel-space or
22676 # vm86-space
22677 1:
22678-#else
22679- movl %esp, %eax
22680 #endif
22681 TRACE_IRQS_ON
22682 ENABLE_INTERRUPTS(CLBR_NONE)
22683@@ -637,7 +863,7 @@ work_notifysig_v86:
22684 movl %eax, %esp
22685 jmp 1b
22686 #endif
22687-END(work_pending)
22688+ENDPROC(work_pending)
22689
22690 # perform syscall exit tracing
22691 ALIGN
22692@@ -645,11 +871,14 @@ syscall_trace_entry:
22693 movl $-ENOSYS,PT_EAX(%esp)
22694 movl %esp, %eax
22695 call syscall_trace_enter
22696+
22697+ pax_erase_kstack
22698+
22699 /* What it returned is what we'll actually use. */
22700 cmpl $(NR_syscalls), %eax
22701 jnae syscall_call
22702 jmp syscall_exit
22703-END(syscall_trace_entry)
22704+ENDPROC(syscall_trace_entry)
22705
22706 # perform syscall exit tracing
22707 ALIGN
22708@@ -662,26 +891,30 @@ syscall_exit_work:
22709 movl %esp, %eax
22710 call syscall_trace_leave
22711 jmp resume_userspace
22712-END(syscall_exit_work)
22713+ENDPROC(syscall_exit_work)
22714 CFI_ENDPROC
22715
22716 RING0_INT_FRAME # can't unwind into user space anyway
22717 syscall_fault:
22718+#ifdef CONFIG_PAX_MEMORY_UDEREF
22719+ push %ss
22720+ pop %ds
22721+#endif
22722 ASM_CLAC
22723 GET_THREAD_INFO(%ebp)
22724 movl $-EFAULT,PT_EAX(%esp)
22725 jmp resume_userspace
22726-END(syscall_fault)
22727+ENDPROC(syscall_fault)
22728
22729 syscall_badsys:
22730 movl $-ENOSYS,%eax
22731 jmp syscall_after_call
22732-END(syscall_badsys)
22733+ENDPROC(syscall_badsys)
22734
22735 sysenter_badsys:
22736 movl $-ENOSYS,%eax
22737 jmp sysenter_after_call
22738-END(sysenter_badsys)
22739+ENDPROC(sysenter_badsys)
22740 CFI_ENDPROC
22741
22742 .macro FIXUP_ESPFIX_STACK
22743@@ -694,8 +927,15 @@ END(sysenter_badsys)
22744 */
22745 #ifdef CONFIG_X86_ESPFIX32
22746 /* fixup the stack */
22747- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22748- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22749+#ifdef CONFIG_SMP
22750+ movl PER_CPU_VAR(cpu_number), %ebx
22751+ shll $PAGE_SHIFT_asm, %ebx
22752+ addl $cpu_gdt_table, %ebx
22753+#else
22754+ movl $cpu_gdt_table, %ebx
22755+#endif
22756+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22757+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22758 shl $16, %eax
22759 addl %esp, %eax /* the adjusted stack pointer */
22760 pushl_cfi $__KERNEL_DS
22761@@ -751,7 +991,7 @@ vector=vector+1
22762 .endr
22763 2: jmp common_interrupt
22764 .endr
22765-END(irq_entries_start)
22766+ENDPROC(irq_entries_start)
22767
22768 .previous
22769 END(interrupt)
22770@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22771 pushl_cfi $do_coprocessor_error
22772 jmp error_code
22773 CFI_ENDPROC
22774-END(coprocessor_error)
22775+ENDPROC(coprocessor_error)
22776
22777 ENTRY(simd_coprocessor_error)
22778 RING0_INT_FRAME
22779@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22780 .section .altinstructions,"a"
22781 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22782 .previous
22783-.section .altinstr_replacement,"ax"
22784+.section .altinstr_replacement,"a"
22785 663: pushl $do_simd_coprocessor_error
22786 664:
22787 .previous
22788@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22789 #endif
22790 jmp error_code
22791 CFI_ENDPROC
22792-END(simd_coprocessor_error)
22793+ENDPROC(simd_coprocessor_error)
22794
22795 ENTRY(device_not_available)
22796 RING0_INT_FRAME
22797@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22798 pushl_cfi $do_device_not_available
22799 jmp error_code
22800 CFI_ENDPROC
22801-END(device_not_available)
22802+ENDPROC(device_not_available)
22803
22804 #ifdef CONFIG_PARAVIRT
22805 ENTRY(native_iret)
22806 iret
22807 _ASM_EXTABLE(native_iret, iret_exc)
22808-END(native_iret)
22809+ENDPROC(native_iret)
22810
22811 ENTRY(native_irq_enable_sysexit)
22812 sti
22813 sysexit
22814-END(native_irq_enable_sysexit)
22815+ENDPROC(native_irq_enable_sysexit)
22816 #endif
22817
22818 ENTRY(overflow)
22819@@ -860,7 +1100,7 @@ ENTRY(overflow)
22820 pushl_cfi $do_overflow
22821 jmp error_code
22822 CFI_ENDPROC
22823-END(overflow)
22824+ENDPROC(overflow)
22825
22826 ENTRY(bounds)
22827 RING0_INT_FRAME
22828@@ -869,7 +1109,7 @@ ENTRY(bounds)
22829 pushl_cfi $do_bounds
22830 jmp error_code
22831 CFI_ENDPROC
22832-END(bounds)
22833+ENDPROC(bounds)
22834
22835 ENTRY(invalid_op)
22836 RING0_INT_FRAME
22837@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22838 pushl_cfi $do_invalid_op
22839 jmp error_code
22840 CFI_ENDPROC
22841-END(invalid_op)
22842+ENDPROC(invalid_op)
22843
22844 ENTRY(coprocessor_segment_overrun)
22845 RING0_INT_FRAME
22846@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22847 pushl_cfi $do_coprocessor_segment_overrun
22848 jmp error_code
22849 CFI_ENDPROC
22850-END(coprocessor_segment_overrun)
22851+ENDPROC(coprocessor_segment_overrun)
22852
22853 ENTRY(invalid_TSS)
22854 RING0_EC_FRAME
22855@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22856 pushl_cfi $do_invalid_TSS
22857 jmp error_code
22858 CFI_ENDPROC
22859-END(invalid_TSS)
22860+ENDPROC(invalid_TSS)
22861
22862 ENTRY(segment_not_present)
22863 RING0_EC_FRAME
22864@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22865 pushl_cfi $do_segment_not_present
22866 jmp error_code
22867 CFI_ENDPROC
22868-END(segment_not_present)
22869+ENDPROC(segment_not_present)
22870
22871 ENTRY(stack_segment)
22872 RING0_EC_FRAME
22873@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22874 pushl_cfi $do_stack_segment
22875 jmp error_code
22876 CFI_ENDPROC
22877-END(stack_segment)
22878+ENDPROC(stack_segment)
22879
22880 ENTRY(alignment_check)
22881 RING0_EC_FRAME
22882@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22883 pushl_cfi $do_alignment_check
22884 jmp error_code
22885 CFI_ENDPROC
22886-END(alignment_check)
22887+ENDPROC(alignment_check)
22888
22889 ENTRY(divide_error)
22890 RING0_INT_FRAME
22891@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22892 pushl_cfi $do_divide_error
22893 jmp error_code
22894 CFI_ENDPROC
22895-END(divide_error)
22896+ENDPROC(divide_error)
22897
22898 #ifdef CONFIG_X86_MCE
22899 ENTRY(machine_check)
22900@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22901 pushl_cfi machine_check_vector
22902 jmp error_code
22903 CFI_ENDPROC
22904-END(machine_check)
22905+ENDPROC(machine_check)
22906 #endif
22907
22908 ENTRY(spurious_interrupt_bug)
22909@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22910 pushl_cfi $do_spurious_interrupt_bug
22911 jmp error_code
22912 CFI_ENDPROC
22913-END(spurious_interrupt_bug)
22914+ENDPROC(spurious_interrupt_bug)
22915
22916 #ifdef CONFIG_XEN
22917 /* Xen doesn't set %esp to be precisely what the normal sysenter
22918@@ -1057,7 +1297,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22919
22920 ENTRY(mcount)
22921 ret
22922-END(mcount)
22923+ENDPROC(mcount)
22924
22925 ENTRY(ftrace_caller)
22926 pushl %eax
22927@@ -1087,7 +1327,7 @@ ftrace_graph_call:
22928 .globl ftrace_stub
22929 ftrace_stub:
22930 ret
22931-END(ftrace_caller)
22932+ENDPROC(ftrace_caller)
22933
22934 ENTRY(ftrace_regs_caller)
22935 pushf /* push flags before compare (in cs location) */
22936@@ -1185,7 +1425,7 @@ trace:
22937 popl %ecx
22938 popl %eax
22939 jmp ftrace_stub
22940-END(mcount)
22941+ENDPROC(mcount)
22942 #endif /* CONFIG_DYNAMIC_FTRACE */
22943 #endif /* CONFIG_FUNCTION_TRACER */
22944
22945@@ -1203,7 +1443,7 @@ ENTRY(ftrace_graph_caller)
22946 popl %ecx
22947 popl %eax
22948 ret
22949-END(ftrace_graph_caller)
22950+ENDPROC(ftrace_graph_caller)
22951
22952 .globl return_to_handler
22953 return_to_handler:
22954@@ -1264,15 +1504,18 @@ error_code:
22955 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22956 REG_TO_PTGS %ecx
22957 SET_KERNEL_GS %ecx
22958- movl $(__USER_DS), %ecx
22959+ movl $(__KERNEL_DS), %ecx
22960 movl %ecx, %ds
22961 movl %ecx, %es
22962+
22963+ pax_enter_kernel
22964+
22965 TRACE_IRQS_OFF
22966 movl %esp,%eax # pt_regs pointer
22967 call *%edi
22968 jmp ret_from_exception
22969 CFI_ENDPROC
22970-END(page_fault)
22971+ENDPROC(page_fault)
22972
22973 /*
22974 * Debug traps and NMI can happen at the one SYSENTER instruction
22975@@ -1315,7 +1558,7 @@ debug_stack_correct:
22976 call do_debug
22977 jmp ret_from_exception
22978 CFI_ENDPROC
22979-END(debug)
22980+ENDPROC(debug)
22981
22982 /*
22983 * NMI is doubly nasty. It can happen _while_ we're handling
22984@@ -1355,6 +1598,9 @@ nmi_stack_correct:
22985 xorl %edx,%edx # zero error code
22986 movl %esp,%eax # pt_regs pointer
22987 call do_nmi
22988+
22989+ pax_exit_kernel
22990+
22991 jmp restore_all_notrace
22992 CFI_ENDPROC
22993
22994@@ -1392,13 +1638,16 @@ nmi_espfix_stack:
22995 FIXUP_ESPFIX_STACK # %eax == %esp
22996 xorl %edx,%edx # zero error code
22997 call do_nmi
22998+
22999+ pax_exit_kernel
23000+
23001 RESTORE_REGS
23002 lss 12+4(%esp), %esp # back to espfix stack
23003 CFI_ADJUST_CFA_OFFSET -24
23004 jmp irq_return
23005 #endif
23006 CFI_ENDPROC
23007-END(nmi)
23008+ENDPROC(nmi)
23009
23010 ENTRY(int3)
23011 RING0_INT_FRAME
23012@@ -1411,14 +1660,14 @@ ENTRY(int3)
23013 call do_int3
23014 jmp ret_from_exception
23015 CFI_ENDPROC
23016-END(int3)
23017+ENDPROC(int3)
23018
23019 ENTRY(general_protection)
23020 RING0_EC_FRAME
23021 pushl_cfi $do_general_protection
23022 jmp error_code
23023 CFI_ENDPROC
23024-END(general_protection)
23025+ENDPROC(general_protection)
23026
23027 #ifdef CONFIG_KVM_GUEST
23028 ENTRY(async_page_fault)
23029@@ -1427,6 +1676,6 @@ ENTRY(async_page_fault)
23030 pushl_cfi $do_async_page_fault
23031 jmp error_code
23032 CFI_ENDPROC
23033-END(async_page_fault)
23034+ENDPROC(async_page_fault)
23035 #endif
23036
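[Editorial aside: pax_erase_kstack above (and its 64-bit twin just below in entry_64.S) walks the kernel stack between the recorded low-water mark (TI_lowest_stack) and the current stack pointer and refills it with a poison value, so data left by deep earlier call chains cannot leak later. A simplified C rendering of the fill step; it skips the poison-run scan the assembly performs to find where the last erase stopped:

static void erase_kstack(unsigned long *lowest, unsigned long *sp)
{
	const unsigned long poison = (unsigned long)-0xBEEFL;	/* as in the asm */
	unsigned long *p;

	/* refill everything between the low-water mark and the live frame */
	for (p = lowest; p < sp; p++)
		*p = poison;
}
]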
23037diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23038index f0095a7..ec77893 100644
23039--- a/arch/x86/kernel/entry_64.S
23040+++ b/arch/x86/kernel/entry_64.S
23041@@ -59,6 +59,8 @@
23042 #include <asm/smap.h>
23043 #include <asm/pgtable_types.h>
23044 #include <linux/err.h>
23045+#include <asm/pgtable.h>
23046+#include <asm/alternative-asm.h>
23047
23048 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23049 #include <linux/elf-em.h>
23050@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23051 ENDPROC(native_usergs_sysret64)
23052 #endif /* CONFIG_PARAVIRT */
23053
23054+ .macro ljmpq sel, off
23055+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23056+ .byte 0x48; ljmp *1234f(%rip)
23057+ .pushsection .rodata
23058+ .align 16
23059+ 1234: .quad \off; .word \sel
23060+ .popsection
23061+#else
23062+ pushq $\sel
23063+ pushq $\off
23064+ lretq
23065+#endif
23066+ .endm
23067+
23068+ .macro pax_enter_kernel
23069+ pax_set_fptr_mask
23070+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23071+ call pax_enter_kernel
23072+#endif
23073+ .endm
23074+
23075+ .macro pax_exit_kernel
23076+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23077+ call pax_exit_kernel
23078+#endif
23079+
23080+ .endm
23081+
23082+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23083+ENTRY(pax_enter_kernel)
23084+ pushq %rdi
23085+
23086+#ifdef CONFIG_PARAVIRT
23087+ PV_SAVE_REGS(CLBR_RDI)
23088+#endif
23089+
23090+#ifdef CONFIG_PAX_KERNEXEC
23091+ GET_CR0_INTO_RDI
23092+ bts $16,%rdi
23093+ jnc 3f
23094+ mov %cs,%edi
23095+ cmp $__KERNEL_CS,%edi
23096+ jnz 2f
23097+1:
23098+#endif
23099+
23100+#ifdef CONFIG_PAX_MEMORY_UDEREF
23101+ 661: jmp 111f
23102+ .pushsection .altinstr_replacement, "a"
23103+ 662: ASM_NOP2
23104+ .popsection
23105+ .pushsection .altinstructions, "a"
23106+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23107+ .popsection
23108+ GET_CR3_INTO_RDI
23109+ cmp $0,%dil
23110+ jnz 112f
23111+ mov $__KERNEL_DS,%edi
23112+ mov %edi,%ss
23113+ jmp 111f
23114+112: cmp $1,%dil
23115+ jz 113f
23116+ ud2
23117+113: sub $4097,%rdi
23118+ bts $63,%rdi
23119+ SET_RDI_INTO_CR3
23120+ mov $__UDEREF_KERNEL_DS,%edi
23121+ mov %edi,%ss
23122+111:
23123+#endif
23124+
23125+#ifdef CONFIG_PARAVIRT
23126+ PV_RESTORE_REGS(CLBR_RDI)
23127+#endif
23128+
23129+ popq %rdi
23130+ pax_force_retaddr
23131+ retq
23132+
23133+#ifdef CONFIG_PAX_KERNEXEC
23134+2: ljmpq __KERNEL_CS,1b
23135+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23136+4: SET_RDI_INTO_CR0
23137+ jmp 1b
23138+#endif
23139+ENDPROC(pax_enter_kernel)
23140+
23141+ENTRY(pax_exit_kernel)
23142+ pushq %rdi
23143+
23144+#ifdef CONFIG_PARAVIRT
23145+ PV_SAVE_REGS(CLBR_RDI)
23146+#endif
23147+
23148+#ifdef CONFIG_PAX_KERNEXEC
23149+ mov %cs,%rdi
23150+ cmp $__KERNEXEC_KERNEL_CS,%edi
23151+ jz 2f
23152+ GET_CR0_INTO_RDI
23153+ bts $16,%rdi
23154+ jnc 4f
23155+1:
23156+#endif
23157+
23158+#ifdef CONFIG_PAX_MEMORY_UDEREF
23159+ 661: jmp 111f
23160+ .pushsection .altinstr_replacement, "a"
23161+ 662: ASM_NOP2
23162+ .popsection
23163+ .pushsection .altinstructions, "a"
23164+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23165+ .popsection
23166+ mov %ss,%edi
23167+ cmp $__UDEREF_KERNEL_DS,%edi
23168+ jnz 111f
23169+ GET_CR3_INTO_RDI
23170+ cmp $0,%dil
23171+ jz 112f
23172+ ud2
23173+112: add $4097,%rdi
23174+ bts $63,%rdi
23175+ SET_RDI_INTO_CR3
23176+ mov $__KERNEL_DS,%edi
23177+ mov %edi,%ss
23178+111:
23179+#endif
23180+
23181+#ifdef CONFIG_PARAVIRT
23182+ PV_RESTORE_REGS(CLBR_RDI);
23183+#endif
23184+
23185+ popq %rdi
23186+ pax_force_retaddr
23187+ retq
23188+
23189+#ifdef CONFIG_PAX_KERNEXEC
23190+2: GET_CR0_INTO_RDI
23191+ btr $16,%rdi
23192+ jnc 4f
23193+ ljmpq __KERNEL_CS,3f
23194+3: SET_RDI_INTO_CR0
23195+ jmp 1b
23196+4: ud2
23197+ jmp 4b
23198+#endif
23199+ENDPROC(pax_exit_kernel)
23200+#endif
23201+
23202+ .macro pax_enter_kernel_user
23203+ pax_set_fptr_mask
23204+#ifdef CONFIG_PAX_MEMORY_UDEREF
23205+ call pax_enter_kernel_user
23206+#endif
23207+ .endm
23208+
23209+ .macro pax_exit_kernel_user
23210+#ifdef CONFIG_PAX_MEMORY_UDEREF
23211+ call pax_exit_kernel_user
23212+#endif
23213+#ifdef CONFIG_PAX_RANDKSTACK
23214+ pushq %rax
23215+ pushq %r11
23216+ call pax_randomize_kstack
23217+ popq %r11
23218+ popq %rax
23219+#endif
23220+ .endm
23221+
23222+#ifdef CONFIG_PAX_MEMORY_UDEREF
23223+ENTRY(pax_enter_kernel_user)
23224+ pushq %rdi
23225+ pushq %rbx
23226+
23227+#ifdef CONFIG_PARAVIRT
23228+ PV_SAVE_REGS(CLBR_RDI)
23229+#endif
23230+
23231+ 661: jmp 111f
23232+ .pushsection .altinstr_replacement, "a"
23233+ 662: ASM_NOP2
23234+ .popsection
23235+ .pushsection .altinstructions, "a"
23236+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23237+ .popsection
23238+ GET_CR3_INTO_RDI
23239+ cmp $1,%dil
23240+ jnz 4f
23241+ sub $4097,%rdi
23242+ bts $63,%rdi
23243+ SET_RDI_INTO_CR3
23244+ jmp 3f
23245+111:
23246+
23247+ GET_CR3_INTO_RDI
23248+ mov %rdi,%rbx
23249+ add $__START_KERNEL_map,%rbx
23250+ sub phys_base(%rip),%rbx
23251+
23252+#ifdef CONFIG_PARAVIRT
23253+ cmpl $0, pv_info+PARAVIRT_enabled
23254+ jz 1f
23255+ pushq %rdi
23256+ i = 0
23257+ .rept USER_PGD_PTRS
23258+ mov i*8(%rbx),%rsi
23259+ mov $0,%sil
23260+ lea i*8(%rbx),%rdi
23261+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23262+ i = i + 1
23263+ .endr
23264+ popq %rdi
23265+ jmp 2f
23266+1:
23267+#endif
23268+
23269+ i = 0
23270+ .rept USER_PGD_PTRS
23271+ movb $0,i*8(%rbx)
23272+ i = i + 1
23273+ .endr
23274+
23275+2: SET_RDI_INTO_CR3
23276+
23277+#ifdef CONFIG_PAX_KERNEXEC
23278+ GET_CR0_INTO_RDI
23279+ bts $16,%rdi
23280+ SET_RDI_INTO_CR0
23281+#endif
23282+
23283+3:
23284+
23285+#ifdef CONFIG_PARAVIRT
23286+ PV_RESTORE_REGS(CLBR_RDI)
23287+#endif
23288+
23289+ popq %rbx
23290+ popq %rdi
23291+ pax_force_retaddr
23292+ retq
23293+4: ud2
23294+ENDPROC(pax_enter_kernel_user)
23295+
23296+ENTRY(pax_exit_kernel_user)
23297+ pushq %rdi
23298+ pushq %rbx
23299+
23300+#ifdef CONFIG_PARAVIRT
23301+ PV_SAVE_REGS(CLBR_RDI)
23302+#endif
23303+
23304+ GET_CR3_INTO_RDI
23305+ 661: jmp 1f
23306+ .pushsection .altinstr_replacement, "a"
23307+ 662: ASM_NOP2
23308+ .popsection
23309+ .pushsection .altinstructions, "a"
23310+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23311+ .popsection
23312+ cmp $0,%dil
23313+ jnz 3f
23314+ add $4097,%rdi
23315+ bts $63,%rdi
23316+ SET_RDI_INTO_CR3
23317+ jmp 2f
23318+1:
23319+
23320+ mov %rdi,%rbx
23321+
23322+#ifdef CONFIG_PAX_KERNEXEC
23323+ GET_CR0_INTO_RDI
23324+ btr $16,%rdi
23325+ jnc 3f
23326+ SET_RDI_INTO_CR0
23327+#endif
23328+
23329+ add $__START_KERNEL_map,%rbx
23330+ sub phys_base(%rip),%rbx
23331+
23332+#ifdef CONFIG_PARAVIRT
23333+ cmpl $0, pv_info+PARAVIRT_enabled
23334+ jz 1f
23335+ i = 0
23336+ .rept USER_PGD_PTRS
23337+ mov i*8(%rbx),%rsi
23338+ mov $0x67,%sil
23339+ lea i*8(%rbx),%rdi
23340+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23341+ i = i + 1
23342+ .endr
23343+ jmp 2f
23344+1:
23345+#endif
23346+
23347+ i = 0
23348+ .rept USER_PGD_PTRS
23349+ movb $0x67,i*8(%rbx)
23350+ i = i + 1
23351+ .endr
23352+2:
23353+
23354+#ifdef CONFIG_PARAVIRT
23355+ PV_RESTORE_REGS(CLBR_RDI)
23356+#endif
23357+
23358+ popq %rbx
23359+ popq %rdi
23360+ pax_force_retaddr
23361+ retq
23362+3: ud2
23363+ENDPROC(pax_exit_kernel_user)
23364+#endif
23365+
23366+ .macro pax_enter_kernel_nmi
23367+ pax_set_fptr_mask
23368+
23369+#ifdef CONFIG_PAX_KERNEXEC
23370+ GET_CR0_INTO_RDI
23371+ bts $16,%rdi
23372+ jc 110f
23373+ SET_RDI_INTO_CR0
23374+ or $2,%ebx
23375+110:
23376+#endif
23377+
23378+#ifdef CONFIG_PAX_MEMORY_UDEREF
23379+ 661: jmp 111f
23380+ .pushsection .altinstr_replacement, "a"
23381+ 662: ASM_NOP2
23382+ .popsection
23383+ .pushsection .altinstructions, "a"
23384+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23385+ .popsection
23386+ GET_CR3_INTO_RDI
23387+ cmp $0,%dil
23388+ jz 111f
23389+ sub $4097,%rdi
23390+ or $4,%ebx
23391+ bts $63,%rdi
23392+ SET_RDI_INTO_CR3
23393+ mov $__UDEREF_KERNEL_DS,%edi
23394+ mov %edi,%ss
23395+111:
23396+#endif
23397+ .endm
23398+
23399+ .macro pax_exit_kernel_nmi
23400+#ifdef CONFIG_PAX_KERNEXEC
23401+ btr $1,%ebx
23402+ jnc 110f
23403+ GET_CR0_INTO_RDI
23404+ btr $16,%rdi
23405+ SET_RDI_INTO_CR0
23406+110:
23407+#endif
23408+
23409+#ifdef CONFIG_PAX_MEMORY_UDEREF
23410+ btr $2,%ebx
23411+ jnc 111f
23412+ GET_CR3_INTO_RDI
23413+ add $4097,%rdi
23414+ bts $63,%rdi
23415+ SET_RDI_INTO_CR3
23416+ mov $__KERNEL_DS,%edi
23417+ mov %edi,%ss
23418+111:
23419+#endif
23420+ .endm
23421+
23422+ .macro pax_erase_kstack
23423+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23424+ call pax_erase_kstack
23425+#endif
23426+ .endm
23427+
23428+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23429+ENTRY(pax_erase_kstack)
23430+ pushq %rdi
23431+ pushq %rcx
23432+ pushq %rax
23433+ pushq %r11
23434+
23435+ GET_THREAD_INFO(%r11)
23436+ mov TI_lowest_stack(%r11), %rdi
23437+ mov $-0xBEEF, %rax
23438+ std
23439+
23440+1: mov %edi, %ecx
23441+ and $THREAD_SIZE_asm - 1, %ecx
23442+ shr $3, %ecx
23443+ repne scasq
23444+ jecxz 2f
23445+
23446+ cmp $2*8, %ecx
23447+ jc 2f
23448+
23449+ mov $2*8, %ecx
23450+ repe scasq
23451+ jecxz 2f
23452+ jne 1b
23453+
23454+2: cld
23455+ or $2*8, %rdi
23456+ mov %esp, %ecx
23457+ sub %edi, %ecx
23458+
23459+ cmp $THREAD_SIZE_asm, %rcx
23460+ jb 3f
23461+ ud2
23462+3:
23463+
23464+ shr $3, %ecx
23465+ rep stosq
23466+
23467+ mov TI_task_thread_sp0(%r11), %rdi
23468+ sub $256, %rdi
23469+ mov %rdi, TI_lowest_stack(%r11)
23470+
23471+ popq %r11
23472+ popq %rax
23473+ popq %rcx
23474+ popq %rdi
23475+ pax_force_retaddr
23476+ ret
23477+ENDPROC(pax_erase_kstack)
23478+#endif
23479
23480 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23481 #ifdef CONFIG_TRACE_IRQFLAGS
23482@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23483 .endm
23484
23485 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23486- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23487+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23488 jnc 1f
23489 TRACE_IRQS_ON_DEBUG
23490 1:
23491@@ -243,9 +670,52 @@ ENTRY(save_paranoid)
23492 js 1f /* negative -> in kernel */
23493 SWAPGS
23494 xorl %ebx,%ebx
23495-1: ret
23496+1:
23497+#ifdef CONFIG_PAX_MEMORY_UDEREF
23498+ testb $3, CS+8(%rsp)
23499+ jnz 1f
23500+ pax_enter_kernel
23501+ jmp 2f
23502+1: pax_enter_kernel_user
23503+2:
23504+#else
23505+ pax_enter_kernel
23506+#endif
23507+ pax_force_retaddr
23508+ ret
23509 CFI_ENDPROC
23510-END(save_paranoid)
23511+ENDPROC(save_paranoid)
23512+
23513+ENTRY(save_paranoid_nmi)
23514+ XCPT_FRAME 1 RDI+8
23515+ cld
23516+ movq_cfi rdi, RDI+8
23517+ movq_cfi rsi, RSI+8
23518+ movq_cfi rdx, RDX+8
23519+ movq_cfi rcx, RCX+8
23520+ movq_cfi rax, RAX+8
23521+ movq_cfi r8, R8+8
23522+ movq_cfi r9, R9+8
23523+ movq_cfi r10, R10+8
23524+ movq_cfi r11, R11+8
23525+ movq_cfi rbx, RBX+8
23526+ movq_cfi rbp, RBP+8
23527+ movq_cfi r12, R12+8
23528+ movq_cfi r13, R13+8
23529+ movq_cfi r14, R14+8
23530+ movq_cfi r15, R15+8
23531+ movl $1,%ebx
23532+ movl $MSR_GS_BASE,%ecx
23533+ rdmsr
23534+ testl %edx,%edx
23535+ js 1f /* negative -> in kernel */
23536+ SWAPGS
23537+ xorl %ebx,%ebx
23538+1: pax_enter_kernel_nmi
23539+ pax_force_retaddr
23540+ ret
23541+ CFI_ENDPROC
23542+ENDPROC(save_paranoid_nmi)
23543
23544 /*
23545 * A newly forked process directly context switches into this address.
23546@@ -266,7 +736,7 @@ ENTRY(ret_from_fork)
23547
23548 RESTORE_REST
23549
23550- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23551+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23552 jz 1f
23553
23554 /*
23555@@ -279,15 +749,13 @@ ENTRY(ret_from_fork)
23556 jmp int_ret_from_sys_call
23557
23558 1:
23559- subq $REST_SKIP, %rsp # leave space for volatiles
23560- CFI_ADJUST_CFA_OFFSET REST_SKIP
23561 movq %rbp, %rdi
23562 call *%rbx
23563 movl $0, RAX(%rsp)
23564 RESTORE_REST
23565 jmp int_ret_from_sys_call
23566 CFI_ENDPROC
23567-END(ret_from_fork)
23568+ENDPROC(ret_from_fork)
23569
23570 /*
23571 * System call entry. Up to 6 arguments in registers are supported.
23572@@ -324,7 +792,7 @@ END(ret_from_fork)
23573 ENTRY(system_call)
23574 CFI_STARTPROC simple
23575 CFI_SIGNAL_FRAME
23576- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23577+ CFI_DEF_CFA rsp,0
23578 CFI_REGISTER rip,rcx
23579 /*CFI_REGISTER rflags,r11*/
23580 SWAPGS_UNSAFE_STACK
23581@@ -337,16 +805,23 @@ GLOBAL(system_call_after_swapgs)
23582
23583 movq %rsp,PER_CPU_VAR(old_rsp)
23584 movq PER_CPU_VAR(kernel_stack),%rsp
23585+ SAVE_ARGS 8*6, 0, rax_enosys=1
23586+ pax_enter_kernel_user
23587+
23588+#ifdef CONFIG_PAX_RANDKSTACK
23589+ pax_erase_kstack
23590+#endif
23591+
23592 /*
23593 * No need to follow this irqs off/on section - it's straight
23594 * and short:
23595 */
23596 ENABLE_INTERRUPTS(CLBR_NONE)
23597- SAVE_ARGS 8, 0, rax_enosys=1
23598 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23599 movq %rcx,RIP-ARGOFFSET(%rsp)
23600 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23601- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23602+ GET_THREAD_INFO(%rcx)
23603+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23604 jnz tracesys
23605 system_call_fastpath:
23606 #if __SYSCALL_MASK == ~0
23607@@ -376,10 +851,13 @@ ret_from_sys_call:
23608 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23609 * very bad.
23610 */
23611- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23612+ GET_THREAD_INFO(%rcx)
23613+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23614 jnz int_ret_from_sys_call_fixup /* Go to the slow path */
23615
23616 CFI_REMEMBER_STATE
23617+ pax_exit_kernel_user
23618+ pax_erase_kstack
23619 /*
23620 * sysretq will re-enable interrupts:
23621 */
23622@@ -399,12 +877,15 @@ int_ret_from_sys_call_fixup:
23623
23624 /* Do syscall tracing */
23625 tracesys:
23626- leaq -REST_SKIP(%rsp), %rdi
23627+ movq %rsp, %rdi
23628 movq $AUDIT_ARCH_X86_64, %rsi
23629 call syscall_trace_enter_phase1
23630 test %rax, %rax
23631 jnz tracesys_phase2 /* if needed, run the slow path */
23632- LOAD_ARGS 0 /* else restore clobbered regs */
23633+
23634+ pax_erase_kstack
23635+
23636+ LOAD_ARGS /* else restore clobbered regs */
23637 jmp system_call_fastpath /* and return to the fast path */
23638
23639 tracesys_phase2:
23640@@ -415,12 +896,14 @@ tracesys_phase2:
23641 movq %rax,%rdx
23642 call syscall_trace_enter_phase2
23643
23644+ pax_erase_kstack
23645+
23646 /*
23647 * Reload arg registers from stack in case ptrace changed them.
23648 * We don't reload %rax because syscall_trace_enter_phase2() returned
23649 * the value it wants us to use in the table lookup.
23650 */
23651- LOAD_ARGS ARGOFFSET, 1
23652+ LOAD_ARGS 1
23653 RESTORE_REST
23654 #if __SYSCALL_MASK == ~0
23655 cmpq $__NR_syscall_max,%rax
23656@@ -451,7 +934,9 @@ GLOBAL(int_with_check)
23657 andl %edi,%edx
23658 jnz int_careful
23659 andl $~TS_COMPAT,TI_status(%rcx)
23660- jmp retint_swapgs
23661+ pax_exit_kernel_user
23662+ pax_erase_kstack
23663+ jmp retint_swapgs_pax
23664
23665 /* Either reschedule or signal or syscall exit tracking needed. */
23666 /* First do a reschedule test. */
23667@@ -497,7 +982,7 @@ int_restore_rest:
23668 TRACE_IRQS_OFF
23669 jmp int_with_check
23670 CFI_ENDPROC
23671-END(system_call)
23672+ENDPROC(system_call)
23673
23674 .macro FORK_LIKE func
23675 ENTRY(stub_\func)
23676@@ -510,9 +995,10 @@ ENTRY(stub_\func)
23677 DEFAULT_FRAME 0 8 /* offset 8: return address */
23678 call sys_\func
23679 RESTORE_TOP_OF_STACK %r11, 8
23680- ret $REST_SKIP /* pop extended registers */
23681+ pax_force_retaddr
23682+ ret
23683 CFI_ENDPROC
23684-END(stub_\func)
23685+ENDPROC(stub_\func)
23686 .endm
23687
23688 .macro FIXED_FRAME label,func
23689@@ -522,9 +1008,10 @@ ENTRY(\label)
23690 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23691 call \func
23692 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23693+ pax_force_retaddr
23694 ret
23695 CFI_ENDPROC
23696-END(\label)
23697+ENDPROC(\label)
23698 .endm
23699
23700 FORK_LIKE clone
23701@@ -543,7 +1030,7 @@ ENTRY(stub_execve)
23702 RESTORE_REST
23703 jmp int_ret_from_sys_call
23704 CFI_ENDPROC
23705-END(stub_execve)
23706+ENDPROC(stub_execve)
23707
23708 ENTRY(stub_execveat)
23709 CFI_STARTPROC
23710@@ -557,7 +1044,7 @@ ENTRY(stub_execveat)
23711 RESTORE_REST
23712 jmp int_ret_from_sys_call
23713 CFI_ENDPROC
23714-END(stub_execveat)
23715+ENDPROC(stub_execveat)
23716
23717 /*
23718 * sigreturn is special because it needs to restore all registers on return.
23719@@ -574,7 +1061,7 @@ ENTRY(stub_rt_sigreturn)
23720 RESTORE_REST
23721 jmp int_ret_from_sys_call
23722 CFI_ENDPROC
23723-END(stub_rt_sigreturn)
23724+ENDPROC(stub_rt_sigreturn)
23725
23726 #ifdef CONFIG_X86_X32_ABI
23727 ENTRY(stub_x32_rt_sigreturn)
23728@@ -588,7 +1075,7 @@ ENTRY(stub_x32_rt_sigreturn)
23729 RESTORE_REST
23730 jmp int_ret_from_sys_call
23731 CFI_ENDPROC
23732-END(stub_x32_rt_sigreturn)
23733+ENDPROC(stub_x32_rt_sigreturn)
23734
23735 ENTRY(stub_x32_execve)
23736 CFI_STARTPROC
23737@@ -602,7 +1089,7 @@ ENTRY(stub_x32_execve)
23738 RESTORE_REST
23739 jmp int_ret_from_sys_call
23740 CFI_ENDPROC
23741-END(stub_x32_execve)
23742+ENDPROC(stub_x32_execve)
23743
23744 ENTRY(stub_x32_execveat)
23745 CFI_STARTPROC
23746@@ -616,7 +1103,7 @@ ENTRY(stub_x32_execveat)
23747 RESTORE_REST
23748 jmp int_ret_from_sys_call
23749 CFI_ENDPROC
23750-END(stub_x32_execveat)
23751+ENDPROC(stub_x32_execveat)
23752
23753 #endif
23754
23755@@ -653,7 +1140,7 @@ vector=vector+1
23756 2: jmp common_interrupt
23757 .endr
23758 CFI_ENDPROC
23759-END(irq_entries_start)
23760+ENDPROC(irq_entries_start)
23761
23762 .previous
23763 END(interrupt)
23764@@ -670,28 +1157,29 @@ END(interrupt)
23765 /* 0(%rsp): ~(interrupt number) */
23766 .macro interrupt func
23767 /* reserve pt_regs for scratch regs and rbp */
23768- subq $ORIG_RAX-RBP, %rsp
23769- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23770+ subq $ORIG_RAX, %rsp
23771+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23772 cld
23773- /* start from rbp in pt_regs and jump over */
23774- movq_cfi rdi, (RDI-RBP)
23775- movq_cfi rsi, (RSI-RBP)
23776- movq_cfi rdx, (RDX-RBP)
23777- movq_cfi rcx, (RCX-RBP)
23778- movq_cfi rax, (RAX-RBP)
23779- movq_cfi r8, (R8-RBP)
23780- movq_cfi r9, (R9-RBP)
23781- movq_cfi r10, (R10-RBP)
23782- movq_cfi r11, (R11-RBP)
23783+ /* start from r15 in pt_regs and jump over */
23784+ movq_cfi rdi, RDI
23785+ movq_cfi rsi, RSI
23786+ movq_cfi rdx, RDX
23787+ movq_cfi rcx, RCX
23788+ movq_cfi rax, RAX
23789+ movq_cfi r8, R8
23790+ movq_cfi r9, R9
23791+ movq_cfi r10, R10
23792+ movq_cfi r11, R11
23793+ movq_cfi r12, R12
23794
23795 /* Save rbp so that we can unwind from get_irq_regs() */
23796- movq_cfi rbp, 0
23797+ movq_cfi rbp, RBP
23798
23799 /* Save previous stack value */
23800 movq %rsp, %rsi
23801
23802- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23803- testl $3, CS-RBP(%rsi)
23804+ movq %rsp,%rdi /* arg1 for handler */
23805+ testb $3, CS(%rsi)
23806 je 1f
23807 SWAPGS
23808 /*
23809@@ -711,6 +1199,18 @@ END(interrupt)
23810 0x06 /* DW_OP_deref */, \
23811 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23812 0x22 /* DW_OP_plus */
23813+
23814+#ifdef CONFIG_PAX_MEMORY_UDEREF
23815+ testb $3, CS(%rdi)
23816+ jnz 1f
23817+ pax_enter_kernel
23818+ jmp 2f
23819+1: pax_enter_kernel_user
23820+2:
23821+#else
23822+ pax_enter_kernel
23823+#endif
23824+
23825 /* We entered an interrupt context - irqs are off: */
23826 TRACE_IRQS_OFF
23827
23828@@ -735,14 +1235,14 @@ ret_from_intr:
23829
23830 /* Restore saved previous stack */
23831 popq %rsi
23832- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23833- leaq ARGOFFSET-RBP(%rsi), %rsp
23834+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23835+ movq %rsi, %rsp
23836 CFI_DEF_CFA_REGISTER rsp
23837- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23838+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23839
23840 exit_intr:
23841 GET_THREAD_INFO(%rcx)
23842- testl $3,CS-ARGOFFSET(%rsp)
23843+ testb $3,CS-ARGOFFSET(%rsp)
23844 je retint_kernel
23845
23846 /* Interrupt came from user space */
23847@@ -764,14 +1264,16 @@ retint_swapgs: /* return to user-space */
23848 * The iretq could re-enable interrupts:
23849 */
23850 DISABLE_INTERRUPTS(CLBR_ANY)
23851+ pax_exit_kernel_user
23852+retint_swapgs_pax:
23853 TRACE_IRQS_IRETQ
23854
23855 /*
23856 * Try to use SYSRET instead of IRET if we're returning to
23857 * a completely clean 64-bit userspace context.
23858 */
23859- movq (RCX-R11)(%rsp), %rcx
23860- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
23861+ movq (RCX-ARGOFFSET)(%rsp), %rcx
23862+ cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
23863 jne opportunistic_sysret_failed
23864
23865 /*
23866@@ -792,7 +1294,7 @@ retint_swapgs: /* return to user-space */
23867 shr $__VIRTUAL_MASK_SHIFT, %rcx
23868 jnz opportunistic_sysret_failed
23869
23870- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
23871+ cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
23872 jne opportunistic_sysret_failed
23873
23874 movq (R11-ARGOFFSET)(%rsp), %r11
23875@@ -838,6 +1340,27 @@ opportunistic_sysret_failed:
23876
23877 retint_restore_args: /* return to kernel space */
23878 DISABLE_INTERRUPTS(CLBR_ANY)
23879+ pax_exit_kernel
23880+
23881+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23882+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23883+ * namely calling EFI runtime services with a phys mapping. We
23884+ * start off with NOPs and patch in the real instrumentation
23885+ * (BTS/OR) before starting any userland process, even before
23886+ * starting up the APs.
23887+ */
23888+ .pushsection .altinstr_replacement, "a"
23889+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23890+ 602:
23891+ .popsection
23892+ 603: .fill 602b-601b, 1, 0x90
23893+ .pushsection .altinstructions, "a"
23894+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23895+ .popsection
23896+#else
23897+ pax_force_retaddr (RIP-ARGOFFSET)
23898+#endif
23899+
23900 /*
23901 * The iretq could re-enable interrupts:
23902 */
23903@@ -875,15 +1398,15 @@ native_irq_return_ldt:
23904 SWAPGS
23905 movq PER_CPU_VAR(espfix_waddr),%rdi
23906 movq %rax,(0*8)(%rdi) /* RAX */
23907- movq (2*8)(%rsp),%rax /* RIP */
23908+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23909 movq %rax,(1*8)(%rdi)
23910- movq (3*8)(%rsp),%rax /* CS */
23911+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23912 movq %rax,(2*8)(%rdi)
23913- movq (4*8)(%rsp),%rax /* RFLAGS */
23914+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23915 movq %rax,(3*8)(%rdi)
23916- movq (6*8)(%rsp),%rax /* SS */
23917+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23918 movq %rax,(5*8)(%rdi)
23919- movq (5*8)(%rsp),%rax /* RSP */
23920+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23921 movq %rax,(4*8)(%rdi)
23922 andl $0xffff0000,%eax
23923 popq_cfi %rdi
23924@@ -937,7 +1460,7 @@ ENTRY(retint_kernel)
23925 jmp exit_intr
23926 #endif
23927 CFI_ENDPROC
23928-END(common_interrupt)
23929+ENDPROC(common_interrupt)
23930
23931 /*
23932 * APIC interrupts.
23933@@ -951,7 +1474,7 @@ ENTRY(\sym)
23934 interrupt \do_sym
23935 jmp ret_from_intr
23936 CFI_ENDPROC
23937-END(\sym)
23938+ENDPROC(\sym)
23939 .endm
23940
23941 #ifdef CONFIG_TRACING
23942@@ -1024,7 +1547,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23943 /*
23944 * Exception entry points.
23945 */
23946-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23947+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23948
23949 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23950 ENTRY(\sym)
23951@@ -1080,6 +1603,12 @@ ENTRY(\sym)
23952 .endif
23953
23954 .if \shift_ist != -1
23955+#ifdef CONFIG_SMP
23956+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23957+ lea init_tss(%r13), %r13
23958+#else
23959+ lea init_tss(%rip), %r13
23960+#endif
23961 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23962 .endif
23963
23964@@ -1126,7 +1655,7 @@ ENTRY(\sym)
23965 .endif
23966
23967 CFI_ENDPROC
23968-END(\sym)
23969+ENDPROC(\sym)
23970 .endm
23971
23972 #ifdef CONFIG_TRACING
23973@@ -1167,9 +1696,10 @@ gs_change:
23974 2: mfence /* workaround */
23975 SWAPGS
23976 popfq_cfi
23977+ pax_force_retaddr
23978 ret
23979 CFI_ENDPROC
23980-END(native_load_gs_index)
23981+ENDPROC(native_load_gs_index)
23982
23983 _ASM_EXTABLE(gs_change,bad_gs)
23984 .section .fixup,"ax"
23985@@ -1197,9 +1727,10 @@ ENTRY(do_softirq_own_stack)
23986 CFI_DEF_CFA_REGISTER rsp
23987 CFI_ADJUST_CFA_OFFSET -8
23988 decl PER_CPU_VAR(irq_count)
23989+ pax_force_retaddr
23990 ret
23991 CFI_ENDPROC
23992-END(do_softirq_own_stack)
23993+ENDPROC(do_softirq_own_stack)
23994
23995 #ifdef CONFIG_XEN
23996 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
23997@@ -1240,7 +1771,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
23998 #endif
23999 jmp error_exit
24000 CFI_ENDPROC
24001-END(xen_do_hypervisor_callback)
24002+ENDPROC(xen_do_hypervisor_callback)
24003
24004 /*
24005 * Hypervisor uses this for application faults while it executes.
24006@@ -1299,7 +1830,7 @@ ENTRY(xen_failsafe_callback)
24007 SAVE_ALL
24008 jmp error_exit
24009 CFI_ENDPROC
24010-END(xen_failsafe_callback)
24011+ENDPROC(xen_failsafe_callback)
24012
24013 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24014 xen_hvm_callback_vector xen_evtchn_do_upcall
24015@@ -1344,18 +1875,25 @@ ENTRY(paranoid_exit)
24016 DEFAULT_FRAME
24017 DISABLE_INTERRUPTS(CLBR_NONE)
24018 TRACE_IRQS_OFF_DEBUG
24019- testl %ebx,%ebx /* swapgs needed? */
24020+ testl $1,%ebx /* swapgs needed? */
24021 jnz paranoid_restore
24022+#ifdef CONFIG_PAX_MEMORY_UDEREF
24023+ pax_exit_kernel_user
24024+#else
24025+ pax_exit_kernel
24026+#endif
24027 TRACE_IRQS_IRETQ 0
24028 SWAPGS_UNSAFE_STACK
24029 RESTORE_ALL 8
24030 INTERRUPT_RETURN
24031 paranoid_restore:
24032+ pax_exit_kernel
24033 TRACE_IRQS_IRETQ_DEBUG 0
24034 RESTORE_ALL 8
24035+ pax_force_retaddr_bts
24036 INTERRUPT_RETURN
24037 CFI_ENDPROC
24038-END(paranoid_exit)
24039+ENDPROC(paranoid_exit)
24040
24041 /*
24042 * Exception entry point. This expects an error code/orig_rax on the stack.
24043@@ -1382,12 +1920,23 @@ ENTRY(error_entry)
24044 movq %r14, R14+8(%rsp)
24045 movq %r15, R15+8(%rsp)
24046 xorl %ebx,%ebx
24047- testl $3,CS+8(%rsp)
24048+ testb $3,CS+8(%rsp)
24049 je error_kernelspace
24050 error_swapgs:
24051 SWAPGS
24052 error_sti:
24053+#ifdef CONFIG_PAX_MEMORY_UDEREF
24054+ testb $3, CS+8(%rsp)
24055+ jnz 1f
24056+ pax_enter_kernel
24057+ jmp 2f
24058+1: pax_enter_kernel_user
24059+2:
24060+#else
24061+ pax_enter_kernel
24062+#endif
24063 TRACE_IRQS_OFF
24064+ pax_force_retaddr
24065 ret
24066
24067 /*
24068@@ -1422,7 +1971,7 @@ error_bad_iret:
24069 decl %ebx /* Return to usergs */
24070 jmp error_sti
24071 CFI_ENDPROC
24072-END(error_entry)
24073+ENDPROC(error_entry)
24074
24075
24076 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24077@@ -1433,7 +1982,7 @@ ENTRY(error_exit)
24078 DISABLE_INTERRUPTS(CLBR_NONE)
24079 TRACE_IRQS_OFF
24080 GET_THREAD_INFO(%rcx)
24081- testl %eax,%eax
24082+ testl $1,%eax
24083 jne retint_kernel
24084 LOCKDEP_SYS_EXIT_IRQ
24085 movl TI_flags(%rcx),%edx
24086@@ -1442,7 +1991,7 @@ ENTRY(error_exit)
24087 jnz retint_careful
24088 jmp retint_swapgs
24089 CFI_ENDPROC
24090-END(error_exit)
24091+ENDPROC(error_exit)
24092
24093 /*
24094 * Test if a given stack is an NMI stack or not.
24095@@ -1500,9 +2049,11 @@ ENTRY(nmi)
24096 * If %cs was not the kernel segment, then the NMI triggered in user
24097 * space, which means it is definitely not nested.
24098 */
24099+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24100+ je 1f
24101 cmpl $__KERNEL_CS, 16(%rsp)
24102 jne first_nmi
24103-
24104+1:
24105 /*
24106 * Check the special variable on the stack to see if NMIs are
24107 * executing.
24108@@ -1536,8 +2087,7 @@ nested_nmi:
24109
24110 1:
24111 /* Set up the interrupted NMI's stack to jump to repeat_nmi */
24112- leaq -1*8(%rsp), %rdx
24113- movq %rdx, %rsp
24114+ subq $8, %rsp
24115 CFI_ADJUST_CFA_OFFSET 1*8
24116 leaq -10*8(%rsp), %rdx
24117 pushq_cfi $__KERNEL_DS
24118@@ -1555,6 +2105,7 @@ nested_nmi_out:
24119 CFI_RESTORE rdx
24120
24121 /* No need to check faults here */
24122+# pax_force_retaddr_bts
24123 INTERRUPT_RETURN
24124
24125 CFI_RESTORE_STATE
24126@@ -1651,13 +2202,13 @@ end_repeat_nmi:
24127 subq $ORIG_RAX-R15, %rsp
24128 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24129 /*
24130- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24131+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24132 * as we should not be calling schedule in NMI context.
24133 * Even with normal interrupts enabled. An NMI should not be
24134 * setting NEED_RESCHED or anything that normal interrupts and
24135 * exceptions might do.
24136 */
24137- call save_paranoid
24138+ call save_paranoid_nmi
24139 DEFAULT_FRAME 0
24140
24141 /*
24142@@ -1667,9 +2218,9 @@ end_repeat_nmi:
24143 * NMI itself takes a page fault, the page fault that was preempted
24144 * will read the information from the NMI page fault and not the
24145 * original fault. Save it off and restore it if it changes.
24146- * Use the r12 callee-saved register.
24147+ * Use the r13 callee-saved register.
24148 */
24149- movq %cr2, %r12
24150+ movq %cr2, %r13
24151
24152 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24153 movq %rsp,%rdi
24154@@ -1678,29 +2229,34 @@ end_repeat_nmi:
24155
24156 /* Did the NMI take a page fault? Restore cr2 if it did */
24157 movq %cr2, %rcx
24158- cmpq %rcx, %r12
24159+ cmpq %rcx, %r13
24160 je 1f
24161- movq %r12, %cr2
24162+ movq %r13, %cr2
24163 1:
24164
24165- testl %ebx,%ebx /* swapgs needed? */
24166+ testl $1,%ebx /* swapgs needed? */
24167 jnz nmi_restore
24168 nmi_swapgs:
24169 SWAPGS_UNSAFE_STACK
24170 nmi_restore:
24171+ pax_exit_kernel_nmi
24172 /* Pop the extra iret frame at once */
24173 RESTORE_ALL 6*8
24174+ testb $3, 8(%rsp)
24175+ jnz 1f
24176+ pax_force_retaddr_bts
24177+1:
24178
24179 /* Clear the NMI executing stack variable */
24180 movq $0, 5*8(%rsp)
24181 jmp irq_return
24182 CFI_ENDPROC
24183-END(nmi)
24184+ENDPROC(nmi)
24185
24186 ENTRY(ignore_sysret)
24187 CFI_STARTPROC
24188 mov $-ENOSYS,%eax
24189 sysret
24190 CFI_ENDPROC
24191-END(ignore_sysret)
24192+ENDPROC(ignore_sysret)
24193
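
The pax_enter_kernel_user/pax_exit_kernel_user routines in the entry_64.S hunk above carry the core of UDEREF: the alternatives-patched fast path switches CR3 between two adjacent per-CPU PGD pages (the add/sub $4097 flips both the page and PCID 1, and bts $63 sets the CR3 no-flush bit), while the slow path walks the user half of the PGD by hand with the movb $0 / movb $0x67 loops. Below is a rough, single-threaded userspace model of that slow-path loop; all names, sizes and values are illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PGD   512
#define USER_PGD_PTRS  256     /* slots covering the lower (user) half */
#define PGD_ATTRS      0x67    /* PRESENT|RW|USER|ACCESSED|DIRTY */

static uint64_t user_pgd[PTRS_PER_PGD];

/* kernel entry: clear the attribute byte of every user slot; dropping
 * _PAGE_PRESENT unmaps userland while the kernel runs (the movb $0 loop) */
static void uderef_enter(void)
{
    for (int i = 0; i < USER_PGD_PTRS; i++)
        user_pgd[i] &= ~0xffULL;
}

/* kernel exit: restore the attribute byte; like the patch's movb $0x67
 * loop, this is applied unconditionally to every user slot */
static void uderef_exit(void)
{
    for (int i = 0; i < USER_PGD_PTRS; i++)
        user_pgd[i] |= PGD_ATTRS;
}

int main(void)
{
    user_pgd[3] = 0x1000 | PGD_ATTRS;   /* one populated user mapping */
    uderef_enter();
    printf("in kernel:    %#llx\n", (unsigned long long)user_pgd[3]);
    uderef_exit();
    printf("back to user: %#llx\n", (unsigned long long)user_pgd[3]);
    return 0;
}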
24194diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24195index f5d0730..5bce89c 100644
24196--- a/arch/x86/kernel/espfix_64.c
24197+++ b/arch/x86/kernel/espfix_64.c
24198@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24199 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24200 static void *espfix_pages[ESPFIX_MAX_PAGES];
24201
24202-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24203- __aligned(PAGE_SIZE);
24204+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24205
24206 static unsigned int page_random, slot_random;
24207
24208@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24209 void __init init_espfix_bsp(void)
24210 {
24211 pgd_t *pgd_p;
24212+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24213
24214 /* Install the espfix pud into the kernel page directory */
24215- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24216+ pgd_p = &init_level4_pgt[index];
24217 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24218
24219+#ifdef CONFIG_PAX_PER_CPU_PGD
24220+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24221+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24222+#endif
24223+
24224 /* Randomize the locations */
24225 init_espfix_random();
24226
24227@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24228 set_pte(&pte_p[n*PTE_STRIDE], pte);
24229
24230 /* Job is done for this CPU and any CPU which shares this page */
24231- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24232+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24233
24234 unlock_done:
24235 mutex_unlock(&espfix_init_mutex);
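
clone_pgd_range(), used in the espfix hunk above to mirror the espfix slot into the per-CPU kernel and user PGDs, is in essence a memcpy over consecutive top-level entries. A minimal sketch; the index value is hypothetical, standing in for pgd_index(ESPFIX_BASE_ADDR):

#include <stdint.h>
#include <string.h>

typedef uint64_t pgd_t;

enum { PTRS_PER_PGD = 512 };

/* copy 'count' consecutive top-level page-table entries */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
    memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
    static pgd_t swapper_pg_dir[PTRS_PER_PGD];
    static pgd_t cpu_pgd_user[PTRS_PER_PGD];
    unsigned long index = 0x1f6;           /* hypothetical espfix slot */

    swapper_pg_dir[index] = 0x1000 | 0x67; /* espfix pud, PRESENT|RW|... */
    clone_pgd_range(cpu_pgd_user + index, swapper_pg_dir + index, 1);
    return 0;
}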
24236diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24237index 8b7b0a5..2395f29 100644
24238--- a/arch/x86/kernel/ftrace.c
24239+++ b/arch/x86/kernel/ftrace.c
24240@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24241 * kernel identity mapping to modify code.
24242 */
24243 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24244- ip = (unsigned long)__va(__pa_symbol(ip));
24245+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24246
24247 return ip;
24248 }
24249@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24250 {
24251 unsigned char replaced[MCOUNT_INSN_SIZE];
24252
24253+ ip = ktla_ktva(ip);
24254+
24255 /*
24256 * Note: Due to modules and __init, code can
24257 * disappear and change, we need to protect against faulting
24258@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24259 unsigned char old[MCOUNT_INSN_SIZE];
24260 int ret;
24261
24262- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24263+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24264
24265 ftrace_update_func = ip;
24266 /* Make sure the breakpoints see the ftrace_update_func update */
24267@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24268 unsigned char replaced[MCOUNT_INSN_SIZE];
24269 unsigned char brk = BREAKPOINT_INSTRUCTION;
24270
24271- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24272+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24273 return -EFAULT;
24274
24275 /* Make sure it is what we expect it to be */
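
ktla_ktva(), sprinkled through the ftrace hunks above, translates a kernel text linear address into the address at which that text can actually be read or patched; under KERNEXEC the two mappings differ by a constant. A sketch with an invented delta, since the real offset and its direction depend on the KERNEXEC layout:

#include <stdint.h>

/* hypothetical displacement between the executed text mapping and its
 * accessible alias; the real value comes from the KERNEXEC layout */
#define KERNEXEC_TEXT_DELTA 0x10000000UL

static inline uintptr_t ktla_ktva(uintptr_t addr)  /* linear -> virtual */
{
    return addr + KERNEXEC_TEXT_DELTA;
}

static inline uintptr_t ktva_ktla(uintptr_t addr)  /* virtual -> linear */
{
    return addr - KERNEXEC_TEXT_DELTA;
}

int main(void)
{
    uintptr_t ip = 0xc1000000;             /* made-up text address */
    return ktva_ktla(ktla_ktva(ip)) != ip; /* round trip must hold */
}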
24276diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24277index c4f8d46..2d63ae2 100644
24278--- a/arch/x86/kernel/head64.c
24279+++ b/arch/x86/kernel/head64.c
24280@@ -68,12 +68,12 @@ again:
24281 pgd = *pgd_p;
24282
24283 /*
24284- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24285- * critical -- __PAGE_OFFSET would point us back into the dynamic
24286+ * The use of __early_va rather than __va here is critical:
24287+ * __va would point us back into the dynamic
24288 * range and we might end up looping forever...
24289 */
24290 if (pgd)
24291- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24292+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24293 else {
24294 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24295 reset_early_page_tables();
24296@@ -83,13 +83,13 @@ again:
24297 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24298 for (i = 0; i < PTRS_PER_PUD; i++)
24299 pud_p[i] = 0;
24300- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24301+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24302 }
24303 pud_p += pud_index(address);
24304 pud = *pud_p;
24305
24306 if (pud)
24307- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24308+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24309 else {
24310 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24311 reset_early_page_tables();
24312@@ -99,7 +99,7 @@ again:
24313 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24314 for (i = 0; i < PTRS_PER_PMD; i++)
24315 pmd_p[i] = 0;
24316- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24317+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24318 }
24319 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24320 pmd_p[pmd_index(address)] = pmd;
24321@@ -180,7 +180,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24322 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24323 early_printk("Kernel alive\n");
24324
24325- clear_page(init_level4_pgt);
24326 /* set init_level4_pgt kernel high mapping*/
24327 init_level4_pgt[511] = early_level4_pgt[511];
24328
24329diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24330index f36bd42..0ab4474 100644
24331--- a/arch/x86/kernel/head_32.S
24332+++ b/arch/x86/kernel/head_32.S
24333@@ -26,6 +26,12 @@
24334 /* Physical address */
24335 #define pa(X) ((X) - __PAGE_OFFSET)
24336
24337+#ifdef CONFIG_PAX_KERNEXEC
24338+#define ta(X) (X)
24339+#else
24340+#define ta(X) ((X) - __PAGE_OFFSET)
24341+#endif
24342+
24343 /*
24344 * References to members of the new_cpu_data structure.
24345 */
24346@@ -55,11 +61,7 @@
24347 * and smaller than max_low_pfn, otherwise it will waste some page table entries
24348 */
24349
24350-#if PTRS_PER_PMD > 1
24351-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24352-#else
24353-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24354-#endif
24355+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24356
24357 /* Number of possible pages in the lowmem region */
24358 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24359@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24360 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24361
24362 /*
24363+ * Real beginning of normal "text" segment
24364+ */
24365+ENTRY(stext)
24366+ENTRY(_stext)
24367+
24368+/*
24369 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24370 * %esi points to the real-mode code as a 32-bit pointer.
24371 * CS and DS must be 4 GB flat segments, but we don't depend on
24372@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24373 * can.
24374 */
24375 __HEAD
24376+
24377+#ifdef CONFIG_PAX_KERNEXEC
24378+ jmp startup_32
24379+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24380+.fill PAGE_SIZE-5,1,0xcc
24381+#endif
24382+
24383 ENTRY(startup_32)
24384 movl pa(stack_start),%ecx
24385
24386@@ -106,6 +121,59 @@ ENTRY(startup_32)
24387 2:
24388 leal -__PAGE_OFFSET(%ecx),%esp
24389
24390+#ifdef CONFIG_SMP
24391+ movl $pa(cpu_gdt_table),%edi
24392+ movl $__per_cpu_load,%eax
24393+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24394+ rorl $16,%eax
24395+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24396+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24397+ movl $__per_cpu_end - 1,%eax
24398+ subl $__per_cpu_start,%eax
24399+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24400+#endif
24401+
24402+#ifdef CONFIG_PAX_MEMORY_UDEREF
24403+ movl $NR_CPUS,%ecx
24404+ movl $pa(cpu_gdt_table),%edi
24405+1:
24406+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24407+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24408+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24409+ addl $PAGE_SIZE_asm,%edi
24410+ loop 1b
24411+#endif
24412+
24413+#ifdef CONFIG_PAX_KERNEXEC
24414+ movl $pa(boot_gdt),%edi
24415+ movl $__LOAD_PHYSICAL_ADDR,%eax
24416+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24417+ rorl $16,%eax
24418+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24419+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24420+ rorl $16,%eax
24421+
24422+ ljmp $(__BOOT_CS),$1f
24423+1:
24424+
24425+ movl $NR_CPUS,%ecx
24426+ movl $pa(cpu_gdt_table),%edi
24427+ addl $__PAGE_OFFSET,%eax
24428+1:
24429+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24430+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24431+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24432+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24433+ rorl $16,%eax
24434+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24435+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24436+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24437+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24438+ rorl $16,%eax
24439+ addl $PAGE_SIZE_asm,%edi
24440+ loop 1b
24441+#endif
24442+
24443 /*
24444 * Clear BSS first so that there are no surprises...
24445 */
24446@@ -201,8 +269,11 @@ ENTRY(startup_32)
24447 movl %eax, pa(max_pfn_mapped)
24448
24449 /* Do early initialization of the fixmap area */
24450- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24451- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24452+#ifdef CONFIG_COMPAT_VDSO
24453+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24454+#else
24455+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24456+#endif
24457 #else /* Not PAE */
24458
24459 page_pde_offset = (__PAGE_OFFSET >> 20);
24460@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24461 movl %eax, pa(max_pfn_mapped)
24462
24463 /* Do early initialization of the fixmap area */
24464- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24465- movl %eax,pa(initial_page_table+0xffc)
24466+#ifdef CONFIG_COMPAT_VDSO
24467+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24468+#else
24469+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24470+#endif
24471 #endif
24472
24473 #ifdef CONFIG_PARAVIRT
24474@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24475 cmpl $num_subarch_entries, %eax
24476 jae bad_subarch
24477
24478- movl pa(subarch_entries)(,%eax,4), %eax
24479- subl $__PAGE_OFFSET, %eax
24480- jmp *%eax
24481+ jmp *pa(subarch_entries)(,%eax,4)
24482
24483 bad_subarch:
24484 WEAK(lguest_entry)
24485@@ -261,10 +333,10 @@ WEAK(xen_entry)
24486 __INITDATA
24487
24488 subarch_entries:
24489- .long default_entry /* normal x86/PC */
24490- .long lguest_entry /* lguest hypervisor */
24491- .long xen_entry /* Xen hypervisor */
24492- .long default_entry /* Moorestown MID */
24493+ .long ta(default_entry) /* normal x86/PC */
24494+ .long ta(lguest_entry) /* lguest hypervisor */
24495+ .long ta(xen_entry) /* Xen hypervisor */
24496+ .long ta(default_entry) /* Moorestown MID */
24497 num_subarch_entries = (. - subarch_entries) / 4
24498 .previous
24499 #else
24500@@ -354,6 +426,7 @@ default_entry:
24501 movl pa(mmu_cr4_features),%eax
24502 movl %eax,%cr4
24503
24504+#ifdef CONFIG_X86_PAE
24505 testb $X86_CR4_PAE, %al # check if PAE is enabled
24506 jz enable_paging
24507
24508@@ -382,6 +455,9 @@ default_entry:
24509 /* Make changes effective */
24510 wrmsr
24511
24512+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24513+#endif
24514+
24515 enable_paging:
24516
24517 /*
24518@@ -449,14 +525,20 @@ is486:
24519 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24520 movl %eax,%ss # after changing gdt.
24521
24522- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24523+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24524 movl %eax,%ds
24525 movl %eax,%es
24526
24527 movl $(__KERNEL_PERCPU), %eax
24528 movl %eax,%fs # set this cpu's percpu
24529
24530+#ifdef CONFIG_CC_STACKPROTECTOR
24531 movl $(__KERNEL_STACK_CANARY),%eax
24532+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24533+ movl $(__USER_DS),%eax
24534+#else
24535+ xorl %eax,%eax
24536+#endif
24537 movl %eax,%gs
24538
24539 xorl %eax,%eax # Clear LDT
24540@@ -512,8 +594,11 @@ setup_once:
24541 * relocation. Manually set base address in stack canary
24542 * segment descriptor.
24543 */
24544- movl $gdt_page,%eax
24545+ movl $cpu_gdt_table,%eax
24546 movl $stack_canary,%ecx
24547+#ifdef CONFIG_SMP
24548+ addl $__per_cpu_load,%ecx
24549+#endif
24550 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24551 shrl $16, %ecx
24552 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24553@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24554 cmpl $2,(%esp) # X86_TRAP_NMI
24555 je is_nmi # Ignore NMI
24556
24557- cmpl $2,%ss:early_recursion_flag
24558+ cmpl $1,%ss:early_recursion_flag
24559 je hlt_loop
24560 incl %ss:early_recursion_flag
24561
24562@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24563 pushl (20+6*4)(%esp) /* trapno */
24564 pushl $fault_msg
24565 call printk
24566-#endif
24567 call dump_stack
24568+#endif
24569 hlt_loop:
24570 hlt
24571 jmp hlt_loop
24572@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24573 /* This is the default interrupt "handler" :-) */
24574 ALIGN
24575 ignore_int:
24576- cld
24577 #ifdef CONFIG_PRINTK
24578+ cmpl $2,%ss:early_recursion_flag
24579+ je hlt_loop
24580+ incl %ss:early_recursion_flag
24581+ cld
24582 pushl %eax
24583 pushl %ecx
24584 pushl %edx
24585@@ -617,9 +705,6 @@ ignore_int:
24586 movl $(__KERNEL_DS),%eax
24587 movl %eax,%ds
24588 movl %eax,%es
24589- cmpl $2,early_recursion_flag
24590- je hlt_loop
24591- incl early_recursion_flag
24592 pushl 16(%esp)
24593 pushl 24(%esp)
24594 pushl 32(%esp)
24595@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24596 /*
24597 * BSS section
24598 */
24599-__PAGE_ALIGNED_BSS
24600- .align PAGE_SIZE
24601 #ifdef CONFIG_X86_PAE
24602+.section .initial_pg_pmd,"a",@progbits
24603 initial_pg_pmd:
24604 .fill 1024*KPMDS,4,0
24605 #else
24606+.section .initial_page_table,"a",@progbits
24607 ENTRY(initial_page_table)
24608 .fill 1024,4,0
24609 #endif
24610+.section .initial_pg_fixmap,"a",@progbits
24611 initial_pg_fixmap:
24612 .fill 1024,4,0
24613+.section .empty_zero_page,"a",@progbits
24614 ENTRY(empty_zero_page)
24615 .fill 4096,1,0
24616+.section .swapper_pg_dir,"a",@progbits
24617 ENTRY(swapper_pg_dir)
24618+#ifdef CONFIG_X86_PAE
24619+ .fill 4,8,0
24620+#else
24621 .fill 1024,4,0
24622+#endif
24623
24624 /*
24625 * This starts the data section.
24626 */
24627 #ifdef CONFIG_X86_PAE
24628-__PAGE_ALIGNED_DATA
24629- /* Page-aligned for the benefit of paravirt? */
24630- .align PAGE_SIZE
24631+.section .initial_page_table,"a",@progbits
24632 ENTRY(initial_page_table)
24633 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24634 # if KPMDS == 3
24635@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24636 # error "Kernel PMDs should be 1, 2 or 3"
24637 # endif
24638 .align PAGE_SIZE /* needs to be page-sized too */
24639+
24640+#ifdef CONFIG_PAX_PER_CPU_PGD
24641+ENTRY(cpu_pgd)
24642+ .rept 2*NR_CPUS
24643+ .fill 4,8,0
24644+ .endr
24645+#endif
24646+
24647 #endif
24648
24649 .data
24650 .balign 4
24651 ENTRY(stack_start)
24652- .long init_thread_union+THREAD_SIZE
24653+ .long init_thread_union+THREAD_SIZE-8
24654
24655 __INITRODATA
24656 int_msg:
24657@@ -727,7 +825,7 @@ fault_msg:
24658 * segment size, and 32-bit linear address value:
24659 */
24660
24661- .data
24662+.section .rodata,"a",@progbits
24663 .globl boot_gdt_descr
24664 .globl idt_descr
24665
24666@@ -736,7 +834,7 @@ fault_msg:
24667 .word 0 # 32 bit align gdt_desc.address
24668 boot_gdt_descr:
24669 .word __BOOT_DS+7
24670- .long boot_gdt - __PAGE_OFFSET
24671+ .long pa(boot_gdt)
24672
24673 .word 0 # 32-bit align idt_desc.address
24674 idt_descr:
24675@@ -747,7 +845,7 @@ idt_descr:
24676 .word 0 # 32 bit align gdt_desc.address
24677 ENTRY(early_gdt_descr)
24678 .word GDT_ENTRIES*8-1
24679- .long gdt_page /* Overwritten for secondary CPUs */
24680+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24681
24682 /*
24683 * The boot_gdt must mirror the equivalent in setup.S and is
24684@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24685 .align L1_CACHE_BYTES
24686 ENTRY(boot_gdt)
24687 .fill GDT_ENTRY_BOOT_CS,8,0
24688- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24689- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24690+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24691+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24692+
24693+ .align PAGE_SIZE_asm
24694+ENTRY(cpu_gdt_table)
24695+ .rept NR_CPUS
24696+ .quad 0x0000000000000000 /* NULL descriptor */
24697+ .quad 0x0000000000000000 /* 0x0b reserved */
24698+ .quad 0x0000000000000000 /* 0x13 reserved */
24699+ .quad 0x0000000000000000 /* 0x1b reserved */
24700+
24701+#ifdef CONFIG_PAX_KERNEXEC
24702+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24703+#else
24704+ .quad 0x0000000000000000 /* 0x20 unused */
24705+#endif
24706+
24707+ .quad 0x0000000000000000 /* 0x28 unused */
24708+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24709+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24710+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24711+ .quad 0x0000000000000000 /* 0x4b reserved */
24712+ .quad 0x0000000000000000 /* 0x53 reserved */
24713+ .quad 0x0000000000000000 /* 0x5b reserved */
24714+
24715+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24716+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24717+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24718+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24719+
24720+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24721+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24722+
24723+ /*
24724+ * Segments used for calling PnP BIOS have byte granularity.
24725+ * The code segments and data segments have fixed 64k limits;
24726+ * the transfer segment sizes are set at run time.
24727+ */
24728+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24729+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24730+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24731+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24732+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24733+
24734+ /*
24735+ * The APM segments have byte granularity and their bases
24736+ * are set at run time. All have 64k limits.
24737+ */
24738+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24739+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24740+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24741+
24742+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24743+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24744+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24745+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24746+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24747+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24748+
24749+ /* Be sure this is zeroed to avoid false validations in Xen */
24750+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24751+ .endr
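
The .quad values populating cpu_gdt_table above pack base, limit and attribute bits into the legacy segment-descriptor layout. A small decoder in plain C, purely illustrative, makes entries such as 0x00cf9b000000ffff legible:

#include <stdint.h>
#include <stdio.h>

static void decode_gdt_entry(uint64_t d)
{
    uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
    uint32_t base   = (uint32_t)((d >> 16) & 0xffffff)
                    | (uint32_t)(((d >> 56) & 0xff) << 24);
    unsigned access = (unsigned)((d >> 40) & 0xff);
    unsigned flags  = (unsigned)((d >> 52) & 0x0f);

    if (flags & 0x8)                 /* G bit: limit counts 4 KiB pages */
        limit = (limit << 12) | 0xfff;

    printf("base=%#010x limit=%#010x access=%#04x flags=%#x\n",
           base, limit, access, flags);
}

int main(void)
{
    decode_gdt_entry(0x00cf9b000000ffffULL);   /* kernel 4GB code */
    decode_gdt_entry(0x00cf93000000ffffULL);   /* kernel 4GB data */
    decode_gdt_entry(0x00cffb000000ffffULL);   /* user 4GB code   */
    return 0;
}

Run on the kernel code/data quads it reports base 0, a 4 GiB limit (G=1) and access bytes 0x9b/0x93, i.e. present ring-0 code/data, matching the inline comments in the table.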
24752diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24753index 6fd514d9..c4221b8 100644
24754--- a/arch/x86/kernel/head_64.S
24755+++ b/arch/x86/kernel/head_64.S
24756@@ -20,6 +20,8 @@
24757 #include <asm/processor-flags.h>
24758 #include <asm/percpu.h>
24759 #include <asm/nops.h>
24760+#include <asm/cpufeature.h>
24761+#include <asm/alternative-asm.h>
24762
24763 #ifdef CONFIG_PARAVIRT
24764 #include <asm/asm-offsets.h>
24765@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24766 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24767 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24768 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24769+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24770+L3_VMALLOC_START = pud_index(VMALLOC_START)
24771+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24772+L3_VMALLOC_END = pud_index(VMALLOC_END)
24773+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24774+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24775
24776 .text
24777 __HEAD
24778@@ -89,11 +97,24 @@ startup_64:
24779 * Fixup the physical addresses in the page table
24780 */
24781 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24782+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24783+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24784+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24785+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24786+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24787
24788- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24789- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24790+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24791+#ifndef CONFIG_XEN
24792+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24793+#endif
24794+
24795+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24796+
24797+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24798+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24799
24800 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24801+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24802
24803 /*
24804 * Set up the identity mapping for the switchover. These
24805@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24806 * after the boot processor executes this code.
24807 */
24808
24809+ orq $-1, %rbp
24810 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24811 1:
24812
24813- /* Enable PAE mode and PGE */
24814- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24815+ /* Enable PAE mode and PSE/PGE */
24816+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24817 movq %rcx, %cr4
24818
24819 /* Setup early boot stage 4 level pagetables. */
24820@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24821 movl $MSR_EFER, %ecx
24822 rdmsr
24823 btsl $_EFER_SCE, %eax /* Enable System Call */
24824- btl $20,%edi /* No Execute supported? */
24825+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24826 jnc 1f
24827 btsl $_EFER_NX, %eax
24828+ cmpq $-1, %rbp
24829+ je 1f
24830 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24831+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24832+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24833+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24834+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24835+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24836+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24837+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24838 1: wrmsr /* Make changes effective */
24839
24840 /* Setup cr0 */
24841@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
24842 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24843 * address given in m16:64.
24844 */
24845+ pax_set_fptr_mask
24846 movq initial_code(%rip),%rax
24847 pushq $0 # fake return address to stop unwinder
24848 pushq $__KERNEL_CS # set correct cs
24849@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
24850 .quad INIT_PER_CPU_VAR(irq_stack_union)
24851
24852 GLOBAL(stack_start)
24853- .quad init_thread_union+THREAD_SIZE-8
24854+ .quad init_thread_union+THREAD_SIZE-16
24855 .word 0
24856 __FINITDATA
24857
24858@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
24859 call dump_stack
24860 #ifdef CONFIG_KALLSYMS
24861 leaq early_idt_ripmsg(%rip),%rdi
24862- movq 40(%rsp),%rsi # %rip again
24863+ movq 88(%rsp),%rsi # %rip again
24864 call __print_symbol
24865 #endif
24866 #endif /* EARLY_PRINTK */
24867@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
24868 early_recursion_flag:
24869 .long 0
24870
24871+ .section .rodata,"a",@progbits
24872 #ifdef CONFIG_EARLY_PRINTK
24873 early_idt_msg:
24874 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24875@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
24876 NEXT_PAGE(early_dynamic_pgts)
24877 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24878
24879- .data
24880+ .section .rodata,"a",@progbits
24881
24882-#ifndef CONFIG_XEN
24883 NEXT_PAGE(init_level4_pgt)
24884- .fill 512,8,0
24885-#else
24886-NEXT_PAGE(init_level4_pgt)
24887- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24888 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24889 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24890+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24891+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24892+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24893+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24894+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24895+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24896 .org init_level4_pgt + L4_START_KERNEL*8, 0
24897 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24898 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24899
24900+#ifdef CONFIG_PAX_PER_CPU_PGD
24901+NEXT_PAGE(cpu_pgd)
24902+ .rept 2*NR_CPUS
24903+ .fill 512,8,0
24904+ .endr
24905+#endif
24906+
24907 NEXT_PAGE(level3_ident_pgt)
24908 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24909+#ifdef CONFIG_XEN
24910 .fill 511, 8, 0
24911+#else
24912+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24913+ .fill 510,8,0
24914+#endif
24915+
24916+NEXT_PAGE(level3_vmalloc_start_pgt)
24917+ .fill 512,8,0
24918+
24919+NEXT_PAGE(level3_vmalloc_end_pgt)
24920+ .fill 512,8,0
24921+
24922+NEXT_PAGE(level3_vmemmap_pgt)
24923+ .fill L3_VMEMMAP_START,8,0
24924+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24925+
24926 NEXT_PAGE(level2_ident_pgt)
24927- /* Since I easily can, map the first 1G.
24928+ /* Since I easily can, map the first 2G.
24929 * Don't set NX because code runs from these pages.
24930 */
24931- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24932-#endif
24933+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24934
24935 NEXT_PAGE(level3_kernel_pgt)
24936 .fill L3_START_KERNEL,8,0
24937@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
24938 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24939 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24940
24941+NEXT_PAGE(level2_vmemmap_pgt)
24942+ .fill 512,8,0
24943+
24944 NEXT_PAGE(level2_kernel_pgt)
24945 /*
24946 * 512 MB kernel mapping. We spend a full page on this pagetable
24947@@ -494,21 +553,57 @@ NEXT_PAGE(level2_kernel_pgt)
24948 NEXT_PAGE(level2_fixmap_pgt)
24949 .fill 506,8,0
24950 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24951- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24952- .fill 5,8,0
24953+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24954+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24955+ .fill 4,8,0
24956
24957 NEXT_PAGE(level1_fixmap_pgt)
24958 .fill 512,8,0
24959
24960+NEXT_PAGE(level1_vsyscall_pgt)
24961+ .fill 512,8,0
24962+
24963 #undef PMDS
24964
24965- .data
24966+ .align PAGE_SIZE
24967+ENTRY(cpu_gdt_table)
24968+ .rept NR_CPUS
24969+ .quad 0x0000000000000000 /* NULL descriptor */
24970+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24971+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24972+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24973+ .quad 0x00cffb000000ffff /* __USER32_CS */
24974+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24975+ .quad 0x00affb000000ffff /* __USER_CS */
24976+
24977+#ifdef CONFIG_PAX_KERNEXEC
24978+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24979+#else
24980+ .quad 0x0 /* unused */
24981+#endif
24982+
24983+ .quad 0,0 /* TSS */
24984+ .quad 0,0 /* LDT */
24985+ .quad 0,0,0 /* three TLS descriptors */
24986+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24987+ /* asm/segment.h:GDT_ENTRIES must match this */
24988+
24989+#ifdef CONFIG_PAX_MEMORY_UDEREF
24990+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24991+#else
24992+ .quad 0x0 /* unused */
24993+#endif
24994+
24995+ /* zero the remaining page */
24996+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24997+ .endr
24998+
24999 .align 16
25000 .globl early_gdt_descr
25001 early_gdt_descr:
25002 .word GDT_ENTRIES*8-1
25003 early_gdt_descr_base:
25004- .quad INIT_PER_CPU_VAR(gdt_page)
25005+ .quad cpu_gdt_table
25006
25007 ENTRY(phys_base)
25008 /* This must match the first entry in level2_kernel_pgt */
25009@@ -532,8 +627,8 @@ NEXT_PAGE(kasan_zero_pud)
25010
25011
25012 #include "../../x86/xen/xen-head.S"
25013-
25014- __PAGE_ALIGNED_BSS
25015+
25016+ .section .rodata,"a",@progbits
25017 NEXT_PAGE(empty_zero_page)
25018 .skip PAGE_SIZE
25019
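
The new L4_VMALLOC_START, L3_VMALLOC_START and related symbols in the head_64.S hunk are computed with pgd_index()/pud_index(), which merely slice 9-bit fields out of the virtual address. A sketch that reproduces the "= 511" arithmetic from the comment in the hunk:

#include <stdio.h>

#define PGDIR_SHIFT 39
#define PUD_SHIFT   30
#define PTRS_PER    512

static unsigned pgd_index(unsigned long long va)
{
    return (unsigned)((va >> PGDIR_SHIFT) & (PTRS_PER - 1));
}

static unsigned pud_index(unsigned long long va)
{
    return (unsigned)((va >> PUD_SHIFT) & (PTRS_PER - 1));
}

int main(void)
{
    unsigned long long start_kernel_map = 0xffffffff80000000ULL;
    /* (2^48 - 2*1024*1024*1024) / 2^39 = 511, as the hunk's comment says */
    printf("L4_START_KERNEL = %u\n", pgd_index(start_kernel_map));
    printf("L3_START_KERNEL = %u\n", pud_index(start_kernel_map));
    return 0;
}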
25020diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25021index 05fd74f..c3548b1 100644
25022--- a/arch/x86/kernel/i386_ksyms_32.c
25023+++ b/arch/x86/kernel/i386_ksyms_32.c
25024@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25025 EXPORT_SYMBOL(cmpxchg8b_emu);
25026 #endif
25027
25028+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25029+
25030 /* Networking helper routines. */
25031 EXPORT_SYMBOL(csum_partial_copy_generic);
25032+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25033+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25034
25035 EXPORT_SYMBOL(__get_user_1);
25036 EXPORT_SYMBOL(__get_user_2);
25037@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25038 EXPORT_SYMBOL(___preempt_schedule_context);
25039 #endif
25040 #endif
25041+
25042+#ifdef CONFIG_PAX_KERNEXEC
25043+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25044+#endif
25045+
25046+#ifdef CONFIG_PAX_PER_CPU_PGD
25047+EXPORT_SYMBOL(cpu_pgd);
25048+#endif
25049diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25050index d5651fc..29c740d 100644
25051--- a/arch/x86/kernel/i387.c
25052+++ b/arch/x86/kernel/i387.c
25053@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25054 static inline bool interrupted_user_mode(void)
25055 {
25056 struct pt_regs *regs = get_irq_regs();
25057- return regs && user_mode_vm(regs);
25058+ return regs && user_mode(regs);
25059 }
25060
25061 /*
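
interrupted_user_mode() above, like the many testb $3, CS(%rsp) checks through the entry code, keys off the low two bits (the RPL) of the saved CS selector: a nonzero RPL means the interrupt arrived from userland. A minimal model, using the customary 64-bit selector values as examples:

#include <stdint.h>
#include <stdio.h>

/* low two bits of a selector are the RPL; Linux uses only rings 0 and 3,
 * so any nonzero RPL in the saved CS means "came from userland" */
static int user_mode(uint16_t cs)
{
    return (cs & 3) != 0;
}

int main(void)
{
    printf("CS 0x10 -> user_mode=%d\n", user_mode(0x10)); /* __KERNEL_CS */
    printf("CS 0x33 -> user_mode=%d\n", user_mode(0x33)); /* __USER_CS   */
    return 0;
}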
25062diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25063index e7cc537..67d7372 100644
25064--- a/arch/x86/kernel/i8259.c
25065+++ b/arch/x86/kernel/i8259.c
25066@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25067 static void make_8259A_irq(unsigned int irq)
25068 {
25069 disable_irq_nosync(irq);
25070- io_apic_irqs &= ~(1<<irq);
25071+ io_apic_irqs &= ~(1UL<<irq);
25072 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25073 enable_irq(irq);
25074 }
25075@@ -208,7 +208,7 @@ spurious_8259A_irq:
25076 "spurious 8259A interrupt: IRQ%d.\n", irq);
25077 spurious_irq_mask |= irqmask;
25078 }
25079- atomic_inc(&irq_err_count);
25080+ atomic_inc_unchecked(&irq_err_count);
25081 /*
25082 * Theoretically we do not have to handle this IRQ,
25083 * but in Linux this does not cause problems and is
25084@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25085 /* (slave's support for AEOI in flat mode is to be investigated) */
25086 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25087
25088+ pax_open_kernel();
25089 if (auto_eoi)
25090 /*
25091 * In AEOI mode we just have to mask the interrupt
25092 * when acking.
25093 */
25094- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25095+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25096 else
25097- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25098+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25099+ pax_close_kernel();
25100
25101 udelay(100); /* wait for 8259A to initialize */
25102
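
Under PaX constification, i8259A_chip ends up in read-only memory, so the hunk above wraps its one legitimate runtime store in pax_open_kernel()/pax_close_kernel(), which briefly lift write protection (the *(void **)& cast defeats the const qualifier). A userspace analogue of the pattern using mprotect(); purely illustrative, not the kernel mechanism itself:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct irq_chip { void (*irq_mask_ack)(int); };

static void mask_and_ack(int irq) { printf("mask+ack IRQ %d\n", irq); }

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    struct irq_chip *chip = mmap(NULL, (size_t)pg, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (chip == MAP_FAILED)
        return 1;

    mprotect(chip, (size_t)pg, PROT_READ);              /* "constified" */

    mprotect(chip, (size_t)pg, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
    chip->irq_mask_ack = mask_and_ack;                  /* the one legal store */
    mprotect(chip, (size_t)pg, PROT_READ);              /* pax_close_kernel() */

    chip->irq_mask_ack(3);
    return 0;
}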
25103diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25104index a979b5b..1d6db75 100644
25105--- a/arch/x86/kernel/io_delay.c
25106+++ b/arch/x86/kernel/io_delay.c
25107@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25108 * Quirk table for systems that misbehave (lock up, etc.) if port
25109 * 0x80 is used:
25110 */
25111-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25112+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25113 {
25114 .callback = dmi_io_delay_0xed_port,
25115 .ident = "Compaq Presario V6000",
25116diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25117index 4ddaf66..49d5c18 100644
25118--- a/arch/x86/kernel/ioport.c
25119+++ b/arch/x86/kernel/ioport.c
25120@@ -6,6 +6,7 @@
25121 #include <linux/sched.h>
25122 #include <linux/kernel.h>
25123 #include <linux/capability.h>
25124+#include <linux/security.h>
25125 #include <linux/errno.h>
25126 #include <linux/types.h>
25127 #include <linux/ioport.h>
25128@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25129 return -EINVAL;
25130 if (turn_on && !capable(CAP_SYS_RAWIO))
25131 return -EPERM;
25132+#ifdef CONFIG_GRKERNSEC_IO
25133+ if (turn_on && grsec_disable_privio) {
25134+ gr_handle_ioperm();
25135+ return -ENODEV;
25136+ }
25137+#endif
25138
25139 /*
25140 * If it's the first ioperm() call in this thread's lifetime, set the
25141@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25142 * because the ->io_bitmap_max value must match the bitmap
25143 * contents:
25144 */
25145- tss = &per_cpu(init_tss, get_cpu());
25146+ tss = init_tss + get_cpu();
25147
25148 if (turn_on)
25149 bitmap_clear(t->io_bitmap_ptr, from, num);
25150@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25151 if (level > old) {
25152 if (!capable(CAP_SYS_RAWIO))
25153 return -EPERM;
25154+#ifdef CONFIG_GRKERNSEC_IO
25155+ if (grsec_disable_privio) {
25156+ gr_handle_iopl();
25157+ return -ENODEV;
25158+ }
25159+#endif
25160 }
25161 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25162 t->iopl = level << 12;
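
The ioport.c hunks add the same gate to both sys_ioperm() and sys_iopl(): even with CAP_SYS_RAWIO, raw port access is refused while grsec_disable_privio is set, after logging through gr_handle_ioperm()/gr_handle_iopl(). A condensed sketch of that control flow, assuming grsec_disable_privio is a sysctl-backed flag and the gr_* helpers only log:

/*
 * Condensed control flow of the ioperm/iopl gate above. The stand-in
 * variables model capable(CAP_SYS_RAWIO) and the grsecurity sysctl;
 * both come from elsewhere in the real kernel, not from this sketch.
 */
#include <errno.h>
#include <stdbool.h>

static bool capable_sys_rawio;   /* stand-in for capable(CAP_SYS_RAWIO) */
static int grsec_disable_privio; /* stand-in for the grsecurity sysctl  */

static void gr_handle_ioperm(void) { /* would log the denied attempt */ }

static long sys_ioperm_gate(int turn_on)
{
	if (turn_on && !capable_sys_rawio)
		return -EPERM;		/* stock check: capability   */
	if (turn_on && grsec_disable_privio) {
		gr_handle_ioperm();	/* grsecurity: log ...       */
		return -ENODEV;		/* ... and refuse regardless */
	}
	return 0;			/* fall through to real work */
}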
25163diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25164index 67b1cbe..6ad4cbc 100644
25165--- a/arch/x86/kernel/irq.c
25166+++ b/arch/x86/kernel/irq.c
25167@@ -22,7 +22,7 @@
25168 #define CREATE_TRACE_POINTS
25169 #include <asm/trace/irq_vectors.h>
25170
25171-atomic_t irq_err_count;
25172+atomic_unchecked_t irq_err_count;
25173
25174 /* Function pointer for generic interrupt vector handling */
25175 void (*x86_platform_ipi_callback)(void) = NULL;
25176@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25177 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25178 seq_puts(p, " Hypervisor callback interrupts\n");
25179 #endif
25180- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25181+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25182 #if defined(CONFIG_X86_IO_APIC)
25183- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25184+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25185 #endif
25186 return 0;
25187 }
25188@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25189
25190 u64 arch_irq_stat(void)
25191 {
25192- u64 sum = atomic_read(&irq_err_count);
25193+ u64 sum = atomic_read_unchecked(&irq_err_count);
25194 return sum;
25195 }
25196
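
In irq.c, irq_err_count becomes atomic_unchecked_t because it is a statistics counter: under the PaX REFCOUNT feature, plain atomic_t operations trap on signed overflow, and counters that may legitimately wrap have to opt out via the *_unchecked variants. A user-space model of the split, using GCC builtins in place of the kernel's trapping asm:

/*
 * Sketch of the checked/unchecked split. This models the PaX REFCOUNT
 * idea with an assert(); the real atomic_inc() uses asm that traps on
 * signed overflow in kernel mode.
 */
#include <assert.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	assert(old != INT_MAX);	/* refcount overflow would be a bug */
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* statistics counter: wrapping is harmless, no check */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}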
25197diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25198index 28d28f5..e6cc9ae 100644
25199--- a/arch/x86/kernel/irq_32.c
25200+++ b/arch/x86/kernel/irq_32.c
25201@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25202
25203 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25204
25205+extern void gr_handle_kernel_exploit(void);
25206+
25207 int sysctl_panic_on_stackoverflow __read_mostly;
25208
25209 /* Debugging check for stack overflow: is there less than 1KB free? */
25210@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25211 __asm__ __volatile__("andl %%esp,%0" :
25212 "=r" (sp) : "0" (THREAD_SIZE - 1));
25213
25214- return sp < (sizeof(struct thread_info) + STACK_WARN);
25215+ return sp < STACK_WARN;
25216 }
25217
25218 static void print_stack_overflow(void)
25219 {
25220 printk(KERN_WARNING "low stack detected by irq handler\n");
25221 dump_stack();
25222+ gr_handle_kernel_exploit();
25223 if (sysctl_panic_on_stackoverflow)
25224 panic("low stack detected by irq handler - check messages\n");
25225 }
25226@@ -77,10 +80,9 @@ static inline void *current_stack(void)
25227 static inline int
25228 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25229 {
25230- struct irq_stack *curstk, *irqstk;
25231+ struct irq_stack *irqstk;
25232 u32 *isp, *prev_esp, arg1, arg2;
25233
25234- curstk = (struct irq_stack *) current_stack();
25235 irqstk = __this_cpu_read(hardirq_stack);
25236
25237 /*
25238@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25239 * handler) we can't do that and just have to keep using the
25240 * current stack (which is the irq stack already after all)
25241 */
25242- if (unlikely(curstk == irqstk))
25243+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25244 return 0;
25245
25246- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25247+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25248
25249 /* Save the next esp at the bottom of the stack */
25250 prev_esp = (u32 *)irqstk;
25251 *prev_esp = current_stack_pointer();
25252
25253+#ifdef CONFIG_PAX_MEMORY_UDEREF
25254+ __set_fs(MAKE_MM_SEG(0));
25255+#endif
25256+
25257 if (unlikely(overflow))
25258 call_on_stack(print_stack_overflow, isp);
25259
25260@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25261 : "0" (irq), "1" (desc), "2" (isp),
25262 "D" (desc->handle_irq)
25263 : "memory", "cc", "ecx");
25264+
25265+#ifdef CONFIG_PAX_MEMORY_UDEREF
25266+ __set_fs(current_thread_info()->addr_limit);
25267+#endif
25268+
25269 return 1;
25270 }
25271
25272@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25273 */
25274 void irq_ctx_init(int cpu)
25275 {
25276- struct irq_stack *irqstk;
25277-
25278 if (per_cpu(hardirq_stack, cpu))
25279 return;
25280
25281- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25282- THREADINFO_GFP,
25283- THREAD_SIZE_ORDER));
25284- per_cpu(hardirq_stack, cpu) = irqstk;
25285-
25286- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25287- THREADINFO_GFP,
25288- THREAD_SIZE_ORDER));
25289- per_cpu(softirq_stack, cpu) = irqstk;
25290-
25291- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25292- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25293+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25294+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25295 }
25296
25297 void do_softirq_own_stack(void)
25298 {
25299- struct thread_info *curstk;
25300 struct irq_stack *irqstk;
25301 u32 *isp, *prev_esp;
25302
25303- curstk = current_stack();
25304 irqstk = __this_cpu_read(softirq_stack);
25305
25306 /* build the stack frame on the softirq stack */
25307@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25308 prev_esp = (u32 *)irqstk;
25309 *prev_esp = current_stack_pointer();
25310
25311+#ifdef CONFIG_PAX_MEMORY_UDEREF
25312+ __set_fs(MAKE_MM_SEG(0));
25313+#endif
25314+
25315 call_on_stack(__do_softirq, isp);
25316+
25317+#ifdef CONFIG_PAX_MEMORY_UDEREF
25318+ __set_fs(current_thread_info()->addr_limit);
25319+#endif
25320+
25321 }
25322
25323 bool handle_irq(unsigned irq, struct pt_regs *regs)
25324@@ -165,7 +171,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25325 if (unlikely(!desc))
25326 return false;
25327
25328- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25329+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25330 if (unlikely(overflow))
25331 print_stack_overflow();
25332 desc->handle_irq(irq, desc);
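
The irq_32.c hunks bracket both irq-stack and softirq-stack execution with __set_fs(MAKE_MM_SEG(0)) / __set_fs(current_thread_info()->addr_limit): under PAX_MEMORY_UDEREF the usable segment limit drops to zero for the duration, so a stray userland dereference from interrupt context faults instead of silently succeeding. A sketch of the bracket, with mm_segment_t reduced to a plain value and the restore done from a saved copy rather than from thread_info:

/*
 * Shape of the UDEREF bracket around call_on_stack(). Assumptions:
 * mm_segment_t reduced to a struct around one value, __set_fs() to a
 * plain store; the real helper also reloads the per-CPU segment
 * limit, and the restore reads thread_info, not a saved copy.
 */
typedef struct { unsigned long seg; } mm_segment_t;

#define MAKE_MM_SEG(s)	((mm_segment_t){ (s) })

static mm_segment_t addr_limit;		/* per-thread in reality */

static void __set_fs(mm_segment_t fs) { addr_limit = fs; }

static void run_on_irq_stack(void (*fn)(void))
{
	mm_segment_t saved = addr_limit;

	__set_fs(MAKE_MM_SEG(0));	/* no userland access from irq */
	fn();
	__set_fs(saved);		/* back to the thread's limit  */
}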
25333diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25334index e4b503d..824fce8 100644
25335--- a/arch/x86/kernel/irq_64.c
25336+++ b/arch/x86/kernel/irq_64.c
25337@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25338 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25339 EXPORT_PER_CPU_SYMBOL(irq_regs);
25340
25341+extern void gr_handle_kernel_exploit(void);
25342+
25343 int sysctl_panic_on_stackoverflow;
25344
25345 /*
25346@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25347 u64 estack_top, estack_bottom;
25348 u64 curbase = (u64)task_stack_page(current);
25349
25350- if (user_mode_vm(regs))
25351+ if (user_mode(regs))
25352 return;
25353
25354 if (regs->sp >= curbase + sizeof(struct thread_info) +
25355@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25356 irq_stack_top, irq_stack_bottom,
25357 estack_top, estack_bottom);
25358
25359+ gr_handle_kernel_exploit();
25360+
25361 if (sysctl_panic_on_stackoverflow)
25362 panic("low stack detected by irq handler - check messages\n");
25363 #endif
25364diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25365index 26d5a55..a01160a 100644
25366--- a/arch/x86/kernel/jump_label.c
25367+++ b/arch/x86/kernel/jump_label.c
25368@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25369 * Jump label is enabled for the first time.
25370 * So we expect a default_nop...
25371 */
25372- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25373+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25374 != 0))
25375 bug_at((void *)entry->code, __LINE__);
25376 } else {
25377@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25378 * ...otherwise expect an ideal_nop. Otherwise
25379 * something went horribly wrong.
25380 */
25381- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25382+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25383 != 0))
25384 bug_at((void *)entry->code, __LINE__);
25385 }
25386@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25387 * are converting the default nop to the ideal nop.
25388 */
25389 if (init) {
25390- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25391+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25392 bug_at((void *)entry->code, __LINE__);
25393 } else {
25394 code.jump = 0xe9;
25395 code.offset = entry->target -
25396 (entry->code + JUMP_LABEL_NOP_SIZE);
25397- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25398+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25399 bug_at((void *)entry->code, __LINE__);
25400 }
25401 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
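
ktla_ktva() here, and ktva_ktla() in the kgdb/kprobes hunks below, convert between the two mappings KERNEXEC gives the i386 kernel text: reads, compares and patches of instruction bytes have to go through the correct alias of entry->code. A sketch under the assumption of a fixed offset between the mappings (the real value derives from __KERNEL_TEXT_OFFSET; on configurations without the split, both macros are the identity):

/*
 * Sketch of the ktla_ktva()/ktva_ktla() pair. The offset constant is
 * assumed for illustration only; i386 KERNEXEC computes it from
 * __KERNEL_TEXT_OFFSET, and amd64 leaves both as the identity.
 */
#define KERNEL_TEXT_OFFSET 0x10000000UL	/* assumed, for illustration */

static unsigned long ktla_ktva(unsigned long addr)
{
	return addr + KERNEL_TEXT_OFFSET;	/* one alias -> the other */
}

static unsigned long ktva_ktla(unsigned long addr)
{
	return addr - KERNEL_TEXT_OFFSET;	/* and back again */
}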
25402diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25403index 25ecd56..e12482f 100644
25404--- a/arch/x86/kernel/kgdb.c
25405+++ b/arch/x86/kernel/kgdb.c
25406@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25407 #ifdef CONFIG_X86_32
25408 switch (regno) {
25409 case GDB_SS:
25410- if (!user_mode_vm(regs))
25411+ if (!user_mode(regs))
25412 *(unsigned long *)mem = __KERNEL_DS;
25413 break;
25414 case GDB_SP:
25415- if (!user_mode_vm(regs))
25416+ if (!user_mode(regs))
25417 *(unsigned long *)mem = kernel_stack_pointer(regs);
25418 break;
25419 case GDB_GS:
25420@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25421 bp->attr.bp_addr = breakinfo[breakno].addr;
25422 bp->attr.bp_len = breakinfo[breakno].len;
25423 bp->attr.bp_type = breakinfo[breakno].type;
25424- info->address = breakinfo[breakno].addr;
25425+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25426+ info->address = ktla_ktva(breakinfo[breakno].addr);
25427+ else
25428+ info->address = breakinfo[breakno].addr;
25429 info->len = breakinfo[breakno].len;
25430 info->type = breakinfo[breakno].type;
25431 val = arch_install_hw_breakpoint(bp);
25432@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25433 case 'k':
25434 /* clear the trace bit */
25435 linux_regs->flags &= ~X86_EFLAGS_TF;
25436- atomic_set(&kgdb_cpu_doing_single_step, -1);
25437+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25438
25439 /* set the trace bit if we're stepping */
25440 if (remcomInBuffer[0] == 's') {
25441 linux_regs->flags |= X86_EFLAGS_TF;
25442- atomic_set(&kgdb_cpu_doing_single_step,
25443+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25444 raw_smp_processor_id());
25445 }
25446
25447@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25448
25449 switch (cmd) {
25450 case DIE_DEBUG:
25451- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25452+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25453 if (user_mode(regs))
25454 return single_step_cont(regs, args);
25455 break;
25456@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25457 #endif /* CONFIG_DEBUG_RODATA */
25458
25459 bpt->type = BP_BREAKPOINT;
25460- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25461+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25462 BREAK_INSTR_SIZE);
25463 if (err)
25464 return err;
25465- err = probe_kernel_write((char *)bpt->bpt_addr,
25466+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25467 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25468 #ifdef CONFIG_DEBUG_RODATA
25469 if (!err)
25470@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25471 return -EBUSY;
25472 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25473 BREAK_INSTR_SIZE);
25474- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25475+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25476 if (err)
25477 return err;
25478 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25479@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25480 if (mutex_is_locked(&text_mutex))
25481 goto knl_write;
25482 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25483- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25484+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25485 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25486 goto knl_write;
25487 return err;
25488 knl_write:
25489 #endif /* CONFIG_DEBUG_RODATA */
25490- return probe_kernel_write((char *)bpt->bpt_addr,
25491+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25492 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25493 }
25494
25495diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25496index 4e3d5a9..03fffd8 100644
25497--- a/arch/x86/kernel/kprobes/core.c
25498+++ b/arch/x86/kernel/kprobes/core.c
25499@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25500 s32 raddr;
25501 } __packed *insn;
25502
25503- insn = (struct __arch_relative_insn *)from;
25504+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25505+
25506+ pax_open_kernel();
25507 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25508 insn->op = op;
25509+ pax_close_kernel();
25510 }
25511
25512 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25513@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25514 kprobe_opcode_t opcode;
25515 kprobe_opcode_t *orig_opcodes = opcodes;
25516
25517- if (search_exception_tables((unsigned long)opcodes))
25518+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25519 return 0; /* Page fault may occur on this address. */
25520
25521 retry:
25522@@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25523 * Fortunately, we know that the original code is the ideal 5-byte
25524 * long NOP.
25525 */
25526- memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25527+ memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25528 if (faddr)
25529 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25530 else
25531 buf[0] = kp->opcode;
25532- return (unsigned long)buf;
25533+ return ktva_ktla((unsigned long)buf);
25534 }
25535
25536 /*
25537@@ -364,7 +367,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25538 /* Another subsystem puts a breakpoint, failed to recover */
25539 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25540 return 0;
25541+ pax_open_kernel();
25542 memcpy(dest, insn.kaddr, insn.length);
25543+ pax_close_kernel();
25544
25545 #ifdef CONFIG_X86_64
25546 if (insn_rip_relative(&insn)) {
25547@@ -391,7 +396,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25548 return 0;
25549 }
25550 disp = (u8 *) dest + insn_offset_displacement(&insn);
25551+ pax_open_kernel();
25552 *(s32 *) disp = (s32) newdisp;
25553+ pax_close_kernel();
25554 }
25555 #endif
25556 return insn.length;
25557@@ -533,7 +540,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25558 * nor set current_kprobe, because it doesn't use single
25559 * stepping.
25560 */
25561- regs->ip = (unsigned long)p->ainsn.insn;
25562+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25563 preempt_enable_no_resched();
25564 return;
25565 }
25566@@ -550,9 +557,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25567 regs->flags &= ~X86_EFLAGS_IF;
25568 /* single step inline if the instruction is an int3 */
25569 if (p->opcode == BREAKPOINT_INSTRUCTION)
25570- regs->ip = (unsigned long)p->addr;
25571+ regs->ip = ktla_ktva((unsigned long)p->addr);
25572 else
25573- regs->ip = (unsigned long)p->ainsn.insn;
25574+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25575 }
25576 NOKPROBE_SYMBOL(setup_singlestep);
25577
25578@@ -602,7 +609,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25579 struct kprobe *p;
25580 struct kprobe_ctlblk *kcb;
25581
25582- if (user_mode_vm(regs))
25583+ if (user_mode(regs))
25584 return 0;
25585
25586 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25587@@ -637,7 +644,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25588 setup_singlestep(p, regs, kcb, 0);
25589 return 1;
25590 }
25591- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25592+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25593 /*
25594 * The breakpoint instruction was removed right
25595 * after we hit it. Another cpu has removed
25596@@ -684,6 +691,9 @@ static void __used kretprobe_trampoline_holder(void)
25597 " movq %rax, 152(%rsp)\n"
25598 RESTORE_REGS_STRING
25599 " popfq\n"
25600+#ifdef KERNEXEC_PLUGIN
25601+ " btsq $63,(%rsp)\n"
25602+#endif
25603 #else
25604 " pushf\n"
25605 SAVE_REGS_STRING
25606@@ -824,7 +834,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25607 struct kprobe_ctlblk *kcb)
25608 {
25609 unsigned long *tos = stack_addr(regs);
25610- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25611+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25612 unsigned long orig_ip = (unsigned long)p->addr;
25613 kprobe_opcode_t *insn = p->ainsn.insn;
25614
25615@@ -1007,7 +1017,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25616 struct die_args *args = data;
25617 int ret = NOTIFY_DONE;
25618
25619- if (args->regs && user_mode_vm(args->regs))
25620+ if (args->regs && user_mode(args->regs))
25621 return ret;
25622
25623 if (val == DIE_GPF) {
25624diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25625index 7b3b9d1..e2478b91 100644
25626--- a/arch/x86/kernel/kprobes/opt.c
25627+++ b/arch/x86/kernel/kprobes/opt.c
25628@@ -79,6 +79,7 @@ found:
25629 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25630 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25631 {
25632+ pax_open_kernel();
25633 #ifdef CONFIG_X86_64
25634 *addr++ = 0x48;
25635 *addr++ = 0xbf;
25636@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25637 *addr++ = 0xb8;
25638 #endif
25639 *(unsigned long *)addr = val;
25640+ pax_close_kernel();
25641 }
25642
25643 asm (
25644@@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25645 * Verify if the address gap is in 2GB range, because this uses
25646 * a relative jump.
25647 */
25648- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25649+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25650 if (abs(rel) > 0x7fffffff) {
25651 __arch_remove_optimized_kprobe(op, 0);
25652 return -ERANGE;
25653@@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25654 op->optinsn.size = ret;
25655
25656 /* Copy arch-dep-instance from template */
25657- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25658+ pax_open_kernel();
25659+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25660+ pax_close_kernel();
25661
25662 /* Set probe information */
25663 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25664
25665 /* Set probe function call */
25666- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25667+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25668
25669 /* Set returning jmp instruction at the tail of out-of-line buffer */
25670- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25671+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25672 (u8 *)op->kp.addr + op->optinsn.size);
25673
25674 flush_icache_range((unsigned long) buf,
25675@@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25676 WARN_ON(kprobe_disabled(&op->kp));
25677
25678 /* Backup instructions which will be replaced by jump address */
25679- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25680+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25681 RELATIVE_ADDR_SIZE);
25682
25683 insn_buf[0] = RELATIVEJUMP_OPCODE;
25684@@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25685 /* This kprobe is really able to run optimized path. */
25686 op = container_of(p, struct optimized_kprobe, kp);
25687 /* Detour through copied instructions */
25688- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25689+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25690 if (!reenter)
25691 reset_current_kprobe();
25692 preempt_enable_no_resched();
25693diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25694index c2bedae..25e7ab60 100644
25695--- a/arch/x86/kernel/ksysfs.c
25696+++ b/arch/x86/kernel/ksysfs.c
25697@@ -184,7 +184,7 @@ out:
25698
25699 static struct kobj_attribute type_attr = __ATTR_RO(type);
25700
25701-static struct bin_attribute data_attr = {
25702+static bin_attribute_no_const data_attr __read_only = {
25703 .attr = {
25704 .name = "data",
25705 .mode = S_IRUGO,
25706diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25707index c37886d..d851d32 100644
25708--- a/arch/x86/kernel/ldt.c
25709+++ b/arch/x86/kernel/ldt.c
25710@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25711 if (reload) {
25712 #ifdef CONFIG_SMP
25713 preempt_disable();
25714- load_LDT(pc);
25715+ load_LDT_nolock(pc);
25716 if (!cpumask_equal(mm_cpumask(current->mm),
25717 cpumask_of(smp_processor_id())))
25718 smp_call_function(flush_ldt, current->mm, 1);
25719 preempt_enable();
25720 #else
25721- load_LDT(pc);
25722+ load_LDT_nolock(pc);
25723 #endif
25724 }
25725 if (oldsize) {
25726@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25727 return err;
25728
25729 for (i = 0; i < old->size; i++)
25730- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25731+ write_ldt_entry(new->ldt, i, old->ldt + i);
25732 return 0;
25733 }
25734
25735@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25736 retval = copy_ldt(&mm->context, &old_mm->context);
25737 mutex_unlock(&old_mm->context.lock);
25738 }
25739+
25740+ if (tsk == current) {
25741+ mm->context.vdso = 0;
25742+
25743+#ifdef CONFIG_X86_32
25744+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25745+ mm->context.user_cs_base = 0UL;
25746+ mm->context.user_cs_limit = ~0UL;
25747+
25748+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25749+ cpus_clear(mm->context.cpu_user_cs_mask);
25750+#endif
25751+
25752+#endif
25753+#endif
25754+
25755+ }
25756+
25757 return retval;
25758 }
25759
25760@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25761 }
25762 }
25763
25764+#ifdef CONFIG_PAX_SEGMEXEC
25765+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25766+ error = -EINVAL;
25767+ goto out_unlock;
25768+ }
25769+#endif
25770+
25771 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25772 error = -EINVAL;
25773 goto out_unlock;
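
The write_ldt() hunk enforces a SEGMEXEC invariant: when NX is emulated by splitting the address space with segmentation, user code segments must not be installable through modify_ldt(), so descriptors with MODIFY_LDT_CONTENTS_CODE are rejected with -EINVAL. The check in isolation (the MF_PAX_SEGMEXEC bit value below is illustrative; MODIFY_LDT_CONTENTS_CODE is 2 as in <asm/ldt.h>):

/*
 * Sketch of the SEGMEXEC descriptor filter added to write_ldt().
 * MF_PAX_SEGMEXEC's value is assumed here; the real one lives in the
 * PaX headers.
 */
#include <errno.h>

#define MF_PAX_SEGMEXEC		 0x01	/* assumed flag bit  */
#define MODIFY_LDT_CONTENTS_CODE 2	/* as in <asm/ldt.h> */

struct ldt_req { unsigned int contents; };

static int check_ldt_entry(unsigned long pax_flags, const struct ldt_req *r)
{
	/* no user code segments while NX is emulated via segmentation */
	if ((pax_flags & MF_PAX_SEGMEXEC) &&
	    (r->contents & MODIFY_LDT_CONTENTS_CODE))
		return -EINVAL;
	return 0;
}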
25774diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
25775index ff3c3101d..d7c0cd8 100644
25776--- a/arch/x86/kernel/livepatch.c
25777+++ b/arch/x86/kernel/livepatch.c
25778@@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25779 int ret, numpages, size = 4;
25780 bool readonly;
25781 unsigned long val;
25782- unsigned long core = (unsigned long)mod->module_core;
25783- unsigned long core_ro_size = mod->core_ro_size;
25784- unsigned long core_size = mod->core_size;
25785+ unsigned long core_rx = (unsigned long)mod->module_core_rx;
25786+ unsigned long core_rw = (unsigned long)mod->module_core_rw;
25787+ unsigned long core_size_rx = mod->core_size_rx;
25788+ unsigned long core_size_rw = mod->core_size_rw;
25789
25790 switch (type) {
25791 case R_X86_64_NONE:
25792@@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25793 return -EINVAL;
25794 }
25795
25796- if (loc < core || loc >= core + core_size)
25797+ if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
25798+ (loc < core_rw || loc >= core_rw + core_size_rw))
25799 /* loc does not point to any symbol inside the module */
25800 return -EINVAL;
25801
25802- if (loc < core + core_ro_size)
25803+ if (loc < core_rx + core_size_rx)
25804 readonly = true;
25805 else
25806 readonly = false;
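
klp_write_module_reloc() is adapted here to the split RX/RW module layout used by KERNEXEC (see the module.c hunks further down): a relocation target is acceptable if it lands in either the RX (text) or RW (data) region, and it is treated as read-only exactly when it lies in the RX one. The bounds logic in isolation, with parameter names mirroring module_core_rx/_rw and core_size_rx/_rw from the hunk:

/*
 * Bounds logic of the adapted klp_write_module_reloc(), stand-alone.
 * The four region parameters mirror module_core_rx/_rw and
 * core_size_rx/_rw above.
 */
#include <errno.h>
#include <stdbool.h>

static int classify_reloc(unsigned long loc,
			  unsigned long core_rx, unsigned long size_rx,
			  unsigned long core_rw, unsigned long size_rw,
			  bool *readonly)
{
	bool in_rx = core_rx <= loc && loc < core_rx + size_rx;
	bool in_rw = core_rw <= loc && loc < core_rw + size_rw;

	if (!in_rx && !in_rw)
		return -EINVAL;	/* loc is outside the module entirely */

	*readonly = in_rx;	/* text is the read-only half */
	return 0;
}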
25807diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25808index 469b23d..5449cfe 100644
25809--- a/arch/x86/kernel/machine_kexec_32.c
25810+++ b/arch/x86/kernel/machine_kexec_32.c
25811@@ -26,7 +26,7 @@
25812 #include <asm/cacheflush.h>
25813 #include <asm/debugreg.h>
25814
25815-static void set_idt(void *newidt, __u16 limit)
25816+static void set_idt(struct desc_struct *newidt, __u16 limit)
25817 {
25818 struct desc_ptr curidt;
25819
25820@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25821 }
25822
25823
25824-static void set_gdt(void *newgdt, __u16 limit)
25825+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25826 {
25827 struct desc_ptr curgdt;
25828
25829@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25830 }
25831
25832 control_page = page_address(image->control_code_page);
25833- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25834+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25835
25836 relocate_kernel_ptr = control_page;
25837 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25838diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25839index 94ea120..4154cea 100644
25840--- a/arch/x86/kernel/mcount_64.S
25841+++ b/arch/x86/kernel/mcount_64.S
25842@@ -7,7 +7,7 @@
25843 #include <linux/linkage.h>
25844 #include <asm/ptrace.h>
25845 #include <asm/ftrace.h>
25846-
25847+#include <asm/alternative-asm.h>
25848
25849 .code64
25850 .section .entry.text, "ax"
25851@@ -148,8 +148,9 @@
25852 #ifdef CONFIG_DYNAMIC_FTRACE
25853
25854 ENTRY(function_hook)
25855+ pax_force_retaddr
25856 retq
25857-END(function_hook)
25858+ENDPROC(function_hook)
25859
25860 ENTRY(ftrace_caller)
25861 /* save_mcount_regs fills in first two parameters */
25862@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25863 #endif
25864
25865 GLOBAL(ftrace_stub)
25866+ pax_force_retaddr
25867 retq
25868-END(ftrace_caller)
25869+ENDPROC(ftrace_caller)
25870
25871 ENTRY(ftrace_regs_caller)
25872 /* Save the current flags before any operations that can change them */
25873@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25874
25875 jmp ftrace_return
25876
25877-END(ftrace_regs_caller)
25878+ENDPROC(ftrace_regs_caller)
25879
25880
25881 #else /* ! CONFIG_DYNAMIC_FTRACE */
25882@@ -272,18 +274,20 @@ fgraph_trace:
25883 #endif
25884
25885 GLOBAL(ftrace_stub)
25886+ pax_force_retaddr
25887 retq
25888
25889 trace:
25890 /* save_mcount_regs fills in first two parameters */
25891 save_mcount_regs
25892
25893+ pax_force_fptr ftrace_trace_function
25894 call *ftrace_trace_function
25895
25896 restore_mcount_regs
25897
25898 jmp fgraph_trace
25899-END(function_hook)
25900+ENDPROC(function_hook)
25901 #endif /* CONFIG_DYNAMIC_FTRACE */
25902 #endif /* CONFIG_FUNCTION_TRACER */
25903
25904@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25905
25906 restore_mcount_regs
25907
25908+ pax_force_retaddr
25909 retq
25910-END(ftrace_graph_caller)
25911+ENDPROC(ftrace_graph_caller)
25912
25913 GLOBAL(return_to_handler)
25914 subq $24, %rsp
25915@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25916 movq 8(%rsp), %rdx
25917 movq (%rsp), %rax
25918 addq $24, %rsp
25919+ pax_force_fptr %rdi
25920 jmp *%rdi
25921+ENDPROC(return_to_handler)
25922 #endif
25923diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25924index d1ac80b..f593701 100644
25925--- a/arch/x86/kernel/module.c
25926+++ b/arch/x86/kernel/module.c
25927@@ -82,17 +82,17 @@ static unsigned long int get_module_load_offset(void)
25928 }
25929 #endif
25930
25931-void *module_alloc(unsigned long size)
25932+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25933 {
25934 void *p;
25935
25936- if (PAGE_ALIGN(size) > MODULES_LEN)
25937+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25938 return NULL;
25939
25940 p = __vmalloc_node_range(size, MODULE_ALIGN,
25941 MODULES_VADDR + get_module_load_offset(),
25942- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25943- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
25944+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25945+ prot, 0, NUMA_NO_NODE,
25946 __builtin_return_address(0));
25947 if (p && (kasan_module_alloc(p, size) < 0)) {
25948 vfree(p);
25949@@ -102,6 +102,51 @@ void *module_alloc(unsigned long size)
25950 return p;
25951 }
25952
25953+void *module_alloc(unsigned long size)
25954+{
25955+
25956+#ifdef CONFIG_PAX_KERNEXEC
25957+ return __module_alloc(size, PAGE_KERNEL);
25958+#else
25959+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25960+#endif
25961+
25962+}
25963+
25964+#ifdef CONFIG_PAX_KERNEXEC
25965+#ifdef CONFIG_X86_32
25966+void *module_alloc_exec(unsigned long size)
25967+{
25968+ struct vm_struct *area;
25969+
25970+ if (size == 0)
25971+ return NULL;
25972+
25973+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25974+ return area ? area->addr : NULL;
25975+}
25976+EXPORT_SYMBOL(module_alloc_exec);
25977+
25978+void module_memfree_exec(void *module_region)
25979+{
25980+ vunmap(module_region);
25981+}
25982+EXPORT_SYMBOL(module_memfree_exec);
25983+#else
25984+void module_memfree_exec(void *module_region)
25985+{
25986+ module_memfree(module_region);
25987+}
25988+EXPORT_SYMBOL(module_memfree_exec);
25989+
25990+void *module_alloc_exec(unsigned long size)
25991+{
25992+ return __module_alloc(size, PAGE_KERNEL_RX);
25993+}
25994+EXPORT_SYMBOL(module_alloc_exec);
25995+#endif
25996+#endif
25997+
25998 #ifdef CONFIG_X86_32
25999 int apply_relocate(Elf32_Shdr *sechdrs,
26000 const char *strtab,
26001@@ -112,14 +157,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26002 unsigned int i;
26003 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26004 Elf32_Sym *sym;
26005- uint32_t *location;
26006+ uint32_t *plocation, location;
26007
26008 DEBUGP("Applying relocate section %u to %u\n",
26009 relsec, sechdrs[relsec].sh_info);
26010 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26011 /* This is where to make the change */
26012- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26013- + rel[i].r_offset;
26014+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26015+ location = (uint32_t)plocation;
26016+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26017+ plocation = ktla_ktva((void *)plocation);
26018 /* This is the symbol it is referring to. Note that all
26019 undefined symbols have been resolved. */
26020 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26021@@ -128,11 +175,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26022 switch (ELF32_R_TYPE(rel[i].r_info)) {
26023 case R_386_32:
26024 /* We add the value into the location given */
26025- *location += sym->st_value;
26026+ pax_open_kernel();
26027+ *plocation += sym->st_value;
26028+ pax_close_kernel();
26029 break;
26030 case R_386_PC32:
26031 /* Add the value, subtract its position */
26032- *location += sym->st_value - (uint32_t)location;
26033+ pax_open_kernel();
26034+ *plocation += sym->st_value - location;
26035+ pax_close_kernel();
26036 break;
26037 default:
26038 pr_err("%s: Unknown relocation: %u\n",
26039@@ -177,21 +228,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26040 case R_X86_64_NONE:
26041 break;
26042 case R_X86_64_64:
26043+ pax_open_kernel();
26044 *(u64 *)loc = val;
26045+ pax_close_kernel();
26046 break;
26047 case R_X86_64_32:
26048+ pax_open_kernel();
26049 *(u32 *)loc = val;
26050+ pax_close_kernel();
26051 if (val != *(u32 *)loc)
26052 goto overflow;
26053 break;
26054 case R_X86_64_32S:
26055+ pax_open_kernel();
26056 *(s32 *)loc = val;
26057+ pax_close_kernel();
26058 if ((s64)val != *(s32 *)loc)
26059 goto overflow;
26060 break;
26061 case R_X86_64_PC32:
26062 val -= (u64)loc;
26063+ pax_open_kernel();
26064 *(u32 *)loc = val;
26065+ pax_close_kernel();
26066+
26067 #if 0
26068 if ((s64)val != *(s32 *)loc)
26069 goto overflow;
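
module.c is reworked so module memory is never writable and executable at once, matching the livepatch hunk earlier: module_alloc() keeps handing out PAGE_KERNEL (RW, NX) memory for data, while the new module_alloc_exec() supplies the executable region (PAGE_KERNEL_RX on amd64, the MODULES_EXEC_VADDR window on i386). A user-space analogue, with mmap() standing in for __vmalloc_node_range():

/*
 * User-space analogue of the module_alloc()/module_alloc_exec()
 * split. mmap() stands in for __vmalloc_node_range(); the point is
 * only that writable and executable module memory come from two
 * distinct allocators with disjoint protections.
 */
#include <stddef.h>
#include <sys/mman.h>

static void *alloc_prot(size_t size, int prot)
{
	void *p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

void *module_alloc(size_t size)		/* RW, never executable */
{
	return alloc_prot(size, PROT_READ | PROT_WRITE);
}

void *module_alloc_exec(size_t size)	/* RX, never writable */
{
	return alloc_prot(size, PROT_READ | PROT_EXEC);
}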
26070diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26071index 113e707..0a690e1 100644
26072--- a/arch/x86/kernel/msr.c
26073+++ b/arch/x86/kernel/msr.c
26074@@ -39,6 +39,7 @@
26075 #include <linux/notifier.h>
26076 #include <linux/uaccess.h>
26077 #include <linux/gfp.h>
26078+#include <linux/grsecurity.h>
26079
26080 #include <asm/processor.h>
26081 #include <asm/msr.h>
26082@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26083 int err = 0;
26084 ssize_t bytes = 0;
26085
26086+#ifdef CONFIG_GRKERNSEC_KMEM
26087+ gr_handle_msr_write();
26088+ return -EPERM;
26089+#endif
26090+
26091 if (count % 8)
26092 return -EINVAL; /* Invalid chunk size */
26093
26094@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26095 err = -EBADF;
26096 break;
26097 }
26098+#ifdef CONFIG_GRKERNSEC_KMEM
26099+ gr_handle_msr_write();
26100+ return -EPERM;
26101+#endif
26102 if (copy_from_user(&regs, uregs, sizeof regs)) {
26103 err = -EFAULT;
26104 break;
26105@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26106 return notifier_from_errno(err);
26107 }
26108
26109-static struct notifier_block __refdata msr_class_cpu_notifier = {
26110+static struct notifier_block msr_class_cpu_notifier = {
26111 .notifier_call = msr_class_cpu_callback,
26112 };
26113
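
Both MSR write paths, msr_write() and the X86_IOC_WRMSR_REGS ioctl, are closed off under CONFIG_GRKERNSEC_KMEM: gr_handle_msr_write() logs the attempt and -EPERM is returned before any user buffer is even copied in. The gate in isolation, assuming gr_handle_msr_write() only logs:

/*
 * Shape of the MSR-write lockdown above. The constant -EPERM return
 * happens before any copy_from_user() on the real paths.
 */
#include <errno.h>

static void gr_handle_msr_write(void) { /* audit log */ }

static long msr_write_gate(void)
{
#ifdef CONFIG_GRKERNSEC_KMEM
	gr_handle_msr_write();
	return -EPERM;		/* wrmsr from user space: always denied */
#else
	return 0;		/* proceed to the real write path */
#endif
}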
26114diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26115index c3e985d..110a36a 100644
26116--- a/arch/x86/kernel/nmi.c
26117+++ b/arch/x86/kernel/nmi.c
26118@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26119
26120 static void nmi_max_handler(struct irq_work *w)
26121 {
26122- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26123+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26124 int remainder_ns, decimal_msecs;
26125- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26126+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26127
26128 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26129 decimal_msecs = remainder_ns / 1000;
26130
26131 printk_ratelimited(KERN_INFO
26132 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26133- a->handler, whole_msecs, decimal_msecs);
26134+ n->action->handler, whole_msecs, decimal_msecs);
26135 }
26136
26137 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26138@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26139 delta = sched_clock() - delta;
26140 trace_nmi_handler(a->handler, (int)delta, thishandled);
26141
26142- if (delta < nmi_longest_ns || delta < a->max_duration)
26143+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26144 continue;
26145
26146- a->max_duration = delta;
26147- irq_work_queue(&a->irq_work);
26148+ a->work->max_duration = delta;
26149+ irq_work_queue(&a->work->irq_work);
26150 }
26151
26152 rcu_read_unlock();
26153@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26154 }
26155 NOKPROBE_SYMBOL(nmi_handle);
26156
26157-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26158+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26159 {
26160 struct nmi_desc *desc = nmi_to_desc(type);
26161 unsigned long flags;
26162@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26163 if (!action->handler)
26164 return -EINVAL;
26165
26166- init_irq_work(&action->irq_work, nmi_max_handler);
26167+ action->work->action = action;
26168+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26169
26170 spin_lock_irqsave(&desc->lock, flags);
26171
26172@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26173 * event confuses some handlers (kdump uses this flag)
26174 */
26175 if (action->flags & NMI_FLAG_FIRST)
26176- list_add_rcu(&action->list, &desc->head);
26177+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26178 else
26179- list_add_tail_rcu(&action->list, &desc->head);
26180+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26181
26182 spin_unlock_irqrestore(&desc->lock, flags);
26183 return 0;
26184@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26185 if (!strcmp(n->name, name)) {
26186 WARN(in_nmi(),
26187 "Trying to free NMI (%s) from NMI context!\n", n->name);
26188- list_del_rcu(&n->list);
26189+ pax_list_del_rcu((struct list_head *)&n->list);
26190 break;
26191 }
26192 }
26193@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26194 dotraplinkage notrace void
26195 do_nmi(struct pt_regs *regs, long error_code)
26196 {
26197+
26198+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26199+ if (!user_mode(regs)) {
26200+ unsigned long cs = regs->cs & 0xFFFF;
26201+ unsigned long ip = ktva_ktla(regs->ip);
26202+
26203+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26204+ regs->ip = ip;
26205+ }
26206+#endif
26207+
26208 nmi_nesting_preprocess(regs);
26209
26210 nmi_enter();
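
Because __register_nmi_handler() now takes a const struct nmiaction *, the mutable bookkeeping (irq_work, max_duration) can no longer live inside the action itself; it moves into a separate struct nmiwork carrying a back-pointer, which nmi_max_handler() follows instead of applying container_of() to the action. A sketch of the resulting layout, with struct irq_work reduced to a placeholder:

/*
 * Sketch of the const-action / mutable-work split used above.
 * container_of() spelled out as in the kernel; the handler signature
 * is simplified.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_work { int pending; };

struct nmiaction;

struct nmiwork {			/* mutable side */
	struct irq_work		irq_work;
	unsigned long long	max_duration;
	const struct nmiaction	*action;	/* back-pointer */
};

struct nmiaction {			/* registration: can be const */
	int (*handler)(unsigned int type, void *regs);
	struct nmiwork *work;
};

static void nmi_max_handler(struct irq_work *w)
{
	struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
	(void)n->action->handler;	/* report via the back-pointer */
}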
26211diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26212index 6d9582e..f746287 100644
26213--- a/arch/x86/kernel/nmi_selftest.c
26214+++ b/arch/x86/kernel/nmi_selftest.c
26215@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26216 {
26217 /* trap all the unknown NMIs we may generate */
26218 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26219- __initdata);
26220+ __initconst);
26221 }
26222
26223 static void __init cleanup_nmi_testsuite(void)
26224@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26225 unsigned long timeout;
26226
26227 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26228- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26229+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26230 nmi_fail = FAILURE;
26231 return;
26232 }
26233diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26234index bbb6c73..24a58ef 100644
26235--- a/arch/x86/kernel/paravirt-spinlocks.c
26236+++ b/arch/x86/kernel/paravirt-spinlocks.c
26237@@ -8,7 +8,7 @@
26238
26239 #include <asm/paravirt.h>
26240
26241-struct pv_lock_ops pv_lock_ops = {
26242+struct pv_lock_ops pv_lock_ops __read_only = {
26243 #ifdef CONFIG_SMP
26244 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26245 .unlock_kick = paravirt_nop,
26246diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26247index 548d25f..f8fb99c 100644
26248--- a/arch/x86/kernel/paravirt.c
26249+++ b/arch/x86/kernel/paravirt.c
26250@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26251 {
26252 return x;
26253 }
26254+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26255+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26256+#endif
26257
26258 void __init default_banner(void)
26259 {
26260@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26261
26262 if (opfunc == NULL)
26263 /* If there's no function, patch it with a ud2a (BUG) */
26264- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26265- else if (opfunc == _paravirt_nop)
26266+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26267+ else if (opfunc == (void *)_paravirt_nop)
26268 /* If the operation is a nop, then nop the callsite */
26269 ret = paravirt_patch_nop();
26270
26271 /* identity functions just return their single argument */
26272- else if (opfunc == _paravirt_ident_32)
26273+ else if (opfunc == (void *)_paravirt_ident_32)
26274 ret = paravirt_patch_ident_32(insnbuf, len);
26275- else if (opfunc == _paravirt_ident_64)
26276+ else if (opfunc == (void *)_paravirt_ident_64)
26277 ret = paravirt_patch_ident_64(insnbuf, len);
26278+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26279+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26280+ ret = paravirt_patch_ident_64(insnbuf, len);
26281+#endif
26282
26283 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26284 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26285@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26286 if (insn_len > len || start == NULL)
26287 insn_len = len;
26288 else
26289- memcpy(insnbuf, start, insn_len);
26290+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26291
26292 return insn_len;
26293 }
26294@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26295 return this_cpu_read(paravirt_lazy_mode);
26296 }
26297
26298-struct pv_info pv_info = {
26299+struct pv_info pv_info __read_only = {
26300 .name = "bare hardware",
26301 .paravirt_enabled = 0,
26302 .kernel_rpl = 0,
26303@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26304 #endif
26305 };
26306
26307-struct pv_init_ops pv_init_ops = {
26308+struct pv_init_ops pv_init_ops __read_only = {
26309 .patch = native_patch,
26310 };
26311
26312-struct pv_time_ops pv_time_ops = {
26313+struct pv_time_ops pv_time_ops __read_only = {
26314 .sched_clock = native_sched_clock,
26315 .steal_clock = native_steal_clock,
26316 };
26317
26318-__visible struct pv_irq_ops pv_irq_ops = {
26319+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26320 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26321 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26322 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26323@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26324 #endif
26325 };
26326
26327-__visible struct pv_cpu_ops pv_cpu_ops = {
26328+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26329 .cpuid = native_cpuid,
26330 .get_debugreg = native_get_debugreg,
26331 .set_debugreg = native_set_debugreg,
26332@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26333 NOKPROBE_SYMBOL(native_set_debugreg);
26334 NOKPROBE_SYMBOL(native_load_idt);
26335
26336-struct pv_apic_ops pv_apic_ops = {
26337+struct pv_apic_ops pv_apic_ops __read_only = {
26338 #ifdef CONFIG_X86_LOCAL_APIC
26339 .startup_ipi_hook = paravirt_nop,
26340 #endif
26341 };
26342
26343-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26344+#ifdef CONFIG_X86_32
26345+#ifdef CONFIG_X86_PAE
26346+/* 64-bit pagetable entries */
26347+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26348+#else
26349 /* 32-bit pagetable entries */
26350 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26351+#endif
26352 #else
26353 /* 64-bit pagetable entries */
26354 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26355 #endif
26356
26357-struct pv_mmu_ops pv_mmu_ops = {
26358+struct pv_mmu_ops pv_mmu_ops __read_only = {
26359
26360 .read_cr2 = native_read_cr2,
26361 .write_cr2 = native_write_cr2,
26362@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26363 .make_pud = PTE_IDENT,
26364
26365 .set_pgd = native_set_pgd,
26366+ .set_pgd_batched = native_set_pgd_batched,
26367 #endif
26368 #endif /* PAGETABLE_LEVELS >= 3 */
26369
26370@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26371 },
26372
26373 .set_fixmap = native_set_fixmap,
26374+
26375+#ifdef CONFIG_PAX_KERNEXEC
26376+ .pax_open_kernel = native_pax_open_kernel,
26377+ .pax_close_kernel = native_pax_close_kernel,
26378+#endif
26379+
26380 };
26381
26382 EXPORT_SYMBOL_GPL(pv_time_ops);
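
All pv_*_ops tables above become __read_only, sealing the paravirt function-pointer tables after boot; this is also why the identity-function comparisons gain (void *) casts and why _paravirt_ident_64 grows a PV_CALLEE_SAVE thunk on 32-bit PAE. A minimal sketch of the annotation, assuming __read_only merely places the object in a section the kernel remaps read-only once init is done:

/*
 * Minimal sketch of the __read_only annotation. In plain user space
 * the named section stays writable, so this shows the layout, not
 * the enforcement; the kernel seals the section after init.
 */
#define __read_only __attribute__((section(".data..read_only")))

struct pv_ops_like {
	void (*op)(void);
};

static void native_op(void) { }

/* initialized at build time, then sealed against runtime rewrites */
struct pv_ops_like pv_ops_demo __read_only = {
	.op = native_op,
};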
26383diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26384index a1da673..b6f5831 100644
26385--- a/arch/x86/kernel/paravirt_patch_64.c
26386+++ b/arch/x86/kernel/paravirt_patch_64.c
26387@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26388 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26389 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26390 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26391+
26392+#ifndef CONFIG_PAX_MEMORY_UDEREF
26393 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26394+#endif
26395+
26396 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26397 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26398
26399@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26400 PATCH_SITE(pv_mmu_ops, read_cr3);
26401 PATCH_SITE(pv_mmu_ops, write_cr3);
26402 PATCH_SITE(pv_cpu_ops, clts);
26403+
26404+#ifndef CONFIG_PAX_MEMORY_UDEREF
26405 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26406+#endif
26407+
26408 PATCH_SITE(pv_cpu_ops, wbinvd);
26409
26410 patch_site:
26411diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26412index 0497f71..7186c0d 100644
26413--- a/arch/x86/kernel/pci-calgary_64.c
26414+++ b/arch/x86/kernel/pci-calgary_64.c
26415@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26416 tce_space = be64_to_cpu(readq(target));
26417 tce_space = tce_space & TAR_SW_BITS;
26418
26419- tce_space = tce_space & (~specified_table_size);
26420+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26421 info->tce_space = (u64 *)__va(tce_space);
26422 }
26423 }
26424diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26425index 35ccf75..7a15747 100644
26426--- a/arch/x86/kernel/pci-iommu_table.c
26427+++ b/arch/x86/kernel/pci-iommu_table.c
26428@@ -2,7 +2,7 @@
26429 #include <asm/iommu_table.h>
26430 #include <linux/string.h>
26431 #include <linux/kallsyms.h>
26432-
26433+#include <linux/sched.h>
26434
26435 #define DEBUG 1
26436
26437diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26438index 77dd0ad..9ec4723 100644
26439--- a/arch/x86/kernel/pci-swiotlb.c
26440+++ b/arch/x86/kernel/pci-swiotlb.c
26441@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26442 struct dma_attrs *attrs)
26443 {
26444 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26445- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26446+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26447 else
26448 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26449 }
26450diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26451index 046e2d6..2cc8ad2 100644
26452--- a/arch/x86/kernel/process.c
26453+++ b/arch/x86/kernel/process.c
26454@@ -37,7 +37,8 @@
26455 * section. Since TSS's are completely CPU-local, we want them
26456 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26457 */
26458-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26459+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26460+EXPORT_SYMBOL(init_tss);
26461
26462 #ifdef CONFIG_X86_64
26463 static DEFINE_PER_CPU(unsigned char, is_idle);
26464@@ -95,7 +96,7 @@ void arch_task_cache_init(void)
26465 task_xstate_cachep =
26466 kmem_cache_create("task_xstate", xstate_size,
26467 __alignof__(union thread_xstate),
26468- SLAB_PANIC | SLAB_NOTRACK, NULL);
26469+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26470 setup_xstate_comp();
26471 }
26472
26473@@ -109,7 +110,7 @@ void exit_thread(void)
26474 unsigned long *bp = t->io_bitmap_ptr;
26475
26476 if (bp) {
26477- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26478+ struct tss_struct *tss = init_tss + get_cpu();
26479
26480 t->io_bitmap_ptr = NULL;
26481 clear_thread_flag(TIF_IO_BITMAP);
26482@@ -129,6 +130,9 @@ void flush_thread(void)
26483 {
26484 struct task_struct *tsk = current;
26485
26486+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26487+ loadsegment(gs, 0);
26488+#endif
26489 flush_ptrace_hw_breakpoint(tsk);
26490 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26491 drop_init_fpu(tsk);
26492@@ -275,7 +279,7 @@ static void __exit_idle(void)
26493 void exit_idle(void)
26494 {
26495 /* idle loop has pid 0 */
26496- if (current->pid)
26497+ if (task_pid_nr(current))
26498 return;
26499 __exit_idle();
26500 }
26501@@ -328,7 +332,7 @@ bool xen_set_default_idle(void)
26502 return ret;
26503 }
26504 #endif
26505-void stop_this_cpu(void *dummy)
26506+__noreturn void stop_this_cpu(void *dummy)
26507 {
26508 local_irq_disable();
26509 /*
26510@@ -457,16 +461,37 @@ static int __init idle_setup(char *str)
26511 }
26512 early_param("idle", idle_setup);
26513
26514-unsigned long arch_align_stack(unsigned long sp)
26515+#ifdef CONFIG_PAX_RANDKSTACK
26516+void pax_randomize_kstack(struct pt_regs *regs)
26517 {
26518- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26519- sp -= get_random_int() % 8192;
26520- return sp & ~0xf;
26521-}
26522+ struct thread_struct *thread = &current->thread;
26523+ unsigned long time;
26524
26525-unsigned long arch_randomize_brk(struct mm_struct *mm)
26526-{
26527- unsigned long range_end = mm->brk + 0x02000000;
26528- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26529-}
26530+ if (!randomize_va_space)
26531+ return;
26532+
26533+ if (v8086_mode(regs))
26534+ return;
26535
26536+ rdtscl(time);
26537+
26538+ /* P4 seems to return a 0 LSB, ignore it */
26539+#ifdef CONFIG_MPENTIUM4
26540+ time &= 0x3EUL;
26541+ time <<= 2;
26542+#elif defined(CONFIG_X86_64)
26543+ time &= 0xFUL;
26544+ time <<= 4;
26545+#else
26546+ time &= 0x1FUL;
26547+ time <<= 3;
26548+#endif
26549+
26550+ thread->sp0 ^= time;
26551+ load_sp0(init_tss + smp_processor_id(), thread);
26552+
26553+#ifdef CONFIG_X86_64
26554+ this_cpu_write(kernel_stack, thread->sp0);
26555+#endif
26556+}
26557+#endif
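
process.c drops arch_align_stack()/arch_randomize_brk() in favour of pax_randomize_kstack(), which XORs a few low TSC bits into thread->sp0 so the kernel stack base shifts at each randomization point; the mask and shift differ per CPU family because the P4 returns a zero TSC LSB. The bit-selection arithmetic as a stand-alone function, with rdtscl() replaced by a caller-supplied value:

/*
 * The bit-selection arithmetic from pax_randomize_kstack(); masks and
 * shifts are copied from the hunk above, rdtscl() is replaced by the
 * tsc argument.
 */
static unsigned long kstack_offset(unsigned long tsc)
{
#if defined(CONFIG_MPENTIUM4)
	tsc &= 0x3EUL;	/* P4: bit 0 reads as 0, keep bits 1-5 */
	tsc <<= 2;	/* offsets 0..0xF8 in 8-byte steps     */
#elif defined(CONFIG_X86_64)
	tsc &= 0xFUL;	/* 4 bits                              */
	tsc <<= 4;	/* offsets 0..0xF0 in 16-byte steps    */
#else
	tsc &= 0x1FUL;	/* 5 bits                              */
	tsc <<= 3;	/* offsets 0..0xF8 in 8-byte steps     */
#endif
	return tsc;	/* caller XORs this into thread->sp0   */
}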
26558diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26559index 603c4f9..3a105d7 100644
26560--- a/arch/x86/kernel/process_32.c
26561+++ b/arch/x86/kernel/process_32.c
26562@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26563 unsigned long thread_saved_pc(struct task_struct *tsk)
26564 {
26565 return ((unsigned long *)tsk->thread.sp)[3];
26566+//XXX return tsk->thread.eip;
26567 }
26568
26569 void __show_regs(struct pt_regs *regs, int all)
26570@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26571 unsigned long sp;
26572 unsigned short ss, gs;
26573
26574- if (user_mode_vm(regs)) {
26575+ if (user_mode(regs)) {
26576 sp = regs->sp;
26577 ss = regs->ss & 0xffff;
26578- gs = get_user_gs(regs);
26579 } else {
26580 sp = kernel_stack_pointer(regs);
26581 savesegment(ss, ss);
26582- savesegment(gs, gs);
26583 }
26584+ gs = get_user_gs(regs);
26585
26586 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26587 (u16)regs->cs, regs->ip, regs->flags,
26588- smp_processor_id());
26589+ raw_smp_processor_id());
26590 print_symbol("EIP is at %s\n", regs->ip);
26591
26592 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26593@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26594 int copy_thread(unsigned long clone_flags, unsigned long sp,
26595 unsigned long arg, struct task_struct *p)
26596 {
26597- struct pt_regs *childregs = task_pt_regs(p);
26598+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26599 struct task_struct *tsk;
26600 int err;
26601
26602 p->thread.sp = (unsigned long) childregs;
26603 p->thread.sp0 = (unsigned long) (childregs+1);
26604+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26605 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26606
26607 if (unlikely(p->flags & PF_KTHREAD)) {
26608 /* kernel thread */
26609 memset(childregs, 0, sizeof(struct pt_regs));
26610 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26611- task_user_gs(p) = __KERNEL_STACK_CANARY;
26612- childregs->ds = __USER_DS;
26613- childregs->es = __USER_DS;
26614+ savesegment(gs, childregs->gs);
26615+ childregs->ds = __KERNEL_DS;
26616+ childregs->es = __KERNEL_DS;
26617 childregs->fs = __KERNEL_PERCPU;
26618 childregs->bx = sp; /* function */
26619 childregs->bp = arg;
26620@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26621 struct thread_struct *prev = &prev_p->thread,
26622 *next = &next_p->thread;
26623 int cpu = smp_processor_id();
26624- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26625+ struct tss_struct *tss = init_tss + cpu;
26626 fpu_switch_t fpu;
26627
26628 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26629@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26630 */
26631 lazy_save_gs(prev->gs);
26632
26633+#ifdef CONFIG_PAX_MEMORY_UDEREF
26634+ __set_fs(task_thread_info(next_p)->addr_limit);
26635+#endif
26636+
26637 /*
26638 * Load the per-thread Thread-Local Storage descriptor.
26639 */
26640@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26641 */
26642 arch_end_context_switch(next_p);
26643
26644- this_cpu_write(kernel_stack,
26645- (unsigned long)task_stack_page(next_p) +
26646- THREAD_SIZE - KERNEL_STACK_OFFSET);
26647+ this_cpu_write(current_task, next_p);
26648+ this_cpu_write(current_tinfo, &next_p->tinfo);
26649+ this_cpu_write(kernel_stack, next->sp0);
26650
26651 /*
26652 * Restore %gs if needed (which is common)
26653@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26654
26655 switch_fpu_finish(next_p, fpu);
26656
26657- this_cpu_write(current_task, next_p);
26658-
26659 return prev_p;
26660 }
26661
26662@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26663 } while (count++ < 16);
26664 return 0;
26665 }
26666-
26667diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26668index 67fcc43..0d2c630 100644
26669--- a/arch/x86/kernel/process_64.c
26670+++ b/arch/x86/kernel/process_64.c
26671@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26672 struct pt_regs *childregs;
26673 struct task_struct *me = current;
26674
26675- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26676+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26677 childregs = task_pt_regs(p);
26678 p->thread.sp = (unsigned long) childregs;
26679 p->thread.usersp = me->thread.usersp;
26680+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26681 set_tsk_thread_flag(p, TIF_FORK);
26682 p->thread.io_bitmap_ptr = NULL;
26683
26684@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26685 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26686 savesegment(es, p->thread.es);
26687 savesegment(ds, p->thread.ds);
26688+ savesegment(ss, p->thread.ss);
26689+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26690 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26691
26692 if (unlikely(p->flags & PF_KTHREAD)) {
26693@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26694 struct thread_struct *prev = &prev_p->thread;
26695 struct thread_struct *next = &next_p->thread;
26696 int cpu = smp_processor_id();
26697- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26698+ struct tss_struct *tss = init_tss + cpu;
26699 unsigned fsindex, gsindex;
26700 fpu_switch_t fpu;
26701
26702@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26703 if (unlikely(next->ds | prev->ds))
26704 loadsegment(ds, next->ds);
26705
26706+ savesegment(ss, prev->ss);
26707+ if (unlikely(next->ss != prev->ss))
26708+ loadsegment(ss, next->ss);
26709+
26710 /*
26711 * Switch FS and GS.
26712 *
26713@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26714 prev->usersp = this_cpu_read(old_rsp);
26715 this_cpu_write(old_rsp, next->usersp);
26716 this_cpu_write(current_task, next_p);
26717+ this_cpu_write(current_tinfo, &next_p->tinfo);
26718
26719 /*
26720 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26721@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26722 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26723 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26724
26725- this_cpu_write(kernel_stack,
26726- (unsigned long)task_stack_page(next_p) +
26727- THREAD_SIZE - KERNEL_STACK_OFFSET);
26728+ this_cpu_write(kernel_stack, next->sp0);
26729
26730 /*
26731 * Now maybe reload the debug registers and handle I/O bitmaps
26732@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26733 if (!p || p == current || p->state == TASK_RUNNING)
26734 return 0;
26735 stack = (unsigned long)task_stack_page(p);
26736- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26737+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26738 return 0;
26739 fp = *(u64 *)(p->thread.sp);
26740 do {
26741- if (fp < (unsigned long)stack ||
26742- fp >= (unsigned long)stack+THREAD_SIZE)
26743+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26744 return 0;
26745 ip = *(u64 *)(fp+8);
26746 if (!in_sched_functions(ip))
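
The tightened get_wchan() bounds above follow from the new stack layout: copy_thread() now places thread.sp0 at THREAD_SIZE - 16 from the stack base, so a saved stack or frame pointer can sit no higher than stack + THREAD_SIZE - 16 - sizeof(u64). A standalone sketch of that window check, with placeholder values (THREAD_SIZE and the base address are made up for the demo):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)	/* placeholder; the real value is config dependent */

/* Valid range for a saved sp/fp under the patched layout: at or above
 * the stack base, and low enough that an 8-byte slot still fits below
 * the 16 bytes reserved at the top of the stack. */
static int sp_in_window(uintptr_t sp, uintptr_t stack)
{
	return !(sp < stack || sp > stack + THREAD_SIZE - 16 - sizeof(uint64_t));
}

int main(void)
{
	uintptr_t stack = 0xc0000000UL;	/* made-up stack base */

	assert(sp_in_window(stack, stack));			/* bottom of stack: ok */
	assert(sp_in_window(stack + THREAD_SIZE - 24, stack));	/* last valid 8-byte slot */
	assert(!sp_in_window(stack + THREAD_SIZE - 8, stack));	/* inside the reserved top */
	puts("window checks hold");
	return 0;
}
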
26747diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26748index e510618..5165ac0 100644
26749--- a/arch/x86/kernel/ptrace.c
26750+++ b/arch/x86/kernel/ptrace.c
26751@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26752 unsigned long sp = (unsigned long)&regs->sp;
26753 u32 *prev_esp;
26754
26755- if (context == (sp & ~(THREAD_SIZE - 1)))
26756+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26757 return sp;
26758
26759- prev_esp = (u32 *)(context);
26760+ prev_esp = *(u32 **)(context);
26761 if (prev_esp)
26762 return (unsigned long)prev_esp;
26763
26764@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26765 if (child->thread.gs != value)
26766 return do_arch_prctl(child, ARCH_SET_GS, value);
26767 return 0;
26768+
26769+ case offsetof(struct user_regs_struct,ip):
26770+ /*
26771+ * Protect against any attempt to set ip to an
26772+ * impossible address. There are dragons lurking if the
26773+ * address is noncanonical. (This explicitly allows
26774+ * setting ip to TASK_SIZE_MAX, because user code can do
26775+ * that all by itself by running off the end of its
26776+ * address space.)
26777+ */
26778+ if (value > TASK_SIZE_MAX)
26779+ return -EIO;
26780+ break;
26781+
26782 #endif
26783 }
26784
26785@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26786 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26787 {
26788 int i;
26789- int dr7 = 0;
26790+ unsigned long dr7 = 0;
26791 struct arch_hw_breakpoint *info;
26792
26793 for (i = 0; i < HBP_NUM; i++) {
26794@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26795 unsigned long addr, unsigned long data)
26796 {
26797 int ret;
26798- unsigned long __user *datap = (unsigned long __user *)data;
26799+ unsigned long __user *datap = (__force unsigned long __user *)data;
26800
26801 switch (request) {
26802 /* read the word at location addr in the USER area. */
26803@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26804 if ((int) addr < 0)
26805 return -EIO;
26806 ret = do_get_thread_area(child, addr,
26807- (struct user_desc __user *)data);
26808+ (__force struct user_desc __user *) data);
26809 break;
26810
26811 case PTRACE_SET_THREAD_AREA:
26812 if ((int) addr < 0)
26813 return -EIO;
26814 ret = do_set_thread_area(child, addr,
26815- (struct user_desc __user *)data, 0);
26816+ (__force struct user_desc __user *) data, 0);
26817 break;
26818 #endif
26819
26820@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26821
26822 #ifdef CONFIG_X86_64
26823
26824-static struct user_regset x86_64_regsets[] __read_mostly = {
26825+static user_regset_no_const x86_64_regsets[] __read_only = {
26826 [REGSET_GENERAL] = {
26827 .core_note_type = NT_PRSTATUS,
26828 .n = sizeof(struct user_regs_struct) / sizeof(long),
26829@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26830 #endif /* CONFIG_X86_64 */
26831
26832 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26833-static struct user_regset x86_32_regsets[] __read_mostly = {
26834+static user_regset_no_const x86_32_regsets[] __read_only = {
26835 [REGSET_GENERAL] = {
26836 .core_note_type = NT_PRSTATUS,
26837 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26838@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26839 */
26840 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26841
26842-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26843+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26844 {
26845 #ifdef CONFIG_X86_64
26846 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26847@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26848 memset(info, 0, sizeof(*info));
26849 info->si_signo = SIGTRAP;
26850 info->si_code = si_code;
26851- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26852+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26853 }
26854
26855 void user_single_step_siginfo(struct task_struct *tsk,
26856@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26857 }
26858 }
26859
26860+#ifdef CONFIG_GRKERNSEC_SETXID
26861+extern void gr_delayed_cred_worker(void);
26862+#endif
26863+
26864 /*
26865 * We can return 0 to resume the syscall or anything else to go to phase
26866 * 2. If we resume the syscall, we need to put something appropriate in
26867@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26868
26869 BUG_ON(regs != task_pt_regs(current));
26870
26871+#ifdef CONFIG_GRKERNSEC_SETXID
26872+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26873+ gr_delayed_cred_worker();
26874+#endif
26875+
26876 /*
26877 * If we stepped into a sysenter/syscall insn, it trapped in
26878 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26879@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26880 */
26881 user_exit();
26882
26883+#ifdef CONFIG_GRKERNSEC_SETXID
26884+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26885+ gr_delayed_cred_worker();
26886+#endif
26887+
26888 audit_syscall_exit(regs);
26889
26890 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
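
The two TIF_GRSEC_SETXID hooks above share one idiom: a thread flag set elsewhere is atomically tested and cleared at a safe point (syscall entry and exit here), so the deferred worker runs exactly once per request. gr_delayed_cred_worker() is grsecurity-internal; in this userspace sketch of the idiom it is a stub, and a C11 atomic_exchange stands in for test_and_clear_thread_flag():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool setxid_pending;

static void delayed_cred_worker(void)		/* stub for gr_delayed_cred_worker() */
{
	puts("applying deferred credential change");
}

static void request_deferred_work(void)		/* the setxid path would call this */
{
	atomic_store(&setxid_pending, true);
}

/* Safe point: the exchange reads and clears the flag in one step, so
 * concurrent safe points cannot both observe it set. */
static void safe_point(void)
{
	if (atomic_exchange(&setxid_pending, false))
		delayed_cred_worker();
}

int main(void)
{
	request_deferred_work();
	safe_point();	/* runs the worker */
	safe_point();	/* flag already clear: no-op */
	return 0;
}
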
26891diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26892index 2f355d2..e75ed0a 100644
26893--- a/arch/x86/kernel/pvclock.c
26894+++ b/arch/x86/kernel/pvclock.c
26895@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26896 reset_hung_task_detector();
26897 }
26898
26899-static atomic64_t last_value = ATOMIC64_INIT(0);
26900+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26901
26902 void pvclock_resume(void)
26903 {
26904- atomic64_set(&last_value, 0);
26905+ atomic64_set_unchecked(&last_value, 0);
26906 }
26907
26908 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26909@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26910 * updating at the same time, and one of them could be slightly behind,
26911 * making the assumption that last_value always goes forward fail to hold.
26912 */
26913- last = atomic64_read(&last_value);
26914+ last = atomic64_read_unchecked(&last_value);
26915 do {
26916 if (ret < last)
26917 return last;
26918- last = atomic64_cmpxchg(&last_value, last, ret);
26919+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26920 } while (unlikely(last != ret));
26921
26922 return ret;
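
The loop above is a lock-free monotonic clamp: each reader publishes the largest timestamp seen so far and, if someone else already published a later one, returns that instead of letting the clock step backwards. The _unchecked swap only opts this 64-bit scalar out of PAX_REFCOUNT's overflow instrumentation (it is a timestamp, not a reference count); the algorithm itself is untouched. A userspace rendering with C11 atomics, using compare-exchange in place of atomic64_cmpxchg():

#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* Never-decreasing view of `raw`, mirroring pvclock_clocksource_read(). */
static uint64_t monotonic_read(uint64_t raw)
{
	uint64_t last = atomic_load(&last_value);

	while (raw > last) {
		/* try to publish raw; on failure `last` is reloaded with
		 * the newer published value and the comparison reruns */
		if (atomic_compare_exchange_weak(&last_value, &last, raw))
			return raw;
	}
	return last;	/* the clock is already at or past raw */
}

int main(void)
{
	printf("%" PRIu64 "\n", monotonic_read(100));	/* 100 */
	printf("%" PRIu64 "\n", monotonic_read(90));	/* still 100 */
	printf("%" PRIu64 "\n", monotonic_read(120));	/* 120 */
	return 0;
}
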
26923diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26924index 86db4bc..a50a54a 100644
26925--- a/arch/x86/kernel/reboot.c
26926+++ b/arch/x86/kernel/reboot.c
26927@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26928
26929 void __noreturn machine_real_restart(unsigned int type)
26930 {
26931+
26932+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26933+ struct desc_struct *gdt;
26934+#endif
26935+
26936 local_irq_disable();
26937
26938 /*
26939@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26940
26941 /* Jump to the identity-mapped low memory code */
26942 #ifdef CONFIG_X86_32
26943- asm volatile("jmpl *%0" : :
26944+
26945+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26946+ gdt = get_cpu_gdt_table(smp_processor_id());
26947+ pax_open_kernel();
26948+#ifdef CONFIG_PAX_MEMORY_UDEREF
26949+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26950+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26951+ loadsegment(ds, __KERNEL_DS);
26952+ loadsegment(es, __KERNEL_DS);
26953+ loadsegment(ss, __KERNEL_DS);
26954+#endif
26955+#ifdef CONFIG_PAX_KERNEXEC
26956+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26957+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26958+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26959+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26960+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26961+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26962+#endif
26963+ pax_close_kernel();
26964+#endif
26965+
26966+ asm volatile("ljmpl *%0" : :
26967 "rm" (real_mode_header->machine_real_restart_asm),
26968 "a" (type));
26969 #else
26970@@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
26971 /*
26972 * This is a single dmi_table handling all reboot quirks.
26973 */
26974-static struct dmi_system_id __initdata reboot_dmi_table[] = {
26975+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
26976
26977 /* Acer */
26978 { /* Handle reboot issue on Acer Aspire one */
26979@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26980 * This means that this function can never return, it can misbehave
26981 * by not rebooting properly and hanging.
26982 */
26983-static void native_machine_emergency_restart(void)
26984+static void __noreturn native_machine_emergency_restart(void)
26985 {
26986 int i;
26987 int attempt = 0;
26988@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
26989 #endif
26990 }
26991
26992-static void __machine_emergency_restart(int emergency)
26993+static void __noreturn __machine_emergency_restart(int emergency)
26994 {
26995 reboot_emergency = emergency;
26996 machine_ops.emergency_restart();
26997 }
26998
26999-static void native_machine_restart(char *__unused)
27000+static void __noreturn native_machine_restart(char *__unused)
27001 {
27002 pr_notice("machine restart\n");
27003
27004@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27005 __machine_emergency_restart(0);
27006 }
27007
27008-static void native_machine_halt(void)
27009+static void __noreturn native_machine_halt(void)
27010 {
27011 /* Stop other cpus and apics */
27012 machine_shutdown();
27013@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27014 stop_this_cpu(NULL);
27015 }
27016
27017-static void native_machine_power_off(void)
27018+static void __noreturn native_machine_power_off(void)
27019 {
27020 if (pm_power_off) {
27021 if (!reboot_force)
27022@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27023 }
27024 /* A fallback in case there is no PM info available */
27025 tboot_shutdown(TB_SHUTDOWN_HALT);
27026+ unreachable();
27027 }
27028
27029-struct machine_ops machine_ops = {
27030+struct machine_ops machine_ops __read_only = {
27031 .power_off = native_machine_power_off,
27032 .shutdown = native_machine_shutdown,
27033 .emergency_restart = native_machine_emergency_restart,
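
The GDT surgery above undoes the hardening before control reaches the identity-mapped restart stub: the UDEREF branch widens the kernel data segment back to plain read/write data, and the KERNEXEC branch restores a flat 4 GiB code segment by clearing the scattered base bytes and maxing out the split limit fields. A sketch of how those split fields combine, using a simplified view of the descriptor (the real desc_struct packs these as bitfields alongside the type/dpl/present bits):

#include <stdint.h>
#include <stdio.h>

struct desc_fields {
	uint16_t limit0;	/* limit bits  0..15 */
	uint16_t base0;		/* base  bits  0..15 */
	uint8_t  base1;		/* base  bits 16..23 */
	uint8_t  limit;		/* limit bits 16..19 (low nibble used) */
	uint8_t  g;		/* granularity: 1 = limit counted in 4 KiB units */
	uint8_t  base2;		/* base  bits 24..31 */
};

static uint32_t desc_base(const struct desc_fields *d)
{
	return d->base0 | ((uint32_t)d->base1 << 16) | ((uint32_t)d->base2 << 24);
}

static uint64_t desc_last_byte(const struct desc_fields *d)
{
	uint32_t limit = d->limit0 | ((uint32_t)(d->limit & 0xf) << 16);

	return d->g ? ((uint64_t)limit + 1) * 4096 - 1 : limit;
}

int main(void)
{
	/* the flat code segment the KERNEXEC branch reinstates */
	struct desc_fields cs = { 0xffff, 0, 0, 0xf, 1, 0 };

	printf("base=%#x last=%#llx\n", desc_base(&cs),
	       (unsigned long long)desc_last_byte(&cs));	/* base=0 last=0xffffffff */
	return 0;
}
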
27034diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27035index c8e41e9..64049ef 100644
27036--- a/arch/x86/kernel/reboot_fixups_32.c
27037+++ b/arch/x86/kernel/reboot_fixups_32.c
27038@@ -57,7 +57,7 @@ struct device_fixup {
27039 unsigned int vendor;
27040 unsigned int device;
27041 void (*reboot_fixup)(struct pci_dev *);
27042-};
27043+} __do_const;
27044
27045 /*
27046 * PCI ids solely used for fixups_table go here
27047diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27048index 3fd2c69..a444264 100644
27049--- a/arch/x86/kernel/relocate_kernel_64.S
27050+++ b/arch/x86/kernel/relocate_kernel_64.S
27051@@ -96,8 +96,7 @@ relocate_kernel:
27052
27053 /* jump to identity mapped page */
27054 addq $(identity_mapped - relocate_kernel), %r8
27055- pushq %r8
27056- ret
27057+ jmp *%r8
27058
27059 identity_mapped:
27060 /* set return address to 0 if not preserving context */
27061diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27062index 0a2421c..11f3f36 100644
27063--- a/arch/x86/kernel/setup.c
27064+++ b/arch/x86/kernel/setup.c
27065@@ -111,6 +111,7 @@
27066 #include <asm/mce.h>
27067 #include <asm/alternative.h>
27068 #include <asm/prom.h>
27069+#include <asm/boot.h>
27070
27071 /*
27072 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27073@@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27074 #endif
27075
27076
27077-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27078-__visible unsigned long mmu_cr4_features;
27079+#ifdef CONFIG_X86_64
27080+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27081+#elif defined(CONFIG_X86_PAE)
27082+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27083 #else
27084-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27085+__visible unsigned long mmu_cr4_features __read_only;
27086 #endif
27087
27088 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27089@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27090 * area (640->1Mb) as ram even though it is not.
27091 * take them out.
27092 */
27093- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27094+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27095
27096 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27097 }
27098@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27099 /* called before trim_bios_range() to spare extra sanitize */
27100 static void __init e820_add_kernel_range(void)
27101 {
27102- u64 start = __pa_symbol(_text);
27103+ u64 start = __pa_symbol(ktla_ktva(_text));
27104 u64 size = __pa_symbol(_end) - start;
27105
27106 /*
27107@@ -855,8 +858,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27108
27109 void __init setup_arch(char **cmdline_p)
27110 {
27111+#ifdef CONFIG_X86_32
27112+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27113+#else
27114 memblock_reserve(__pa_symbol(_text),
27115 (unsigned long)__bss_stop - (unsigned long)_text);
27116+#endif
27117
27118 early_reserve_initrd();
27119
27120@@ -954,16 +961,16 @@ void __init setup_arch(char **cmdline_p)
27121
27122 if (!boot_params.hdr.root_flags)
27123 root_mountflags &= ~MS_RDONLY;
27124- init_mm.start_code = (unsigned long) _text;
27125- init_mm.end_code = (unsigned long) _etext;
27126+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27127+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27128 init_mm.end_data = (unsigned long) _edata;
27129 init_mm.brk = _brk_end;
27130
27131 mpx_mm_init(&init_mm);
27132
27133- code_resource.start = __pa_symbol(_text);
27134- code_resource.end = __pa_symbol(_etext)-1;
27135- data_resource.start = __pa_symbol(_etext);
27136+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27137+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27138+ data_resource.start = __pa_symbol(_sdata);
27139 data_resource.end = __pa_symbol(_edata)-1;
27140 bss_resource.start = __pa_symbol(__bss_start);
27141 bss_resource.end = __pa_symbol(__bss_stop)-1;
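
ktla_ktva() ("kernel text linear address to kernel text virtual address") shows up throughout the setup.c hunks because, under 32-bit KERNEXEC, the kernel's code executes at an address offset from where the linear mapping sees it, and boot-time bookkeeping such as init_mm.start_code must track the executing view. A toy model only, assuming a plain constant offset in the spirit of the __KERNEL_TEXT_OFFSET definition in the vmlinux.lds.S hunk later in this patch (the real PaX macros are arch-specific and more involved):

#include <stdio.h>

/* made-up constant standing in for (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) */
#define __KERNEL_TEXT_OFFSET 0x01000000UL

#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)	/* linear -> exec view */
#define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)	/* exec view -> linear */

int main(void)
{
	unsigned long text_la = 0xc0400000UL;	/* made-up linear address of _text */
	unsigned long text_va = ktla_ktva(text_la);

	printf("linear %#lx -> exec %#lx -> back %#lx\n",
	       text_la, text_va, ktva_ktla(text_va));
	return 0;
}
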
27142diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27143index e4fcb87..9c06c55 100644
27144--- a/arch/x86/kernel/setup_percpu.c
27145+++ b/arch/x86/kernel/setup_percpu.c
27146@@ -21,19 +21,17 @@
27147 #include <asm/cpu.h>
27148 #include <asm/stackprotector.h>
27149
27150-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27151+#ifdef CONFIG_SMP
27152+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27153 EXPORT_PER_CPU_SYMBOL(cpu_number);
27154+#endif
27155
27156-#ifdef CONFIG_X86_64
27157 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27158-#else
27159-#define BOOT_PERCPU_OFFSET 0
27160-#endif
27161
27162 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27163 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27164
27165-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27166+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27167 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27168 };
27169 EXPORT_SYMBOL(__per_cpu_offset);
27170@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27171 {
27172 #ifdef CONFIG_NEED_MULTIPLE_NODES
27173 pg_data_t *last = NULL;
27174- unsigned int cpu;
27175+ int cpu;
27176
27177 for_each_possible_cpu(cpu) {
27178 int node = early_cpu_to_node(cpu);
27179@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27180 {
27181 #ifdef CONFIG_X86_32
27182 struct desc_struct gdt;
27183+ unsigned long base = per_cpu_offset(cpu);
27184
27185- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27186- 0x2 | DESCTYPE_S, 0x8);
27187- gdt.s = 1;
27188+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27189+ 0x83 | DESCTYPE_S, 0xC);
27190 write_gdt_entry(get_cpu_gdt_table(cpu),
27191 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27192 #endif
27193@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27194 /* alrighty, percpu areas up and running */
27195 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27196 for_each_possible_cpu(cpu) {
27197+#ifdef CONFIG_CC_STACKPROTECTOR
27198+#ifdef CONFIG_X86_32
27199+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27200+#endif
27201+#endif
27202 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27203 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27204 per_cpu(cpu_number, cpu) = cpu;
27205@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27206 */
27207 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27208 #endif
27209+#ifdef CONFIG_CC_STACKPROTECTOR
27210+#ifdef CONFIG_X86_32
27211+ if (!cpu)
27212+ per_cpu(stack_canary.canary, cpu) = canary;
27213+#endif
27214+#endif
27215 /*
27216 * Up to this point, the boot CPU has been using .init.data
27217 * area. Reload any changed state for the boot CPU.
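
Two things are going on in this file: the pack_descriptor() hunk shrinks each 32-bit per-CPU segment so it ends just below VMALLOC_END instead of covering all of memory (the 0xC flags nibble keeps the granularity bit, so the limit field counts 4 KiB pages), and the stack-canary hunks carry the boot CPU's canary across the switch to the real per-CPU area. Segment-limit arithmetic for the first part, with made-up base and VMALLOC_END values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* With the granularity bit set, the 20-bit limit field counts 4 KiB
 * pages, so the segment's last valid byte is base + (limit+1)*4K - 1. */
static uint64_t seg_last_byte(uint32_t base, uint32_t limit_field)
{
	return (uint64_t)base + (((uint64_t)limit_field + 1) << PAGE_SHIFT) - 1;
}

int main(void)
{
	uint32_t vmalloc_end = 0xfffff000U;	/* made-up placeholder */
	uint32_t base = 0xc2000000U;		/* made-up per-CPU base */
	uint32_t limit = (vmalloc_end - base - 1) >> PAGE_SHIFT;	/* as in the hunk */

	/* the last byte lands exactly at VMALLOC_END - 1 */
	printf("segment %#x..%#llx\n", base,
	       (unsigned long long)seg_last_byte(base, limit));
	return 0;
}
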
27218diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27219index e504246..ba10432 100644
27220--- a/arch/x86/kernel/signal.c
27221+++ b/arch/x86/kernel/signal.c
27222@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27223 * Align the stack pointer according to the i386 ABI,
27224 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27225 */
27226- sp = ((sp + 4) & -16ul) - 4;
27227+ sp = ((sp - 12) & -16ul) - 4;
27228 #else /* !CONFIG_X86_32 */
27229 sp = round_down(sp, 16) - 8;
27230 #endif
27231@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27232 }
27233
27234 if (current->mm->context.vdso)
27235- restorer = current->mm->context.vdso +
27236- selected_vdso32->sym___kernel_sigreturn;
27237+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27238 else
27239- restorer = &frame->retcode;
27240+ restorer = (void __user *)&frame->retcode;
27241 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27242 restorer = ksig->ka.sa.sa_restorer;
27243
27244@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27245 * reasons and because gdb uses it as a signature to notice
27246 * signal handler stack frames.
27247 */
27248- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27249+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27250
27251 if (err)
27252 return -EFAULT;
27253@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27254 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27255
27256 /* Set up to return from userspace. */
27257- restorer = current->mm->context.vdso +
27258- selected_vdso32->sym___kernel_rt_sigreturn;
27259+ if (current->mm->context.vdso)
27260+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27261+ else
27262+ restorer = (void __user *)&frame->retcode;
27263 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27264 restorer = ksig->ka.sa.sa_restorer;
27265 put_user_ex(restorer, &frame->pretcode);
27266@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27267 * reasons and because gdb uses it as a signature to notice
27268 * signal handler stack frames.
27269 */
27270- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27271+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27272 } put_user_catch(err);
27273
27274 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27275@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27276 {
27277 int usig = signr_convert(ksig->sig);
27278 sigset_t *set = sigmask_to_save();
27279- compat_sigset_t *cset = (compat_sigset_t *) set;
27280+ sigset_t sigcopy;
27281+ compat_sigset_t *cset;
27282+
27283+ sigcopy = *set;
27284+
27285+ cset = (compat_sigset_t *) &sigcopy;
27286
27287 /* Set up the stack frame */
27288 if (is_ia32_frame()) {
27289@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27290 } else if (is_x32_frame()) {
27291 return x32_setup_rt_frame(ksig, cset, regs);
27292 } else {
27293- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27294+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27295 }
27296 }
27297
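
On the align_sigframe() change: both formulas satisfy the i386 ABI invariant that ((sp + 4) & 15) == 0 at handler entry, but ((sp - 12) & -16ul) - 4 is identically ((sp + 4) & -16ul) - 4 - 16, so the patched frame always starts 16 bytes lower and therefore strictly below the incoming sp (the old formula could land exactly on it). A quick exhaustive check of those three claims:

#include <assert.h>
#include <stdio.h>

static unsigned long align_old(unsigned long sp) { return ((sp + 4) & -16UL) - 4; }
static unsigned long align_new(unsigned long sp) { return ((sp - 12) & -16UL) - 4; }

int main(void)
{
	for (unsigned long sp = 4096; sp < 4096 + 64; sp++) {
		/* ABI invariant holds for both */
		assert(((align_old(sp) + 4) & 15) == 0);
		assert(((align_new(sp) + 4) & 15) == 0);
		/* patched result is always exactly 16 bytes lower ... */
		assert(align_new(sp) == align_old(sp) - 16);
		/* ... hence always strictly below the incoming sp */
		assert(align_new(sp) < sp);
	}
	puts("alignment invariants hold");
	return 0;
}
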
27298diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27299index be8e1bd..a3d93fa 100644
27300--- a/arch/x86/kernel/smp.c
27301+++ b/arch/x86/kernel/smp.c
27302@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27303
27304 __setup("nonmi_ipi", nonmi_ipi_setup);
27305
27306-struct smp_ops smp_ops = {
27307+struct smp_ops smp_ops __read_only = {
27308 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27309 .smp_prepare_cpus = native_smp_prepare_cpus,
27310 .smp_cpus_done = native_smp_cpus_done,
27311diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27312index febc6aa..37d8edf 100644
27313--- a/arch/x86/kernel/smpboot.c
27314+++ b/arch/x86/kernel/smpboot.c
27315@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27316
27317 enable_start_cpu0 = 0;
27318
27319-#ifdef CONFIG_X86_32
27320+ /* otherwise gcc will move smp_processor_id() up before cpu_init() */
27321+ barrier();
27322+
27323 /* switch away from the initial page table */
27324+#ifdef CONFIG_PAX_PER_CPU_PGD
27325+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27326+#else
27327 load_cr3(swapper_pg_dir);
27328+#endif
27329 __flush_tlb_all();
27330-#endif
27331
27332- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27333- barrier();
27334 /*
27335 * Check TSC synchronization with the BP:
27336 */
27337@@ -800,8 +803,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27338 alternatives_enable_smp();
27339
27340 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27341- (THREAD_SIZE + task_stack_page(idle))) - 1);
27342+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27343 per_cpu(current_task, cpu) = idle;
27344+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27345
27346 #ifdef CONFIG_X86_32
27347 /* Stack for startup_32 can be just as for start_secondary onwards */
27348@@ -810,10 +814,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27349 clear_tsk_thread_flag(idle, TIF_FORK);
27350 initial_gs = per_cpu_offset(cpu);
27351 #endif
27352- per_cpu(kernel_stack, cpu) =
27353- (unsigned long)task_stack_page(idle) -
27354- KERNEL_STACK_OFFSET + THREAD_SIZE;
27355+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27356+ pax_open_kernel();
27357 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27358+ pax_close_kernel();
27359 initial_code = (unsigned long)start_secondary;
27360 stack_start = idle->thread.sp;
27361
27362@@ -953,6 +957,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27363 /* the FPU context is blank, nobody can own it */
27364 __cpu_disable_lazy_restore(cpu);
27365
27366+#ifdef CONFIG_PAX_PER_CPU_PGD
27367+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27368+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27369+ KERNEL_PGD_PTRS);
27370+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27371+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27372+ KERNEL_PGD_PTRS);
27373+#endif
27374+
27375 err = do_boot_cpu(apicid, cpu, tidle);
27376 if (err) {
27377 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
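
The PAX_PER_CPU_PGD hunk seeds each secondary CPU's private page directories with the kernel half of swapper_pg_dir before the CPU boots. In the kernel, clone_pgd_range() is just a memcpy of top-level entries, so the per-CPU tables share the kernel's lower-level page tables rather than duplicating them. A toy model using the classic 32-bit non-PAE layout (1024 entries, kernel space starting at index 768):

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD 1024		/* 32-bit non-PAE, for the toy */
#define KERNEL_PGD_BOUNDARY 768		/* pgd index of 0xC0000000 */
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

typedef unsigned long pgd_t;

/* same idea as the kernel helper: copying a top-level entry aliases
 * everything beneath it */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
	static pgd_t swapper_pg_dir[PTRS_PER_PGD];	/* reference directory */
	static pgd_t cpu_pgd[PTRS_PER_PGD];		/* per-CPU directory */

	swapper_pg_dir[KERNEL_PGD_BOUNDARY] = 0x1234;	/* pretend kernel entry */
	clone_pgd_range(cpu_pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
	printf("cloned entry: %#lx\n", cpu_pgd[KERNEL_PGD_BOUNDARY]);
	return 0;
}
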
27378diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27379index 9b4d51d..5d28b58 100644
27380--- a/arch/x86/kernel/step.c
27381+++ b/arch/x86/kernel/step.c
27382@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27383 struct desc_struct *desc;
27384 unsigned long base;
27385
27386- seg &= ~7UL;
27387+ seg >>= 3;
27388
27389 mutex_lock(&child->mm->context.lock);
27390- if (unlikely((seg >> 3) >= child->mm->context.size))
27391+ if (unlikely(seg >= child->mm->context.size))
27392 addr = -1L; /* bogus selector, access would fault */
27393 else {
27394 desc = child->mm->context.ldt + seg;
27395@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27396 addr += base;
27397 }
27398 mutex_unlock(&child->mm->context.lock);
27399- }
27400+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27401+ addr = ktla_ktva(addr);
27402
27403 return addr;
27404 }
27405@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27406 unsigned char opcode[15];
27407 unsigned long addr = convert_ip_to_linear(child, regs);
27408
27409+ if (addr == -EINVAL)
27410+ return 0;
27411+
27412 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27413 for (i = 0; i < copied; i++) {
27414 switch (opcode[i]) {
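
The `seg >>= 3` rewrite above makes the selector decoding explicit at the top: an x86 segment selector carries the descriptor-table index in bits 3..15, the table indicator (GDT/LDT) in bit 2, and the requested privilege level in bits 0..1, so shifting by 3 yields the index directly instead of masking the low bits and dividing later. Decoding sketch:

#include <stdint.h>
#include <stdio.h>

struct selector {
	unsigned index;	/* descriptor table index (selector >> 3) */
	unsigned ti;	/* table indicator: 0 = GDT, 1 = LDT */
	unsigned rpl;	/* requested privilege level */
};

static struct selector decode(uint16_t sel)
{
	return (struct selector){ sel >> 3, (sel >> 2) & 1, sel & 3 };
}

int main(void)
{
	struct selector s = decode(0x7b);	/* __USER_DS on 32-bit Linux */

	printf("index=%u ti=%u rpl=%u\n", s.index, s.ti, s.rpl);
	/* prints index=15 ti=0 rpl=3 */
	return 0;
}
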
27415diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27416new file mode 100644
27417index 0000000..5877189
27418--- /dev/null
27419+++ b/arch/x86/kernel/sys_i386_32.c
27420@@ -0,0 +1,189 @@
27421+/*
27422+ * This file contains various random system calls that
27423+ * have a non-standard calling sequence on the Linux/i386
27424+ * platform.
27425+ */
27426+
27427+#include <linux/errno.h>
27428+#include <linux/sched.h>
27429+#include <linux/mm.h>
27430+#include <linux/fs.h>
27431+#include <linux/smp.h>
27432+#include <linux/sem.h>
27433+#include <linux/msg.h>
27434+#include <linux/shm.h>
27435+#include <linux/stat.h>
27436+#include <linux/syscalls.h>
27437+#include <linux/mman.h>
27438+#include <linux/file.h>
27439+#include <linux/utsname.h>
27440+#include <linux/ipc.h>
27441+#include <linux/elf.h>
27442+
27443+#include <linux/uaccess.h>
27444+#include <linux/unistd.h>
27445+
27446+#include <asm/syscalls.h>
27447+
27448+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27449+{
27450+ unsigned long pax_task_size = TASK_SIZE;
27451+
27452+#ifdef CONFIG_PAX_SEGMEXEC
27453+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27454+ pax_task_size = SEGMEXEC_TASK_SIZE;
27455+#endif
27456+
27457+ if (flags & MAP_FIXED)
27458+ if (len > pax_task_size || addr > pax_task_size - len)
27459+ return -EINVAL;
27460+
27461+ return 0;
27462+}
27463+
27464+/*
27465+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27466+ */
27467+static unsigned long get_align_mask(void)
27468+{
27469+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27470+ return 0;
27471+
27472+ if (!(current->flags & PF_RANDOMIZE))
27473+ return 0;
27474+
27475+ return va_align.mask;
27476+}
27477+
27478+unsigned long
27479+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27480+ unsigned long len, unsigned long pgoff, unsigned long flags)
27481+{
27482+ struct mm_struct *mm = current->mm;
27483+ struct vm_area_struct *vma;
27484+ unsigned long pax_task_size = TASK_SIZE;
27485+ struct vm_unmapped_area_info info;
27486+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27487+
27488+#ifdef CONFIG_PAX_SEGMEXEC
27489+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27490+ pax_task_size = SEGMEXEC_TASK_SIZE;
27491+#endif
27492+
27493+ pax_task_size -= PAGE_SIZE;
27494+
27495+ if (len > pax_task_size)
27496+ return -ENOMEM;
27497+
27498+ if (flags & MAP_FIXED)
27499+ return addr;
27500+
27501+#ifdef CONFIG_PAX_RANDMMAP
27502+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27503+#endif
27504+
27505+ if (addr) {
27506+ addr = PAGE_ALIGN(addr);
27507+ if (pax_task_size - len >= addr) {
27508+ vma = find_vma(mm, addr);
27509+ if (check_heap_stack_gap(vma, addr, len, offset))
27510+ return addr;
27511+ }
27512+ }
27513+
27514+ info.flags = 0;
27515+ info.length = len;
27516+ info.align_mask = filp ? get_align_mask() : 0;
27517+ info.align_offset = pgoff << PAGE_SHIFT;
27518+ info.threadstack_offset = offset;
27519+
27520+#ifdef CONFIG_PAX_PAGEEXEC
27521+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27522+ info.low_limit = 0x00110000UL;
27523+ info.high_limit = mm->start_code;
27524+
27525+#ifdef CONFIG_PAX_RANDMMAP
27526+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27527+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27528+#endif
27529+
27530+ if (info.low_limit < info.high_limit) {
27531+ addr = vm_unmapped_area(&info);
27532+ if (!IS_ERR_VALUE(addr))
27533+ return addr;
27534+ }
27535+ } else
27536+#endif
27537+
27538+ info.low_limit = mm->mmap_base;
27539+ info.high_limit = pax_task_size;
27540+
27541+ return vm_unmapped_area(&info);
27542+}
27543+
27544+unsigned long
27545+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27546+ const unsigned long len, const unsigned long pgoff,
27547+ const unsigned long flags)
27548+{
27549+ struct vm_area_struct *vma;
27550+ struct mm_struct *mm = current->mm;
27551+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27552+ struct vm_unmapped_area_info info;
27553+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27554+
27555+#ifdef CONFIG_PAX_SEGMEXEC
27556+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27557+ pax_task_size = SEGMEXEC_TASK_SIZE;
27558+#endif
27559+
27560+ pax_task_size -= PAGE_SIZE;
27561+
27562+ /* requested length too big for entire address space */
27563+ if (len > pax_task_size)
27564+ return -ENOMEM;
27565+
27566+ if (flags & MAP_FIXED)
27567+ return addr;
27568+
27569+#ifdef CONFIG_PAX_PAGEEXEC
27570+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27571+ goto bottomup;
27572+#endif
27573+
27574+#ifdef CONFIG_PAX_RANDMMAP
27575+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27576+#endif
27577+
27578+ /* requesting a specific address */
27579+ if (addr) {
27580+ addr = PAGE_ALIGN(addr);
27581+ if (pax_task_size - len >= addr) {
27582+ vma = find_vma(mm, addr);
27583+ if (check_heap_stack_gap(vma, addr, len, offset))
27584+ return addr;
27585+ }
27586+ }
27587+
27588+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27589+ info.length = len;
27590+ info.low_limit = PAGE_SIZE;
27591+ info.high_limit = mm->mmap_base;
27592+ info.align_mask = filp ? get_align_mask() : 0;
27593+ info.align_offset = pgoff << PAGE_SHIFT;
27594+ info.threadstack_offset = offset;
27595+
27596+ addr = vm_unmapped_area(&info);
27597+ if (!(addr & ~PAGE_MASK))
27598+ return addr;
27599+ VM_BUG_ON(addr != -ENOMEM);
27600+
27601+bottomup:
27602+ /*
27603+ * A failed mmap() very likely causes application failure,
27604+ * so fall back to the bottom-up function here. This scenario
27605+ * can happen with large stack limits and large mmap()
27606+ * allocations.
27607+ */
27608+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27609+}
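
Worth noting in the new i386_mmap_check() above: the MAP_FIXED validation is written as `len > limit || addr > limit - len` precisely so that `addr + len` is never computed, since that sum can wrap on 32 bits and sneak past a naive comparison. A demonstration with 32-bit arithmetic modeled explicitly (the limit is a placeholder):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TASK_LIMIT UINT32_C(0xC0000000)	/* placeholder 3 GiB user limit */

static bool naive_ok(uint32_t addr, uint32_t len)
{
	return (uint32_t)(addr + len) <= TASK_LIMIT;	/* the sum can wrap */
}

static bool safe_ok(uint32_t addr, uint32_t len)
{
	/* same shape as the patch: reject len first, then compare addr
	 * against limit - len, which cannot underflow once len fits */
	return len <= TASK_LIMIT && addr <= TASK_LIMIT - len;
}

int main(void)
{
	uint32_t addr = UINT32_C(0xBFFFF000), len = UINT32_C(0x50000000);

	printf("naive: %d (wraparound lets it through)\n", naive_ok(addr, len));
	printf("safe:  %d (rejected)\n", safe_ok(addr, len));
	return 0;
}
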
27610diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27611index 30277e2..5664a29 100644
27612--- a/arch/x86/kernel/sys_x86_64.c
27613+++ b/arch/x86/kernel/sys_x86_64.c
27614@@ -81,8 +81,8 @@ out:
27615 return error;
27616 }
27617
27618-static void find_start_end(unsigned long flags, unsigned long *begin,
27619- unsigned long *end)
27620+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27621+ unsigned long *begin, unsigned long *end)
27622 {
27623 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27624 unsigned long new_begin;
27625@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27626 *begin = new_begin;
27627 }
27628 } else {
27629- *begin = current->mm->mmap_legacy_base;
27630+ *begin = mm->mmap_legacy_base;
27631 *end = TASK_SIZE;
27632 }
27633 }
27634@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27635 struct vm_area_struct *vma;
27636 struct vm_unmapped_area_info info;
27637 unsigned long begin, end;
27638+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27639
27640 if (flags & MAP_FIXED)
27641 return addr;
27642
27643- find_start_end(flags, &begin, &end);
27644+ find_start_end(mm, flags, &begin, &end);
27645
27646 if (len > end)
27647 return -ENOMEM;
27648
27649+#ifdef CONFIG_PAX_RANDMMAP
27650+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27651+#endif
27652+
27653 if (addr) {
27654 addr = PAGE_ALIGN(addr);
27655 vma = find_vma(mm, addr);
27656- if (end - len >= addr &&
27657- (!vma || addr + len <= vma->vm_start))
27658+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27659 return addr;
27660 }
27661
27662@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27663 info.high_limit = end;
27664 info.align_mask = filp ? get_align_mask() : 0;
27665 info.align_offset = pgoff << PAGE_SHIFT;
27666+ info.threadstack_offset = offset;
27667 return vm_unmapped_area(&info);
27668 }
27669
27670@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27671 struct mm_struct *mm = current->mm;
27672 unsigned long addr = addr0;
27673 struct vm_unmapped_area_info info;
27674+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27675
27676 /* requested length too big for entire address space */
27677 if (len > TASK_SIZE)
27678@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27679 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27680 goto bottomup;
27681
27682+#ifdef CONFIG_PAX_RANDMMAP
27683+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27684+#endif
27685+
27686 /* requesting a specific address */
27687 if (addr) {
27688 addr = PAGE_ALIGN(addr);
27689 vma = find_vma(mm, addr);
27690- if (TASK_SIZE - len >= addr &&
27691- (!vma || addr + len <= vma->vm_start))
27692+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27693 return addr;
27694 }
27695
27696@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27697 info.high_limit = mm->mmap_base;
27698 info.align_mask = filp ? get_align_mask() : 0;
27699 info.align_offset = pgoff << PAGE_SHIFT;
27700+ info.threadstack_offset = offset;
27701 addr = vm_unmapped_area(&info);
27702 if (!(addr & ~PAGE_MASK))
27703 return addr;
27704diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27705index 91a4496..bb87552 100644
27706--- a/arch/x86/kernel/tboot.c
27707+++ b/arch/x86/kernel/tboot.c
27708@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27709
27710 void tboot_shutdown(u32 shutdown_type)
27711 {
27712- void (*shutdown)(void);
27713+ void (* __noreturn shutdown)(void);
27714
27715 if (!tboot_enabled())
27716 return;
27717@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27718
27719 switch_to_tboot_pt();
27720
27721- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27722+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27723 shutdown();
27724
27725 /* should not reach here */
27726@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27727 return -ENODEV;
27728 }
27729
27730-static atomic_t ap_wfs_count;
27731+static atomic_unchecked_t ap_wfs_count;
27732
27733 static int tboot_wait_for_aps(int num_aps)
27734 {
27735@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27736 {
27737 switch (action) {
27738 case CPU_DYING:
27739- atomic_inc(&ap_wfs_count);
27740+ atomic_inc_unchecked(&ap_wfs_count);
27741 if (num_online_cpus() == 1)
27742- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27743+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27744 return NOTIFY_BAD;
27745 break;
27746 }
27747@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27748
27749 tboot_create_trampoline();
27750
27751- atomic_set(&ap_wfs_count, 0);
27752+ atomic_set_unchecked(&ap_wfs_count, 0);
27753 register_hotcpu_notifier(&tboot_cpu_notifier);
27754
27755 #ifdef CONFIG_DEBUG_FS
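
The ap_wfs_count conversion is the PAX_REFCOUNT pattern seen throughout this patch: plain atomic_t arithmetic gets overflow instrumentation (an overflowing increment raises a trap), while counters audited as not being reference counts, like this CPU bookkeeping value, move to *_unchecked variants that keep ordinary wrapping semantics. A userspace toy of the split; the assert stands in for the trap:

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } toy_atomic_t;		/* checked */
typedef struct { atomic_int counter; } toy_atomic_unchecked_t;	/* opted out */

static void toy_atomic_inc(toy_atomic_t *v)
{
	int old = atomic_fetch_add(&v->counter, 1);

	assert(old != INT_MAX);		/* stand-in for the overflow trap */
}

static void toy_atomic_inc_unchecked(toy_atomic_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);	/* free to wrap */
}

int main(void)
{
	toy_atomic_t ref = { 0 };
	toy_atomic_unchecked_t stat = { INT_MAX };

	toy_atomic_inc(&ref);
	toy_atomic_inc_unchecked(&stat);	/* wraps to INT_MIN, by design */
	printf("ref=%d stat=%d\n", atomic_load(&ref.counter),
	       atomic_load(&stat.counter));
	return 0;
}
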
27756diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27757index 25adc0e..1df4349 100644
27758--- a/arch/x86/kernel/time.c
27759+++ b/arch/x86/kernel/time.c
27760@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27761 {
27762 unsigned long pc = instruction_pointer(regs);
27763
27764- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27765+ if (!user_mode(regs) && in_lock_functions(pc)) {
27766 #ifdef CONFIG_FRAME_POINTER
27767- return *(unsigned long *)(regs->bp + sizeof(long));
27768+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27769 #else
27770 unsigned long *sp =
27771 (unsigned long *)kernel_stack_pointer(regs);
27772@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27773 * or above a saved flags. Eflags has bits 22-31 zero,
27774 * kernel addresses don't.
27775 */
27776+
27777+#ifdef CONFIG_PAX_KERNEXEC
27778+ return ktla_ktva(sp[0]);
27779+#else
27780 if (sp[0] >> 22)
27781 return sp[0];
27782 if (sp[1] >> 22)
27783 return sp[1];
27784 #endif
27785+
27786+#endif
27787 }
27788 return pc;
27789 }
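
Context for the profile_pc() hunk: the frame-pointer-less fallback peeks at the stack and must tell a saved EFLAGS image apart from a kernel return address. As the retained comment says, EFLAGS bits 22..31 are architecturally zero, while i386 kernel text addresses sit near the top of the address space, so `word >> 22` is nonzero only for the addresses (the KERNEXEC branch then adds the usual ktla_ktva() translation). The discriminator in isolation:

#include <stdint.h>
#include <stdio.h>

/* nonzero only for values with any of bits 22..31 set, i.e. kernel
 * addresses rather than saved EFLAGS images */
static int looks_like_kernel_address(uint32_t word)
{
	return (word >> 22) != 0;
}

int main(void)
{
	uint32_t eflags = 0x00000246;	/* a typical EFLAGS image */
	uint32_t retaddr = 0xc1234567;	/* made-up kernel text address */

	printf("eflags:  %d\n", looks_like_kernel_address(eflags));	/* 0 */
	printf("retaddr: %d\n", looks_like_kernel_address(retaddr));	/* 1 */
	return 0;
}
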
27790diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27791index 7fc5e84..c6e445a 100644
27792--- a/arch/x86/kernel/tls.c
27793+++ b/arch/x86/kernel/tls.c
27794@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27795 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27796 return -EINVAL;
27797
27798+#ifdef CONFIG_PAX_SEGMEXEC
27799+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27800+ return -EINVAL;
27801+#endif
27802+
27803 set_tls_desc(p, idx, &info, 1);
27804
27805 return 0;
27806@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27807
27808 if (kbuf)
27809 info = kbuf;
27810- else if (__copy_from_user(infobuf, ubuf, count))
27811+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27812 return -EFAULT;
27813 else
27814 info = infobuf;
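
The regset_tls_set() hunk is a textbook bounded-copy guard: validate the user-controlled length against the fixed destination before the copy, not after. A userspace analogue with memcpy standing in for __copy_from_user() and a simplified user_desc (the real one carries additional bitfields):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define GDT_ENTRY_TLS_ENTRIES 3		/* matches x86 */

struct user_desc { unsigned entry_number, base_addr, limit, flags; };

static int set_tls_from_user(const void *ubuf, size_t count)
{
	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];

	if (count > sizeof(infobuf))
		return -EFAULT;		/* the guard the patch adds */
	memcpy(infobuf, ubuf, count);
	return 0;
}

int main(void)
{
	char junk[4096] = { 0 };

	printf("in-bounds: %d\n", set_tls_from_user(junk, sizeof(struct user_desc)));
	printf("oversize:  %d\n", set_tls_from_user(junk, sizeof(junk)));
	return 0;
}
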
27815diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27816index 1c113db..287b42e 100644
27817--- a/arch/x86/kernel/tracepoint.c
27818+++ b/arch/x86/kernel/tracepoint.c
27819@@ -9,11 +9,11 @@
27820 #include <linux/atomic.h>
27821
27822 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27823-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27824+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27825 (unsigned long) trace_idt_table };
27826
27827 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27828-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27829+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27830
27831 static int trace_irq_vector_refcount;
27832 static DEFINE_MUTEX(irq_vector_mutex);
27833diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27834index 4ff5d16..736e3e1 100644
27835--- a/arch/x86/kernel/traps.c
27836+++ b/arch/x86/kernel/traps.c
27837@@ -68,7 +68,7 @@
27838 #include <asm/proto.h>
27839
27840 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27841-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27842+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27843 #else
27844 #include <asm/processor-flags.h>
27845 #include <asm/setup.h>
27846@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27847 #endif
27848
27849 /* Must be page-aligned because the real IDT is used in a fixmap. */
27850-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27851+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27852
27853 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27854 EXPORT_SYMBOL_GPL(used_vectors);
27855@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
27856 {
27857 enum ctx_state prev_state;
27858
27859- if (user_mode_vm(regs)) {
27860+ if (user_mode(regs)) {
27861 /* Other than that, we're just an exception. */
27862 prev_state = exception_enter();
27863 } else {
27864@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27865 /* Must be before exception_exit. */
27866 preempt_count_sub(HARDIRQ_OFFSET);
27867
27868- if (user_mode_vm(regs))
27869+ if (user_mode(regs))
27870 return exception_exit(prev_state);
27871 else
27872 rcu_nmi_exit();
27873@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27874 *
27875 * IST exception handlers normally cannot schedule. As a special
27876 * exception, if the exception interrupted userspace code (i.e.
27877- * user_mode_vm(regs) would return true) and the exception was not
27878+ * user_mode(regs) would return true) and the exception was not
27879 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
27880 * begins a non-atomic section within an ist_enter()/ist_exit() region.
27881 * Callers are responsible for enabling interrupts themselves inside
27882@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27883 */
27884 void ist_begin_non_atomic(struct pt_regs *regs)
27885 {
27886- BUG_ON(!user_mode_vm(regs));
27887+ BUG_ON(!user_mode(regs));
27888
27889 /*
27890 * Sanity check: we need to be on the normal thread stack. This
27891@@ -191,11 +191,11 @@ void ist_end_non_atomic(void)
27892 }
27893
27894 static nokprobe_inline int
27895-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27896+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27897 struct pt_regs *regs, long error_code)
27898 {
27899 #ifdef CONFIG_X86_32
27900- if (regs->flags & X86_VM_MASK) {
27901+ if (v8086_mode(regs)) {
27902 /*
27903 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27904 * On nmi (interrupt 2), do_trap should not be called.
27905@@ -208,12 +208,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27906 return -1;
27907 }
27908 #endif
27909- if (!user_mode(regs)) {
27910+ if (!user_mode_novm(regs)) {
27911 if (!fixup_exception(regs)) {
27912 tsk->thread.error_code = error_code;
27913 tsk->thread.trap_nr = trapnr;
27914+
27915+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27916+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27917+ str = "PAX: suspicious stack segment fault";
27918+#endif
27919+
27920 die(str, regs, error_code);
27921 }
27922+
27923+#ifdef CONFIG_PAX_REFCOUNT
27924+ if (trapnr == X86_TRAP_OF)
27925+ pax_report_refcount_overflow(regs);
27926+#endif
27927+
27928 return 0;
27929 }
27930
27931@@ -252,7 +264,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27932 }
27933
27934 static void
27935-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27936+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27937 long error_code, siginfo_t *info)
27938 {
27939 struct task_struct *tsk = current;
27940@@ -276,7 +288,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27941 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27942 printk_ratelimit()) {
27943 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27944- tsk->comm, tsk->pid, str,
27945+ tsk->comm, task_pid_nr(tsk), str,
27946 regs->ip, regs->sp, error_code);
27947 print_vma_addr(" in ", regs->ip);
27948 pr_cont("\n");
27949@@ -358,6 +370,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27950 tsk->thread.error_code = error_code;
27951 tsk->thread.trap_nr = X86_TRAP_DF;
27952
27953+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27954+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27955+ die("grsec: kernel stack overflow detected", regs, error_code);
27956+#endif
27957+
27958 #ifdef CONFIG_DOUBLEFAULT
27959 df_debug(regs, error_code);
27960 #endif
27961@@ -384,7 +401,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27962 goto exit;
27963 conditional_sti(regs);
27964
27965- if (!user_mode_vm(regs))
27966+ if (!user_mode(regs))
27967 die("bounds", regs, error_code);
27968
27969 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
27970@@ -463,7 +480,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27971 conditional_sti(regs);
27972
27973 #ifdef CONFIG_X86_32
27974- if (regs->flags & X86_VM_MASK) {
27975+ if (v8086_mode(regs)) {
27976 local_irq_enable();
27977 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27978 goto exit;
27979@@ -471,18 +488,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27980 #endif
27981
27982 tsk = current;
27983- if (!user_mode(regs)) {
27984+ if (!user_mode_novm(regs)) {
27985 if (fixup_exception(regs))
27986 goto exit;
27987
27988 tsk->thread.error_code = error_code;
27989 tsk->thread.trap_nr = X86_TRAP_GP;
27990 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
27991- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
27992+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
27993+
27994+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27995+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
27996+ die("PAX: suspicious general protection fault", regs, error_code);
27997+ else
27998+#endif
27999+
28000 die("general protection fault", regs, error_code);
28001+ }
28002 goto exit;
28003 }
28004
28005+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28006+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28007+ struct mm_struct *mm = tsk->mm;
28008+ unsigned long limit;
28009+
28010+ down_write(&mm->mmap_sem);
28011+ limit = mm->context.user_cs_limit;
28012+ if (limit < TASK_SIZE) {
28013+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28014+ up_write(&mm->mmap_sem);
28015+ return;
28016+ }
28017+ up_write(&mm->mmap_sem);
28018+ }
28019+#endif
28020+
28021 tsk->thread.error_code = error_code;
28022 tsk->thread.trap_nr = X86_TRAP_GP;
28023
28024@@ -581,13 +622,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28025 container_of(task_pt_regs(current),
28026 struct bad_iret_stack, regs);
28027
28028+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28029+ new_stack = s;
28030+
28031 /* Copy the IRET target to the new stack. */
28032 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28033
28034 /* Copy the remainder of the stack from the current stack. */
28035 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28036
28037- BUG_ON(!user_mode_vm(&new_stack->regs));
28038+ BUG_ON(!user_mode(&new_stack->regs));
28039 return new_stack;
28040 }
28041 NOKPROBE_SYMBOL(fixup_bad_iret);
28042@@ -637,7 +681,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28043 * then it's very likely the result of an icebp/int01 trap.
28044 * User wants a sigtrap for that.
28045 */
28046- if (!dr6 && user_mode_vm(regs))
28047+ if (!dr6 && user_mode(regs))
28048 user_icebp = 1;
28049
28050 /* Catch kmemcheck conditions first of all! */
28051@@ -673,7 +717,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28052 /* It's safe to allow irq's after DR6 has been saved */
28053 preempt_conditional_sti(regs);
28054
28055- if (regs->flags & X86_VM_MASK) {
28056+ if (v8086_mode(regs)) {
28057 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28058 X86_TRAP_DB);
28059 preempt_conditional_cli(regs);
28060@@ -688,7 +732,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28061 * We already checked v86 mode above, so we can check for kernel mode
28062 * by just checking the CPL of CS.
28063 */
28064- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28065+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28066 tsk->thread.debugreg6 &= ~DR_STEP;
28067 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28068 regs->flags &= ~X86_EFLAGS_TF;
28069@@ -721,7 +765,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28070 return;
28071 conditional_sti(regs);
28072
28073- if (!user_mode_vm(regs))
28074+ if (!user_mode(regs))
28075 {
28076 if (!fixup_exception(regs)) {
28077 task->thread.error_code = error_code;
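
One detail worth spelling out from the do_double_fault() hunk: the GRKERNSEC_KSTACKOVERFLOW test `(unsigned long)tsk->stack - regs->sp <= PAGE_SIZE` leans on unsigned wraparound. tsk->stack is the lowest address of the stack, so the subtraction is small only when sp has descended to or past the base; any healthy sp above the base makes it wrap to a huge value and the test fails. Sketch, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define THREAD_SIZE (16UL * 1024)	/* placeholder */

/* true iff sp has reached the stack base or fallen at most a page below */
static int stack_overflowed(uintptr_t base, uintptr_t sp)
{
	return base - sp <= PAGE_SIZE;	/* wraps huge for healthy sp */
}

int main(void)
{
	uintptr_t base = 0xc0000000UL;	/* made-up stack base (lowest address) */

	printf("healthy:   %d\n", stack_overflowed(base, base + THREAD_SIZE / 2));
	printf("just past: %d\n", stack_overflowed(base, base - 64));
	/* a runaway sp more than a page below would be missed, but the
	 * double fault fires as the crossing happens, so the overflow is
	 * caught while sp is still near the base */
	printf("far past:  %d\n", stack_overflowed(base, base - 2 * PAGE_SIZE));
	return 0;
}
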
28078diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28079index 5054497..139f8f8 100644
28080--- a/arch/x86/kernel/tsc.c
28081+++ b/arch/x86/kernel/tsc.c
28082@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28083 */
28084 smp_wmb();
28085
28086- ACCESS_ONCE(c2n->head) = data;
28087+ ACCESS_ONCE_RW(c2n->head) = data;
28088 }
28089
28090 /*
28091diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28092index 81f8adb0..fff670e 100644
28093--- a/arch/x86/kernel/uprobes.c
28094+++ b/arch/x86/kernel/uprobes.c
28095@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28096 int ret = NOTIFY_DONE;
28097
28098 /* We are only interested in userspace traps */
28099- if (regs && !user_mode_vm(regs))
28100+ if (regs && !user_mode(regs))
28101 return NOTIFY_DONE;
28102
28103 switch (val) {
28104@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28105
28106 if (nleft != rasize) {
28107 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28108- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28109+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28110
28111 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28112 }
28113diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28114index b9242ba..50c5edd 100644
28115--- a/arch/x86/kernel/verify_cpu.S
28116+++ b/arch/x86/kernel/verify_cpu.S
28117@@ -20,6 +20,7 @@
28118 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28119 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28120 * arch/x86/kernel/head_32.S: processor startup
28121+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28122 *
28123 * verify_cpu, returns the status of longmode and SSE in register %eax.
28124 * 0: Success 1: Failure
28125diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28126index e8edcf5..27f9344 100644
28127--- a/arch/x86/kernel/vm86_32.c
28128+++ b/arch/x86/kernel/vm86_32.c
28129@@ -44,6 +44,7 @@
28130 #include <linux/ptrace.h>
28131 #include <linux/audit.h>
28132 #include <linux/stddef.h>
28133+#include <linux/grsecurity.h>
28134
28135 #include <asm/uaccess.h>
28136 #include <asm/io.h>
28137@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28138 do_exit(SIGSEGV);
28139 }
28140
28141- tss = &per_cpu(init_tss, get_cpu());
28142+ tss = init_tss + get_cpu();
28143 current->thread.sp0 = current->thread.saved_sp0;
28144 current->thread.sysenter_cs = __KERNEL_CS;
28145 load_sp0(tss, &current->thread);
28146@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28147
28148 if (tsk->thread.saved_sp0)
28149 return -EPERM;
28150+
28151+#ifdef CONFIG_GRKERNSEC_VM86
28152+ if (!capable(CAP_SYS_RAWIO)) {
28153+ gr_handle_vm86();
28154+ return -EPERM;
28155+ }
28156+#endif
28157+
28158 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28159 offsetof(struct kernel_vm86_struct, vm86plus) -
28160 sizeof(info.regs));
28161@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28162 int tmp;
28163 struct vm86plus_struct __user *v86;
28164
28165+#ifdef CONFIG_GRKERNSEC_VM86
28166+ if (!capable(CAP_SYS_RAWIO)) {
28167+ gr_handle_vm86();
28168+ return -EPERM;
28169+ }
28170+#endif
28171+
28172 tsk = current;
28173 switch (cmd) {
28174 case VM86_REQUEST_IRQ:
28175@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28176 tsk->thread.saved_fs = info->regs32->fs;
28177 tsk->thread.saved_gs = get_user_gs(info->regs32);
28178
28179- tss = &per_cpu(init_tss, get_cpu());
28180+ tss = init_tss + get_cpu();
28181 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28182 if (cpu_has_sep)
28183 tsk->thread.sysenter_cs = 0;
28184@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28185 goto cannot_handle;
28186 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28187 goto cannot_handle;
28188- intr_ptr = (unsigned long __user *) (i << 2);
28189+ intr_ptr = (__force unsigned long __user *) (i << 2);
28190 if (get_user(segoffs, intr_ptr))
28191 goto cannot_handle;
28192 if ((segoffs >> 16) == BIOSSEG)
28193diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28194index 00bf300..129df8e 100644
28195--- a/arch/x86/kernel/vmlinux.lds.S
28196+++ b/arch/x86/kernel/vmlinux.lds.S
28197@@ -26,6 +26,13 @@
28198 #include <asm/page_types.h>
28199 #include <asm/cache.h>
28200 #include <asm/boot.h>
28201+#include <asm/segment.h>
28202+
28203+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28204+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28205+#else
28206+#define __KERNEL_TEXT_OFFSET 0
28207+#endif
28208
28209 #undef i386 /* in case the preprocessor is a 32bit one */
28210
28211@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28212
28213 PHDRS {
28214 text PT_LOAD FLAGS(5); /* R_E */
28215+#ifdef CONFIG_X86_32
28216+ module PT_LOAD FLAGS(5); /* R_E */
28217+#endif
28218+#ifdef CONFIG_XEN
28219+ rodata PT_LOAD FLAGS(5); /* R_E */
28220+#else
28221+ rodata PT_LOAD FLAGS(4); /* R__ */
28222+#endif
28223 data PT_LOAD FLAGS(6); /* RW_ */
28224-#ifdef CONFIG_X86_64
28225+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28226 #ifdef CONFIG_SMP
28227 percpu PT_LOAD FLAGS(6); /* RW_ */
28228 #endif
28229+ text.init PT_LOAD FLAGS(5); /* R_E */
28230+ text.exit PT_LOAD FLAGS(5); /* R_E */
28231 init PT_LOAD FLAGS(7); /* RWE */
28232-#endif
28233 note PT_NOTE FLAGS(0); /* ___ */
28234 }
28235
28236 SECTIONS
28237 {
28238 #ifdef CONFIG_X86_32
28239- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28240- phys_startup_32 = startup_32 - LOAD_OFFSET;
28241+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28242 #else
28243- . = __START_KERNEL;
28244- phys_startup_64 = startup_64 - LOAD_OFFSET;
28245+ . = __START_KERNEL;
28246 #endif
28247
28248 /* Text and read-only data */
28249- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28250- _text = .;
28251+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28252 /* bootstrapping code */
28253+#ifdef CONFIG_X86_32
28254+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28255+#else
28256+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28257+#endif
28258+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28259+ _text = .;
28260 HEAD_TEXT
28261 . = ALIGN(8);
28262 _stext = .;
28263@@ -104,13 +124,47 @@ SECTIONS
28264 IRQENTRY_TEXT
28265 *(.fixup)
28266 *(.gnu.warning)
28267- /* End of text section */
28268- _etext = .;
28269 } :text = 0x9090
28270
28271- NOTES :text :note
28272+ . += __KERNEL_TEXT_OFFSET;
28273
28274- EXCEPTION_TABLE(16) :text = 0x9090
28275+#ifdef CONFIG_X86_32
28276+ . = ALIGN(PAGE_SIZE);
28277+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28278+
28279+#ifdef CONFIG_PAX_KERNEXEC
28280+ MODULES_EXEC_VADDR = .;
28281+ BYTE(0)
28282+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28283+ . = ALIGN(HPAGE_SIZE) - 1;
28284+ MODULES_EXEC_END = .;
28285+#endif
28286+
28287+ } :module
28288+#endif
28289+
28290+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28291+ /* End of text section */
28292+ BYTE(0)
28293+ _etext = . - __KERNEL_TEXT_OFFSET;
28294+ }
28295+
28296+#ifdef CONFIG_X86_32
28297+ . = ALIGN(PAGE_SIZE);
28298+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28299+ . = ALIGN(PAGE_SIZE);
28300+ *(.empty_zero_page)
28301+ *(.initial_pg_fixmap)
28302+ *(.initial_pg_pmd)
28303+ *(.initial_page_table)
28304+ *(.swapper_pg_dir)
28305+ } :rodata
28306+#endif
28307+
28308+ . = ALIGN(PAGE_SIZE);
28309+ NOTES :rodata :note
28310+
28311+ EXCEPTION_TABLE(16) :rodata
28312
28313 #if defined(CONFIG_DEBUG_RODATA)
28314 /* .text should occupy whole number of pages */
28315@@ -122,16 +176,20 @@ SECTIONS
28316
28317 /* Data */
28318 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28319+
28320+#ifdef CONFIG_PAX_KERNEXEC
28321+ . = ALIGN(HPAGE_SIZE);
28322+#else
28323+ . = ALIGN(PAGE_SIZE);
28324+#endif
28325+
28326 /* Start of data section */
28327 _sdata = .;
28328
28329 /* init_task */
28330 INIT_TASK_DATA(THREAD_SIZE)
28331
28332-#ifdef CONFIG_X86_32
28333- /* 32 bit has nosave before _edata */
28334 NOSAVE_DATA
28335-#endif
28336
28337 PAGE_ALIGNED_DATA(PAGE_SIZE)
28338
28339@@ -174,12 +232,19 @@ SECTIONS
28340 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28341
28342 /* Init code and data - will be freed after init */
28343- . = ALIGN(PAGE_SIZE);
28344 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28345+ BYTE(0)
28346+
28347+#ifdef CONFIG_PAX_KERNEXEC
28348+ . = ALIGN(HPAGE_SIZE);
28349+#else
28350+ . = ALIGN(PAGE_SIZE);
28351+#endif
28352+
28353 __init_begin = .; /* paired with __init_end */
28354- }
28355+ } :init.begin
28356
28357-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28358+#ifdef CONFIG_SMP
28359 /*
28360 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28361 * output PHDR, so the next output section - .init.text - should
28362@@ -190,12 +255,27 @@ SECTIONS
28363 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28364 #endif
28365
28366- INIT_TEXT_SECTION(PAGE_SIZE)
28367-#ifdef CONFIG_X86_64
28368- :init
28369-#endif
28370+ . = ALIGN(PAGE_SIZE);
28371+ init_begin = .;
28372+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28373+ VMLINUX_SYMBOL(_sinittext) = .;
28374+ INIT_TEXT
28375+ . = ALIGN(PAGE_SIZE);
28376+ } :text.init
28377
28378- INIT_DATA_SECTION(16)
28379+ /*
28380+ * .exit.text is discarded at runtime, not link time, to deal with
28381+ * references from .altinstructions and .eh_frame
28382+ */
28383+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28384+ EXIT_TEXT
28385+ VMLINUX_SYMBOL(_einittext) = .;
28386+ . = ALIGN(16);
28387+ } :text.exit
28388+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28389+
28390+ . = ALIGN(PAGE_SIZE);
28391+ INIT_DATA_SECTION(16) :init
28392
28393 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28394 __x86_cpu_dev_start = .;
28395@@ -266,19 +346,12 @@ SECTIONS
28396 }
28397
28398 . = ALIGN(8);
28399- /*
28400- * .exit.text is discarded at runtime, not link time, to deal with
28401- * references from .altinstructions and .eh_frame
28402- */
28403- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28404- EXIT_TEXT
28405- }
28406
28407 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28408 EXIT_DATA
28409 }
28410
28411-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28412+#ifndef CONFIG_SMP
28413 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28414 #endif
28415
28416@@ -297,16 +370,10 @@ SECTIONS
28417 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28418 __smp_locks = .;
28419 *(.smp_locks)
28420- . = ALIGN(PAGE_SIZE);
28421 __smp_locks_end = .;
28422+ . = ALIGN(PAGE_SIZE);
28423 }
28424
28425-#ifdef CONFIG_X86_64
28426- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28427- NOSAVE_DATA
28428- }
28429-#endif
28430-
28431 /* BSS */
28432 . = ALIGN(PAGE_SIZE);
28433 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28434@@ -322,6 +389,7 @@ SECTIONS
28435 __brk_base = .;
28436 . += 64 * 1024; /* 64k alignment slop space */
28437 *(.brk_reservation) /* areas brk users have reserved */
28438+ . = ALIGN(HPAGE_SIZE);
28439 __brk_limit = .;
28440 }
28441
28442@@ -348,13 +416,12 @@ SECTIONS
28443 * for the boot processor.
28444 */
28445 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28446-INIT_PER_CPU(gdt_page);
28447 INIT_PER_CPU(irq_stack_union);
28448
28449 /*
28450 * Build-time check on the image size:
28451 */
28452-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28453+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28454 "kernel image bigger than KERNEL_IMAGE_SIZE");
28455
28456 #ifdef CONFIG_SMP
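
The rewritten script exports a number of addresses (MODULES_EXEC_VADDR, __LOAD_PHYSICAL_ADDR, _etext, __brk_limit) that C code consumes as extern symbols whose address, not contents, carries the computed value. A runnable userland analogue, assuming a Linux/glibc toolchain whose default linker script provides the classic etext/edata/end symbols:

#include <stdio.h>

/* Provided by the linker, not by any .c file; taking the address of the
 * declaration yields the layout boundary the linker script computed. */
extern char etext, edata, end;

int main(void)
{
	printf("text ends at %p\n", (void *)&etext);
	printf("data ends at %p\n", (void *)&edata);
	printf("bss  ends at %p\n", (void *)&end);
	return 0;
}
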
28457diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28458index 2dcc6ff..082dc7a 100644
28459--- a/arch/x86/kernel/vsyscall_64.c
28460+++ b/arch/x86/kernel/vsyscall_64.c
28461@@ -38,15 +38,13 @@
28462 #define CREATE_TRACE_POINTS
28463 #include "vsyscall_trace.h"
28464
28465-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28466+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28467
28468 static int __init vsyscall_setup(char *str)
28469 {
28470 if (str) {
28471 if (!strcmp("emulate", str))
28472 vsyscall_mode = EMULATE;
28473- else if (!strcmp("native", str))
28474- vsyscall_mode = NATIVE;
28475 else if (!strcmp("none", str))
28476 vsyscall_mode = NONE;
28477 else
28478@@ -264,8 +262,7 @@ do_ret:
28479 return true;
28480
28481 sigsegv:
28482- force_sig(SIGSEGV, current);
28483- return true;
28484+ do_group_exit(SIGKILL);
28485 }
28486
28487 /*
28488@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28489 static struct vm_area_struct gate_vma = {
28490 .vm_start = VSYSCALL_ADDR,
28491 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28492- .vm_page_prot = PAGE_READONLY_EXEC,
28493- .vm_flags = VM_READ | VM_EXEC,
28494+ .vm_page_prot = PAGE_READONLY,
28495+ .vm_flags = VM_READ,
28496 .vm_ops = &gate_vma_ops,
28497 };
28498
28499@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28500 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28501
28502 if (vsyscall_mode != NONE)
28503- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28504- vsyscall_mode == NATIVE
28505- ? PAGE_KERNEL_VSYSCALL
28506- : PAGE_KERNEL_VVAR);
28507+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28508
28509 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28510 (unsigned long)VSYSCALL_ADDR);
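
With the native mode removed, the boot-time parser accepts only "emulate" and "none". A sketch of the reduced parser as a plain function (in the kernel the same strcmp chain sits behind the early_param()/__setup() machinery):

#include <string.h>

enum vsyscall_mode { EMULATE, NONE };

static int parse_vsyscall(const char *str, enum vsyscall_mode *mode)
{
	if (!str)
		return -1;
	if (!strcmp(str, "emulate"))
		*mode = EMULATE;
	else if (!strcmp(str, "none"))
		*mode = NONE;
	else
		return -1;	/* "native" is now rejected like any other value */
	return 0;
}
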
28511diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28512index 37d8fa4..66e319a 100644
28513--- a/arch/x86/kernel/x8664_ksyms_64.c
28514+++ b/arch/x86/kernel/x8664_ksyms_64.c
28515@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28516 EXPORT_SYMBOL(copy_user_generic_unrolled);
28517 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28518 EXPORT_SYMBOL(__copy_user_nocache);
28519-EXPORT_SYMBOL(_copy_from_user);
28520-EXPORT_SYMBOL(_copy_to_user);
28521
28522 EXPORT_SYMBOL(copy_page);
28523 EXPORT_SYMBOL(clear_page);
28524@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28525 EXPORT_SYMBOL(___preempt_schedule_context);
28526 #endif
28527 #endif
28528+
28529+#ifdef CONFIG_PAX_PER_CPU_PGD
28530+EXPORT_SYMBOL(cpu_pgd);
28531+#endif
28532diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28533index 234b072..b7ab191 100644
28534--- a/arch/x86/kernel/x86_init.c
28535+++ b/arch/x86/kernel/x86_init.c
28536@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28537 static void default_nmi_init(void) { };
28538 static int default_i8042_detect(void) { return 1; };
28539
28540-struct x86_platform_ops x86_platform = {
28541+struct x86_platform_ops x86_platform __read_only = {
28542 .calibrate_tsc = native_calibrate_tsc,
28543 .get_wallclock = mach_get_cmos_time,
28544 .set_wallclock = mach_set_rtc_mmss,
28545@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28546 EXPORT_SYMBOL_GPL(x86_platform);
28547
28548 #if defined(CONFIG_PCI_MSI)
28549-struct x86_msi_ops x86_msi = {
28550+struct x86_msi_ops x86_msi __read_only = {
28551 .setup_msi_irqs = native_setup_msi_irqs,
28552 .compose_msi_msg = native_compose_msi_msg,
28553 .teardown_msi_irq = native_teardown_msi_irq,
28554@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28555 }
28556 #endif
28557
28558-struct x86_io_apic_ops x86_io_apic_ops = {
28559+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28560 .init = native_io_apic_init_mappings,
28561 .read = native_io_apic_read,
28562 .write = native_io_apic_write,
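
Tagging these ops tables __read_only moves them into memory the kernel write-protects after init, so a memory-corruption bug can no longer silently swap a function pointer. The nearest portable analogue is plain const, as in this small model (platform_ops and its field are illustrative names, not the kernel structs):

#include <stdio.h>

struct platform_ops {
	void (*set_wallclock)(long t);
};

static void native_set_wallclock(long t) { printf("wallclock=%ld\n", t); }

/* const places the table in a read-only section at link time; __read_only
 * in the patch achieves the same end at runtime via page permissions. */
static const struct platform_ops platform_model = {
	.set_wallclock = native_set_wallclock,
};

int main(void)
{
	platform_model.set_wallclock(42);
	/* platform_model.set_wallclock = NULL;  <- rejected at compile time */
	return 0;
}
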
28563diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28564index cdc6cf9..e04f495 100644
28565--- a/arch/x86/kernel/xsave.c
28566+++ b/arch/x86/kernel/xsave.c
28567@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28568
28569 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28570 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28571- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28572+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28573
28574 if (!use_xsave())
28575 return err;
28576
28577- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28578+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28579
28580 /*
28581 * Read the xstate_bv which we copied (directly from the cpu or
28582 * from the state in task struct) to the user buffers.
28583 */
28584- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28585+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28586
28587 /*
28588 * For legacy compatible, we always set FP/SSE bits in the bit
28589@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28590 */
28591 xstate_bv |= XSTATE_FPSSE;
28592
28593- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28594+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28595
28596 return err;
28597 }
28598@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28599 {
28600 int err;
28601
28602+ buf = (struct xsave_struct __user *)____m(buf);
28603 if (use_xsave())
28604 err = xsave_user(buf);
28605 else if (use_fxsr())
28606@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28607 */
28608 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28609 {
28610+ buf = (void __user *)____m(buf);
28611 if (use_xsave()) {
28612 if ((unsigned long)buf % 64 || fx_only) {
28613 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28614diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28615index 8a80737..bac4961 100644
28616--- a/arch/x86/kvm/cpuid.c
28617+++ b/arch/x86/kvm/cpuid.c
28618@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28619 struct kvm_cpuid2 *cpuid,
28620 struct kvm_cpuid_entry2 __user *entries)
28621 {
28622- int r;
28623+ int r, i;
28624
28625 r = -E2BIG;
28626 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28627 goto out;
28628 r = -EFAULT;
28629- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28630- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28631+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28632 goto out;
28633+ for (i = 0; i < cpuid->nent; ++i) {
28634+ struct kvm_cpuid_entry2 cpuid_entry;
28635+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28636+ goto out;
28637+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28638+ }
28639 vcpu->arch.cpuid_nent = cpuid->nent;
28640 kvm_apic_set_version(vcpu);
28641 kvm_x86_ops->cpuid_update(vcpu);
28642@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28643 struct kvm_cpuid2 *cpuid,
28644 struct kvm_cpuid_entry2 __user *entries)
28645 {
28646- int r;
28647+ int r, i;
28648
28649 r = -E2BIG;
28650 if (cpuid->nent < vcpu->arch.cpuid_nent)
28651 goto out;
28652 r = -EFAULT;
28653- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28654- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28655+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28656 goto out;
28657+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28658+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28659+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28660+ goto out;
28661+ }
28662 return 0;
28663
28664 out:
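
Both ioctl paths trade a single bulk copy_{from,to}_user for one access_ok() check on the whole range plus a bounded per-entry loop, so the copy length can never exceed the fixed-size cpuid_entries array. A userspace model of the set path, with memcpy standing in for __copy_from_user and illustrative sizes:

#include <string.h>

#define MAX_ENTRIES 80			/* models KVM_MAX_CPUID_ENTRIES */
struct entry { unsigned int function, index, flags; };

static struct entry dst[MAX_ENTRIES];	/* models vcpu->arch.cpuid_entries */

static int set_entries(const struct entry *src, unsigned int nent)
{
	unsigned int i;

	if (nent > MAX_ENTRIES)		/* the -E2BIG check */
		return -1;
	for (i = 0; i < nent; i++) {
		struct entry tmp;
		/* __copy_from_user(&tmp, &src[i], ...) in the patch */
		memcpy(&tmp, &src[i], sizeof(tmp));
		dst[i] = tmp;
	}
	return 0;
}
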
28665diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28666index 106c015..2db7161 100644
28667--- a/arch/x86/kvm/emulate.c
28668+++ b/arch/x86/kvm/emulate.c
28669@@ -3572,7 +3572,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28670 int cr = ctxt->modrm_reg;
28671 u64 efer = 0;
28672
28673- static u64 cr_reserved_bits[] = {
28674+ static const u64 cr_reserved_bits[] = {
28675 0xffffffff00000000ULL,
28676 0, 0, 0, /* CR3 checked later */
28677 CR4_RESERVED_BITS,
28678diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28679index 4ee827d..a14eff9 100644
28680--- a/arch/x86/kvm/lapic.c
28681+++ b/arch/x86/kvm/lapic.c
28682@@ -56,7 +56,7 @@
28683 #define APIC_BUS_CYCLE_NS 1
28684
28685 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28686-#define apic_debug(fmt, arg...)
28687+#define apic_debug(fmt, arg...) do {} while (0)
28688
28689 #define APIC_LVT_NUM 6
28690 /* 14 is the version for Xeon and Pentium 8.4.8*/
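
Turning the stubbed-out apic_debug() into do {} while (0) is the standard way to make an empty macro behave like a real statement: with a truly empty expansion, "if (x) apic_debug(...);" degenerates to "if (x) ;" and trips gcc's -Wempty-body, while the do/while form expands cleanly and still requires the trailing semicolon. A compact demonstration:

#define apic_debug(fmt, ...) do {} while (0)	/* empty, but a statement */

static int irq_pending(void) { return 1; }

static void poll_irqs(void)
{
	if (irq_pending())
		apic_debug("irq pending\n");	/* no empty-body warning */
}
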
28691diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28692index fd49c86..77e1aa0 100644
28693--- a/arch/x86/kvm/paging_tmpl.h
28694+++ b/arch/x86/kvm/paging_tmpl.h
28695@@ -343,7 +343,7 @@ retry_walk:
28696 if (unlikely(kvm_is_error_hva(host_addr)))
28697 goto error;
28698
28699- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28700+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28701 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28702 goto error;
28703 walker->ptep_user[walker->level - 1] = ptep_user;
28704diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28705index cc618c8..3f72f76 100644
28706--- a/arch/x86/kvm/svm.c
28707+++ b/arch/x86/kvm/svm.c
28708@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28709 int cpu = raw_smp_processor_id();
28710
28711 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28712+
28713+ pax_open_kernel();
28714 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28715+ pax_close_kernel();
28716+
28717 load_TR_desc();
28718 }
28719
28720@@ -3964,6 +3968,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28721 #endif
28722 #endif
28723
28724+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28725+ __set_fs(current_thread_info()->addr_limit);
28726+#endif
28727+
28728 reload_tss(vcpu);
28729
28730 local_irq_disable();
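
pax_open_kernel()/pax_close_kernel() bracket the one store the TSS-reload path legitimately needs into otherwise read-only kernel data. A rough userspace model of the pattern, where mprotect() stands in for the PaX-internal CR0.WP/page-table manipulation (illustrative only, not the actual mechanism):

#include <string.h>
#include <sys/mman.h>

static int write_sealed(void *page, size_t len,
			const void *val, size_t off, size_t n)
{
	if (mprotect(page, len, PROT_READ | PROT_WRITE))	/* pax_open_kernel() */
		return -1;
	memcpy((char *)page + off, val, n);	/* the one permitted write */
	return mprotect(page, len, PROT_READ);	/* pax_close_kernel() */
}
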
28731diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28732index ae4f6d3..7f5f59b 100644
28733--- a/arch/x86/kvm/vmx.c
28734+++ b/arch/x86/kvm/vmx.c
28735@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28736 #endif
28737 }
28738
28739-static void vmcs_clear_bits(unsigned long field, u32 mask)
28740+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28741 {
28742 vmcs_writel(field, vmcs_readl(field) & ~mask);
28743 }
28744
28745-static void vmcs_set_bits(unsigned long field, u32 mask)
28746+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28747 {
28748 vmcs_writel(field, vmcs_readl(field) | mask);
28749 }
28750@@ -1705,7 +1705,11 @@ static void reload_tss(void)
28751 struct desc_struct *descs;
28752
28753 descs = (void *)gdt->address;
28754+
28755+ pax_open_kernel();
28756 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28757+ pax_close_kernel();
28758+
28759 load_TR_desc();
28760 }
28761
28762@@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28763 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28764 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28765
28766+#ifdef CONFIG_PAX_PER_CPU_PGD
28767+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28768+#endif
28769+
28770 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28771 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28772 vmx->loaded_vmcs->cpu = cpu;
28773@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28774 * reads and returns guest's timestamp counter "register"
28775 * guest_tsc = host_tsc + tsc_offset -- 21.3
28776 */
28777-static u64 guest_read_tsc(void)
28778+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28779 {
28780 u64 host_tsc, tsc_offset;
28781
28782@@ -4458,7 +4466,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28783 unsigned long cr4;
28784
28785 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28786+
28787+#ifndef CONFIG_PAX_PER_CPU_PGD
28788 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28789+#endif
28790
28791 /* Save the most likely value for this task's CR4 in the VMCS. */
28792 cr4 = cr4_read_shadow();
28793@@ -4485,7 +4496,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28794 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28795 vmx->host_idt_base = dt.address;
28796
28797- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28798+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28799
28800 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28801 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28802@@ -6096,11 +6107,17 @@ static __init int hardware_setup(void)
28803 * page upon invalidation. No need to do anything if not
28804 * using the APIC_ACCESS_ADDR VMCS field.
28805 */
28806- if (!flexpriority_enabled)
28807- kvm_x86_ops->set_apic_access_page_addr = NULL;
28808+ if (!flexpriority_enabled) {
28809+ pax_open_kernel();
28810+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28811+ pax_close_kernel();
28812+ }
28813
28814- if (!cpu_has_vmx_tpr_shadow())
28815- kvm_x86_ops->update_cr8_intercept = NULL;
28816+ if (!cpu_has_vmx_tpr_shadow()) {
28817+ pax_open_kernel();
28818+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28819+ pax_close_kernel();
28820+ }
28821
28822 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28823 kvm_disable_largepages();
28824@@ -6111,14 +6128,16 @@ static __init int hardware_setup(void)
28825 if (!cpu_has_vmx_apicv())
28826 enable_apicv = 0;
28827
28828+ pax_open_kernel();
28829 if (enable_apicv)
28830- kvm_x86_ops->update_cr8_intercept = NULL;
28831+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28832 else {
28833- kvm_x86_ops->hwapic_irr_update = NULL;
28834- kvm_x86_ops->hwapic_isr_update = NULL;
28835- kvm_x86_ops->deliver_posted_interrupt = NULL;
28836- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28837+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28838+ *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
28839+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28840+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28841 }
28842+ pax_close_kernel();
28843
28844 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
28845 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
28846@@ -6171,10 +6190,12 @@ static __init int hardware_setup(void)
28847 enable_pml = 0;
28848
28849 if (!enable_pml) {
28850- kvm_x86_ops->slot_enable_log_dirty = NULL;
28851- kvm_x86_ops->slot_disable_log_dirty = NULL;
28852- kvm_x86_ops->flush_log_dirty = NULL;
28853- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28854+ pax_open_kernel();
28855+ *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
28856+ *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
28857+ *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
28858+ *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28859+ pax_close_kernel();
28860 }
28861
28862 return alloc_kvm_area();
28863@@ -8219,6 +8240,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28864 "jmp 2f \n\t"
28865 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28866 "2: "
28867+
28868+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28869+ "ljmp %[cs],$3f\n\t"
28870+ "3: "
28871+#endif
28872+
28873 /* Save guest registers, load host registers, keep flags */
28874 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28875 "pop %0 \n\t"
28876@@ -8271,6 +8298,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28877 #endif
28878 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28879 [wordsize]"i"(sizeof(ulong))
28880+
28881+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28882+ ,[cs]"i"(__KERNEL_CS)
28883+#endif
28884+
28885 : "cc", "memory"
28886 #ifdef CONFIG_X86_64
28887 , "rax", "rbx", "rdi", "rsi"
28888@@ -8284,7 +8316,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28889 if (debugctlmsr)
28890 update_debugctlmsr(debugctlmsr);
28891
28892-#ifndef CONFIG_X86_64
28893+#ifdef CONFIG_X86_32
28894 /*
28895 * The sysexit path does not restore ds/es, so we must set them to
28896 * a reasonable value ourselves.
28897@@ -8293,8 +8325,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28898 * may be executed in interrupt context, which saves and restore segments
28899 * around it, nullifying its effect.
28900 */
28901- loadsegment(ds, __USER_DS);
28902- loadsegment(es, __USER_DS);
28903+ loadsegment(ds, __KERNEL_DS);
28904+ loadsegment(es, __KERNEL_DS);
28905+ loadsegment(ss, __KERNEL_DS);
28906+
28907+#ifdef CONFIG_PAX_KERNEXEC
28908+ loadsegment(fs, __KERNEL_PERCPU);
28909+#endif
28910+
28911+#ifdef CONFIG_PAX_MEMORY_UDEREF
28912+ __set_fs(current_thread_info()->addr_limit);
28913+#endif
28914+
28915 #endif
28916
28917 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
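
Besides the KERNEXEC/UDEREF plumbing, note the first hunk: vmcs_clear_bits()/vmcs_set_bits() operate on natural-width VMCS fields, so a u32 mask parameter silently truncates any bit above 31. A quick demonstration of the truncation the widened prototypes prevent:

#include <stdint.h>
#include <stdio.h>

static unsigned long field = ~0UL;	/* models a natural-width VMCS field */

static void clear_bits32(uint32_t mask)	    { field &= ~(unsigned long)mask; }
static void clear_bitsl(unsigned long mask) { field &= ~mask; }

int main(void)
{
	clear_bits32(1UL << 40);	/* mask truncates to 0: nothing cleared */
	printf("u32 mask : bit 40 still %lu\n", (field >> 40) & 1);
	clear_bitsl(1UL << 40);		/* full-width mask: bit really cleared */
	printf("long mask: bit 40 now   %lu\n", (field >> 40) & 1);
	return 0;
}
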
28918diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28919index 32bf19e..c8de1b5 100644
28920--- a/arch/x86/kvm/x86.c
28921+++ b/arch/x86/kvm/x86.c
28922@@ -1897,8 +1897,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28923 {
28924 struct kvm *kvm = vcpu->kvm;
28925 int lm = is_long_mode(vcpu);
28926- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28927- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28928+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28929+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28930 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28931 : kvm->arch.xen_hvm_config.blob_size_32;
28932 u32 page_num = data & ~PAGE_MASK;
28933@@ -2835,6 +2835,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28934 if (n < msr_list.nmsrs)
28935 goto out;
28936 r = -EFAULT;
28937+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28938+ goto out;
28939 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28940 num_msrs_to_save * sizeof(u32)))
28941 goto out;
28942@@ -5739,7 +5741,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28943 };
28944 #endif
28945
28946-int kvm_arch_init(void *opaque)
28947+int kvm_arch_init(const void *opaque)
28948 {
28949 int r;
28950 struct kvm_x86_ops *ops = opaque;
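
The added check in kvm_arch_dev_ioctl() re-validates num_msrs_to_save against the backing array right before the copy-out, so a stale or corrupted count cannot turn copy_to_user() into an out-of-bounds read. A userspace model, with memcpy standing in for copy_to_user and an illustrative array size:

#include <string.h>

#define NSAVED 32			/* models ARRAY_SIZE(msrs_to_save) */
static unsigned int msrs_to_save[NSAVED];
static unsigned int num_msrs_to_save;

static int export_msrs(unsigned int *ubuf, unsigned int n)
{
	unsigned int count = num_msrs_to_save;	/* snapshot once */

	if (n < count)
		return -1;		/* the original -E2BIG path */
	if (count > NSAVED)
		return -1;		/* the patch's new clamp */
	memcpy(ubuf, msrs_to_save, count * sizeof(*msrs_to_save));
	return 0;
}
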
28951diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28952index ac4453d..1f43bf3 100644
28953--- a/arch/x86/lguest/boot.c
28954+++ b/arch/x86/lguest/boot.c
28955@@ -1340,9 +1340,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28956 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28957 * Launcher to reboot us.
28958 */
28959-static void lguest_restart(char *reason)
28960+static __noreturn void lguest_restart(char *reason)
28961 {
28962 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28963+ BUG();
28964 }
28965
28966 /*G:050
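
Marking lguest_restart() __noreturn lets the compiler drop any code after a call to it, which is exactly why the hypercall must be backstopped: if the host ever did return, execution would fall off the end, and the added BUG() keeps that path fatal and loud. A minimal sketch of the shape (hcall_shutdown() is a stand-in for the real hypercall; __builtin_trap() models BUG()):

static void hcall_shutdown(void)
{
	/* models hcall(LHCALL_SHUTDOWN, ...) */
}

static __attribute__((noreturn)) void restart(void)
{
	hcall_shutdown();	/* expected never to come back...       */
	__builtin_trap();	/* ...but die loudly if it does (BUG()) */
}
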
28967diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28968index 00933d5..3a64af9 100644
28969--- a/arch/x86/lib/atomic64_386_32.S
28970+++ b/arch/x86/lib/atomic64_386_32.S
28971@@ -48,6 +48,10 @@ BEGIN(read)
28972 movl (v), %eax
28973 movl 4(v), %edx
28974 RET_ENDP
28975+BEGIN(read_unchecked)
28976+ movl (v), %eax
28977+ movl 4(v), %edx
28978+RET_ENDP
28979 #undef v
28980
28981 #define v %esi
28982@@ -55,6 +59,10 @@ BEGIN(set)
28983 movl %ebx, (v)
28984 movl %ecx, 4(v)
28985 RET_ENDP
28986+BEGIN(set_unchecked)
28987+ movl %ebx, (v)
28988+ movl %ecx, 4(v)
28989+RET_ENDP
28990 #undef v
28991
28992 #define v %esi
28993@@ -70,6 +78,20 @@ RET_ENDP
28994 BEGIN(add)
28995 addl %eax, (v)
28996 adcl %edx, 4(v)
28997+
28998+#ifdef CONFIG_PAX_REFCOUNT
28999+ jno 0f
29000+ subl %eax, (v)
29001+ sbbl %edx, 4(v)
29002+ int $4
29003+0:
29004+ _ASM_EXTABLE(0b, 0b)
29005+#endif
29006+
29007+RET_ENDP
29008+BEGIN(add_unchecked)
29009+ addl %eax, (v)
29010+ adcl %edx, 4(v)
29011 RET_ENDP
29012 #undef v
29013
29014@@ -77,6 +99,24 @@ RET_ENDP
29015 BEGIN(add_return)
29016 addl (v), %eax
29017 adcl 4(v), %edx
29018+
29019+#ifdef CONFIG_PAX_REFCOUNT
29020+ into
29021+1234:
29022+ _ASM_EXTABLE(1234b, 2f)
29023+#endif
29024+
29025+ movl %eax, (v)
29026+ movl %edx, 4(v)
29027+
29028+#ifdef CONFIG_PAX_REFCOUNT
29029+2:
29030+#endif
29031+
29032+RET_ENDP
29033+BEGIN(add_return_unchecked)
29034+ addl (v), %eax
29035+ adcl 4(v), %edx
29036 movl %eax, (v)
29037 movl %edx, 4(v)
29038 RET_ENDP
29039@@ -86,6 +126,20 @@ RET_ENDP
29040 BEGIN(sub)
29041 subl %eax, (v)
29042 sbbl %edx, 4(v)
29043+
29044+#ifdef CONFIG_PAX_REFCOUNT
29045+ jno 0f
29046+ addl %eax, (v)
29047+ adcl %edx, 4(v)
29048+ int $4
29049+0:
29050+ _ASM_EXTABLE(0b, 0b)
29051+#endif
29052+
29053+RET_ENDP
29054+BEGIN(sub_unchecked)
29055+ subl %eax, (v)
29056+ sbbl %edx, 4(v)
29057 RET_ENDP
29058 #undef v
29059
29060@@ -96,6 +150,27 @@ BEGIN(sub_return)
29061 sbbl $0, %edx
29062 addl (v), %eax
29063 adcl 4(v), %edx
29064+
29065+#ifdef CONFIG_PAX_REFCOUNT
29066+ into
29067+1234:
29068+ _ASM_EXTABLE(1234b, 2f)
29069+#endif
29070+
29071+ movl %eax, (v)
29072+ movl %edx, 4(v)
29073+
29074+#ifdef CONFIG_PAX_REFCOUNT
29075+2:
29076+#endif
29077+
29078+RET_ENDP
29079+BEGIN(sub_return_unchecked)
29080+ negl %edx
29081+ negl %eax
29082+ sbbl $0, %edx
29083+ addl (v), %eax
29084+ adcl 4(v), %edx
29085 movl %eax, (v)
29086 movl %edx, 4(v)
29087 RET_ENDP
29088@@ -105,6 +180,20 @@ RET_ENDP
29089 BEGIN(inc)
29090 addl $1, (v)
29091 adcl $0, 4(v)
29092+
29093+#ifdef CONFIG_PAX_REFCOUNT
29094+ jno 0f
29095+ subl $1, (v)
29096+ sbbl $0, 4(v)
29097+ int $4
29098+0:
29099+ _ASM_EXTABLE(0b, 0b)
29100+#endif
29101+
29102+RET_ENDP
29103+BEGIN(inc_unchecked)
29104+ addl $1, (v)
29105+ adcl $0, 4(v)
29106 RET_ENDP
29107 #undef v
29108
29109@@ -114,6 +203,26 @@ BEGIN(inc_return)
29110 movl 4(v), %edx
29111 addl $1, %eax
29112 adcl $0, %edx
29113+
29114+#ifdef CONFIG_PAX_REFCOUNT
29115+ into
29116+1234:
29117+ _ASM_EXTABLE(1234b, 2f)
29118+#endif
29119+
29120+ movl %eax, (v)
29121+ movl %edx, 4(v)
29122+
29123+#ifdef CONFIG_PAX_REFCOUNT
29124+2:
29125+#endif
29126+
29127+RET_ENDP
29128+BEGIN(inc_return_unchecked)
29129+ movl (v), %eax
29130+ movl 4(v), %edx
29131+ addl $1, %eax
29132+ adcl $0, %edx
29133 movl %eax, (v)
29134 movl %edx, 4(v)
29135 RET_ENDP
29136@@ -123,6 +232,20 @@ RET_ENDP
29137 BEGIN(dec)
29138 subl $1, (v)
29139 sbbl $0, 4(v)
29140+
29141+#ifdef CONFIG_PAX_REFCOUNT
29142+ jno 0f
29143+ addl $1, (v)
29144+ adcl $0, 4(v)
29145+ int $4
29146+0:
29147+ _ASM_EXTABLE(0b, 0b)
29148+#endif
29149+
29150+RET_ENDP
29151+BEGIN(dec_unchecked)
29152+ subl $1, (v)
29153+ sbbl $0, 4(v)
29154 RET_ENDP
29155 #undef v
29156
29157@@ -132,6 +255,26 @@ BEGIN(dec_return)
29158 movl 4(v), %edx
29159 subl $1, %eax
29160 sbbl $0, %edx
29161+
29162+#ifdef CONFIG_PAX_REFCOUNT
29163+ into
29164+1234:
29165+ _ASM_EXTABLE(1234b, 2f)
29166+#endif
29167+
29168+ movl %eax, (v)
29169+ movl %edx, 4(v)
29170+
29171+#ifdef CONFIG_PAX_REFCOUNT
29172+2:
29173+#endif
29174+
29175+RET_ENDP
29176+BEGIN(dec_return_unchecked)
29177+ movl (v), %eax
29178+ movl 4(v), %edx
29179+ subl $1, %eax
29180+ sbbl $0, %edx
29181 movl %eax, (v)
29182 movl %edx, 4(v)
29183 RET_ENDP
29184@@ -143,6 +286,13 @@ BEGIN(add_unless)
29185 adcl %edx, %edi
29186 addl (v), %eax
29187 adcl 4(v), %edx
29188+
29189+#ifdef CONFIG_PAX_REFCOUNT
29190+ into
29191+1234:
29192+ _ASM_EXTABLE(1234b, 2f)
29193+#endif
29194+
29195 cmpl %eax, %ecx
29196 je 3f
29197 1:
29198@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29199 1:
29200 addl $1, %eax
29201 adcl $0, %edx
29202+
29203+#ifdef CONFIG_PAX_REFCOUNT
29204+ into
29205+1234:
29206+ _ASM_EXTABLE(1234b, 2f)
29207+#endif
29208+
29209 movl %eax, (v)
29210 movl %edx, 4(v)
29211 movl $1, %eax
29212@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29213 movl 4(v), %edx
29214 subl $1, %eax
29215 sbbl $0, %edx
29216+
29217+#ifdef CONFIG_PAX_REFCOUNT
29218+ into
29219+1234:
29220+ _ASM_EXTABLE(1234b, 1f)
29221+#endif
29222+
29223 js 1f
29224 movl %eax, (v)
29225 movl %edx, 4(v)
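
Every checked variant above performs the 64-bit operation, tests the overflow flag (jno / into), and on overflow undoes the store and raises the overflow trap (int $4), whose handler treats the event as a refcount attack; the _unchecked twins keep the raw semantics for counters that may legitimately wrap. A C model of the checked add, using __builtin_add_overflow for the flag test and abort() for the trap:

#include <stdint.h>
#include <stdlib.h>

static void checked_add64(int64_t *v, int64_t delta)
{
	int64_t sum;

	if (__builtin_add_overflow(*v, delta, &sum)) {
		/* asm version: subl/sbbl to undo the store, then int $4 */
		abort();
	}
	*v = sum;
}
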
29226diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29227index f5cc9eb..51fa319 100644
29228--- a/arch/x86/lib/atomic64_cx8_32.S
29229+++ b/arch/x86/lib/atomic64_cx8_32.S
29230@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29231 CFI_STARTPROC
29232
29233 read64 %ecx
29234+ pax_force_retaddr
29235 ret
29236 CFI_ENDPROC
29237 ENDPROC(atomic64_read_cx8)
29238
29239+ENTRY(atomic64_read_unchecked_cx8)
29240+ CFI_STARTPROC
29241+
29242+ read64 %ecx
29243+ pax_force_retaddr
29244+ ret
29245+ CFI_ENDPROC
29246+ENDPROC(atomic64_read_unchecked_cx8)
29247+
29248 ENTRY(atomic64_set_cx8)
29249 CFI_STARTPROC
29250
29251@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29252 cmpxchg8b (%esi)
29253 jne 1b
29254
29255+ pax_force_retaddr
29256 ret
29257 CFI_ENDPROC
29258 ENDPROC(atomic64_set_cx8)
29259
29260+ENTRY(atomic64_set_unchecked_cx8)
29261+ CFI_STARTPROC
29262+
29263+1:
29264+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29265+ * are atomic on 586 and newer */
29266+ cmpxchg8b (%esi)
29267+ jne 1b
29268+
29269+ pax_force_retaddr
29270+ ret
29271+ CFI_ENDPROC
29272+ENDPROC(atomic64_set_unchecked_cx8)
29273+
29274 ENTRY(atomic64_xchg_cx8)
29275 CFI_STARTPROC
29276
29277@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29278 cmpxchg8b (%esi)
29279 jne 1b
29280
29281+ pax_force_retaddr
29282 ret
29283 CFI_ENDPROC
29284 ENDPROC(atomic64_xchg_cx8)
29285
29286-.macro addsub_return func ins insc
29287-ENTRY(atomic64_\func\()_return_cx8)
29288+.macro addsub_return func ins insc unchecked=""
29289+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29290 CFI_STARTPROC
29291 SAVE ebp
29292 SAVE ebx
29293@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29294 movl %edx, %ecx
29295 \ins\()l %esi, %ebx
29296 \insc\()l %edi, %ecx
29297+
29298+.ifb \unchecked
29299+#ifdef CONFIG_PAX_REFCOUNT
29300+ into
29301+2:
29302+ _ASM_EXTABLE(2b, 3f)
29303+#endif
29304+.endif
29305+
29306 LOCK_PREFIX
29307 cmpxchg8b (%ebp)
29308 jne 1b
29309-
29310-10:
29311 movl %ebx, %eax
29312 movl %ecx, %edx
29313+
29314+.ifb \unchecked
29315+#ifdef CONFIG_PAX_REFCOUNT
29316+3:
29317+#endif
29318+.endif
29319+
29320 RESTORE edi
29321 RESTORE esi
29322 RESTORE ebx
29323 RESTORE ebp
29324+ pax_force_retaddr
29325 ret
29326 CFI_ENDPROC
29327-ENDPROC(atomic64_\func\()_return_cx8)
29328+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29329 .endm
29330
29331 addsub_return add add adc
29332 addsub_return sub sub sbb
29333+addsub_return add add adc _unchecked
29334+addsub_return sub sub sbb _unchecked
29335
29336-.macro incdec_return func ins insc
29337-ENTRY(atomic64_\func\()_return_cx8)
29338+.macro incdec_return func ins insc unchecked=""
29339+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29340 CFI_STARTPROC
29341 SAVE ebx
29342
29343@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29344 movl %edx, %ecx
29345 \ins\()l $1, %ebx
29346 \insc\()l $0, %ecx
29347+
29348+.ifb \unchecked
29349+#ifdef CONFIG_PAX_REFCOUNT
29350+ into
29351+2:
29352+ _ASM_EXTABLE(2b, 3f)
29353+#endif
29354+.endif
29355+
29356 LOCK_PREFIX
29357 cmpxchg8b (%esi)
29358 jne 1b
29359
29360-10:
29361 movl %ebx, %eax
29362 movl %ecx, %edx
29363+
29364+.ifb \unchecked
29365+#ifdef CONFIG_PAX_REFCOUNT
29366+3:
29367+#endif
29368+.endif
29369+
29370 RESTORE ebx
29371+ pax_force_retaddr
29372 ret
29373 CFI_ENDPROC
29374-ENDPROC(atomic64_\func\()_return_cx8)
29375+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29376 .endm
29377
29378 incdec_return inc add adc
29379 incdec_return dec sub sbb
29380+incdec_return inc add adc _unchecked
29381+incdec_return dec sub sbb _unchecked
29382
29383 ENTRY(atomic64_dec_if_positive_cx8)
29384 CFI_STARTPROC
29385@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29386 movl %edx, %ecx
29387 subl $1, %ebx
29388 sbb $0, %ecx
29389+
29390+#ifdef CONFIG_PAX_REFCOUNT
29391+ into
29392+1234:
29393+ _ASM_EXTABLE(1234b, 2f)
29394+#endif
29395+
29396 js 2f
29397 LOCK_PREFIX
29398 cmpxchg8b (%esi)
29399@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29400 movl %ebx, %eax
29401 movl %ecx, %edx
29402 RESTORE ebx
29403+ pax_force_retaddr
29404 ret
29405 CFI_ENDPROC
29406 ENDPROC(atomic64_dec_if_positive_cx8)
29407@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29408 movl %edx, %ecx
29409 addl %ebp, %ebx
29410 adcl %edi, %ecx
29411+
29412+#ifdef CONFIG_PAX_REFCOUNT
29413+ into
29414+1234:
29415+ _ASM_EXTABLE(1234b, 3f)
29416+#endif
29417+
29418 LOCK_PREFIX
29419 cmpxchg8b (%esi)
29420 jne 1b
29421@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29422 CFI_ADJUST_CFA_OFFSET -8
29423 RESTORE ebx
29424 RESTORE ebp
29425+ pax_force_retaddr
29426 ret
29427 4:
29428 cmpl %edx, 4(%esp)
29429@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29430 xorl %ecx, %ecx
29431 addl $1, %ebx
29432 adcl %edx, %ecx
29433+
29434+#ifdef CONFIG_PAX_REFCOUNT
29435+ into
29436+1234:
29437+ _ASM_EXTABLE(1234b, 3f)
29438+#endif
29439+
29440 LOCK_PREFIX
29441 cmpxchg8b (%esi)
29442 jne 1b
29443@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29444 movl $1, %eax
29445 3:
29446 RESTORE ebx
29447+ pax_force_retaddr
29448 ret
29449 CFI_ENDPROC
29450 ENDPROC(atomic64_inc_not_zero_cx8)
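
All of these cx8 routines share one shape: load the old 64-bit value, compute the new one, and retry cmpxchg8b until no other CPU has moved the value underneath ("jne 1b"). The same loop in portable C11 atomics:

#include <stdatomic.h>
#include <stdint.h>

static int64_t add_return64(_Atomic int64_t *v, int64_t delta)
{
	int64_t old = atomic_load(v);
	int64_t new;

	do {
		new = old + delta;	/* the addl/adcl pair */
	} while (!atomic_compare_exchange_weak(v, &old, new));	/* cmpxchg8b; jne 1b */

	return new;
}
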
29451diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29452index e78b8eee..7e173a8 100644
29453--- a/arch/x86/lib/checksum_32.S
29454+++ b/arch/x86/lib/checksum_32.S
29455@@ -29,7 +29,8 @@
29456 #include <asm/dwarf2.h>
29457 #include <asm/errno.h>
29458 #include <asm/asm.h>
29459-
29460+#include <asm/segment.h>
29461+
29462 /*
29463 * computes a partial checksum, e.g. for TCP/UDP fragments
29464 */
29465@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29466
29467 #define ARGBASE 16
29468 #define FP 12
29469-
29470-ENTRY(csum_partial_copy_generic)
29471+
29472+ENTRY(csum_partial_copy_generic_to_user)
29473 CFI_STARTPROC
29474+
29475+#ifdef CONFIG_PAX_MEMORY_UDEREF
29476+ pushl_cfi %gs
29477+ popl_cfi %es
29478+ jmp csum_partial_copy_generic
29479+#endif
29480+
29481+ENTRY(csum_partial_copy_generic_from_user)
29482+
29483+#ifdef CONFIG_PAX_MEMORY_UDEREF
29484+ pushl_cfi %gs
29485+ popl_cfi %ds
29486+#endif
29487+
29488+ENTRY(csum_partial_copy_generic)
29489 subl $4,%esp
29490 CFI_ADJUST_CFA_OFFSET 4
29491 pushl_cfi %edi
29492@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29493 jmp 4f
29494 SRC(1: movw (%esi), %bx )
29495 addl $2, %esi
29496-DST( movw %bx, (%edi) )
29497+DST( movw %bx, %es:(%edi) )
29498 addl $2, %edi
29499 addw %bx, %ax
29500 adcl $0, %eax
29501@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29502 SRC(1: movl (%esi), %ebx )
29503 SRC( movl 4(%esi), %edx )
29504 adcl %ebx, %eax
29505-DST( movl %ebx, (%edi) )
29506+DST( movl %ebx, %es:(%edi) )
29507 adcl %edx, %eax
29508-DST( movl %edx, 4(%edi) )
29509+DST( movl %edx, %es:4(%edi) )
29510
29511 SRC( movl 8(%esi), %ebx )
29512 SRC( movl 12(%esi), %edx )
29513 adcl %ebx, %eax
29514-DST( movl %ebx, 8(%edi) )
29515+DST( movl %ebx, %es:8(%edi) )
29516 adcl %edx, %eax
29517-DST( movl %edx, 12(%edi) )
29518+DST( movl %edx, %es:12(%edi) )
29519
29520 SRC( movl 16(%esi), %ebx )
29521 SRC( movl 20(%esi), %edx )
29522 adcl %ebx, %eax
29523-DST( movl %ebx, 16(%edi) )
29524+DST( movl %ebx, %es:16(%edi) )
29525 adcl %edx, %eax
29526-DST( movl %edx, 20(%edi) )
29527+DST( movl %edx, %es:20(%edi) )
29528
29529 SRC( movl 24(%esi), %ebx )
29530 SRC( movl 28(%esi), %edx )
29531 adcl %ebx, %eax
29532-DST( movl %ebx, 24(%edi) )
29533+DST( movl %ebx, %es:24(%edi) )
29534 adcl %edx, %eax
29535-DST( movl %edx, 28(%edi) )
29536+DST( movl %edx, %es:28(%edi) )
29537
29538 lea 32(%esi), %esi
29539 lea 32(%edi), %edi
29540@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29541 shrl $2, %edx # This clears CF
29542 SRC(3: movl (%esi), %ebx )
29543 adcl %ebx, %eax
29544-DST( movl %ebx, (%edi) )
29545+DST( movl %ebx, %es:(%edi) )
29546 lea 4(%esi), %esi
29547 lea 4(%edi), %edi
29548 dec %edx
29549@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29550 jb 5f
29551 SRC( movw (%esi), %cx )
29552 leal 2(%esi), %esi
29553-DST( movw %cx, (%edi) )
29554+DST( movw %cx, %es:(%edi) )
29555 leal 2(%edi), %edi
29556 je 6f
29557 shll $16,%ecx
29558 SRC(5: movb (%esi), %cl )
29559-DST( movb %cl, (%edi) )
29560+DST( movb %cl, %es:(%edi) )
29561 6: addl %ecx, %eax
29562 adcl $0, %eax
29563 7:
29564@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29565
29566 6001:
29567 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29568- movl $-EFAULT, (%ebx)
29569+ movl $-EFAULT, %ss:(%ebx)
29570
29571 # zero the complete destination - computing the rest
29572 # is too much work
29573@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29574
29575 6002:
29576 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29577- movl $-EFAULT,(%ebx)
29578+ movl $-EFAULT,%ss:(%ebx)
29579 jmp 5000b
29580
29581 .previous
29582
29583+ pushl_cfi %ss
29584+ popl_cfi %ds
29585+ pushl_cfi %ss
29586+ popl_cfi %es
29587 popl_cfi %ebx
29588 CFI_RESTORE ebx
29589 popl_cfi %esi
29590@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29591 popl_cfi %ecx # equivalent to addl $4,%esp
29592 ret
29593 CFI_ENDPROC
29594-ENDPROC(csum_partial_copy_generic)
29595+ENDPROC(csum_partial_copy_generic_to_user)
29596
29597 #else
29598
29599 /* Version for PentiumII/PPro */
29600
29601 #define ROUND1(x) \
29602+ nop; nop; nop; \
29603 SRC(movl x(%esi), %ebx ) ; \
29604 addl %ebx, %eax ; \
29605- DST(movl %ebx, x(%edi) ) ;
29606+ DST(movl %ebx, %es:x(%edi)) ;
29607
29608 #define ROUND(x) \
29609+ nop; nop; nop; \
29610 SRC(movl x(%esi), %ebx ) ; \
29611 adcl %ebx, %eax ; \
29612- DST(movl %ebx, x(%edi) ) ;
29613+ DST(movl %ebx, %es:x(%edi)) ;
29614
29615 #define ARGBASE 12
29616-
29617-ENTRY(csum_partial_copy_generic)
29618+
29619+ENTRY(csum_partial_copy_generic_to_user)
29620 CFI_STARTPROC
29621+
29622+#ifdef CONFIG_PAX_MEMORY_UDEREF
29623+ pushl_cfi %gs
29624+ popl_cfi %es
29625+ jmp csum_partial_copy_generic
29626+#endif
29627+
29628+ENTRY(csum_partial_copy_generic_from_user)
29629+
29630+#ifdef CONFIG_PAX_MEMORY_UDEREF
29631+ pushl_cfi %gs
29632+ popl_cfi %ds
29633+#endif
29634+
29635+ENTRY(csum_partial_copy_generic)
29636 pushl_cfi %ebx
29637 CFI_REL_OFFSET ebx, 0
29638 pushl_cfi %edi
29639@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29640 subl %ebx, %edi
29641 lea -1(%esi),%edx
29642 andl $-32,%edx
29643- lea 3f(%ebx,%ebx), %ebx
29644+ lea 3f(%ebx,%ebx,2), %ebx
29645 testl %esi, %esi
29646 jmp *%ebx
29647 1: addl $64,%esi
29648@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29649 jb 5f
29650 SRC( movw (%esi), %dx )
29651 leal 2(%esi), %esi
29652-DST( movw %dx, (%edi) )
29653+DST( movw %dx, %es:(%edi) )
29654 leal 2(%edi), %edi
29655 je 6f
29656 shll $16,%edx
29657 5:
29658 SRC( movb (%esi), %dl )
29659-DST( movb %dl, (%edi) )
29660+DST( movb %dl, %es:(%edi) )
29661 6: addl %edx, %eax
29662 adcl $0, %eax
29663 7:
29664 .section .fixup, "ax"
29665 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29666- movl $-EFAULT, (%ebx)
29667+ movl $-EFAULT, %ss:(%ebx)
29668 # zero the complete destination (computing the rest is too much work)
29669 movl ARGBASE+8(%esp),%edi # dst
29670 movl ARGBASE+12(%esp),%ecx # len
29671@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29672 rep; stosb
29673 jmp 7b
29674 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29675- movl $-EFAULT, (%ebx)
29676+ movl $-EFAULT, %ss:(%ebx)
29677 jmp 7b
29678 .previous
29679
29680+#ifdef CONFIG_PAX_MEMORY_UDEREF
29681+ pushl_cfi %ss
29682+ popl_cfi %ds
29683+ pushl_cfi %ss
29684+ popl_cfi %es
29685+#endif
29686+
29687 popl_cfi %esi
29688 CFI_RESTORE esi
29689 popl_cfi %edi
29690@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29691 CFI_RESTORE ebx
29692 ret
29693 CFI_ENDPROC
29694-ENDPROC(csum_partial_copy_generic)
29695+ENDPROC(csum_partial_copy_generic_to_user)
29696
29697 #undef ROUND
29698 #undef ROUND1
29699diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29700index f2145cf..cea889d 100644
29701--- a/arch/x86/lib/clear_page_64.S
29702+++ b/arch/x86/lib/clear_page_64.S
29703@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29704 movl $4096/8,%ecx
29705 xorl %eax,%eax
29706 rep stosq
29707+ pax_force_retaddr
29708 ret
29709 CFI_ENDPROC
29710 ENDPROC(clear_page_c)
29711@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29712 movl $4096,%ecx
29713 xorl %eax,%eax
29714 rep stosb
29715+ pax_force_retaddr
29716 ret
29717 CFI_ENDPROC
29718 ENDPROC(clear_page_c_e)
29719@@ -43,6 +45,7 @@ ENTRY(clear_page)
29720 leaq 64(%rdi),%rdi
29721 jnz .Lloop
29722 nop
29723+ pax_force_retaddr
29724 ret
29725 CFI_ENDPROC
29726 .Lclear_page_end:
29727@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29728
29729 #include <asm/cpufeature.h>
29730
29731- .section .altinstr_replacement,"ax"
29732+ .section .altinstr_replacement,"a"
29733 1: .byte 0xeb /* jmp <disp8> */
29734 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29735 2: .byte 0xeb /* jmp <disp8> */
29736diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29737index 40a1725..5d12ac4 100644
29738--- a/arch/x86/lib/cmpxchg16b_emu.S
29739+++ b/arch/x86/lib/cmpxchg16b_emu.S
29740@@ -8,6 +8,7 @@
29741 #include <linux/linkage.h>
29742 #include <asm/dwarf2.h>
29743 #include <asm/percpu.h>
29744+#include <asm/alternative-asm.h>
29745
29746 .text
29747
29748@@ -46,12 +47,14 @@ CFI_STARTPROC
29749 CFI_REMEMBER_STATE
29750 popfq_cfi
29751 mov $1, %al
29752+ pax_force_retaddr
29753 ret
29754
29755 CFI_RESTORE_STATE
29756 .Lnot_same:
29757 popfq_cfi
29758 xor %al,%al
29759+ pax_force_retaddr
29760 ret
29761
29762 CFI_ENDPROC
29763diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29764index 176cca6..e0d658e 100644
29765--- a/arch/x86/lib/copy_page_64.S
29766+++ b/arch/x86/lib/copy_page_64.S
29767@@ -9,6 +9,7 @@ copy_page_rep:
29768 CFI_STARTPROC
29769 movl $4096/8, %ecx
29770 rep movsq
29771+ pax_force_retaddr
29772 ret
29773 CFI_ENDPROC
29774 ENDPROC(copy_page_rep)
29775@@ -24,8 +25,8 @@ ENTRY(copy_page)
29776 CFI_ADJUST_CFA_OFFSET 2*8
29777 movq %rbx, (%rsp)
29778 CFI_REL_OFFSET rbx, 0
29779- movq %r12, 1*8(%rsp)
29780- CFI_REL_OFFSET r12, 1*8
29781+ movq %r13, 1*8(%rsp)
29782+ CFI_REL_OFFSET r13, 1*8
29783
29784 movl $(4096/64)-5, %ecx
29785 .p2align 4
29786@@ -38,7 +39,7 @@ ENTRY(copy_page)
29787 movq 0x8*4(%rsi), %r9
29788 movq 0x8*5(%rsi), %r10
29789 movq 0x8*6(%rsi), %r11
29790- movq 0x8*7(%rsi), %r12
29791+ movq 0x8*7(%rsi), %r13
29792
29793 prefetcht0 5*64(%rsi)
29794
29795@@ -49,7 +50,7 @@ ENTRY(copy_page)
29796 movq %r9, 0x8*4(%rdi)
29797 movq %r10, 0x8*5(%rdi)
29798 movq %r11, 0x8*6(%rdi)
29799- movq %r12, 0x8*7(%rdi)
29800+ movq %r13, 0x8*7(%rdi)
29801
29802 leaq 64 (%rsi), %rsi
29803 leaq 64 (%rdi), %rdi
29804@@ -68,7 +69,7 @@ ENTRY(copy_page)
29805 movq 0x8*4(%rsi), %r9
29806 movq 0x8*5(%rsi), %r10
29807 movq 0x8*6(%rsi), %r11
29808- movq 0x8*7(%rsi), %r12
29809+ movq 0x8*7(%rsi), %r13
29810
29811 movq %rax, 0x8*0(%rdi)
29812 movq %rbx, 0x8*1(%rdi)
29813@@ -77,7 +78,7 @@ ENTRY(copy_page)
29814 movq %r9, 0x8*4(%rdi)
29815 movq %r10, 0x8*5(%rdi)
29816 movq %r11, 0x8*6(%rdi)
29817- movq %r12, 0x8*7(%rdi)
29818+ movq %r13, 0x8*7(%rdi)
29819
29820 leaq 64(%rdi), %rdi
29821 leaq 64(%rsi), %rsi
29822@@ -85,10 +86,11 @@ ENTRY(copy_page)
29823
29824 movq (%rsp), %rbx
29825 CFI_RESTORE rbx
29826- movq 1*8(%rsp), %r12
29827- CFI_RESTORE r12
29828+ movq 1*8(%rsp), %r13
29829+ CFI_RESTORE r13
29830 addq $2*8, %rsp
29831 CFI_ADJUST_CFA_OFFSET -2*8
29832+ pax_force_retaddr
29833 ret
29834 .Lcopy_page_end:
29835 CFI_ENDPROC
29836@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29837
29838 #include <asm/cpufeature.h>
29839
29840- .section .altinstr_replacement,"ax"
29841+ .section .altinstr_replacement,"a"
29842 1: .byte 0xeb /* jmp <disp8> */
29843 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29844 2:
29845diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29846index dee945d..a84067b 100644
29847--- a/arch/x86/lib/copy_user_64.S
29848+++ b/arch/x86/lib/copy_user_64.S
29849@@ -18,31 +18,7 @@
29850 #include <asm/alternative-asm.h>
29851 #include <asm/asm.h>
29852 #include <asm/smap.h>
29853-
29854-/*
29855- * By placing feature2 after feature1 in altinstructions section, we logically
29856- * implement:
29857- * If CPU has feature2, jmp to alt2 is used
29858- * else if CPU has feature1, jmp to alt1 is used
29859- * else jmp to orig is used.
29860- */
29861- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29862-0:
29863- .byte 0xe9 /* 32bit jump */
29864- .long \orig-1f /* by default jump to orig */
29865-1:
29866- .section .altinstr_replacement,"ax"
29867-2: .byte 0xe9 /* near jump with 32bit immediate */
29868- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29869-3: .byte 0xe9 /* near jump with 32bit immediate */
29870- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29871- .previous
29872-
29873- .section .altinstructions,"a"
29874- altinstruction_entry 0b,2b,\feature1,5,5
29875- altinstruction_entry 0b,3b,\feature2,5,5
29876- .previous
29877- .endm
29878+#include <asm/pgtable.h>
29879
29880 .macro ALIGN_DESTINATION
29881 #ifdef FIX_ALIGNMENT
29882@@ -70,52 +46,6 @@
29883 #endif
29884 .endm
29885
29886-/* Standard copy_to_user with segment limit checking */
29887-ENTRY(_copy_to_user)
29888- CFI_STARTPROC
29889- GET_THREAD_INFO(%rax)
29890- movq %rdi,%rcx
29891- addq %rdx,%rcx
29892- jc bad_to_user
29893- cmpq TI_addr_limit(%rax),%rcx
29894- ja bad_to_user
29895- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29896- copy_user_generic_unrolled,copy_user_generic_string, \
29897- copy_user_enhanced_fast_string
29898- CFI_ENDPROC
29899-ENDPROC(_copy_to_user)
29900-
29901-/* Standard copy_from_user with segment limit checking */
29902-ENTRY(_copy_from_user)
29903- CFI_STARTPROC
29904- GET_THREAD_INFO(%rax)
29905- movq %rsi,%rcx
29906- addq %rdx,%rcx
29907- jc bad_from_user
29908- cmpq TI_addr_limit(%rax),%rcx
29909- ja bad_from_user
29910- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29911- copy_user_generic_unrolled,copy_user_generic_string, \
29912- copy_user_enhanced_fast_string
29913- CFI_ENDPROC
29914-ENDPROC(_copy_from_user)
29915-
29916- .section .fixup,"ax"
29917- /* must zero dest */
29918-ENTRY(bad_from_user)
29919-bad_from_user:
29920- CFI_STARTPROC
29921- movl %edx,%ecx
29922- xorl %eax,%eax
29923- rep
29924- stosb
29925-bad_to_user:
29926- movl %edx,%eax
29927- ret
29928- CFI_ENDPROC
29929-ENDPROC(bad_from_user)
29930- .previous
29931-
29932 /*
29933 * copy_user_generic_unrolled - memory copy with exception handling.
29934 * This version is for CPUs like P4 that don't have efficient micro
29935@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29936 */
29937 ENTRY(copy_user_generic_unrolled)
29938 CFI_STARTPROC
29939+ ASM_PAX_OPEN_USERLAND
29940 ASM_STAC
29941 cmpl $8,%edx
29942 jb 20f /* less than 8 bytes, go to byte copy loop */
29943@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29944 jnz 21b
29945 23: xor %eax,%eax
29946 ASM_CLAC
29947+ ASM_PAX_CLOSE_USERLAND
29948+ pax_force_retaddr
29949 ret
29950
29951 .section .fixup,"ax"
29952@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29953 */
29954 ENTRY(copy_user_generic_string)
29955 CFI_STARTPROC
29956+ ASM_PAX_OPEN_USERLAND
29957 ASM_STAC
29958 cmpl $8,%edx
29959 jb 2f /* less than 8 bytes, go to byte copy loop */
29960@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29961 movsb
29962 xorl %eax,%eax
29963 ASM_CLAC
29964+ ASM_PAX_CLOSE_USERLAND
29965+ pax_force_retaddr
29966 ret
29967
29968 .section .fixup,"ax"
29969@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29970 */
29971 ENTRY(copy_user_enhanced_fast_string)
29972 CFI_STARTPROC
29973+ ASM_PAX_OPEN_USERLAND
29974 ASM_STAC
29975 movl %edx,%ecx
29976 1: rep
29977 movsb
29978 xorl %eax,%eax
29979 ASM_CLAC
29980+ ASM_PAX_CLOSE_USERLAND
29981+ pax_force_retaddr
29982 ret
29983
29984 .section .fixup,"ax"
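
Deleting _copy_to_user/_copy_from_user here does not delete the checks; in this tree the range validation happens in C before the unchecked copy_user_generic_* entry points are reached. A rough model of what the removed stub did (memcpy stands in for the unchecked copy; LIMIT is an illustrative address limit, not TI_addr_limit itself):

#include <stdint.h>
#include <string.h>

#define LIMIT ((uintptr_t)1 << 47)	/* illustrative user address limit */

static unsigned long copy_to_user_model(void *to, const void *from,
					unsigned long n)
{
	uintptr_t start = (uintptr_t)to, end;

	if (__builtin_add_overflow(start, n, &end))	/* the "jc bad_to_user" */
		return n;
	if (end > LIMIT)		/* the TI_addr_limit compare */
		return n;
	memcpy(to, from, n);		/* unchecked fast path */
	return 0;			/* 0 bytes left uncopied */
}
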
29985diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29986index 6a4f43c..c70fb52 100644
29987--- a/arch/x86/lib/copy_user_nocache_64.S
29988+++ b/arch/x86/lib/copy_user_nocache_64.S
29989@@ -8,6 +8,7 @@
29990
29991 #include <linux/linkage.h>
29992 #include <asm/dwarf2.h>
29993+#include <asm/alternative-asm.h>
29994
29995 #define FIX_ALIGNMENT 1
29996
29997@@ -16,6 +17,7 @@
29998 #include <asm/thread_info.h>
29999 #include <asm/asm.h>
30000 #include <asm/smap.h>
30001+#include <asm/pgtable.h>
30002
30003 .macro ALIGN_DESTINATION
30004 #ifdef FIX_ALIGNMENT
30005@@ -49,6 +51,16 @@
30006 */
30007 ENTRY(__copy_user_nocache)
30008 CFI_STARTPROC
30009+
30010+#ifdef CONFIG_PAX_MEMORY_UDEREF
30011+ mov pax_user_shadow_base,%rcx
30012+ cmp %rcx,%rsi
30013+ jae 1f
30014+ add %rcx,%rsi
30015+1:
30016+#endif
30017+
30018+ ASM_PAX_OPEN_USERLAND
30019 ASM_STAC
30020 cmpl $8,%edx
30021 jb 20f /* less than 8 bytes, go to byte copy loop */
30022@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30023 jnz 21b
30024 23: xorl %eax,%eax
30025 ASM_CLAC
30026+ ASM_PAX_CLOSE_USERLAND
30027 sfence
30028+ pax_force_retaddr
30029 ret
30030
30031 .section .fixup,"ax"
30032diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30033index 2419d5f..fe52d0e 100644
30034--- a/arch/x86/lib/csum-copy_64.S
30035+++ b/arch/x86/lib/csum-copy_64.S
30036@@ -9,6 +9,7 @@
30037 #include <asm/dwarf2.h>
30038 #include <asm/errno.h>
30039 #include <asm/asm.h>
30040+#include <asm/alternative-asm.h>
30041
30042 /*
30043 * Checksum copy with exception handling.
30044@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30045 CFI_ADJUST_CFA_OFFSET 7*8
30046 movq %rbx, 2*8(%rsp)
30047 CFI_REL_OFFSET rbx, 2*8
30048- movq %r12, 3*8(%rsp)
30049- CFI_REL_OFFSET r12, 3*8
30050+ movq %r15, 3*8(%rsp)
30051+ CFI_REL_OFFSET r15, 3*8
30052 movq %r14, 4*8(%rsp)
30053 CFI_REL_OFFSET r14, 4*8
30054 movq %r13, 5*8(%rsp)
30055@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30056 movl %edx, %ecx
30057
30058 xorl %r9d, %r9d
30059- movq %rcx, %r12
30060+ movq %rcx, %r15
30061
30062- shrq $6, %r12
30063+ shrq $6, %r15
30064 jz .Lhandle_tail /* < 64 */
30065
30066 clc
30067
30068 /* main loop. clear in 64 byte blocks */
30069 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30070- /* r11: temp3, rdx: temp4, r12 loopcnt */
30071+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30072 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30073 .p2align 4
30074 .Lloop:
30075@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30076 adcq %r14, %rax
30077 adcq %r13, %rax
30078
30079- decl %r12d
30080+ decl %r15d
30081
30082 dest
30083 movq %rbx, (%rsi)
30084@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30085 .Lende:
30086 movq 2*8(%rsp), %rbx
30087 CFI_RESTORE rbx
30088- movq 3*8(%rsp), %r12
30089- CFI_RESTORE r12
30090+ movq 3*8(%rsp), %r15
30091+ CFI_RESTORE r15
30092 movq 4*8(%rsp), %r14
30093 CFI_RESTORE r14
30094 movq 5*8(%rsp), %r13
30095@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30096 CFI_RESTORE rbp
30097 addq $7*8, %rsp
30098 CFI_ADJUST_CFA_OFFSET -7*8
30099+ pax_force_retaddr
30100 ret
30101 CFI_RESTORE_STATE
30102
30103diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30104index 1318f75..44c30fd 100644
30105--- a/arch/x86/lib/csum-wrappers_64.c
30106+++ b/arch/x86/lib/csum-wrappers_64.c
30107@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30108 len -= 2;
30109 }
30110 }
30111+ pax_open_userland();
30112 stac();
30113- isum = csum_partial_copy_generic((__force const void *)src,
30114+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30115 dst, len, isum, errp, NULL);
30116 clac();
30117+ pax_close_userland();
30118 if (unlikely(*errp))
30119 goto out_err;
30120
30121@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30122 }
30123
30124 *errp = 0;
30125+ pax_open_userland();
30126 stac();
30127- ret = csum_partial_copy_generic(src, (void __force *)dst,
30128+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30129 len, isum, NULL, errp);
30130 clac();
30131+ pax_close_userland();
30132 return ret;
30133 }
30134 EXPORT_SYMBOL(csum_partial_copy_to_user);
30135diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30136index a451235..a74bfa3 100644
30137--- a/arch/x86/lib/getuser.S
30138+++ b/arch/x86/lib/getuser.S
30139@@ -33,17 +33,40 @@
30140 #include <asm/thread_info.h>
30141 #include <asm/asm.h>
30142 #include <asm/smap.h>
30143+#include <asm/segment.h>
30144+#include <asm/pgtable.h>
30145+#include <asm/alternative-asm.h>
30146+
30147+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30148+#define __copyuser_seg gs;
30149+#else
30150+#define __copyuser_seg
30151+#endif
30152
30153 .text
30154 ENTRY(__get_user_1)
30155 CFI_STARTPROC
30156+
30157+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30158 GET_THREAD_INFO(%_ASM_DX)
30159 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30160 jae bad_get_user
30161+
30162+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30163+ mov pax_user_shadow_base,%_ASM_DX
30164+ cmp %_ASM_DX,%_ASM_AX
30165+ jae 1234f
30166+ add %_ASM_DX,%_ASM_AX
30167+1234:
30168+#endif
30169+
30170+#endif
30171+
30172 ASM_STAC
30173-1: movzbl (%_ASM_AX),%edx
30174+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30175 xor %eax,%eax
30176 ASM_CLAC
30177+ pax_force_retaddr
30178 ret
30179 CFI_ENDPROC
30180 ENDPROC(__get_user_1)
30181@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30182 ENTRY(__get_user_2)
30183 CFI_STARTPROC
30184 add $1,%_ASM_AX
30185+
30186+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30187 jc bad_get_user
30188 GET_THREAD_INFO(%_ASM_DX)
30189 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30190 jae bad_get_user
30191+
30192+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30193+ mov pax_user_shadow_base,%_ASM_DX
30194+ cmp %_ASM_DX,%_ASM_AX
30195+ jae 1234f
30196+ add %_ASM_DX,%_ASM_AX
30197+1234:
30198+#endif
30199+
30200+#endif
30201+
30202 ASM_STAC
30203-2: movzwl -1(%_ASM_AX),%edx
30204+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30205 xor %eax,%eax
30206 ASM_CLAC
30207+ pax_force_retaddr
30208 ret
30209 CFI_ENDPROC
30210 ENDPROC(__get_user_2)
30211@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30212 ENTRY(__get_user_4)
30213 CFI_STARTPROC
30214 add $3,%_ASM_AX
30215+
30216+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30217 jc bad_get_user
30218 GET_THREAD_INFO(%_ASM_DX)
30219 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30220 jae bad_get_user
30221+
30222+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30223+ mov pax_user_shadow_base,%_ASM_DX
30224+ cmp %_ASM_DX,%_ASM_AX
30225+ jae 1234f
30226+ add %_ASM_DX,%_ASM_AX
30227+1234:
30228+#endif
30229+
30230+#endif
30231+
30232 ASM_STAC
30233-3: movl -3(%_ASM_AX),%edx
30234+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30235 xor %eax,%eax
30236 ASM_CLAC
30237+ pax_force_retaddr
30238 ret
30239 CFI_ENDPROC
30240 ENDPROC(__get_user_4)
30241@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30242 GET_THREAD_INFO(%_ASM_DX)
30243 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30244 jae bad_get_user
30245+
30246+#ifdef CONFIG_PAX_MEMORY_UDEREF
30247+ mov pax_user_shadow_base,%_ASM_DX
30248+ cmp %_ASM_DX,%_ASM_AX
30249+ jae 1234f
30250+ add %_ASM_DX,%_ASM_AX
30251+1234:
30252+#endif
30253+
30254 ASM_STAC
30255 4: movq -7(%_ASM_AX),%rdx
30256 xor %eax,%eax
30257 ASM_CLAC
30258+ pax_force_retaddr
30259 ret
30260 #else
30261 add $7,%_ASM_AX
30262@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30263 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30264 jae bad_get_user_8
30265 ASM_STAC
30266-4: movl -7(%_ASM_AX),%edx
30267-5: movl -3(%_ASM_AX),%ecx
30268+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30269+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30270 xor %eax,%eax
30271 ASM_CLAC
30272+ pax_force_retaddr
30273 ret
30274 #endif
30275 CFI_ENDPROC
30276@@ -113,6 +175,7 @@ bad_get_user:
30277 xor %edx,%edx
30278 mov $(-EFAULT),%_ASM_AX
30279 ASM_CLAC
30280+ pax_force_retaddr
30281 ret
30282 CFI_ENDPROC
30283 END(bad_get_user)
30284@@ -124,6 +187,7 @@ bad_get_user_8:
30285 xor %ecx,%ecx
30286 mov $(-EFAULT),%_ASM_AX
30287 ASM_CLAC
30288+ pax_force_retaddr
30289 ret
30290 CFI_ENDPROC
30291 END(bad_get_user_8)
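
Under all the added UDEREF/KERNEXEC plumbing, the __get_user_N contract is unchanged: return 0 with the fetched value on success, or -EFAULT with a zeroed value when the address fails the limit check. A userspace model of that contract for the 4-byte case (LIMIT is illustrative; memcpy models the fixup-protected load):

#include <stdint.h>
#include <string.h>

#define MODEL_EFAULT 14
#define LIMIT ((uintptr_t)1 << 47)

static int get_user_u32(uint32_t *dst, const uint32_t *uaddr)
{
	if ((uintptr_t)uaddr > LIMIT - sizeof(*dst)) {	/* jae bad_get_user */
		*dst = 0;
		return -MODEL_EFAULT;
	}
	memcpy(dst, uaddr, sizeof(*dst));	/* the movl with exception fixup */
	return 0;
}
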
30292diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30293index 1313ae6..84f25ea 100644
30294--- a/arch/x86/lib/insn.c
30295+++ b/arch/x86/lib/insn.c
30296@@ -20,8 +20,10 @@
30297
30298 #ifdef __KERNEL__
30299 #include <linux/string.h>
30300+#include <asm/pgtable_types.h>
30301 #else
30302 #include <string.h>
30303+#define ktla_ktva(addr) addr
30304 #endif
30305 #include <asm/inat.h>
30306 #include <asm/insn.h>
30307@@ -53,9 +55,9 @@
30308 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30309 {
30310 memset(insn, 0, sizeof(*insn));
30311- insn->kaddr = kaddr;
30312- insn->end_kaddr = kaddr + buf_len;
30313- insn->next_byte = kaddr;
30314+ insn->kaddr = ktla_ktva(kaddr);
30315+ insn->end_kaddr = insn->kaddr + buf_len;
30316+ insn->next_byte = insn->kaddr;
30317 insn->x86_64 = x86_64 ? 1 : 0;
30318 insn->opnd_bytes = 4;
30319 if (x86_64)
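
The insn.c change matters under i386 KERNEXEC, where kernel text is mapped at a different virtual address than its linear alias: insn_init() now translates the buffer pointer with ktla_ktva() before any bytes are fetched, and the userspace build of the decoder defines the macro away to the identity, as shown. Callers are unaffected; a hedged usage sketch, relying only on the insn_init() signature visible in this hunk plus insn_get_length() and MAX_INSN_SIZE from the same header:

#include <asm/insn.h>

/* Sketch: decode one instruction at a kernel-text address; the
 * ktla_ktva() translation happens inside insn_init() above. */
static int insn_len_at(const void *kaddr)
{
	struct insn insn;

	insn_init(&insn, kaddr, MAX_INSN_SIZE, 1);	/* 1: 64-bit mode */
	insn_get_length(&insn);		/* walks prefixes through immediates */
	return insn.length;
}
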
30320diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30321index 05a95e7..326f2fa 100644
30322--- a/arch/x86/lib/iomap_copy_64.S
30323+++ b/arch/x86/lib/iomap_copy_64.S
30324@@ -17,6 +17,7 @@
30325
30326 #include <linux/linkage.h>
30327 #include <asm/dwarf2.h>
30328+#include <asm/alternative-asm.h>
30329
30330 /*
30331 * override generic version in lib/iomap_copy.c
30332@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30333 CFI_STARTPROC
30334 movl %edx,%ecx
30335 rep movsd
30336+ pax_force_retaddr
30337 ret
30338 CFI_ENDPROC
30339 ENDPROC(__iowrite32_copy)
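
pax_force_retaddr, pulled in here via asm/alternative-asm.h and inserted before nearly every ret in these files, is the KERNEXEC return-address hardening. In the bts plugin variant (an assumption; a masking variant also exists) it executes bts $63,(%rsp), so a return address overwritten with a user-space pointer becomes non-canonical and faults on ret, while a genuine kernel address, whose bit 63 is already set, passes through unchanged. A small userspace C demonstration of that property:

#include <stdio.h>

int main(void)
{
	unsigned long user_ra = 0x00007f1234567890UL;	/* user-space text */
	unsigned long kern_ra = 0xffffffff81000000UL;	/* kernel text */

	printf("user:   %#lx -> %#lx (non-canonical, ret faults)\n",
	       user_ra, user_ra | (1UL << 63));
	printf("kernel: %#lx -> %#lx (unchanged)\n",
	       kern_ra, kern_ra | (1UL << 63));
	return 0;
}
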
30340diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30341index 89b53c9..97357ca 100644
30342--- a/arch/x86/lib/memcpy_64.S
30343+++ b/arch/x86/lib/memcpy_64.S
30344@@ -24,7 +24,7 @@
30345 * This gets patched over the unrolled variant (below) via the
30346 * alternative instructions framework:
30347 */
30348- .section .altinstr_replacement, "ax", @progbits
30349+ .section .altinstr_replacement, "a", @progbits
30350 .Lmemcpy_c:
30351 movq %rdi, %rax
30352 movq %rdx, %rcx
30353@@ -33,6 +33,7 @@
30354 rep movsq
30355 movl %edx, %ecx
30356 rep movsb
30357+ pax_force_retaddr
30358 ret
30359 .Lmemcpy_e:
30360 .previous
30361@@ -44,11 +45,12 @@
30362 * This gets patched over the unrolled variant (below) via the
30363 * alternative instructions framework:
30364 */
30365- .section .altinstr_replacement, "ax", @progbits
30366+ .section .altinstr_replacement, "a", @progbits
30367 .Lmemcpy_c_e:
30368 movq %rdi, %rax
30369 movq %rdx, %rcx
30370 rep movsb
30371+ pax_force_retaddr
30372 ret
30373 .Lmemcpy_e_e:
30374 .previous
30375@@ -138,6 +140,7 @@ ENTRY(memcpy)
30376 movq %r9, 1*8(%rdi)
30377 movq %r10, -2*8(%rdi, %rdx)
30378 movq %r11, -1*8(%rdi, %rdx)
30379+ pax_force_retaddr
30380 retq
30381 .p2align 4
30382 .Lless_16bytes:
30383@@ -150,6 +153,7 @@ ENTRY(memcpy)
30384 movq -1*8(%rsi, %rdx), %r9
30385 movq %r8, 0*8(%rdi)
30386 movq %r9, -1*8(%rdi, %rdx)
30387+ pax_force_retaddr
30388 retq
30389 .p2align 4
30390 .Lless_8bytes:
30391@@ -163,6 +167,7 @@ ENTRY(memcpy)
30392 movl -4(%rsi, %rdx), %r8d
30393 movl %ecx, (%rdi)
30394 movl %r8d, -4(%rdi, %rdx)
30395+ pax_force_retaddr
30396 retq
30397 .p2align 4
30398 .Lless_3bytes:
30399@@ -181,6 +186,7 @@ ENTRY(memcpy)
30400 movb %cl, (%rdi)
30401
30402 .Lend:
30403+ pax_force_retaddr
30404 retq
30405 CFI_ENDPROC
30406 ENDPROC(memcpy)
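
The recurring section-flag change from "ax" to "a" strips SHF_EXECINSTR from .altinstr_replacement. The replacement bytes are never executed where they sit; the alternatives patcher copies them over the patch site at boot, so under KERNEXEC there is no reason to map them executable. A minimal sketch of that copy step, with stand-in names (the real code also pads with NOPs and fixes up relative branches):

#include <string.h>

struct alt_sketch {
	unsigned char *site;		/* instruction site in .text */
	const unsigned char *repl;	/* bytes in .altinstr_replacement */
	unsigned char repl_len;
};

static void apply_one_alternative(struct alt_sketch *a)
{
	/* the replacement is pure data here: copied, never jumped to */
	memcpy(a->site, a->repl, a->repl_len);
}
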
30407diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30408index 9c4b530..830b77a 100644
30409--- a/arch/x86/lib/memmove_64.S
30410+++ b/arch/x86/lib/memmove_64.S
30411@@ -205,14 +205,16 @@ ENTRY(__memmove)
30412 movb (%rsi), %r11b
30413 movb %r11b, (%rdi)
30414 13:
30415+ pax_force_retaddr
30416 retq
30417 CFI_ENDPROC
30418
30419- .section .altinstr_replacement,"ax"
30420+ .section .altinstr_replacement,"a"
30421 .Lmemmove_begin_forward_efs:
30422 /* Forward moving data. */
30423 movq %rdx, %rcx
30424 rep movsb
30425+ pax_force_retaddr
30426 retq
30427 .Lmemmove_end_forward_efs:
30428 .previous
30429diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30430index 6f44935..fbf5f6d 100644
30431--- a/arch/x86/lib/memset_64.S
30432+++ b/arch/x86/lib/memset_64.S
30433@@ -16,7 +16,7 @@
30434 *
30435 * rax original destination
30436 */
30437- .section .altinstr_replacement, "ax", @progbits
30438+ .section .altinstr_replacement, "a", @progbits
30439 .Lmemset_c:
30440 movq %rdi,%r9
30441 movq %rdx,%rcx
30442@@ -30,6 +30,7 @@
30443 movl %edx,%ecx
30444 rep stosb
30445 movq %r9,%rax
30446+ pax_force_retaddr
30447 ret
30448 .Lmemset_e:
30449 .previous
30450@@ -45,13 +46,14 @@
30451 *
30452 * rax original destination
30453 */
30454- .section .altinstr_replacement, "ax", @progbits
30455+ .section .altinstr_replacement, "a", @progbits
30456 .Lmemset_c_e:
30457 movq %rdi,%r9
30458 movb %sil,%al
30459 movq %rdx,%rcx
30460 rep stosb
30461 movq %r9,%rax
30462+ pax_force_retaddr
30463 ret
30464 .Lmemset_e_e:
30465 .previous
30466@@ -120,6 +122,7 @@ ENTRY(__memset)
30467
30468 .Lende:
30469 movq %r10,%rax
30470+ pax_force_retaddr
30471 ret
30472
30473 CFI_RESTORE_STATE
30474diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30475index c9f2d9b..e7fd2c0 100644
30476--- a/arch/x86/lib/mmx_32.c
30477+++ b/arch/x86/lib/mmx_32.c
30478@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30479 {
30480 void *p;
30481 int i;
30482+ unsigned long cr0;
30483
30484 if (unlikely(in_interrupt()))
30485 return __memcpy(to, from, len);
30486@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30487 kernel_fpu_begin();
30488
30489 __asm__ __volatile__ (
30490- "1: prefetch (%0)\n" /* This set is 28 bytes */
30491- " prefetch 64(%0)\n"
30492- " prefetch 128(%0)\n"
30493- " prefetch 192(%0)\n"
30494- " prefetch 256(%0)\n"
30495+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30496+ " prefetch 64(%1)\n"
30497+ " prefetch 128(%1)\n"
30498+ " prefetch 192(%1)\n"
30499+ " prefetch 256(%1)\n"
30500 "2: \n"
30501 ".section .fixup, \"ax\"\n"
30502- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30503+ "3: \n"
30504+
30505+#ifdef CONFIG_PAX_KERNEXEC
30506+ " movl %%cr0, %0\n"
30507+ " movl %0, %%eax\n"
30508+ " andl $0xFFFEFFFF, %%eax\n"
30509+ " movl %%eax, %%cr0\n"
30510+#endif
30511+
30512+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30513+
30514+#ifdef CONFIG_PAX_KERNEXEC
30515+ " movl %0, %%cr0\n"
30516+#endif
30517+
30518 " jmp 2b\n"
30519 ".previous\n"
30520 _ASM_EXTABLE(1b, 3b)
30521- : : "r" (from));
30522+ : "=&r" (cr0) : "r" (from) : "ax");
30523
30524 for ( ; i > 5; i--) {
30525 __asm__ __volatile__ (
30526- "1: prefetch 320(%0)\n"
30527- "2: movq (%0), %%mm0\n"
30528- " movq 8(%0), %%mm1\n"
30529- " movq 16(%0), %%mm2\n"
30530- " movq 24(%0), %%mm3\n"
30531- " movq %%mm0, (%1)\n"
30532- " movq %%mm1, 8(%1)\n"
30533- " movq %%mm2, 16(%1)\n"
30534- " movq %%mm3, 24(%1)\n"
30535- " movq 32(%0), %%mm0\n"
30536- " movq 40(%0), %%mm1\n"
30537- " movq 48(%0), %%mm2\n"
30538- " movq 56(%0), %%mm3\n"
30539- " movq %%mm0, 32(%1)\n"
30540- " movq %%mm1, 40(%1)\n"
30541- " movq %%mm2, 48(%1)\n"
30542- " movq %%mm3, 56(%1)\n"
30543+ "1: prefetch 320(%1)\n"
30544+ "2: movq (%1), %%mm0\n"
30545+ " movq 8(%1), %%mm1\n"
30546+ " movq 16(%1), %%mm2\n"
30547+ " movq 24(%1), %%mm3\n"
30548+ " movq %%mm0, (%2)\n"
30549+ " movq %%mm1, 8(%2)\n"
30550+ " movq %%mm2, 16(%2)\n"
30551+ " movq %%mm3, 24(%2)\n"
30552+ " movq 32(%1), %%mm0\n"
30553+ " movq 40(%1), %%mm1\n"
30554+ " movq 48(%1), %%mm2\n"
30555+ " movq 56(%1), %%mm3\n"
30556+ " movq %%mm0, 32(%2)\n"
30557+ " movq %%mm1, 40(%2)\n"
30558+ " movq %%mm2, 48(%2)\n"
30559+ " movq %%mm3, 56(%2)\n"
30560 ".section .fixup, \"ax\"\n"
30561- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30562+ "3:\n"
30563+
30564+#ifdef CONFIG_PAX_KERNEXEC
30565+ " movl %%cr0, %0\n"
30566+ " movl %0, %%eax\n"
30567+ " andl $0xFFFEFFFF, %%eax\n"
30568+ " movl %%eax, %%cr0\n"
30569+#endif
30570+
30571+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30572+
30573+#ifdef CONFIG_PAX_KERNEXEC
30574+ " movl %0, %%cr0\n"
30575+#endif
30576+
30577 " jmp 2b\n"
30578 ".previous\n"
30579 _ASM_EXTABLE(1b, 3b)
30580- : : "r" (from), "r" (to) : "memory");
30581+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30582
30583 from += 64;
30584 to += 64;
30585@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30586 static void fast_copy_page(void *to, void *from)
30587 {
30588 int i;
30589+ unsigned long cr0;
30590
30591 kernel_fpu_begin();
30592
30593@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30594 * but that is for later. -AV
30595 */
30596 __asm__ __volatile__(
30597- "1: prefetch (%0)\n"
30598- " prefetch 64(%0)\n"
30599- " prefetch 128(%0)\n"
30600- " prefetch 192(%0)\n"
30601- " prefetch 256(%0)\n"
30602+ "1: prefetch (%1)\n"
30603+ " prefetch 64(%1)\n"
30604+ " prefetch 128(%1)\n"
30605+ " prefetch 192(%1)\n"
30606+ " prefetch 256(%1)\n"
30607 "2: \n"
30608 ".section .fixup, \"ax\"\n"
30609- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30610+ "3: \n"
30611+
30612+#ifdef CONFIG_PAX_KERNEXEC
30613+ " movl %%cr0, %0\n"
30614+ " movl %0, %%eax\n"
30615+ " andl $0xFFFEFFFF, %%eax\n"
30616+ " movl %%eax, %%cr0\n"
30617+#endif
30618+
30619+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30620+
30621+#ifdef CONFIG_PAX_KERNEXEC
30622+ " movl %0, %%cr0\n"
30623+#endif
30624+
30625 " jmp 2b\n"
30626 ".previous\n"
30627- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30628+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30629
30630 for (i = 0; i < (4096-320)/64; i++) {
30631 __asm__ __volatile__ (
30632- "1: prefetch 320(%0)\n"
30633- "2: movq (%0), %%mm0\n"
30634- " movntq %%mm0, (%1)\n"
30635- " movq 8(%0), %%mm1\n"
30636- " movntq %%mm1, 8(%1)\n"
30637- " movq 16(%0), %%mm2\n"
30638- " movntq %%mm2, 16(%1)\n"
30639- " movq 24(%0), %%mm3\n"
30640- " movntq %%mm3, 24(%1)\n"
30641- " movq 32(%0), %%mm4\n"
30642- " movntq %%mm4, 32(%1)\n"
30643- " movq 40(%0), %%mm5\n"
30644- " movntq %%mm5, 40(%1)\n"
30645- " movq 48(%0), %%mm6\n"
30646- " movntq %%mm6, 48(%1)\n"
30647- " movq 56(%0), %%mm7\n"
30648- " movntq %%mm7, 56(%1)\n"
30649+ "1: prefetch 320(%1)\n"
30650+ "2: movq (%1), %%mm0\n"
30651+ " movntq %%mm0, (%2)\n"
30652+ " movq 8(%1), %%mm1\n"
30653+ " movntq %%mm1, 8(%2)\n"
30654+ " movq 16(%1), %%mm2\n"
30655+ " movntq %%mm2, 16(%2)\n"
30656+ " movq 24(%1), %%mm3\n"
30657+ " movntq %%mm3, 24(%2)\n"
30658+ " movq 32(%1), %%mm4\n"
30659+ " movntq %%mm4, 32(%2)\n"
30660+ " movq 40(%1), %%mm5\n"
30661+ " movntq %%mm5, 40(%2)\n"
30662+ " movq 48(%1), %%mm6\n"
30663+ " movntq %%mm6, 48(%2)\n"
30664+ " movq 56(%1), %%mm7\n"
30665+ " movntq %%mm7, 56(%2)\n"
30666 ".section .fixup, \"ax\"\n"
30667- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30668+ "3:\n"
30669+
30670+#ifdef CONFIG_PAX_KERNEXEC
30671+ " movl %%cr0, %0\n"
30672+ " movl %0, %%eax\n"
30673+ " andl $0xFFFEFFFF, %%eax\n"
30674+ " movl %%eax, %%cr0\n"
30675+#endif
30676+
30677+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30678+
30679+#ifdef CONFIG_PAX_KERNEXEC
30680+ " movl %0, %%cr0\n"
30681+#endif
30682+
30683 " jmp 2b\n"
30684 ".previous\n"
30685- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30686+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30687
30688 from += 64;
30689 to += 64;
30690@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30691 static void fast_copy_page(void *to, void *from)
30692 {
30693 int i;
30694+ unsigned long cr0;
30695
30696 kernel_fpu_begin();
30697
30698 __asm__ __volatile__ (
30699- "1: prefetch (%0)\n"
30700- " prefetch 64(%0)\n"
30701- " prefetch 128(%0)\n"
30702- " prefetch 192(%0)\n"
30703- " prefetch 256(%0)\n"
30704+ "1: prefetch (%1)\n"
30705+ " prefetch 64(%1)\n"
30706+ " prefetch 128(%1)\n"
30707+ " prefetch 192(%1)\n"
30708+ " prefetch 256(%1)\n"
30709 "2: \n"
30710 ".section .fixup, \"ax\"\n"
30711- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30712+ "3: \n"
30713+
30714+#ifdef CONFIG_PAX_KERNEXEC
30715+ " movl %%cr0, %0\n"
30716+ " movl %0, %%eax\n"
30717+ " andl $0xFFFEFFFF, %%eax\n"
30718+ " movl %%eax, %%cr0\n"
30719+#endif
30720+
30721+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30722+
30723+#ifdef CONFIG_PAX_KERNEXEC
30724+ " movl %0, %%cr0\n"
30725+#endif
30726+
30727 " jmp 2b\n"
30728 ".previous\n"
30729- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30730+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30731
30732 for (i = 0; i < 4096/64; i++) {
30733 __asm__ __volatile__ (
30734- "1: prefetch 320(%0)\n"
30735- "2: movq (%0), %%mm0\n"
30736- " movq 8(%0), %%mm1\n"
30737- " movq 16(%0), %%mm2\n"
30738- " movq 24(%0), %%mm3\n"
30739- " movq %%mm0, (%1)\n"
30740- " movq %%mm1, 8(%1)\n"
30741- " movq %%mm2, 16(%1)\n"
30742- " movq %%mm3, 24(%1)\n"
30743- " movq 32(%0), %%mm0\n"
30744- " movq 40(%0), %%mm1\n"
30745- " movq 48(%0), %%mm2\n"
30746- " movq 56(%0), %%mm3\n"
30747- " movq %%mm0, 32(%1)\n"
30748- " movq %%mm1, 40(%1)\n"
30749- " movq %%mm2, 48(%1)\n"
30750- " movq %%mm3, 56(%1)\n"
30751+ "1: prefetch 320(%1)\n"
30752+ "2: movq (%1), %%mm0\n"
30753+ " movq 8(%1), %%mm1\n"
30754+ " movq 16(%1), %%mm2\n"
30755+ " movq 24(%1), %%mm3\n"
30756+ " movq %%mm0, (%2)\n"
30757+ " movq %%mm1, 8(%2)\n"
30758+ " movq %%mm2, 16(%2)\n"
30759+ " movq %%mm3, 24(%2)\n"
30760+ " movq 32(%1), %%mm0\n"
30761+ " movq 40(%1), %%mm1\n"
30762+ " movq 48(%1), %%mm2\n"
30763+ " movq 56(%1), %%mm3\n"
30764+ " movq %%mm0, 32(%2)\n"
30765+ " movq %%mm1, 40(%2)\n"
30766+ " movq %%mm2, 48(%2)\n"
30767+ " movq %%mm3, 56(%2)\n"
30768 ".section .fixup, \"ax\"\n"
30769- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30770+ "3:\n"
30771+
30772+#ifdef CONFIG_PAX_KERNEXEC
30773+ " movl %%cr0, %0\n"
30774+ " movl %0, %%eax\n"
30775+ " andl $0xFFFEFFFF, %%eax\n"
30776+ " movl %%eax, %%cr0\n"
30777+#endif
30778+
30779+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30780+
30781+#ifdef CONFIG_PAX_KERNEXEC
30782+ " movl %0, %%cr0\n"
30783+#endif
30784+
30785 " jmp 2b\n"
30786 ".previous\n"
30787 _ASM_EXTABLE(1b, 3b)
30788- : : "r" (from), "r" (to) : "memory");
30789+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30790
30791 from += 64;
30792 to += 64;
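
The mmx_32.c fixup paths are self-patching: on a faulting prefetch they overwrite the instruction with a short jmp (movw $0x1AEB stores the bytes EB 1A, i.e. "jmp .+26", matching the "jmp on 26 bytes" comments). Since KERNEXEC write-protects kernel text, each fixup now brackets that text write with a CR0.WP toggle — andl $0xFFFEFFFF clears bit 16 — saving the old CR0 in the new cr0 output operand and clobbering %eax. The shape of the bracket as an illustrative i386 C helper; the real sequence must stay inside the single asm block of the fault fixup:

#define CR0_WP (1UL << 16)

/* Illustration only: not safe anywhere the task could migrate or be
 * preempted between the two CR0 writes. */
static inline void patch_text_word(unsigned short *site, unsigned short val)
{
	unsigned long cr0;

	asm volatile("movl %%cr0, %0" : "=r" (cr0));
	asm volatile("movl %0, %%cr0" : : "r" (cr0 & ~CR0_WP));
	*site = val;			/* e.g. 0x1AEB: "jmp .+26" */
	asm volatile("movl %0, %%cr0" : : "r" (cr0));
}
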
30793diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30794index f6d13ee..d789440 100644
30795--- a/arch/x86/lib/msr-reg.S
30796+++ b/arch/x86/lib/msr-reg.S
30797@@ -3,6 +3,7 @@
30798 #include <asm/dwarf2.h>
30799 #include <asm/asm.h>
30800 #include <asm/msr.h>
30801+#include <asm/alternative-asm.h>
30802
30803 #ifdef CONFIG_X86_64
30804 /*
30805@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30806 movl %edi, 28(%r10)
30807 popq_cfi %rbp
30808 popq_cfi %rbx
30809+ pax_force_retaddr
30810 ret
30811 3:
30812 CFI_RESTORE_STATE
30813diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30814index fc6ba17..14ad9a5 100644
30815--- a/arch/x86/lib/putuser.S
30816+++ b/arch/x86/lib/putuser.S
30817@@ -16,7 +16,9 @@
30818 #include <asm/errno.h>
30819 #include <asm/asm.h>
30820 #include <asm/smap.h>
30821-
30822+#include <asm/segment.h>
30823+#include <asm/pgtable.h>
30824+#include <asm/alternative-asm.h>
30825
30826 /*
30827 * __put_user_X
30828@@ -30,57 +32,125 @@
30829 * as they get called from within inline assembly.
30830 */
30831
30832-#define ENTER CFI_STARTPROC ; \
30833- GET_THREAD_INFO(%_ASM_BX)
30834-#define EXIT ASM_CLAC ; \
30835- ret ; \
30836+#define ENTER CFI_STARTPROC
30837+#define EXIT ASM_CLAC ; \
30838+ pax_force_retaddr ; \
30839+ ret ; \
30840 CFI_ENDPROC
30841
30842+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30843+#define _DEST %_ASM_CX,%_ASM_BX
30844+#else
30845+#define _DEST %_ASM_CX
30846+#endif
30847+
30848+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30849+#define __copyuser_seg gs;
30850+#else
30851+#define __copyuser_seg
30852+#endif
30853+
30854 .text
30855 ENTRY(__put_user_1)
30856 ENTER
30857+
30858+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30859+ GET_THREAD_INFO(%_ASM_BX)
30860 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30861 jae bad_put_user
30862+
30863+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30864+ mov pax_user_shadow_base,%_ASM_BX
30865+ cmp %_ASM_BX,%_ASM_CX
30866+ jb 1234f
30867+ xor %ebx,%ebx
30868+1234:
30869+#endif
30870+
30871+#endif
30872+
30873 ASM_STAC
30874-1: movb %al,(%_ASM_CX)
30875+1: __copyuser_seg movb %al,(_DEST)
30876 xor %eax,%eax
30877 EXIT
30878 ENDPROC(__put_user_1)
30879
30880 ENTRY(__put_user_2)
30881 ENTER
30882+
30883+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30884+ GET_THREAD_INFO(%_ASM_BX)
30885 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30886 sub $1,%_ASM_BX
30887 cmp %_ASM_BX,%_ASM_CX
30888 jae bad_put_user
30889+
30890+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30891+ mov pax_user_shadow_base,%_ASM_BX
30892+ cmp %_ASM_BX,%_ASM_CX
30893+ jb 1234f
30894+ xor %ebx,%ebx
30895+1234:
30896+#endif
30897+
30898+#endif
30899+
30900 ASM_STAC
30901-2: movw %ax,(%_ASM_CX)
30902+2: __copyuser_seg movw %ax,(_DEST)
30903 xor %eax,%eax
30904 EXIT
30905 ENDPROC(__put_user_2)
30906
30907 ENTRY(__put_user_4)
30908 ENTER
30909+
30910+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30911+ GET_THREAD_INFO(%_ASM_BX)
30912 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30913 sub $3,%_ASM_BX
30914 cmp %_ASM_BX,%_ASM_CX
30915 jae bad_put_user
30916+
30917+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30918+ mov pax_user_shadow_base,%_ASM_BX
30919+ cmp %_ASM_BX,%_ASM_CX
30920+ jb 1234f
30921+ xor %ebx,%ebx
30922+1234:
30923+#endif
30924+
30925+#endif
30926+
30927 ASM_STAC
30928-3: movl %eax,(%_ASM_CX)
30929+3: __copyuser_seg movl %eax,(_DEST)
30930 xor %eax,%eax
30931 EXIT
30932 ENDPROC(__put_user_4)
30933
30934 ENTRY(__put_user_8)
30935 ENTER
30936+
30937+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30938+ GET_THREAD_INFO(%_ASM_BX)
30939 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30940 sub $7,%_ASM_BX
30941 cmp %_ASM_BX,%_ASM_CX
30942 jae bad_put_user
30943+
30944+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30945+ mov pax_user_shadow_base,%_ASM_BX
30946+ cmp %_ASM_BX,%_ASM_CX
30947+ jb 1234f
30948+ xor %ebx,%ebx
30949+1234:
30950+#endif
30951+
30952+#endif
30953+
30954 ASM_STAC
30955-4: mov %_ASM_AX,(%_ASM_CX)
30956+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30957 #ifdef CONFIG_X86_32
30958-5: movl %edx,4(%_ASM_CX)
30959+5: __copyuser_seg movl %edx,4(_DEST)
30960 #endif
30961 xor %eax,%eax
30962 EXIT
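
putuser.S gets the same treatment as getuser.S, with one twist: ENTER no longer loads thread_info unconditionally, and on amd64 UDEREF the redirect is expressed through an index register. %_ASM_BX is loaded with pax_user_shadow_base and zeroed when the target is at or above the base, so the store destination (_DEST) = (%_ASM_CX,%_ASM_BX) resolves to either addr or addr + shadow_base. The equivalent C, again as an illustration of the assembly above rather than patch code:

/* Illustration only; companion to the getuser sketch earlier. */
static inline void put_user_redirected(unsigned long addr, unsigned long val,
				       unsigned long shadow_base)
{
	unsigned long index = (addr < shadow_base) ? shadow_base : 0;

	*(unsigned long *)(addr + index) = val;	/* mov %rax,(%rcx,%rbx) */
}
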
30963diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30964index 5dff5f0..cadebf4 100644
30965--- a/arch/x86/lib/rwsem.S
30966+++ b/arch/x86/lib/rwsem.S
30967@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30968 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30969 CFI_RESTORE __ASM_REG(dx)
30970 restore_common_regs
30971+ pax_force_retaddr
30972 ret
30973 CFI_ENDPROC
30974 ENDPROC(call_rwsem_down_read_failed)
30975@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30976 movq %rax,%rdi
30977 call rwsem_down_write_failed
30978 restore_common_regs
30979+ pax_force_retaddr
30980 ret
30981 CFI_ENDPROC
30982 ENDPROC(call_rwsem_down_write_failed)
30983@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30984 movq %rax,%rdi
30985 call rwsem_wake
30986 restore_common_regs
30987-1: ret
30988+1: pax_force_retaddr
30989+ ret
30990 CFI_ENDPROC
30991 ENDPROC(call_rwsem_wake)
30992
30993@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30994 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30995 CFI_RESTORE __ASM_REG(dx)
30996 restore_common_regs
30997+ pax_force_retaddr
30998 ret
30999 CFI_ENDPROC
31000 ENDPROC(call_rwsem_downgrade_wake)
31001diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31002index b30b5eb..2b57052 100644
31003--- a/arch/x86/lib/thunk_64.S
31004+++ b/arch/x86/lib/thunk_64.S
31005@@ -9,6 +9,7 @@
31006 #include <asm/dwarf2.h>
31007 #include <asm/calling.h>
31008 #include <asm/asm.h>
31009+#include <asm/alternative-asm.h>
31010
31011 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31012 .macro THUNK name, func, put_ret_addr_in_rdi=0
31013@@ -16,11 +17,11 @@
31014 \name:
31015 CFI_STARTPROC
31016
31017- /* this one pushes 9 elems, the next one would be %rIP */
31018- SAVE_ARGS
31019+ /* this one pushes 15+1 elems, the next one would be %rIP */
31020+ SAVE_ARGS 8
31021
31022 .if \put_ret_addr_in_rdi
31023- movq_cfi_restore 9*8, rdi
31024+ movq_cfi_restore RIP, rdi
31025 .endif
31026
31027 call \func
31028@@ -47,9 +48,10 @@
31029
31030 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31031 CFI_STARTPROC
31032- SAVE_ARGS
31033+ SAVE_ARGS 8
31034 restore:
31035- RESTORE_ARGS
31036+ RESTORE_ARGS 1,8
31037+ pax_force_retaddr
31038 ret
31039 CFI_ENDPROC
31040 _ASM_NOKPROBE(restore)
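
In thunk_64.S the saved frame grows from 9 slots to the full register set (the "15+1 elems" comment), so the thunk's return address is located via the RIP frame offset instead of the hard-coded 9*8. The put_ret_addr_in_rdi option exists so callees such as the lockdep trace hooks receive the real call site; roughly the following C analogy, assuming the era's trace_hardirqs_on_caller() signature:

extern void trace_hardirqs_on_caller(unsigned long caller_addr);

/* Analogy only: pass our own return address to the callee as arg1,
 * which is what put_ret_addr_in_rdi arranges in the thunk. */
static __attribute__((noinline)) void trace_hardirqs_on_analogy(void)
{
	trace_hardirqs_on_caller((unsigned long)__builtin_return_address(0));
}
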
31041diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31042index e2f5e21..4b22130 100644
31043--- a/arch/x86/lib/usercopy_32.c
31044+++ b/arch/x86/lib/usercopy_32.c
31045@@ -42,11 +42,13 @@ do { \
31046 int __d0; \
31047 might_fault(); \
31048 __asm__ __volatile__( \
31049+ __COPYUSER_SET_ES \
31050 ASM_STAC "\n" \
31051 "0: rep; stosl\n" \
31052 " movl %2,%0\n" \
31053 "1: rep; stosb\n" \
31054 "2: " ASM_CLAC "\n" \
31055+ __COPYUSER_RESTORE_ES \
31056 ".section .fixup,\"ax\"\n" \
31057 "3: lea 0(%2,%0,4),%0\n" \
31058 " jmp 2b\n" \
31059@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31060
31061 #ifdef CONFIG_X86_INTEL_USERCOPY
31062 static unsigned long
31063-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31064+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31065 {
31066 int d0, d1;
31067 __asm__ __volatile__(
31068@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31069 " .align 2,0x90\n"
31070 "3: movl 0(%4), %%eax\n"
31071 "4: movl 4(%4), %%edx\n"
31072- "5: movl %%eax, 0(%3)\n"
31073- "6: movl %%edx, 4(%3)\n"
31074+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31075+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31076 "7: movl 8(%4), %%eax\n"
31077 "8: movl 12(%4),%%edx\n"
31078- "9: movl %%eax, 8(%3)\n"
31079- "10: movl %%edx, 12(%3)\n"
31080+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31081+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31082 "11: movl 16(%4), %%eax\n"
31083 "12: movl 20(%4), %%edx\n"
31084- "13: movl %%eax, 16(%3)\n"
31085- "14: movl %%edx, 20(%3)\n"
31086+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31087+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31088 "15: movl 24(%4), %%eax\n"
31089 "16: movl 28(%4), %%edx\n"
31090- "17: movl %%eax, 24(%3)\n"
31091- "18: movl %%edx, 28(%3)\n"
31092+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31093+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31094 "19: movl 32(%4), %%eax\n"
31095 "20: movl 36(%4), %%edx\n"
31096- "21: movl %%eax, 32(%3)\n"
31097- "22: movl %%edx, 36(%3)\n"
31098+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31099+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31100 "23: movl 40(%4), %%eax\n"
31101 "24: movl 44(%4), %%edx\n"
31102- "25: movl %%eax, 40(%3)\n"
31103- "26: movl %%edx, 44(%3)\n"
31104+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31105+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31106 "27: movl 48(%4), %%eax\n"
31107 "28: movl 52(%4), %%edx\n"
31108- "29: movl %%eax, 48(%3)\n"
31109- "30: movl %%edx, 52(%3)\n"
31110+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31111+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31112 "31: movl 56(%4), %%eax\n"
31113 "32: movl 60(%4), %%edx\n"
31114- "33: movl %%eax, 56(%3)\n"
31115- "34: movl %%edx, 60(%3)\n"
31116+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31117+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31118 " addl $-64, %0\n"
31119 " addl $64, %4\n"
31120 " addl $64, %3\n"
31121@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31122 " shrl $2, %0\n"
31123 " andl $3, %%eax\n"
31124 " cld\n"
31125+ __COPYUSER_SET_ES
31126 "99: rep; movsl\n"
31127 "36: movl %%eax, %0\n"
31128 "37: rep; movsb\n"
31129 "100:\n"
31130+ __COPYUSER_RESTORE_ES
31131+ ".section .fixup,\"ax\"\n"
31132+ "101: lea 0(%%eax,%0,4),%0\n"
31133+ " jmp 100b\n"
31134+ ".previous\n"
31135+ _ASM_EXTABLE(1b,100b)
31136+ _ASM_EXTABLE(2b,100b)
31137+ _ASM_EXTABLE(3b,100b)
31138+ _ASM_EXTABLE(4b,100b)
31139+ _ASM_EXTABLE(5b,100b)
31140+ _ASM_EXTABLE(6b,100b)
31141+ _ASM_EXTABLE(7b,100b)
31142+ _ASM_EXTABLE(8b,100b)
31143+ _ASM_EXTABLE(9b,100b)
31144+ _ASM_EXTABLE(10b,100b)
31145+ _ASM_EXTABLE(11b,100b)
31146+ _ASM_EXTABLE(12b,100b)
31147+ _ASM_EXTABLE(13b,100b)
31148+ _ASM_EXTABLE(14b,100b)
31149+ _ASM_EXTABLE(15b,100b)
31150+ _ASM_EXTABLE(16b,100b)
31151+ _ASM_EXTABLE(17b,100b)
31152+ _ASM_EXTABLE(18b,100b)
31153+ _ASM_EXTABLE(19b,100b)
31154+ _ASM_EXTABLE(20b,100b)
31155+ _ASM_EXTABLE(21b,100b)
31156+ _ASM_EXTABLE(22b,100b)
31157+ _ASM_EXTABLE(23b,100b)
31158+ _ASM_EXTABLE(24b,100b)
31159+ _ASM_EXTABLE(25b,100b)
31160+ _ASM_EXTABLE(26b,100b)
31161+ _ASM_EXTABLE(27b,100b)
31162+ _ASM_EXTABLE(28b,100b)
31163+ _ASM_EXTABLE(29b,100b)
31164+ _ASM_EXTABLE(30b,100b)
31165+ _ASM_EXTABLE(31b,100b)
31166+ _ASM_EXTABLE(32b,100b)
31167+ _ASM_EXTABLE(33b,100b)
31168+ _ASM_EXTABLE(34b,100b)
31169+ _ASM_EXTABLE(35b,100b)
31170+ _ASM_EXTABLE(36b,100b)
31171+ _ASM_EXTABLE(37b,100b)
31172+ _ASM_EXTABLE(99b,101b)
31173+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31174+ : "1"(to), "2"(from), "0"(size)
31175+ : "eax", "edx", "memory");
31176+ return size;
31177+}
31178+
31179+static unsigned long
31180+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31181+{
31182+ int d0, d1;
31183+ __asm__ __volatile__(
31184+ " .align 2,0x90\n"
31185+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31186+ " cmpl $67, %0\n"
31187+ " jbe 3f\n"
31188+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31189+ " .align 2,0x90\n"
31190+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31191+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31192+ "5: movl %%eax, 0(%3)\n"
31193+ "6: movl %%edx, 4(%3)\n"
31194+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31195+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31196+ "9: movl %%eax, 8(%3)\n"
31197+ "10: movl %%edx, 12(%3)\n"
31198+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31199+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31200+ "13: movl %%eax, 16(%3)\n"
31201+ "14: movl %%edx, 20(%3)\n"
31202+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31203+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31204+ "17: movl %%eax, 24(%3)\n"
31205+ "18: movl %%edx, 28(%3)\n"
31206+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31207+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31208+ "21: movl %%eax, 32(%3)\n"
31209+ "22: movl %%edx, 36(%3)\n"
31210+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31211+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31212+ "25: movl %%eax, 40(%3)\n"
31213+ "26: movl %%edx, 44(%3)\n"
31214+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31215+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31216+ "29: movl %%eax, 48(%3)\n"
31217+ "30: movl %%edx, 52(%3)\n"
31218+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31219+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31220+ "33: movl %%eax, 56(%3)\n"
31221+ "34: movl %%edx, 60(%3)\n"
31222+ " addl $-64, %0\n"
31223+ " addl $64, %4\n"
31224+ " addl $64, %3\n"
31225+ " cmpl $63, %0\n"
31226+ " ja 1b\n"
31227+ "35: movl %0, %%eax\n"
31228+ " shrl $2, %0\n"
31229+ " andl $3, %%eax\n"
31230+ " cld\n"
31231+ "99: rep; "__copyuser_seg" movsl\n"
31232+ "36: movl %%eax, %0\n"
31233+ "37: rep; "__copyuser_seg" movsb\n"
31234+ "100:\n"
31235 ".section .fixup,\"ax\"\n"
31236 "101: lea 0(%%eax,%0,4),%0\n"
31237 " jmp 100b\n"
31238@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31239 int d0, d1;
31240 __asm__ __volatile__(
31241 " .align 2,0x90\n"
31242- "0: movl 32(%4), %%eax\n"
31243+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31244 " cmpl $67, %0\n"
31245 " jbe 2f\n"
31246- "1: movl 64(%4), %%eax\n"
31247+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31248 " .align 2,0x90\n"
31249- "2: movl 0(%4), %%eax\n"
31250- "21: movl 4(%4), %%edx\n"
31251+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31252+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31253 " movl %%eax, 0(%3)\n"
31254 " movl %%edx, 4(%3)\n"
31255- "3: movl 8(%4), %%eax\n"
31256- "31: movl 12(%4),%%edx\n"
31257+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31258+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31259 " movl %%eax, 8(%3)\n"
31260 " movl %%edx, 12(%3)\n"
31261- "4: movl 16(%4), %%eax\n"
31262- "41: movl 20(%4), %%edx\n"
31263+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31264+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31265 " movl %%eax, 16(%3)\n"
31266 " movl %%edx, 20(%3)\n"
31267- "10: movl 24(%4), %%eax\n"
31268- "51: movl 28(%4), %%edx\n"
31269+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31270+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31271 " movl %%eax, 24(%3)\n"
31272 " movl %%edx, 28(%3)\n"
31273- "11: movl 32(%4), %%eax\n"
31274- "61: movl 36(%4), %%edx\n"
31275+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31276+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31277 " movl %%eax, 32(%3)\n"
31278 " movl %%edx, 36(%3)\n"
31279- "12: movl 40(%4), %%eax\n"
31280- "71: movl 44(%4), %%edx\n"
31281+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31282+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31283 " movl %%eax, 40(%3)\n"
31284 " movl %%edx, 44(%3)\n"
31285- "13: movl 48(%4), %%eax\n"
31286- "81: movl 52(%4), %%edx\n"
31287+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31288+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31289 " movl %%eax, 48(%3)\n"
31290 " movl %%edx, 52(%3)\n"
31291- "14: movl 56(%4), %%eax\n"
31292- "91: movl 60(%4), %%edx\n"
31293+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31294+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31295 " movl %%eax, 56(%3)\n"
31296 " movl %%edx, 60(%3)\n"
31297 " addl $-64, %0\n"
31298@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31299 " shrl $2, %0\n"
31300 " andl $3, %%eax\n"
31301 " cld\n"
31302- "6: rep; movsl\n"
31303+ "6: rep; "__copyuser_seg" movsl\n"
31304 " movl %%eax,%0\n"
31305- "7: rep; movsb\n"
31306+ "7: rep; "__copyuser_seg" movsb\n"
31307 "8:\n"
31308 ".section .fixup,\"ax\"\n"
31309 "9: lea 0(%%eax,%0,4),%0\n"
31310@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31311
31312 __asm__ __volatile__(
31313 " .align 2,0x90\n"
31314- "0: movl 32(%4), %%eax\n"
31315+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31316 " cmpl $67, %0\n"
31317 " jbe 2f\n"
31318- "1: movl 64(%4), %%eax\n"
31319+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31320 " .align 2,0x90\n"
31321- "2: movl 0(%4), %%eax\n"
31322- "21: movl 4(%4), %%edx\n"
31323+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31324+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31325 " movnti %%eax, 0(%3)\n"
31326 " movnti %%edx, 4(%3)\n"
31327- "3: movl 8(%4), %%eax\n"
31328- "31: movl 12(%4),%%edx\n"
31329+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31330+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31331 " movnti %%eax, 8(%3)\n"
31332 " movnti %%edx, 12(%3)\n"
31333- "4: movl 16(%4), %%eax\n"
31334- "41: movl 20(%4), %%edx\n"
31335+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31336+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31337 " movnti %%eax, 16(%3)\n"
31338 " movnti %%edx, 20(%3)\n"
31339- "10: movl 24(%4), %%eax\n"
31340- "51: movl 28(%4), %%edx\n"
31341+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31342+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31343 " movnti %%eax, 24(%3)\n"
31344 " movnti %%edx, 28(%3)\n"
31345- "11: movl 32(%4), %%eax\n"
31346- "61: movl 36(%4), %%edx\n"
31347+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31348+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31349 " movnti %%eax, 32(%3)\n"
31350 " movnti %%edx, 36(%3)\n"
31351- "12: movl 40(%4), %%eax\n"
31352- "71: movl 44(%4), %%edx\n"
31353+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31354+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31355 " movnti %%eax, 40(%3)\n"
31356 " movnti %%edx, 44(%3)\n"
31357- "13: movl 48(%4), %%eax\n"
31358- "81: movl 52(%4), %%edx\n"
31359+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31360+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31361 " movnti %%eax, 48(%3)\n"
31362 " movnti %%edx, 52(%3)\n"
31363- "14: movl 56(%4), %%eax\n"
31364- "91: movl 60(%4), %%edx\n"
31365+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31366+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31367 " movnti %%eax, 56(%3)\n"
31368 " movnti %%edx, 60(%3)\n"
31369 " addl $-64, %0\n"
31370@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31371 " shrl $2, %0\n"
31372 " andl $3, %%eax\n"
31373 " cld\n"
31374- "6: rep; movsl\n"
31375+ "6: rep; "__copyuser_seg" movsl\n"
31376 " movl %%eax,%0\n"
31377- "7: rep; movsb\n"
31378+ "7: rep; "__copyuser_seg" movsb\n"
31379 "8:\n"
31380 ".section .fixup,\"ax\"\n"
31381 "9: lea 0(%%eax,%0,4),%0\n"
31382@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31383
31384 __asm__ __volatile__(
31385 " .align 2,0x90\n"
31386- "0: movl 32(%4), %%eax\n"
31387+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31388 " cmpl $67, %0\n"
31389 " jbe 2f\n"
31390- "1: movl 64(%4), %%eax\n"
31391+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31392 " .align 2,0x90\n"
31393- "2: movl 0(%4), %%eax\n"
31394- "21: movl 4(%4), %%edx\n"
31395+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31396+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31397 " movnti %%eax, 0(%3)\n"
31398 " movnti %%edx, 4(%3)\n"
31399- "3: movl 8(%4), %%eax\n"
31400- "31: movl 12(%4),%%edx\n"
31401+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31402+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31403 " movnti %%eax, 8(%3)\n"
31404 " movnti %%edx, 12(%3)\n"
31405- "4: movl 16(%4), %%eax\n"
31406- "41: movl 20(%4), %%edx\n"
31407+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31408+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31409 " movnti %%eax, 16(%3)\n"
31410 " movnti %%edx, 20(%3)\n"
31411- "10: movl 24(%4), %%eax\n"
31412- "51: movl 28(%4), %%edx\n"
31413+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31414+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31415 " movnti %%eax, 24(%3)\n"
31416 " movnti %%edx, 28(%3)\n"
31417- "11: movl 32(%4), %%eax\n"
31418- "61: movl 36(%4), %%edx\n"
31419+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31420+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31421 " movnti %%eax, 32(%3)\n"
31422 " movnti %%edx, 36(%3)\n"
31423- "12: movl 40(%4), %%eax\n"
31424- "71: movl 44(%4), %%edx\n"
31425+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31426+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31427 " movnti %%eax, 40(%3)\n"
31428 " movnti %%edx, 44(%3)\n"
31429- "13: movl 48(%4), %%eax\n"
31430- "81: movl 52(%4), %%edx\n"
31431+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31432+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31433 " movnti %%eax, 48(%3)\n"
31434 " movnti %%edx, 52(%3)\n"
31435- "14: movl 56(%4), %%eax\n"
31436- "91: movl 60(%4), %%edx\n"
31437+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31438+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31439 " movnti %%eax, 56(%3)\n"
31440 " movnti %%edx, 60(%3)\n"
31441 " addl $-64, %0\n"
31442@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31443 " shrl $2, %0\n"
31444 " andl $3, %%eax\n"
31445 " cld\n"
31446- "6: rep; movsl\n"
31447+ "6: rep; "__copyuser_seg" movsl\n"
31448 " movl %%eax,%0\n"
31449- "7: rep; movsb\n"
31450+ "7: rep; "__copyuser_seg" movsb\n"
31451 "8:\n"
31452 ".section .fixup,\"ax\"\n"
31453 "9: lea 0(%%eax,%0,4),%0\n"
31454@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31455 */
31456 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31457 unsigned long size);
31458-unsigned long __copy_user_intel(void __user *to, const void *from,
31459+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31460+ unsigned long size);
31461+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31462 unsigned long size);
31463 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31464 const void __user *from, unsigned long size);
31465 #endif /* CONFIG_X86_INTEL_USERCOPY */
31466
31467 /* Generic arbitrary sized copy. */
31468-#define __copy_user(to, from, size) \
31469+#define __copy_user(to, from, size, prefix, set, restore) \
31470 do { \
31471 int __d0, __d1, __d2; \
31472 __asm__ __volatile__( \
31473+ set \
31474 " cmp $7,%0\n" \
31475 " jbe 1f\n" \
31476 " movl %1,%0\n" \
31477 " negl %0\n" \
31478 " andl $7,%0\n" \
31479 " subl %0,%3\n" \
31480- "4: rep; movsb\n" \
31481+ "4: rep; "prefix"movsb\n" \
31482 " movl %3,%0\n" \
31483 " shrl $2,%0\n" \
31484 " andl $3,%3\n" \
31485 " .align 2,0x90\n" \
31486- "0: rep; movsl\n" \
31487+ "0: rep; "prefix"movsl\n" \
31488 " movl %3,%0\n" \
31489- "1: rep; movsb\n" \
31490+ "1: rep; "prefix"movsb\n" \
31491 "2:\n" \
31492+ restore \
31493 ".section .fixup,\"ax\"\n" \
31494 "5: addl %3,%0\n" \
31495 " jmp 2b\n" \
31496@@ -538,14 +650,14 @@ do { \
31497 " negl %0\n" \
31498 " andl $7,%0\n" \
31499 " subl %0,%3\n" \
31500- "4: rep; movsb\n" \
31501+ "4: rep; "__copyuser_seg"movsb\n" \
31502 " movl %3,%0\n" \
31503 " shrl $2,%0\n" \
31504 " andl $3,%3\n" \
31505 " .align 2,0x90\n" \
31506- "0: rep; movsl\n" \
31507+ "0: rep; "__copyuser_seg"movsl\n" \
31508 " movl %3,%0\n" \
31509- "1: rep; movsb\n" \
31510+ "1: rep; "__copyuser_seg"movsb\n" \
31511 "2:\n" \
31512 ".section .fixup,\"ax\"\n" \
31513 "5: addl %3,%0\n" \
31514@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31515 {
31516 stac();
31517 if (movsl_is_ok(to, from, n))
31518- __copy_user(to, from, n);
31519+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31520 else
31521- n = __copy_user_intel(to, from, n);
31522+ n = __generic_copy_to_user_intel(to, from, n);
31523 clac();
31524 return n;
31525 }
31526@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31527 {
31528 stac();
31529 if (movsl_is_ok(to, from, n))
31530- __copy_user(to, from, n);
31531+ __copy_user(to, from, n, __copyuser_seg, "", "");
31532 else
31533- n = __copy_user_intel((void __user *)to,
31534- (const void *)from, n);
31535+ n = __generic_copy_from_user_intel(to, from, n);
31536 clac();
31537 return n;
31538 }
31539@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31540 if (n > 64 && cpu_has_xmm2)
31541 n = __copy_user_intel_nocache(to, from, n);
31542 else
31543- __copy_user(to, from, n);
31544+ __copy_user(to, from, n, __copyuser_seg, "", "");
31545 #else
31546- __copy_user(to, from, n);
31547+ __copy_user(to, from, n, __copyuser_seg, "", "");
31548 #endif
31549 clac();
31550 return n;
31551 }
31552 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31553
31554-/**
31555- * copy_to_user: - Copy a block of data into user space.
31556- * @to: Destination address, in user space.
31557- * @from: Source address, in kernel space.
31558- * @n: Number of bytes to copy.
31559- *
31560- * Context: User context only. This function may sleep.
31561- *
31562- * Copy data from kernel space to user space.
31563- *
31564- * Returns number of bytes that could not be copied.
31565- * On success, this will be zero.
31566- */
31567-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31568+#ifdef CONFIG_PAX_MEMORY_UDEREF
31569+void __set_fs(mm_segment_t x)
31570 {
31571- if (access_ok(VERIFY_WRITE, to, n))
31572- n = __copy_to_user(to, from, n);
31573- return n;
31574+ switch (x.seg) {
31575+ case 0:
31576+ loadsegment(gs, 0);
31577+ break;
31578+ case TASK_SIZE_MAX:
31579+ loadsegment(gs, __USER_DS);
31580+ break;
31581+ case -1UL:
31582+ loadsegment(gs, __KERNEL_DS);
31583+ break;
31584+ default:
31585+ BUG();
31586+ }
31587 }
31588-EXPORT_SYMBOL(_copy_to_user);
31589+EXPORT_SYMBOL(__set_fs);
31590
31591-/**
31592- * copy_from_user: - Copy a block of data from user space.
31593- * @to: Destination address, in kernel space.
31594- * @from: Source address, in user space.
31595- * @n: Number of bytes to copy.
31596- *
31597- * Context: User context only. This function may sleep.
31598- *
31599- * Copy data from user space to kernel space.
31600- *
31601- * Returns number of bytes that could not be copied.
31602- * On success, this will be zero.
31603- *
31604- * If some data could not be copied, this function will pad the copied
31605- * data to the requested size using zero bytes.
31606- */
31607-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31608+void set_fs(mm_segment_t x)
31609 {
31610- if (access_ok(VERIFY_READ, from, n))
31611- n = __copy_from_user(to, from, n);
31612- else
31613- memset(to, 0, n);
31614- return n;
31615+ current_thread_info()->addr_limit = x;
31616+ __set_fs(x);
31617 }
31618-EXPORT_SYMBOL(_copy_from_user);
31619+EXPORT_SYMBOL(set_fs);
31620+#endif
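
With UDEREF, the i386 set_fs() is no longer a bare thread_info field write: __set_fs() above also reloads %gs so the segment used by the __copyuser_seg-prefixed accessors tracks the address limit — 0 unmaps it, USER_DS (TASK_SIZE_MAX) selects the user descriptor, KERNEL_DS (-1UL) selects __KERNEL_DS. The classic get_fs()/set_fs() bracket therefore keeps working unchanged; a usage sketch, assuming kernel headers:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t kernel_read_sketch(struct file *f, void *buf,
				  size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* under UDEREF: %gs now spans kernel space */
	ret = vfs_read(f, (char __user *)buf, count, pos);
	set_fs(old_fs);		/* back to the user-only segment */
	return ret;
}
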
31621diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31622index 1f33b3d..83c151d 100644
31623--- a/arch/x86/lib/usercopy_64.c
31624+++ b/arch/x86/lib/usercopy_64.c
31625@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31626 might_fault();
31627 /* no memory constraint because it doesn't change any memory gcc knows
31628 about */
31629+ pax_open_userland();
31630 stac();
31631 asm volatile(
31632 " testq %[size8],%[size8]\n"
31633@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31634 _ASM_EXTABLE(0b,3b)
31635 _ASM_EXTABLE(1b,2b)
31636 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31637- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31638+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31639 [zero] "r" (0UL), [eight] "r" (8UL));
31640 clac();
31641+ pax_close_userland();
31642 return size;
31643 }
31644 EXPORT_SYMBOL(__clear_user);
31645@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31646 }
31647 EXPORT_SYMBOL(clear_user);
31648
31649-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31650+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31651 {
31652- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31653- return copy_user_generic((__force void *)to, (__force void *)from, len);
31654- }
31655- return len;
31656+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31657+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31658+ return len;
31659 }
31660 EXPORT_SYMBOL(copy_in_user);
31661
31662@@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31663 * it is not necessary to optimize tail handling.
31664 */
31665 __visible unsigned long
31666-copy_user_handle_tail(char *to, char *from, unsigned len)
31667+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31668 {
31669+ clac();
31670+ pax_close_userland();
31671 for (; len; --len, to++) {
31672 char c;
31673
31674@@ -79,7 +82,6 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31675 if (__put_user_nocheck(c, to, sizeof(char)))
31676 break;
31677 }
31678- clac();
31679
31680 /* If the destination is a kernel buffer, we always clear the end */
31681 if ((unsigned long)to >= TASK_SIZE_MAX)
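
Two details in usercopy_64.c: __clear_user() now brackets the access with pax_open_userland()/pax_close_userland() around stac()/clac() — UDEREF's analogue of the SMAP window — and copy_user_handle_tail(), which runs after a fault in the optimized copy, closes that window first thing rather than last, since it is entered with the window still open. The shape of such a bracketed accessor; raw_clear_user() is a hypothetical stand-in for the unchecked inner loop:

static unsigned long clear_user_window(void __user *addr, unsigned long size)
{
	unsigned long left;

	pax_open_userland();	/* make userland reachable (UDEREF) */
	stac();			/* lift SMAP for the access */
	left = raw_clear_user(addr, size);	/* hypothetical helper */
	clac();
	pax_close_userland();
	return left;		/* bytes not cleared */
}
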
31682diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31683index c4cc740..60a7362 100644
31684--- a/arch/x86/mm/Makefile
31685+++ b/arch/x86/mm/Makefile
31686@@ -35,3 +35,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31687 obj-$(CONFIG_MEMTEST) += memtest.o
31688
31689 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31690+
31691+quote:="
31692+obj-$(CONFIG_X86_64) += uderef_64.o
31693+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31694diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31695index 903ec1e..c4166b2 100644
31696--- a/arch/x86/mm/extable.c
31697+++ b/arch/x86/mm/extable.c
31698@@ -6,12 +6,24 @@
31699 static inline unsigned long
31700 ex_insn_addr(const struct exception_table_entry *x)
31701 {
31702- return (unsigned long)&x->insn + x->insn;
31703+ unsigned long reloc = 0;
31704+
31705+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31706+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31707+#endif
31708+
31709+ return (unsigned long)&x->insn + x->insn + reloc;
31710 }
31711 static inline unsigned long
31712 ex_fixup_addr(const struct exception_table_entry *x)
31713 {
31714- return (unsigned long)&x->fixup + x->fixup;
31715+ unsigned long reloc = 0;
31716+
31717+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31718+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31719+#endif
31720+
31721+ return (unsigned long)&x->fixup + x->fixup + reloc;
31722 }
31723
31724 int fixup_exception(struct pt_regs *regs)
31725@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31726 unsigned long new_ip;
31727
31728 #ifdef CONFIG_PNPBIOS
31729- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31730+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31731 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31732 extern u32 pnp_bios_is_utter_crap;
31733 pnp_bios_is_utter_crap = 1;
31734@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31735 i += 4;
31736 p->fixup -= i;
31737 i += 4;
31738+
31739+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31740+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31741+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31742+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31743+#endif
31744+
31745 }
31746 }
31747
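
Exception-table entries hold 32-bit self-relative offsets, so an absolute address is recovered as &field + *field. Under i386 KERNEXEC the kernel executes at a different virtual address than it was linked for, hence the relocation delta added in both accessors above (and compensated in sort_extable(), with the BUILD_BUG_ON insisting the table was already sorted at build time). The decode, written out as a sketch mirroring ex_insn_addr()/ex_fixup_addr():

/* reloc is the i386 KERNEXEC delta and 0 everywhere else. */
static inline unsigned long ex_addr_sketch(const int *field, long reloc)
{
	return (unsigned long)field + *field + reloc;
}
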
31748diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31749index ede025f..380466b 100644
31750--- a/arch/x86/mm/fault.c
31751+++ b/arch/x86/mm/fault.c
31752@@ -13,12 +13,19 @@
31753 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31754 #include <linux/prefetch.h> /* prefetchw */
31755 #include <linux/context_tracking.h> /* exception_enter(), ... */
31756+#include <linux/unistd.h>
31757+#include <linux/compiler.h>
31758
31759 #include <asm/traps.h> /* dotraplinkage, ... */
31760 #include <asm/pgalloc.h> /* pgd_*(), ... */
31761 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31762 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31763 #include <asm/vsyscall.h> /* emulate_vsyscall */
31764+#include <asm/tlbflush.h>
31765+
31766+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31767+#include <asm/stacktrace.h>
31768+#endif
31769
31770 #define CREATE_TRACE_POINTS
31771 #include <asm/trace/exceptions.h>
31772@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31773 int ret = 0;
31774
31775 /* kprobe_running() needs smp_processor_id() */
31776- if (kprobes_built_in() && !user_mode_vm(regs)) {
31777+ if (kprobes_built_in() && !user_mode(regs)) {
31778 preempt_disable();
31779 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31780 ret = 1;
31781@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31782 return !instr_lo || (instr_lo>>1) == 1;
31783 case 0x00:
31784 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31785- if (probe_kernel_address(instr, opcode))
31786+ if (user_mode(regs)) {
31787+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31788+ return 0;
31789+ } else if (probe_kernel_address(instr, opcode))
31790 return 0;
31791
31792 *prefetch = (instr_lo == 0xF) &&
31793@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31794 while (instr < max_instr) {
31795 unsigned char opcode;
31796
31797- if (probe_kernel_address(instr, opcode))
31798+ if (user_mode(regs)) {
31799+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31800+ break;
31801+ } else if (probe_kernel_address(instr, opcode))
31802 break;
31803
31804 instr++;
31805@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31806 force_sig_info(si_signo, &info, tsk);
31807 }
31808
31809+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31810+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31811+#endif
31812+
31813+#ifdef CONFIG_PAX_EMUTRAMP
31814+static int pax_handle_fetch_fault(struct pt_regs *regs);
31815+#endif
31816+
31817+#ifdef CONFIG_PAX_PAGEEXEC
31818+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31819+{
31820+ pgd_t *pgd;
31821+ pud_t *pud;
31822+ pmd_t *pmd;
31823+
31824+ pgd = pgd_offset(mm, address);
31825+ if (!pgd_present(*pgd))
31826+ return NULL;
31827+ pud = pud_offset(pgd, address);
31828+ if (!pud_present(*pud))
31829+ return NULL;
31830+ pmd = pmd_offset(pud, address);
31831+ if (!pmd_present(*pmd))
31832+ return NULL;
31833+ return pmd;
31834+}
31835+#endif
31836+
31837 DEFINE_SPINLOCK(pgd_lock);
31838 LIST_HEAD(pgd_list);
31839
31840@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31841 for (address = VMALLOC_START & PMD_MASK;
31842 address >= TASK_SIZE && address < FIXADDR_TOP;
31843 address += PMD_SIZE) {
31844+
31845+#ifdef CONFIG_PAX_PER_CPU_PGD
31846+ unsigned long cpu;
31847+#else
31848 struct page *page;
31849+#endif
31850
31851 spin_lock(&pgd_lock);
31852+
31853+#ifdef CONFIG_PAX_PER_CPU_PGD
31854+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31855+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31856+ pmd_t *ret;
31857+
31858+ ret = vmalloc_sync_one(pgd, address);
31859+ if (!ret)
31860+ break;
31861+ pgd = get_cpu_pgd(cpu, kernel);
31862+#else
31863 list_for_each_entry(page, &pgd_list, lru) {
31864+ pgd_t *pgd;
31865 spinlock_t *pgt_lock;
31866 pmd_t *ret;
31867
31868@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31869 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31870
31871 spin_lock(pgt_lock);
31872- ret = vmalloc_sync_one(page_address(page), address);
31873+ pgd = page_address(page);
31874+#endif
31875+
31876+ ret = vmalloc_sync_one(pgd, address);
31877+
31878+#ifndef CONFIG_PAX_PER_CPU_PGD
31879 spin_unlock(pgt_lock);
31880+#endif
31881
31882 if (!ret)
31883 break;
31884@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31885 * an interrupt in the middle of a task switch..
31886 */
31887 pgd_paddr = read_cr3();
31888+
31889+#ifdef CONFIG_PAX_PER_CPU_PGD
31890+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31891+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31892+#endif
31893+
31894 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31895 if (!pmd_k)
31896 return -1;
31897@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31898 * happen within a race in page table update. In the later
31899 * case just flush:
31900 */
31901- pgd = pgd_offset(current->active_mm, address);
31902+
31903 pgd_ref = pgd_offset_k(address);
31904 if (pgd_none(*pgd_ref))
31905 return -1;
31906
31907+#ifdef CONFIG_PAX_PER_CPU_PGD
31908+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31909+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31910+ if (pgd_none(*pgd)) {
31911+ set_pgd(pgd, *pgd_ref);
31912+ arch_flush_lazy_mmu_mode();
31913+ } else {
31914+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31915+ }
31916+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31917+#else
31918+ pgd = pgd_offset(current->active_mm, address);
31919+#endif
31920+
31921 if (pgd_none(*pgd)) {
31922 set_pgd(pgd, *pgd_ref);
31923 arch_flush_lazy_mmu_mode();
31924@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31925 static int is_errata100(struct pt_regs *regs, unsigned long address)
31926 {
31927 #ifdef CONFIG_X86_64
31928- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31929+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31930 return 1;
31931 #endif
31932 return 0;
31933@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31934 }
31935
31936 static const char nx_warning[] = KERN_CRIT
31937-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31938+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31939 static const char smep_warning[] = KERN_CRIT
31940-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31941+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31942
31943 static void
31944 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31945@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31946 if (!oops_may_print())
31947 return;
31948
31949- if (error_code & PF_INSTR) {
31950+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31951 unsigned int level;
31952 pgd_t *pgd;
31953 pte_t *pte;
31954@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31955 pte = lookup_address_in_pgd(pgd, address, &level);
31956
31957 if (pte && pte_present(*pte) && !pte_exec(*pte))
31958- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31959+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31960 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31961 (pgd_flags(*pgd) & _PAGE_USER) &&
31962 (__read_cr4() & X86_CR4_SMEP))
31963- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31964+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31965 }
31966
31967+#ifdef CONFIG_PAX_KERNEXEC
31968+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31969+ if (current->signal->curr_ip)
31970+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31971+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31972+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31973+ else
31974+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31975+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31976+ }
31977+#endif
31978+
31979 printk(KERN_ALERT "BUG: unable to handle kernel ");
31980 if (address < PAGE_SIZE)
31981 printk(KERN_CONT "NULL pointer dereference");
31982@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31983 return;
31984 }
31985 #endif
31986+
31987+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31988+ if (pax_is_fetch_fault(regs, error_code, address)) {
31989+
31990+#ifdef CONFIG_PAX_EMUTRAMP
31991+ switch (pax_handle_fetch_fault(regs)) {
31992+ case 2:
31993+ return;
31994+ }
31995+#endif
31996+
31997+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31998+ do_group_exit(SIGKILL);
31999+ }
32000+#endif
32001+
32002 /* Kernel addresses are always protection faults: */
32003 if (address >= TASK_SIZE)
32004 error_code |= PF_PROT;
32005@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32006 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32007 printk(KERN_ERR
32008 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32009- tsk->comm, tsk->pid, address);
32010+ tsk->comm, task_pid_nr(tsk), address);
32011 code = BUS_MCEERR_AR;
32012 }
32013 #endif
32014@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32015 return 1;
32016 }
32017
32018+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32019+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32020+{
32021+ pte_t *pte;
32022+ pmd_t *pmd;
32023+ spinlock_t *ptl;
32024+ unsigned char pte_mask;
32025+
32026+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32027+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32028+ return 0;
32029+
32030+ /* PaX: it's our fault, let's handle it if we can */
32031+
32032+ /* PaX: take a look at read faults before acquiring any locks */
32033+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32034+ /* instruction fetch attempt from a protected page in user mode */
32035+ up_read(&mm->mmap_sem);
32036+
32037+#ifdef CONFIG_PAX_EMUTRAMP
32038+ switch (pax_handle_fetch_fault(regs)) {
32039+ case 2:
32040+ return 1;
32041+ }
32042+#endif
32043+
32044+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32045+ do_group_exit(SIGKILL);
32046+ }
32047+
32048+ pmd = pax_get_pmd(mm, address);
32049+ if (unlikely(!pmd))
32050+ return 0;
32051+
32052+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32053+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32054+ pte_unmap_unlock(pte, ptl);
32055+ return 0;
32056+ }
32057+
32058+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32059+ /* write attempt to a protected page in user mode */
32060+ pte_unmap_unlock(pte, ptl);
32061+ return 0;
32062+ }
32063+
32064+#ifdef CONFIG_SMP
32065+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32066+#else
32067+ if (likely(address > get_limit(regs->cs)))
32068+#endif
32069+ {
32070+ set_pte(pte, pte_mkread(*pte));
32071+ __flush_tlb_one(address);
32072+ pte_unmap_unlock(pte, ptl);
32073+ up_read(&mm->mmap_sem);
32074+ return 1;
32075+ }
32076+
32077+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32078+
32079+ /*
32080+ * PaX: fill DTLB with user rights and retry
32081+ */
32082+ __asm__ __volatile__ (
32083+ "orb %2,(%1)\n"
32084+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32085+/*
32086+ * PaX: let this conditionally compiled 'invlpg' remind us of the behaviour
32087+ * of Intel's (and AMD's) TLBs: they do not cache PTEs that would raise *any*
32088+ * page fault when examined during a TLB load attempt. This is true not only
32089+ * for PTEs holding a non-present entry but also for present entries that
32090+ * will raise a page fault (such as those set up by PaX, or the copy-on-write
32091+ * mechanism). In effect it means that we do *not* need to flush the TLBs
32092+ * for our target pages since their PTEs are simply not in the TLBs at all.
32093+ *
32094+ * The best thing about omitting it is that we gain around 15-20% speed in
32095+ * the fast path of the page fault handler and can get rid of tracing since
32096+ * we can no longer flush unintended entries.
32097+ */
32098+ "invlpg (%0)\n"
32099+#endif
32100+ __copyuser_seg"testb $0,(%0)\n"
32101+ "xorb %3,(%1)\n"
32102+ :
32103+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32104+ : "memory", "cc");
32105+ pte_unmap_unlock(pte, ptl);
32106+ up_read(&mm->mmap_sem);
32107+ return 1;
32108+}
32109+#endif
32110+
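The function added above implements the classic 32-bit PAGEEXEC trick for CPUs without hardware NX: user pages are kept supervisor-only, and on a data fault the handler briefly ORs user rights into the PTE, touches the address through the user segment so only the DTLB caches the translation, then XORs the rights back out; instruction fetches go through the ITLB and keep faulting. The pte_mask arithmetic is worth spelling out. A small standalone sketch (constants copied from the x86 PTE layout) shows that the shift turns PF_WRITE into _PAGE_DIRTY, so the primed walk never needs to set A/D bits itself:

#include <stdio.h>

/* x86 PTE bits and page-fault error-code bits as used in the hunk above */
#define _PAGE_BIT_DIRTY 6
#define _PAGE_USER      0x004UL
#define _PAGE_ACCESSED  0x020UL
#define _PAGE_DIRTY     0x040UL
#define PF_WRITE        0x2UL

int main(void)
{
    unsigned long error_code;

    for (error_code = 0; error_code <= PF_WRITE; error_code += PF_WRITE) {
        /* PF_WRITE is bit 1; shifting it by _PAGE_BIT_DIRTY-1 lands
         * it exactly on the dirty bit (bit 6) for write faults. */
        unsigned long pte_mask = _PAGE_ACCESSED | _PAGE_USER |
                ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));
        printf("%s fault: pte_mask = %#05lx (dirty %s)\n",
               error_code ? "write" : "read ", pte_mask,
               (pte_mask & _PAGE_DIRTY) ? "set" : "clear");
    }
    return 0;
}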
32111 /*
32112 * Handle a spurious fault caused by a stale TLB entry.
32113 *
32114@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32115 static inline int
32116 access_error(unsigned long error_code, struct vm_area_struct *vma)
32117 {
32118+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32119+ return 1;
32120+
32121 if (error_code & PF_WRITE) {
32122 /* write, present and write, not present: */
32123 if (unlikely(!(vma->vm_flags & VM_WRITE)))
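The three lines added to access_error() above make an instruction fetch (PF_INSTR) into a non-executable VMA an access error whenever the CPU supports NX. A self-contained sketch of the predicate, with the relevant bit values written out (the helper name is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PF_INSTR 0x10UL   /* fault was an instruction fetch */
#define VM_EXEC  0x4UL    /* VMA permits execution */

/* Mirrors the added check: an instruction fetch into a VMA that was
 * not mapped executable is always an access error when the CPU can
 * report fetch faults (i.e. NX is available). */
static bool nx_access_error(unsigned long error_code,
                            unsigned long vm_flags, bool have_nx)
{
    return have_nx && (error_code & PF_INSTR) && !(vm_flags & VM_EXEC);
}

int main(void)
{
    printf("%d\n", nx_access_error(PF_INSTR, 0, true));       /* 1 */
    printf("%d\n", nx_access_error(PF_INSTR, VM_EXEC, true)); /* 0 */
    return 0;
}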
32124@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32125 if (error_code & PF_USER)
32126 return false;
32127
32128- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32129+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32130 return false;
32131
32132 return true;
32133@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32134 tsk = current;
32135 mm = tsk->mm;
32136
32137+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32138+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32139+ if (!search_exception_tables(regs->ip)) {
32140+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32141+ bad_area_nosemaphore(regs, error_code, address);
32142+ return;
32143+ }
32144+ if (address < pax_user_shadow_base) {
32145+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32146+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32147+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32148+ } else
32149+ address -= pax_user_shadow_base;
32150+ }
32151+#endif
32152+
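The UDEREF block above distinguishes three cases for a kernel-mode fault at a low address: no exception-table entry for the faulting IP (a bug, reported), a fault below the shadow base (a raw user dereference, reported with a trace), and a fault inside the shadow area (translated back by subtracting the base). A toy translation function under an assumed base value; the real pax_user_shadow_base is set up at boot and is not this constant:

#include <stdio.h>

/* Hypothetical base; the real pax_user_shadow_base is chosen at boot. */
#define USER_SHADOW_BASE 0x10000UL

/* Mirrors the UDEREF fixup above: a fault inside the shadow copy of
 * userland is translated back to the real user address; a fault below
 * the base is a raw user dereference and gets reported instead. */
static long unshadow(unsigned long addr)
{
    if (addr >= 2 * USER_SHADOW_BASE)
        return -1;                   /* not a shadow address at all */
    if (addr < USER_SHADOW_BASE)
        return -2;                   /* raw user pointer: report    */
    return (long)(addr - USER_SHADOW_BASE);
}

int main(void)
{
    printf("%ld\n", unshadow(USER_SHADOW_BASE + 291)); /* 291 */
    printf("%ld\n", unshadow(0x42));                   /* -2  */
    return 0;
}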
32153 /*
32154 * Detect and handle instructions that would cause a page fault for
32155 * both a tracked kernel page and a userspace page.
32156@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32157 * User-mode registers count as a user access even for any
32158 * potential system fault or CPU buglet:
32159 */
32160- if (user_mode_vm(regs)) {
32161+ if (user_mode(regs)) {
32162 local_irq_enable();
32163 error_code |= PF_USER;
32164 flags |= FAULT_FLAG_USER;
32165@@ -1187,6 +1411,11 @@ retry:
32166 might_sleep();
32167 }
32168
32169+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32170+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32171+ return;
32172+#endif
32173+
32174 vma = find_vma(mm, address);
32175 if (unlikely(!vma)) {
32176 bad_area(regs, error_code, address);
32177@@ -1198,18 +1427,24 @@ retry:
32178 bad_area(regs, error_code, address);
32179 return;
32180 }
32181- if (error_code & PF_USER) {
32182- /*
32183- * Accessing the stack below %sp is always a bug.
32184- * The large cushion allows instructions like enter
32185- * and pusha to work. ("enter $65535, $31" pushes
32186- * 32 pointers and then decrements %sp by 65535.)
32187- */
32188- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32189- bad_area(regs, error_code, address);
32190- return;
32191- }
32192+ /*
32193+ * Accessing the stack below %sp is always a bug.
32194+ * The large cushion allows instructions like enter
32195+ * and pusha to work. ("enter $65535, $31" pushes
32196+ * 32 pointers and then decrements %sp by 65535.)
32197+ */
32198+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32199+ bad_area(regs, error_code, address);
32200+ return;
32201 }
32202+
32203+#ifdef CONFIG_PAX_SEGMEXEC
32204+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32205+ bad_area(regs, error_code, address);
32206+ return;
32207+ }
32208+#endif
32209+
32210 if (unlikely(expand_stack(vma, address))) {
32211 bad_area(regs, error_code, address);
32212 return;
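The rewritten check above applies the below-%sp cushion to every fault, reading the stack pointer from task_pt_regs(tsk) rather than only handling user-mode faults, and under SEGMEXEC additionally rejects addresses whose code-segment mirror would fall outside the VMA. The cushion arithmetic as a tiny standalone test:

#include <stdio.h>

/* The cushion from the hunk above: "enter $65535, $31" pushes 32
 * longs and then drops %sp by 65535, so only a fault more than
 * 65536 + 32*sizeof(long) below the stack pointer is a real bug. */
static int below_stack_cushion(unsigned long address, unsigned long sp)
{
    return address + 65536 + 32 * sizeof(unsigned long) < sp;
}

int main(void)
{
    unsigned long sp = 0x7ffffffff000UL;
    printf("%d\n", below_stack_cushion(sp - 4096, sp));     /* 0: OK  */
    printf("%d\n", below_stack_cushion(sp - 0x20000, sp));  /* 1: bug */
    return 0;
}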
32213@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32214 }
32215 NOKPROBE_SYMBOL(trace_do_page_fault);
32216 #endif /* CONFIG_TRACING */
32217+
32218+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32219+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32220+{
32221+ struct mm_struct *mm = current->mm;
32222+ unsigned long ip = regs->ip;
32223+
32224+ if (v8086_mode(regs))
32225+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32226+
32227+#ifdef CONFIG_PAX_PAGEEXEC
32228+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32229+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32230+ return true;
32231+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32232+ return true;
32233+ return false;
32234+ }
32235+#endif
32236+
32237+#ifdef CONFIG_PAX_SEGMEXEC
32238+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32239+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32240+ return true;
32241+ return false;
32242+ }
32243+#endif
32244+
32245+ return false;
32246+}
32247+#endif
32248+
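pax_is_fetch_fault() above encodes one rule per mode: with hardware NX a fetch fault carries PF_INSTR; without it, PAGEEXEC recognises a read fault at the instruction pointer; SEGMEXEC recognises a fault at ip + SEGMEXEC_TASK_SIZE, because code executes from a mirror of the data segment shifted up by that constant. A sketch of the SEGMEXEC case with an illustrative task-size value (the real constant lives elsewhere in the patch):

#include <stdbool.h>
#include <stdio.h>

#define PF_PROT  0x1UL
#define PF_WRITE 0x2UL
/* Illustrative value only; the patch defines the real one. */
#define SEGMEXEC_TASK_SIZE 0x60000000UL

/* Under SEGMEXEC an instruction fetch from user address A shows up
 * as a non-present, non-write fault at A + SEGMEXEC_TASK_SIZE. */
static bool segmexec_fetch_fault(unsigned long error_code,
                                 unsigned long ip, unsigned long address)
{
    return !(error_code & (PF_PROT | PF_WRITE)) &&
           ip + SEGMEXEC_TASK_SIZE == address;
}

int main(void)
{
    unsigned long ip = 0x08048000UL;
    printf("%d\n", segmexec_fetch_fault(0, ip, ip + SEGMEXEC_TASK_SIZE)); /* 1 */
    printf("%d\n", segmexec_fetch_fault(PF_WRITE, ip, ip));               /* 0 */
    return 0;
}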
32249+#ifdef CONFIG_PAX_EMUTRAMP
32250+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32251+{
32252+ int err;
32253+
32254+ do { /* PaX: libffi trampoline emulation */
32255+ unsigned char mov, jmp;
32256+ unsigned int addr1, addr2;
32257+
32258+#ifdef CONFIG_X86_64
32259+ if ((regs->ip + 9) >> 32)
32260+ break;
32261+#endif
32262+
32263+ err = get_user(mov, (unsigned char __user *)regs->ip);
32264+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32265+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32266+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32267+
32268+ if (err)
32269+ break;
32270+
32271+ if (mov == 0xB8 && jmp == 0xE9) {
32272+ regs->ax = addr1;
32273+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32274+ return 2;
32275+ }
32276+ } while (0);
32277+
32278+ do { /* PaX: gcc trampoline emulation #1 */
32279+ unsigned char mov1, mov2;
32280+ unsigned short jmp;
32281+ unsigned int addr1, addr2;
32282+
32283+#ifdef CONFIG_X86_64
32284+ if ((regs->ip + 11) >> 32)
32285+ break;
32286+#endif
32287+
32288+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32289+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32290+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32291+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32292+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32293+
32294+ if (err)
32295+ break;
32296+
32297+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32298+ regs->cx = addr1;
32299+ regs->ax = addr2;
32300+ regs->ip = addr2;
32301+ return 2;
32302+ }
32303+ } while (0);
32304+
32305+ do { /* PaX: gcc trampoline emulation #2 */
32306+ unsigned char mov, jmp;
32307+ unsigned int addr1, addr2;
32308+
32309+#ifdef CONFIG_X86_64
32310+ if ((regs->ip + 9) >> 32)
32311+ break;
32312+#endif
32313+
32314+ err = get_user(mov, (unsigned char __user *)regs->ip);
32315+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32316+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32317+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32318+
32319+ if (err)
32320+ break;
32321+
32322+ if (mov == 0xB9 && jmp == 0xE9) {
32323+ regs->cx = addr1;
32324+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32325+ return 2;
32326+ }
32327+ } while (0);
32328+
32329+ return 1; /* PaX in action */
32330+}
32331+
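pax_handle_fetch_fault_32() above pattern-matches a handful of fixed trampoline encodings: the libffi form is mov $imm32,%eax (0xB8) followed by jmp rel32 (0xE9), and emulation sets %eax and redirects %eip to ip + rel + 10, since rel32 is relative to the end of the 10-byte sequence. A minimal standalone decoder for that one form (function name and test values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode the 10-byte i386 libffi trampoline matched above:
 *   b8 imm32   mov $imm32, %eax
 *   e9 rel32   jmp rel32
 * Returns 1 and fills *ax/*ip on a match, 0 otherwise. */
static int match_libffi_tramp(const uint8_t *code, uint32_t pc,
                              uint32_t *ax, uint32_t *ip)
{
    uint32_t imm, rel;

    if (code[0] != 0xB8 || code[5] != 0xE9)
        return 0;
    memcpy(&imm, code + 1, 4);   /* little-endian immediate */
    memcpy(&rel, code + 6, 4);
    *ax = imm;
    *ip = pc + rel + 10;         /* rel32 is relative to the next insn */
    return 1;
}

int main(void)
{
    /* mov $0xdeadbeef,%eax ; jmp to pc+0x30 (rel32 = 0x26) */
    uint8_t tramp[10] = { 0xB8, 0xEF, 0xBE, 0xAD, 0xDE,
                          0xE9, 0x26, 0x00, 0x00, 0x00 };
    uint32_t ax, ip;

    if (match_libffi_tramp(tramp, 0x1000, &ax, &ip))
        printf("eax=%#x new ip=%#x\n", ax, ip);  /* eax=0xdeadbeef ip=0x1030 */
    return 0;
}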
32332+#ifdef CONFIG_X86_64
32333+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32334+{
32335+ int err;
32336+
32337+ do { /* PaX: libffi trampoline emulation */
32338+ unsigned short mov1, mov2, jmp1;
32339+ unsigned char stcclc, jmp2;
32340+ unsigned long addr1, addr2;
32341+
32342+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32343+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32344+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32345+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32346+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32347+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32348+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32349+
32350+ if (err)
32351+ break;
32352+
32353+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32354+ regs->r11 = addr1;
32355+ regs->r10 = addr2;
32356+ if (stcclc == 0xF8)
32357+ regs->flags &= ~X86_EFLAGS_CF;
32358+ else
32359+ regs->flags |= X86_EFLAGS_CF;
32360+ regs->ip = addr1;
32361+ return 2;
32362+ }
32363+ } while (0);
32364+
32365+ do { /* PaX: gcc trampoline emulation #1 */
32366+ unsigned short mov1, mov2, jmp1;
32367+ unsigned char jmp2;
32368+ unsigned int addr1;
32369+ unsigned long addr2;
32370+
32371+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32372+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32373+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32374+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32375+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32376+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32377+
32378+ if (err)
32379+ break;
32380+
32381+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32382+ regs->r11 = addr1;
32383+ regs->r10 = addr2;
32384+ regs->ip = addr1;
32385+ return 2;
32386+ }
32387+ } while (0);
32388+
32389+ do { /* PaX: gcc trampoline emulation #2 */
32390+ unsigned short mov1, mov2, jmp1;
32391+ unsigned char jmp2;
32392+ unsigned long addr1, addr2;
32393+
32394+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32395+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32396+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32397+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32398+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32399+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32400+
32401+ if (err)
32402+ break;
32403+
32404+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32405+ regs->r11 = addr1;
32406+ regs->r10 = addr2;
32407+ regs->ip = addr1;
32408+ return 2;
32409+ }
32410+ } while (0);
32411+
32412+ return 1; /* PaX in action */
32413+}
32414+#endif
32415+
32416+/*
32417+ * PaX: decide what to do with offenders (regs->ip = fault address)
32418+ *
32419+ * returns 1 when the task should be killed
32420+ *         2 when a gcc trampoline was detected and emulated
32421+ */
32422+static int pax_handle_fetch_fault(struct pt_regs *regs)
32423+{
32424+ if (v8086_mode(regs))
32425+ return 1;
32426+
32427+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32428+ return 1;
32429+
32430+#ifdef CONFIG_X86_32
32431+ return pax_handle_fetch_fault_32(regs);
32432+#else
32433+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32434+ return pax_handle_fetch_fault_32(regs);
32435+ else
32436+ return pax_handle_fetch_fault_64(regs);
32437+#endif
32438+}
32439+#endif
32440+
32441+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32442+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32443+{
32444+ long i;
32445+
32446+ printk(KERN_ERR "PAX: bytes at PC: ");
32447+ for (i = 0; i < 20; i++) {
32448+ unsigned char c;
32449+ if (get_user(c, (unsigned char __force_user *)pc+i))
32450+ printk(KERN_CONT "?? ");
32451+ else
32452+ printk(KERN_CONT "%02x ", c);
32453+ }
32454+ printk("\n");
32455+
32456+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32457+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32458+ unsigned long c;
32459+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32460+#ifdef CONFIG_X86_32
32461+ printk(KERN_CONT "???????? ");
32462+#else
32463+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32464+ printk(KERN_CONT "???????? ???????? ");
32465+ else
32466+ printk(KERN_CONT "???????????????? ");
32467+#endif
32468+ } else {
32469+#ifdef CONFIG_X86_64
32470+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32471+ printk(KERN_CONT "%08x ", (unsigned int)c);
32472+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32473+ } else
32474+#endif
32475+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32476+ }
32477+ }
32478+ printk("\n");
32479+}
32480+#endif
32481+
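pax_report_insns() above dumps 20 bytes at the faulting PC and roughly 80 bytes around SP, substituting "??" for anything get_user() cannot read. A trimmed userspace analogue of the PC dump, reading the process's own code (always readable here, so the "??" branch is omitted):

#include <stdio.h>

/* Same layout as pax_report_insns() above: 20 bytes at the faulting
 * PC, printed as hex pairs. */
static void report_insns(const unsigned char *pc)
{
    int i;

    printf("bytes at PC: ");
    for (i = 0; i < 20; i++)
        printf("%02x ", pc[i]);
    printf("\n");
}

int main(void)
{
    report_insns((const unsigned char *)(void *)main);
    return 0;
}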
32482+/**
32483+ * probe_kernel_write(): safely attempt to write to a location
32484+ * @dst: address to write to
32485+ * @src: pointer to the data that shall be written
32486+ * @size: size of the data chunk
32487+ *
32488+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32489+ * happens, handle that and return -EFAULT.
32490+ */
32491+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32492+{
32493+ long ret;
32494+ mm_segment_t old_fs = get_fs();
32495+
32496+ set_fs(KERNEL_DS);
32497+ pagefault_disable();
32498+ pax_open_kernel();
32499+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32500+ pax_close_kernel();
32501+ pagefault_enable();
32502+ set_fs(old_fs);
32503+
32504+ return ret ? -EFAULT : 0;
32505+}
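probe_kernel_write() above wraps an in-atomic copy with pagefault_disable() and a temporary KERNEL_DS segment, relying on the exception tables to turn a fault into -EFAULT; pax_open_kernel()/pax_close_kernel() lift KERNEXEC write protection around the store. The same fault-tolerant-write idea can be sketched in userspace with sigsetjmp/siglongjmp; this is an illustration only (longjmp out of a SIGSEGV handler is not something production code should do):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

/* Userspace analogue of probe_kernel_write(): attempt a store and
 * return -1 instead of crashing if the destination faults. The
 * kernel version does this with pagefault_disable() and the
 * exception tables; here we improvise with SIGSEGV + siglongjmp. */
static sigjmp_buf fault_jmp;

static void on_segv(int sig) { siglongjmp(fault_jmp, 1); }

static long probe_write(void *dst, const void *src, size_t size)
{
    signal(SIGSEGV, on_segv);
    if (sigsetjmp(fault_jmp, 1))
        return -1;              /* faulted: -EFAULT in the kernel */
    memcpy(dst, src, size);
    return 0;
}

int main(void)
{
    char buf[16];

    printf("%ld\n", probe_write(buf, "hi", 3));        /*  0 */
    printf("%ld\n", probe_write((void *)1, "hi", 3));  /* -1 */
    return 0;
}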
32506diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32507index 81bf3d2..7ef25c2 100644
32508--- a/arch/x86/mm/gup.c
32509+++ b/arch/x86/mm/gup.c
32510@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32511 addr = start;
32512 len = (unsigned long) nr_pages << PAGE_SHIFT;
32513 end = start + len;
32514- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32515+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32516 (void __user *)start, len)))
32517 return 0;
32518
32519@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32520 goto slow_irqon;
32521 #endif
32522
32523+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32524+ (void __user *)start, len)))
32525+ return 0;
32526+
32527 /*
32528 * XXX: batch / limit 'nr', to avoid large irq off latency
32529 * needs some instrumenting to determine the common sizes used by
32530diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32531index 4500142..53a363c 100644
32532--- a/arch/x86/mm/highmem_32.c
32533+++ b/arch/x86/mm/highmem_32.c
32534@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32535 idx = type + KM_TYPE_NR*smp_processor_id();
32536 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32537 BUG_ON(!pte_none(*(kmap_pte-idx)));
32538+
32539+ pax_open_kernel();
32540 set_pte(kmap_pte-idx, mk_pte(page, prot));
32541+ pax_close_kernel();
32542+
32543 arch_flush_lazy_mmu_mode();
32544
32545 return (void *)vaddr;
32546diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32547index 42982b2..7168fc3 100644
32548--- a/arch/x86/mm/hugetlbpage.c
32549+++ b/arch/x86/mm/hugetlbpage.c
32550@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32551 #ifdef CONFIG_HUGETLB_PAGE
32552 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32553 unsigned long addr, unsigned long len,
32554- unsigned long pgoff, unsigned long flags)
32555+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32556 {
32557 struct hstate *h = hstate_file(file);
32558 struct vm_unmapped_area_info info;
32559-
32560+
32561 info.flags = 0;
32562 info.length = len;
32563 info.low_limit = current->mm->mmap_legacy_base;
32564 info.high_limit = TASK_SIZE;
32565 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32566 info.align_offset = 0;
32567+ info.threadstack_offset = offset;
32568 return vm_unmapped_area(&info);
32569 }
32570
32571 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32572 unsigned long addr0, unsigned long len,
32573- unsigned long pgoff, unsigned long flags)
32574+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32575 {
32576 struct hstate *h = hstate_file(file);
32577 struct vm_unmapped_area_info info;
32578@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32579 info.high_limit = current->mm->mmap_base;
32580 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32581 info.align_offset = 0;
32582+ info.threadstack_offset = offset;
32583 addr = vm_unmapped_area(&info);
32584
32585 /*
32586@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32587 VM_BUG_ON(addr != -ENOMEM);
32588 info.flags = 0;
32589 info.low_limit = TASK_UNMAPPED_BASE;
32590+
32591+#ifdef CONFIG_PAX_RANDMMAP
32592+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32593+ info.low_limit += current->mm->delta_mmap;
32594+#endif
32595+
32596 info.high_limit = TASK_SIZE;
32597 addr = vm_unmapped_area(&info);
32598 }
32599@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32600 struct hstate *h = hstate_file(file);
32601 struct mm_struct *mm = current->mm;
32602 struct vm_area_struct *vma;
32603+ unsigned long pax_task_size = TASK_SIZE;
32604+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32605
32606 if (len & ~huge_page_mask(h))
32607 return -EINVAL;
32608- if (len > TASK_SIZE)
32609+
32610+#ifdef CONFIG_PAX_SEGMEXEC
32611+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32612+ pax_task_size = SEGMEXEC_TASK_SIZE;
32613+#endif
32614+
32615+ pax_task_size -= PAGE_SIZE;
32616+
32617+ if (len > pax_task_size)
32618 return -ENOMEM;
32619
32620 if (flags & MAP_FIXED) {
32621@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32622 return addr;
32623 }
32624
32625+#ifdef CONFIG_PAX_RANDMMAP
32626+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32627+#endif
32628+
32629 if (addr) {
32630 addr = ALIGN(addr, huge_page_size(h));
32631 vma = find_vma(mm, addr);
32632- if (TASK_SIZE - len >= addr &&
32633- (!vma || addr + len <= vma->vm_start))
32634+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32635 return addr;
32636 }
32637 if (mm->get_unmapped_area == arch_get_unmapped_area)
32638 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32639- pgoff, flags);
32640+ pgoff, flags, offset);
32641 else
32642 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32643- pgoff, flags);
32644+ pgoff, flags, offset);
32645 }
32646 #endif /* CONFIG_HUGETLB_PAGE */
32647
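The hugetlb changes above shrink the usable address space by one guard page (and to the lower half under SEGMEXEC), thread gr_rand_threadstack_offset() through to the gap checks, and shift the bottom-up search floor by delta_mmap under RANDMMAP. The resulting numbers, with illustrative values for the i386 split and the random delta (TASK_UNMAPPED_BASE assumed at TASK_SIZE/3; the real delta is per-process random):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE           0xC0000000UL
#define SEGMEXEC_TASK_SIZE  (TASK_SIZE / 2)
#define DELTA_MMAP          0x01234000UL   /* illustrative only */

int main(void)
{
    /* Under SEGMEXEC only the lower half is usable, minus a guard page. */
    unsigned long pax_task_size = SEGMEXEC_TASK_SIZE - PAGE_SIZE;
    /* RANDMMAP raises the bottom-up search floor by a random delta. */
    unsigned long low_limit = TASK_SIZE / 3 + DELTA_MMAP;

    printf("usable task size: %#lx\n", pax_task_size);
    printf("search floor:     %#lx\n", low_limit);
    return 0;
}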
32648diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32649index a110efc..a31a18f 100644
32650--- a/arch/x86/mm/init.c
32651+++ b/arch/x86/mm/init.c
32652@@ -4,6 +4,7 @@
32653 #include <linux/swap.h>
32654 #include <linux/memblock.h>
32655 #include <linux/bootmem.h> /* for max_low_pfn */
32656+#include <linux/tboot.h>
32657
32658 #include <asm/cacheflush.h>
32659 #include <asm/e820.h>
32660@@ -17,6 +18,8 @@
32661 #include <asm/proto.h>
32662 #include <asm/dma.h> /* for MAX_DMA_PFN */
32663 #include <asm/microcode.h>
32664+#include <asm/desc.h>
32665+#include <asm/bios_ebda.h>
32666
32667 /*
32668 * We need to define the tracepoints somewhere, and tlb.c
32669@@ -620,7 +623,18 @@ void __init init_mem_mapping(void)
32670 early_ioremap_page_table_range_init();
32671 #endif
32672
32673+#ifdef CONFIG_PAX_PER_CPU_PGD
32674+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32675+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32676+ KERNEL_PGD_PTRS);
32677+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32678+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32679+ KERNEL_PGD_PTRS);
32680+ load_cr3(get_cpu_pgd(0, kernel));
32681+#else
32682 load_cr3(swapper_pg_dir);
32683+#endif
32684+
32685 __flush_tlb_all();
32686
32687 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32688@@ -636,10 +650,40 @@ void __init init_mem_mapping(void)
32689 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32690 * mmio resources as well as potential bios/acpi data regions.
32691 */
32692+
32693+#ifdef CONFIG_GRKERNSEC_KMEM
32694+static unsigned int ebda_start __read_only;
32695+static unsigned int ebda_end __read_only;
32696+#endif
32697+
32698 int devmem_is_allowed(unsigned long pagenr)
32699 {
32700- if (pagenr < 256)
32701+#ifdef CONFIG_GRKERNSEC_KMEM
32702+ /* allow BDA */
32703+ if (!pagenr)
32704 return 1;
32705+ /* allow EBDA */
32706+ if (pagenr >= ebda_start && pagenr < ebda_end)
32707+ return 1;
32708+ /* if tboot is in use, allow access to its hardcoded serial log range */
32709+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32710+ return 1;
32711+#else
32712+ if (!pagenr)
32713+ return 1;
32714+#ifdef CONFIG_VM86
32715+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32716+ return 1;
32717+#endif
32718+#endif
32719+
32720+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32721+ return 1;
32722+#ifdef CONFIG_GRKERNSEC_KMEM
32723+ /* throw out everything else below 1MB */
32724+ if (pagenr <= 256)
32725+ return 0;
32726+#endif
32727 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32728 return 0;
32729 if (!page_is_ram(pagenr))
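With GRKERNSEC_KMEM, devmem_is_allowed() above stops exposing the whole first megabyte through /dev/mem: only the BDA page, the EBDA range discovered by gr_init_ebda(), the optional tboot log window, and the legacy ISA hole survive. A standalone rendition of the decision table, with the fallback EBDA range hard-coded where the patch probes the BIOS at boot:

#include <stdio.h>

#define PAGE_SHIFT 12
#define ISA_START (0xA0000UL  >> PAGE_SHIFT)   /* pfn 0xA0  */
#define ISA_END   (0x100000UL >> PAGE_SHIFT)   /* pfn 0x100 */

/* Fallback EBDA range; the patch reads the real one from the BIOS
 * data area at boot (gr_init_ebda). */
static const unsigned long ebda_start = 0x9F000UL >> PAGE_SHIFT;
static const unsigned long ebda_end   = 0xA0000UL >> PAGE_SHIFT;

/* Mirrors the hardened devmem_is_allowed(): below 1MB only the BDA,
 * the EBDA and the legacy ISA window remain readable via /dev/mem. */
static int devmem_allowed(unsigned long pfn)
{
    if (!pfn)
        return 1;                              /* BDA */
    if (pfn >= ebda_start && pfn < ebda_end)
        return 1;                              /* EBDA */
    if (pfn >= ISA_START && pfn < ISA_END)
        return 1;                              /* ISA hole */
    if (pfn <= 256)
        return 0;                              /* rest of low 1MB */
    return 1;                                  /* RAM checks follow */
}

int main(void)
{
    printf("pfn 0x00: %d\n", devmem_allowed(0));
    printf("pfn 0x50: %d\n", devmem_allowed(0x50));   /* denied  */
    printf("pfn 0xB8: %d\n", devmem_allowed(0xB8));   /* VGA text */
    return 0;
}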
32730@@ -685,8 +729,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32731 #endif
32732 }
32733
32734+#ifdef CONFIG_GRKERNSEC_KMEM
32735+static inline void gr_init_ebda(void)
32736+{
32737+ unsigned int ebda_addr;
32738+ unsigned int ebda_size = 0;
32739+
32740+ ebda_addr = get_bios_ebda();
32741+ if (ebda_addr) {
32742+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32743+ ebda_size <<= 10;
32744+ }
32745+ if (ebda_addr && ebda_size) {
32746+ ebda_start = ebda_addr >> PAGE_SHIFT;
32747+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32748+ } else {
32749+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32750+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32751+ }
32752+}
32753+#else
32754+static inline void gr_init_ebda(void) { }
32755+#endif
32756+
32757 void free_initmem(void)
32758 {
32759+#ifdef CONFIG_PAX_KERNEXEC
32760+#ifdef CONFIG_X86_32
32761+ /* PaX: limit KERNEL_CS to actual size */
32762+ unsigned long addr, limit;
32763+ struct desc_struct d;
32764+ int cpu;
32765+#else
32766+ pgd_t *pgd;
32767+ pud_t *pud;
32768+ pmd_t *pmd;
32769+ unsigned long addr, end;
32770+#endif
32771+#endif
32772+
32773+ gr_init_ebda();
32774+
32775+#ifdef CONFIG_PAX_KERNEXEC
32776+#ifdef CONFIG_X86_32
32777+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32778+ limit = (limit - 1UL) >> PAGE_SHIFT;
32779+
32780+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32781+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32782+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32783+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32784+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32785+ }
32786+
32787+ /* PaX: make KERNEL_CS read-only */
32788+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32789+ if (!paravirt_enabled())
32790+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32791+/*
32792+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32793+ pgd = pgd_offset_k(addr);
32794+ pud = pud_offset(pgd, addr);
32795+ pmd = pmd_offset(pud, addr);
32796+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32797+ }
32798+*/
32799+#ifdef CONFIG_X86_PAE
32800+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32801+/*
32802+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32803+ pgd = pgd_offset_k(addr);
32804+ pud = pud_offset(pgd, addr);
32805+ pmd = pmd_offset(pud, addr);
32806+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32807+ }
32808+*/
32809+#endif
32810+
32811+#ifdef CONFIG_MODULES
32812+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32813+#endif
32814+
32815+#else
32816+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32817+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32818+ pgd = pgd_offset_k(addr);
32819+ pud = pud_offset(pgd, addr);
32820+ pmd = pmd_offset(pud, addr);
32821+ if (!pmd_present(*pmd))
32822+ continue;
32823+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32824+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32825+ else
32826+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32827+ }
32828+
32829+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32830+ end = addr + KERNEL_IMAGE_SIZE;
32831+ for (; addr < end; addr += PMD_SIZE) {
32832+ pgd = pgd_offset_k(addr);
32833+ pud = pud_offset(pgd, addr);
32834+ pmd = pmd_offset(pud, addr);
32835+ if (!pmd_present(*pmd))
32836+ continue;
32837+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32838+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32839+ }
32840+#endif
32841+
32842+ flush_tlb_all();
32843+#endif
32844+
32845 free_init_pages("unused kernel",
32846 (unsigned long)(&__init_begin),
32847 (unsigned long)(&__init_end));
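The 64-bit KERNEXEC branch of free_initmem() above walks the kernel image PMD by PMD twice (once for the __START_KERNEL_map alias, once for the direct mapping) and applies one rule: text and rodata lose _PAGE_RW, everything else gains _PAGE_NX. The per-PMD decision in isolation, with made-up section bounds standing in for the linker symbols _text/_sdata:

#include <stdio.h>

#define _PAGE_RW 0x002UL
#define _PAGE_NX (1UL << 63)

/* Illustrative layout; the real bounds come from the linker script. */
#define TEXT_START 0xffffffff81000000UL
#define DATA_START 0xffffffff81a00000UL

/* Mirrors the 64-bit KERNEXEC loop: code and rodata lose the RW bit,
 * everything else in the image becomes non-executable. */
static unsigned long protect(unsigned long addr, unsigned long pmd)
{
    if (TEXT_START <= addr && addr < DATA_START)
        return pmd & ~_PAGE_RW;
    return pmd | _PAGE_NX;
}

int main(void)
{
    printf("text pmd: %#lx\n", protect(TEXT_START, 0x1e3UL)); /* RW cleared */
    printf("data pmd: %#lx\n", protect(DATA_START, 0x1e3UL)); /* NX set     */
    return 0;
}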
32848diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32849index c8140e1..59257fc 100644
32850--- a/arch/x86/mm/init_32.c
32851+++ b/arch/x86/mm/init_32.c
32852@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32853 bool __read_mostly __vmalloc_start_set = false;
32854
32855 /*
32856- * Creates a middle page table and puts a pointer to it in the
32857- * given global directory entry. This only returns the gd entry
32858- * in non-PAE compilation mode, since the middle layer is folded.
32859- */
32860-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32861-{
32862- pud_t *pud;
32863- pmd_t *pmd_table;
32864-
32865-#ifdef CONFIG_X86_PAE
32866- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32867- pmd_table = (pmd_t *)alloc_low_page();
32868- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32869- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32870- pud = pud_offset(pgd, 0);
32871- BUG_ON(pmd_table != pmd_offset(pud, 0));
32872-
32873- return pmd_table;
32874- }
32875-#endif
32876- pud = pud_offset(pgd, 0);
32877- pmd_table = pmd_offset(pud, 0);
32878-
32879- return pmd_table;
32880-}
32881-
32882-/*
32883 * Create a page table and place a pointer to it in a middle page
32884 * directory entry:
32885 */
32886@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32887 pte_t *page_table = (pte_t *)alloc_low_page();
32888
32889 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32890+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32891+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32892+#else
32893 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32894+#endif
32895 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32896 }
32897
32898 return pte_offset_kernel(pmd, 0);
32899 }
32900
32901+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32902+{
32903+ pud_t *pud;
32904+ pmd_t *pmd_table;
32905+
32906+ pud = pud_offset(pgd, 0);
32907+ pmd_table = pmd_offset(pud, 0);
32908+
32909+ return pmd_table;
32910+}
32911+
32912 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32913 {
32914 int pgd_idx = pgd_index(vaddr);
32915@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32916 int pgd_idx, pmd_idx;
32917 unsigned long vaddr;
32918 pgd_t *pgd;
32919+ pud_t *pud;
32920 pmd_t *pmd;
32921 pte_t *pte = NULL;
32922 unsigned long count = page_table_range_init_count(start, end);
32923@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32924 pgd = pgd_base + pgd_idx;
32925
32926 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32927- pmd = one_md_table_init(pgd);
32928- pmd = pmd + pmd_index(vaddr);
32929+ pud = pud_offset(pgd, vaddr);
32930+ pmd = pmd_offset(pud, vaddr);
32931+
32932+#ifdef CONFIG_X86_PAE
32933+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32934+#endif
32935+
32936 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32937 pmd++, pmd_idx++) {
32938 pte = page_table_kmap_check(one_page_table_init(pmd),
32939@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32940 }
32941 }
32942
32943-static inline int is_kernel_text(unsigned long addr)
32944+static inline int is_kernel_text(unsigned long start, unsigned long end)
32945 {
32946- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32947- return 1;
32948- return 0;
32949+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32950+ end <= ktla_ktva((unsigned long)_stext)) &&
32951+ (start >= ktla_ktva((unsigned long)_einittext) ||
32952+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32953+
32954+#ifdef CONFIG_ACPI_SLEEP
32955+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32956+#endif
32957+
32958+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32959+ return 0;
32960+ return 1;
32961 }
32962
32963 /*
32964@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32965 unsigned long last_map_addr = end;
32966 unsigned long start_pfn, end_pfn;
32967 pgd_t *pgd_base = swapper_pg_dir;
32968- int pgd_idx, pmd_idx, pte_ofs;
32969+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32970 unsigned long pfn;
32971 pgd_t *pgd;
32972+ pud_t *pud;
32973 pmd_t *pmd;
32974 pte_t *pte;
32975 unsigned pages_2m, pages_4k;
32976@@ -291,8 +295,13 @@ repeat:
32977 pfn = start_pfn;
32978 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32979 pgd = pgd_base + pgd_idx;
32980- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32981- pmd = one_md_table_init(pgd);
32982+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32983+ pud = pud_offset(pgd, 0);
32984+ pmd = pmd_offset(pud, 0);
32985+
32986+#ifdef CONFIG_X86_PAE
32987+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32988+#endif
32989
32990 if (pfn >= end_pfn)
32991 continue;
32992@@ -304,14 +313,13 @@ repeat:
32993 #endif
32994 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32995 pmd++, pmd_idx++) {
32996- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32997+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32998
32999 /*
33000 * Map with big pages if possible, otherwise
33001 * create normal page tables:
33002 */
33003 if (use_pse) {
33004- unsigned int addr2;
33005 pgprot_t prot = PAGE_KERNEL_LARGE;
33006 /*
33007 * first pass will use the same initial
33008@@ -322,11 +330,7 @@ repeat:
33009 _PAGE_PSE);
33010
33011 pfn &= PMD_MASK >> PAGE_SHIFT;
33012- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33013- PAGE_OFFSET + PAGE_SIZE-1;
33014-
33015- if (is_kernel_text(addr) ||
33016- is_kernel_text(addr2))
33017+ if (is_kernel_text(address, address + PMD_SIZE))
33018 prot = PAGE_KERNEL_LARGE_EXEC;
33019
33020 pages_2m++;
33021@@ -343,7 +347,7 @@ repeat:
33022 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33023 pte += pte_ofs;
33024 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33025- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33026+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33027 pgprot_t prot = PAGE_KERNEL;
33028 /*
33029 * first pass will use the same initial
33030@@ -351,7 +355,7 @@ repeat:
33031 */
33032 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33033
33034- if (is_kernel_text(addr))
33035+ if (is_kernel_text(address, address + PAGE_SIZE))
33036 prot = PAGE_KERNEL_EXEC;
33037
33038 pages_4k++;
33039@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33040
33041 pud = pud_offset(pgd, va);
33042 pmd = pmd_offset(pud, va);
33043- if (!pmd_present(*pmd))
33044+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33045 break;
33046
33047 /* should not be large page here */
33048@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33049
33050 static void __init pagetable_init(void)
33051 {
33052- pgd_t *pgd_base = swapper_pg_dir;
33053-
33054- permanent_kmaps_init(pgd_base);
33055+ permanent_kmaps_init(swapper_pg_dir);
33056 }
33057
33058-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33059+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33060 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33061
33062 /* user-defined highmem size */
33063@@ -787,10 +789,10 @@ void __init mem_init(void)
33064 ((unsigned long)&__init_end -
33065 (unsigned long)&__init_begin) >> 10,
33066
33067- (unsigned long)&_etext, (unsigned long)&_edata,
33068- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33069+ (unsigned long)&_sdata, (unsigned long)&_edata,
33070+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33071
33072- (unsigned long)&_text, (unsigned long)&_etext,
33073+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33074 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33075
33076 /*
33077@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33078 if (!kernel_set_to_readonly)
33079 return;
33080
33081+ start = ktla_ktva(start);
33082 pr_debug("Set kernel text: %lx - %lx for read write\n",
33083 start, start+size);
33084
33085@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33086 if (!kernel_set_to_readonly)
33087 return;
33088
33089+ start = ktla_ktva(start);
33090 pr_debug("Set kernel text: %lx - %lx for read only\n",
33091 start, start+size);
33092
33093@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33094 unsigned long start = PFN_ALIGN(_text);
33095 unsigned long size = PFN_ALIGN(_etext) - start;
33096
33097+ start = ktla_ktva(start);
33098 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33099 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33100 size >> 10);
33101diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33102index 30eb05a..ae671ac 100644
33103--- a/arch/x86/mm/init_64.c
33104+++ b/arch/x86/mm/init_64.c
33105@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33106 * around without checking the pgd every time.
33107 */
33108
33109-pteval_t __supported_pte_mask __read_mostly = ~0;
33110+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33111 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33112
33113 int force_personality32;
33114@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33115
33116 for (address = start; address <= end; address += PGDIR_SIZE) {
33117 const pgd_t *pgd_ref = pgd_offset_k(address);
33118+
33119+#ifdef CONFIG_PAX_PER_CPU_PGD
33120+ unsigned long cpu;
33121+#else
33122 struct page *page;
33123+#endif
33124
33125 /*
33126 * When it is called after memory hot remove, pgd_none()
33127@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33128 continue;
33129
33130 spin_lock(&pgd_lock);
33131+
33132+#ifdef CONFIG_PAX_PER_CPU_PGD
33133+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33134+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33135+
33136+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33137+ BUG_ON(pgd_page_vaddr(*pgd)
33138+ != pgd_page_vaddr(*pgd_ref));
33139+
33140+ if (removed) {
33141+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33142+ pgd_clear(pgd);
33143+ } else {
33144+ if (pgd_none(*pgd))
33145+ set_pgd(pgd, *pgd_ref);
33146+ }
33147+
33148+ pgd = pgd_offset_cpu(cpu, kernel, address);
33149+#else
33150 list_for_each_entry(page, &pgd_list, lru) {
33151 pgd_t *pgd;
33152 spinlock_t *pgt_lock;
33153@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33154 /* the pgt_lock only for Xen */
33155 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33156 spin_lock(pgt_lock);
33157+#endif
33158
33159 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33160 BUG_ON(pgd_page_vaddr(*pgd)
33161@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33162 set_pgd(pgd, *pgd_ref);
33163 }
33164
33165+#ifndef CONFIG_PAX_PER_CPU_PGD
33166 spin_unlock(pgt_lock);
33167+#endif
33168+
33169 }
33170 spin_unlock(&pgd_lock);
33171 }
33172@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33173 {
33174 if (pgd_none(*pgd)) {
33175 pud_t *pud = (pud_t *)spp_getpage();
33176- pgd_populate(&init_mm, pgd, pud);
33177+ pgd_populate_kernel(&init_mm, pgd, pud);
33178 if (pud != pud_offset(pgd, 0))
33179 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33180 pud, pud_offset(pgd, 0));
33181@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33182 {
33183 if (pud_none(*pud)) {
33184 pmd_t *pmd = (pmd_t *) spp_getpage();
33185- pud_populate(&init_mm, pud, pmd);
33186+ pud_populate_kernel(&init_mm, pud, pmd);
33187 if (pmd != pmd_offset(pud, 0))
33188 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33189 pmd, pmd_offset(pud, 0));
33190@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33191 pmd = fill_pmd(pud, vaddr);
33192 pte = fill_pte(pmd, vaddr);
33193
33194+ pax_open_kernel();
33195 set_pte(pte, new_pte);
33196+ pax_close_kernel();
33197
33198 /*
33199 * It's enough to flush this one mapping.
33200@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33201 pgd = pgd_offset_k((unsigned long)__va(phys));
33202 if (pgd_none(*pgd)) {
33203 pud = (pud_t *) spp_getpage();
33204- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33205- _PAGE_USER));
33206+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33207 }
33208 pud = pud_offset(pgd, (unsigned long)__va(phys));
33209 if (pud_none(*pud)) {
33210 pmd = (pmd_t *) spp_getpage();
33211- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33212- _PAGE_USER));
33213+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33214 }
33215 pmd = pmd_offset(pud, phys);
33216 BUG_ON(!pmd_none(*pmd));
33217@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33218 prot);
33219
33220 spin_lock(&init_mm.page_table_lock);
33221- pud_populate(&init_mm, pud, pmd);
33222+ pud_populate_kernel(&init_mm, pud, pmd);
33223 spin_unlock(&init_mm.page_table_lock);
33224 }
33225 __flush_tlb_all();
33226@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33227 page_size_mask);
33228
33229 spin_lock(&init_mm.page_table_lock);
33230- pgd_populate(&init_mm, pgd, pud);
33231+ pgd_populate_kernel(&init_mm, pgd, pud);
33232 spin_unlock(&init_mm.page_table_lock);
33233 pgd_changed = true;
33234 }
33235diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33236index 9ca35fc..4b2b7b7 100644
33237--- a/arch/x86/mm/iomap_32.c
33238+++ b/arch/x86/mm/iomap_32.c
33239@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33240 type = kmap_atomic_idx_push();
33241 idx = type + KM_TYPE_NR * smp_processor_id();
33242 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33243+
33244+ pax_open_kernel();
33245 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33246+ pax_close_kernel();
33247+
33248 arch_flush_lazy_mmu_mode();
33249
33250 return (void *)vaddr;
33251diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33252index fdf617c..b9e85bc 100644
33253--- a/arch/x86/mm/ioremap.c
33254+++ b/arch/x86/mm/ioremap.c
33255@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33256 unsigned long i;
33257
33258 for (i = 0; i < nr_pages; ++i)
33259- if (pfn_valid(start_pfn + i) &&
33260- !PageReserved(pfn_to_page(start_pfn + i)))
33261+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33262+ !PageReserved(pfn_to_page(start_pfn + i))))
33263 return 1;
33264
33265 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33266@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33267 *
33268 * Caller must ensure there is only one unmapping for the same pointer.
33269 */
33270-void iounmap(volatile void __iomem *addr)
33271+void iounmap(const volatile void __iomem *addr)
33272 {
33273 struct vm_struct *p, *o;
33274
33275@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33276 */
33277 void *xlate_dev_mem_ptr(phys_addr_t phys)
33278 {
33279- void *addr;
33280- unsigned long start = phys & PAGE_MASK;
33281-
33282 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33283- if (page_is_ram(start >> PAGE_SHIFT))
33284+ if (page_is_ram(phys >> PAGE_SHIFT))
33285+#ifdef CONFIG_HIGHMEM
33286+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33287+#endif
33288 return __va(phys);
33289
33290- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33291- if (addr)
33292- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33293-
33294- return addr;
33295+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33296 }
33297
33298 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33299 {
33300 if (page_is_ram(phys >> PAGE_SHIFT))
33301+#ifdef CONFIG_HIGHMEM
33302+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33303+#endif
33304 return;
33305
33306 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33307 return;
33308 }
33309
33310-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33311+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33312
33313 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33314 {
33315@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33316 early_ioremap_setup();
33317
33318 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33319- memset(bm_pte, 0, sizeof(bm_pte));
33320- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33321+ pmd_populate_user(&init_mm, pmd, bm_pte);
33322
33323 /*
33324 * The boot-ioremap range spans multiple pmds, for which
33325diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33326index b4f2e7e..96c9c3e 100644
33327--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33328+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33329@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33330 * memory (e.g. tracked pages)? For now, we need this to avoid
33331 * invoking kmemcheck for PnP BIOS calls.
33332 */
33333- if (regs->flags & X86_VM_MASK)
33334+ if (v8086_mode(regs))
33335 return false;
33336- if (regs->cs != __KERNEL_CS)
33337+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33338 return false;
33339
33340 pte = kmemcheck_pte_lookup(address);
33341diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33342index df4552b..12c129c 100644
33343--- a/arch/x86/mm/mmap.c
33344+++ b/arch/x86/mm/mmap.c
33345@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33346 * Leave an at least ~128 MB hole with possible stack randomization.
33347 */
33348 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33349-#define MAX_GAP (TASK_SIZE/6*5)
33350+#define MAX_GAP (pax_task_size/6*5)
33351
33352 static int mmap_is_legacy(void)
33353 {
33354@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33355 return rnd << PAGE_SHIFT;
33356 }
33357
33358-static unsigned long mmap_base(void)
33359+static unsigned long mmap_base(struct mm_struct *mm)
33360 {
33361 unsigned long gap = rlimit(RLIMIT_STACK);
33362+ unsigned long pax_task_size = TASK_SIZE;
33363+
33364+#ifdef CONFIG_PAX_SEGMEXEC
33365+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33366+ pax_task_size = SEGMEXEC_TASK_SIZE;
33367+#endif
33368
33369 if (gap < MIN_GAP)
33370 gap = MIN_GAP;
33371 else if (gap > MAX_GAP)
33372 gap = MAX_GAP;
33373
33374- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33375+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33376 }
33377
33378 /*
33379 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33380 * does, but not when emulating X86_32
33381 */
33382-static unsigned long mmap_legacy_base(void)
33383+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33384 {
33385- if (mmap_is_ia32())
33386+ if (mmap_is_ia32()) {
33387+
33388+#ifdef CONFIG_PAX_SEGMEXEC
33389+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33390+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33391+ else
33392+#endif
33393+
33394 return TASK_UNMAPPED_BASE;
33395- else
33396+ } else
33397 return TASK_UNMAPPED_BASE + mmap_rnd();
33398 }
33399
33400@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33401 */
33402 void arch_pick_mmap_layout(struct mm_struct *mm)
33403 {
33404- mm->mmap_legacy_base = mmap_legacy_base();
33405- mm->mmap_base = mmap_base();
33406+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33407+ mm->mmap_base = mmap_base(mm);
33408+
33409+#ifdef CONFIG_PAX_RANDMMAP
33410+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33411+ mm->mmap_legacy_base += mm->delta_mmap;
33412+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33413+ }
33414+#endif
33415
33416 if (mmap_is_legacy()) {
33417 mm->mmap_base = mm->mmap_legacy_base;
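arch_pick_mmap_layout() above now takes the SEGMEXEC task size into account and, under RANDMMAP, raises the legacy (bottom-up) base by delta_mmap while lowering the top-down base by delta_mmap + delta_stack, widening the randomised gap at both ends. The arithmetic with illustrative deltas (the real ones are per-process random):

#include <stdio.h>

#define PAGE_ALIGN(x) (((x) + 0xFFFUL) & ~0xFFFUL)
/* Illustrative per-process random deltas (RANDMMAP). */
#define DELTA_MMAP  0x02345000UL
#define DELTA_STACK 0x00123000UL

int main(void)
{
    unsigned long task_size = 0xC0000000UL;
    unsigned long gap = 8UL << 20;       /* RLIMIT_STACK: 8 MB */
    unsigned long rnd = 0x00456000UL;    /* mmap_rnd()         */

    /* Legacy base moves up, top-down base moves down. */
    unsigned long legacy = task_size / 3 + DELTA_MMAP;
    unsigned long base   = PAGE_ALIGN(task_size - gap - rnd)
                           - DELTA_MMAP - DELTA_STACK;

    printf("legacy base:  %#lx\n", legacy);
    printf("topdown base: %#lx\n", base);
    return 0;
}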
33418diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33419index 0057a7a..95c7edd 100644
33420--- a/arch/x86/mm/mmio-mod.c
33421+++ b/arch/x86/mm/mmio-mod.c
33422@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33423 break;
33424 default:
33425 {
33426- unsigned char *ip = (unsigned char *)instptr;
33427+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33428 my_trace->opcode = MMIO_UNKNOWN_OP;
33429 my_trace->width = 0;
33430 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33431@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33432 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33433 void __iomem *addr)
33434 {
33435- static atomic_t next_id;
33436+ static atomic_unchecked_t next_id;
33437 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33438 /* These are page-unaligned. */
33439 struct mmiotrace_map map = {
33440@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33441 .private = trace
33442 },
33443 .phys = offset,
33444- .id = atomic_inc_return(&next_id)
33445+ .id = atomic_inc_return_unchecked(&next_id)
33446 };
33447 map.map_id = trace->id;
33448
33449@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33450 ioremap_trace_core(offset, size, addr);
33451 }
33452
33453-static void iounmap_trace_core(volatile void __iomem *addr)
33454+static void iounmap_trace_core(const volatile void __iomem *addr)
33455 {
33456 struct mmiotrace_map map = {
33457 .phys = 0,
33458@@ -328,7 +328,7 @@ not_enabled:
33459 }
33460 }
33461
33462-void mmiotrace_iounmap(volatile void __iomem *addr)
33463+void mmiotrace_iounmap(const volatile void __iomem *addr)
33464 {
33465 might_sleep();
33466 if (is_enabled()) /* recheck and proper locking in *_core() */
33467diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33468index cd4785b..25188b6 100644
33469--- a/arch/x86/mm/numa.c
33470+++ b/arch/x86/mm/numa.c
33471@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33472 }
33473 }
33474
33475-static int __init numa_register_memblks(struct numa_meminfo *mi)
33476+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33477 {
33478 unsigned long uninitialized_var(pfn_align);
33479 int i, nid;
33480diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33481index 536ea2f..f42c293 100644
33482--- a/arch/x86/mm/pageattr.c
33483+++ b/arch/x86/mm/pageattr.c
33484@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33485 */
33486 #ifdef CONFIG_PCI_BIOS
33487 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33488- pgprot_val(forbidden) |= _PAGE_NX;
33489+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33490 #endif
33491
33492 /*
33493@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33494 * Does not cover __inittext since that is gone later on. On
33495 * 64bit we do not enforce !NX on the low mapping
33496 */
33497- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33498- pgprot_val(forbidden) |= _PAGE_NX;
33499+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33500+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33501
33502+#ifdef CONFIG_DEBUG_RODATA
33503 /*
33504 * The .rodata section needs to be read-only. Using the pfn
33505 * catches all aliases.
33506@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33507 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33508 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33509 pgprot_val(forbidden) |= _PAGE_RW;
33510+#endif
33511
33512 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33513 /*
33514@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33515 }
33516 #endif
33517
33518+#ifdef CONFIG_PAX_KERNEXEC
33519+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33520+ pgprot_val(forbidden) |= _PAGE_RW;
33521+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33522+ }
33523+#endif
33524+
33525 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33526
33527 return prot;
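The KERNEXEC addition to static_protections() above works through the `forbidden` mask: any bit accumulated there is stripped from the requested protection at the end of the function, so forcing _PAGE_RW and _PAGE_NX into `forbidden` for the [_text, _sdata) range pins kernel text read-only and executable regardless of what the caller asked for. The masking in isolation:

#include <stdio.h>

#define _PAGE_RW 0x002UL
#define _PAGE_NX (1UL << 63)

/* Bits collected in `forbidden` are stripped from the request, so a
 * writable, non-executable request on kernel text comes back as a
 * present, read-only, executable mapping. */
int main(void)
{
    unsigned long prot = _PAGE_NX | _PAGE_RW | 0x61UL; /* NX|RW|D|A|P */
    unsigned long forbidden = _PAGE_RW | _PAGE_NX;     /* text range */

    printf("requested: %#lx\n", prot);
    printf("granted:   %#lx\n", prot & ~forbidden);    /* D|A|P */
    return 0;
}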
33528@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33529 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33530 {
33531 /* change init_mm */
33532+ pax_open_kernel();
33533 set_pte_atomic(kpte, pte);
33534+
33535 #ifdef CONFIG_X86_32
33536 if (!SHARED_KERNEL_PMD) {
33537+
33538+#ifdef CONFIG_PAX_PER_CPU_PGD
33539+ unsigned long cpu;
33540+#else
33541 struct page *page;
33542+#endif
33543
33544+#ifdef CONFIG_PAX_PER_CPU_PGD
33545+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33546+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33547+#else
33548 list_for_each_entry(page, &pgd_list, lru) {
33549- pgd_t *pgd;
33550+ pgd_t *pgd = (pgd_t *)page_address(page);
33551+#endif
33552+
33553 pud_t *pud;
33554 pmd_t *pmd;
33555
33556- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33557+ pgd += pgd_index(address);
33558 pud = pud_offset(pgd, address);
33559 pmd = pmd_offset(pud, address);
33560 set_pte_atomic((pte_t *)pmd, pte);
33561 }
33562 }
33563 #endif
33564+ pax_close_kernel();
33565 }
33566
33567 static int
33568diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33569index 7ac6869..c0ba541 100644
33570--- a/arch/x86/mm/pat.c
33571+++ b/arch/x86/mm/pat.c
33572@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33573 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33574
33575 if (pg_flags == _PGMT_DEFAULT)
33576- return -1;
33577+ return _PAGE_CACHE_MODE_NUM;
33578 else if (pg_flags == _PGMT_WC)
33579 return _PAGE_CACHE_MODE_WC;
33580 else if (pg_flags == _PGMT_UC_MINUS)
33581@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33582
33583 page = pfn_to_page(pfn);
33584 type = get_page_memtype(page);
33585- if (type != -1) {
33586+ if (type != _PAGE_CACHE_MODE_NUM) {
33587 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33588 start, end - 1, type, req_type);
33589 if (new_type)
33590@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33591
33592 if (!entry) {
33593 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33594- current->comm, current->pid, start, end - 1);
33595+ current->comm, task_pid_nr(current), start, end - 1);
33596 return -EINVAL;
33597 }
33598
33599@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33600 page = pfn_to_page(paddr >> PAGE_SHIFT);
33601 rettype = get_page_memtype(page);
33602 /*
33603- * -1 from get_page_memtype() implies RAM page is in its
33604+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33605 * default state and not reserved, and hence of type WB
33606 */
33607- if (rettype == -1)
33608+ if (rettype == _PAGE_CACHE_MODE_NUM)
33609 rettype = _PAGE_CACHE_MODE_WB;
33610
33611 return rettype;
33612@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33613
33614 while (cursor < to) {
33615 if (!devmem_is_allowed(pfn)) {
33616- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33617- current->comm, from, to - 1);
33618+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33619+ current->comm, from, to - 1, cursor);
33620 return 0;
33621 }
33622 cursor += PAGE_SIZE;
33623@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33624 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33625 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33626 "for [mem %#010Lx-%#010Lx]\n",
33627- current->comm, current->pid,
33628+ current->comm, task_pid_nr(current),
33629 cattr_name(pcm),
33630 base, (unsigned long long)(base + size-1));
33631 return -EINVAL;
33632@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33633 pcm = lookup_memtype(paddr);
33634 if (want_pcm != pcm) {
33635 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33636- current->comm, current->pid,
33637+ current->comm, task_pid_nr(current),
33638 cattr_name(want_pcm),
33639 (unsigned long long)paddr,
33640 (unsigned long long)(paddr + size - 1),
33641@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33642 free_memtype(paddr, paddr + size);
33643 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33644 " for [mem %#010Lx-%#010Lx], got %s\n",
33645- current->comm, current->pid,
33646+ current->comm, task_pid_nr(current),
33647 cattr_name(want_pcm),
33648 (unsigned long long)paddr,
33649 (unsigned long long)(paddr + size - 1),
33650diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33651index 6582adc..fcc5d0b 100644
33652--- a/arch/x86/mm/pat_rbtree.c
33653+++ b/arch/x86/mm/pat_rbtree.c
33654@@ -161,7 +161,7 @@ success:
33655
33656 failure:
33657 printk(KERN_INFO "%s:%d conflicting memory types "
33658- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33659+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33660 end, cattr_name(found_type), cattr_name(match->type));
33661 return -EBUSY;
33662 }
33663diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33664index 9f0614d..92ae64a 100644
33665--- a/arch/x86/mm/pf_in.c
33666+++ b/arch/x86/mm/pf_in.c
33667@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33668 int i;
33669 enum reason_type rv = OTHERS;
33670
33671- p = (unsigned char *)ins_addr;
33672+ p = (unsigned char *)ktla_ktva(ins_addr);
33673 p += skip_prefix(p, &prf);
33674 p += get_opcode(p, &opcode);
33675
33676@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33677 struct prefix_bits prf;
33678 int i;
33679
33680- p = (unsigned char *)ins_addr;
33681+ p = (unsigned char *)ktla_ktva(ins_addr);
33682 p += skip_prefix(p, &prf);
33683 p += get_opcode(p, &opcode);
33684
33685@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33686 struct prefix_bits prf;
33687 int i;
33688
33689- p = (unsigned char *)ins_addr;
33690+ p = (unsigned char *)ktla_ktva(ins_addr);
33691 p += skip_prefix(p, &prf);
33692 p += get_opcode(p, &opcode);
33693
33694@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33695 struct prefix_bits prf;
33696 int i;
33697
33698- p = (unsigned char *)ins_addr;
33699+ p = (unsigned char *)ktla_ktva(ins_addr);
33700 p += skip_prefix(p, &prf);
33701 p += get_opcode(p, &opcode);
33702 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33703@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33704 struct prefix_bits prf;
33705 int i;
33706
33707- p = (unsigned char *)ins_addr;
33708+ p = (unsigned char *)ktla_ktva(ins_addr);
33709 p += skip_prefix(p, &prf);
33710 p += get_opcode(p, &opcode);
33711 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
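pf_in.c decodes instruction bytes at the faulting address; under KERNEXEC on i386 the kernel text runs in a biased code segment, so a text address taken from a register must be converted back to its virtual alias before the bytes can be read. ktla_ktva() ("kernel text linear address to kernel text virtual address") is the PaX helper for that. Its shape, from recollection of the PaX headers -- treat the exact bias as illustrative:

    #ifdef CONFIG_PAX_KERNEXEC
    /* undo the KERNEL_CS base bias on i386 so the bytes can be dereferenced */
    #define ktla_ktva(addr)        ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #else
    #define ktla_ktva(addr)        (addr)
    #endif
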
33712diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33713index 7b22ada..b11e66f 100644
33714--- a/arch/x86/mm/pgtable.c
33715+++ b/arch/x86/mm/pgtable.c
33716@@ -97,10 +97,75 @@ static inline void pgd_list_del(pgd_t *pgd)
33717 list_del(&page->lru);
33718 }
33719
33720-#define UNSHARED_PTRS_PER_PGD \
33721- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33722+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33723+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33724
33725+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33726+{
33727+ unsigned int count = USER_PGD_PTRS;
33728
33729+ if (!pax_user_shadow_base)
33730+ return;
33731+
33732+ while (count--)
33733+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33734+}
33735+#endif
33736+
33737+#ifdef CONFIG_PAX_PER_CPU_PGD
33738+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33739+{
33740+ unsigned int count = USER_PGD_PTRS;
33741+
33742+ while (count--) {
33743+ pgd_t pgd;
33744+
33745+#ifdef CONFIG_X86_64
33746+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33747+#else
33748+ pgd = *src++;
33749+#endif
33750+
33751+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33752+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33753+#endif
33754+
33755+ *dst++ = pgd;
33756+ }
33757+
33758+}
33759+#endif
33760+
33761+#ifdef CONFIG_X86_64
33762+#define pxd_t pud_t
33763+#define pyd_t pgd_t
33764+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33765+#define pgtable_pxd_page_ctor(page) true
33766+#define pgtable_pxd_page_dtor(page) do {} while (0)
33767+#define pxd_free(mm, pud) pud_free((mm), (pud))
33768+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33769+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33770+#define PYD_SIZE PGDIR_SIZE
33771+#define mm_inc_nr_pxds(mm) do {} while (0)
33772+#define mm_dec_nr_pxds(mm) do {} while (0)
33773+#else
33774+#define pxd_t pmd_t
33775+#define pyd_t pud_t
33776+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33777+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33778+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33779+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33780+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33781+#define pyd_offset(mm, address) pud_offset((mm), (address))
33782+#define PYD_SIZE PUD_SIZE
33783+#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
33784+#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
33785+#endif
33786+
33787+#ifdef CONFIG_PAX_PER_CPU_PGD
33788+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33789+static inline void pgd_dtor(pgd_t *pgd) {}
33790+#else
33791 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33792 {
33793 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33794@@ -141,6 +206,7 @@ static void pgd_dtor(pgd_t *pgd)
33795 pgd_list_del(pgd);
33796 spin_unlock(&pgd_lock);
33797 }
33798+#endif
33799
33800 /*
33801 * List of all pgd's needed for non-PAE so it can invalidate entries
33802@@ -153,7 +219,7 @@ static void pgd_dtor(pgd_t *pgd)
33803 * -- nyc
33804 */
33805
33806-#ifdef CONFIG_X86_PAE
33807+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33808 /*
33809 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33810 * updating the top-level pagetable entries to guarantee the
33811@@ -165,7 +231,7 @@ static void pgd_dtor(pgd_t *pgd)
33812 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33813 * and initialize the kernel pmds here.
33814 */
33815-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33816+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33817
33818 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33819 {
33820@@ -183,46 +249,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33821 */
33822 flush_tlb_mm(mm);
33823 }
33824+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33825+#define PREALLOCATED_PXDS USER_PGD_PTRS
33826 #else /* !CONFIG_X86_PAE */
33827
33828 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33829-#define PREALLOCATED_PMDS 0
33830+#define PREALLOCATED_PXDS 0
33831
33832 #endif /* CONFIG_X86_PAE */
33833
33834-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
33835+static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
33836 {
33837 int i;
33838
33839- for(i = 0; i < PREALLOCATED_PMDS; i++)
33840- if (pmds[i]) {
33841- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33842- free_page((unsigned long)pmds[i]);
33843- mm_dec_nr_pmds(mm);
33844+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33845+ if (pxds[i]) {
33846+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33847+ free_page((unsigned long)pxds[i]);
33848+ mm_dec_nr_pxds(mm);
33849 }
33850 }
33851
33852-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33853+static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
33854 {
33855 int i;
33856 bool failed = false;
33857
33858- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33859- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33860- if (!pmd)
33861+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33862+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33863+ if (!pxd)
33864 failed = true;
33865- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33866- free_page((unsigned long)pmd);
33867- pmd = NULL;
33868+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33869+ free_page((unsigned long)pxd);
33870+ pxd = NULL;
33871 failed = true;
33872 }
33873- if (pmd)
33874- mm_inc_nr_pmds(mm);
33875- pmds[i] = pmd;
33876+ if (pxd)
33877+ mm_inc_nr_pxds(mm);
33878+ pxds[i] = pxd;
33879 }
33880
33881 if (failed) {
33882- free_pmds(mm, pmds);
33883+ free_pxds(mm, pxds);
33884 return -ENOMEM;
33885 }
33886
33887@@ -235,50 +303,54 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33888 * preallocate which never got a corresponding vma will need to be
33889 * freed manually.
33890 */
33891-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33892+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33893 {
33894 int i;
33895
33896- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33897+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33898 pgd_t pgd = pgdp[i];
33899
33900 if (pgd_val(pgd) != 0) {
33901- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33902+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33903
33904- pgdp[i] = native_make_pgd(0);
33905+ set_pgd(pgdp + i, native_make_pgd(0));
33906
33907- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33908- pmd_free(mm, pmd);
33909- mm_dec_nr_pmds(mm);
33910+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33911+ pxd_free(mm, pxd);
33912+ mm_dec_nr_pxds(mm);
33913 }
33914 }
33915 }
33916
33917-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33918+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33919 {
33920- pud_t *pud;
33921+ pyd_t *pyd;
33922 int i;
33923
33924- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33925+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33926 return;
33927
33928- pud = pud_offset(pgd, 0);
33929+#ifdef CONFIG_X86_64
33930+ pyd = pyd_offset(mm, 0L);
33931+#else
33932+ pyd = pyd_offset(pgd, 0L);
33933+#endif
33934
33935- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33936- pmd_t *pmd = pmds[i];
33937+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33938+ pxd_t *pxd = pxds[i];
33939
33940 if (i >= KERNEL_PGD_BOUNDARY)
33941- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33942- sizeof(pmd_t) * PTRS_PER_PMD);
33943+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33944+ sizeof(pxd_t) * PTRS_PER_PMD);
33945
33946- pud_populate(mm, pud, pmd);
33947+ pyd_populate(mm, pyd, pxd);
33948 }
33949 }
33950
33951 pgd_t *pgd_alloc(struct mm_struct *mm)
33952 {
33953 pgd_t *pgd;
33954- pmd_t *pmds[PREALLOCATED_PMDS];
33955+ pxd_t *pxds[PREALLOCATED_PXDS];
33956
33957 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33958
33959@@ -287,11 +359,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33960
33961 mm->pgd = pgd;
33962
33963- if (preallocate_pmds(mm, pmds) != 0)
33964+ if (preallocate_pxds(mm, pxds) != 0)
33965 goto out_free_pgd;
33966
33967 if (paravirt_pgd_alloc(mm) != 0)
33968- goto out_free_pmds;
33969+ goto out_free_pxds;
33970
33971 /*
33972 * Make sure that pre-populating the pmds is atomic with
33973@@ -301,14 +373,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33974 spin_lock(&pgd_lock);
33975
33976 pgd_ctor(mm, pgd);
33977- pgd_prepopulate_pmd(mm, pgd, pmds);
33978+ pgd_prepopulate_pxd(mm, pgd, pxds);
33979
33980 spin_unlock(&pgd_lock);
33981
33982 return pgd;
33983
33984-out_free_pmds:
33985- free_pmds(mm, pmds);
33986+out_free_pxds:
33987+ free_pxds(mm, pxds);
33988 out_free_pgd:
33989 free_page((unsigned long)pgd);
33990 out:
33991@@ -317,7 +389,7 @@ out:
33992
33993 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33994 {
33995- pgd_mop_up_pmds(mm, pgd);
33996+ pgd_mop_up_pxds(mm, pgd);
33997 pgd_dtor(pgd);
33998 paravirt_pgd_free(mm, pgd);
33999 free_page((unsigned long)pgd);
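The pxd_t/pyd_t macro block above is a renaming trick: the preallocation, mop-up and prepopulate routines are written once against abstract "x" and "y" page-table levels, and the macros bind those names to pmd/pud on 32-bit or pud/pgd on 64-bit (for PAX_PER_CPU_PGD). A tiny userspace analogue of the same technique -- purely illustrative, none of this is kernel code:

    #include <stdlib.h>

    #ifdef WIDE_TABLES                /* stand-in for CONFIG_X86_64 */
    # define pxd_t long long
    #else
    # define pxd_t int
    #endif
    #define PREALLOCATED_PXDS 4

    /* one body serves both bindings, exactly like free_pxds() above */
    static void free_pxds(pxd_t *pxds[])
    {
            for (int i = 0; i < PREALLOCATED_PXDS; i++) {
                    free(pxds[i]);
                    pxds[i] = NULL;
            }
    }

    int main(void)
    {
            pxd_t *pxds[PREALLOCATED_PXDS];

            for (int i = 0; i < PREALLOCATED_PXDS; i++)
                    pxds[i] = malloc(sizeof(pxd_t));
            free_pxds(pxds);
            return 0;
    }
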
34000diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34001index 75cc097..79a097f 100644
34002--- a/arch/x86/mm/pgtable_32.c
34003+++ b/arch/x86/mm/pgtable_32.c
34004@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34005 return;
34006 }
34007 pte = pte_offset_kernel(pmd, vaddr);
34008+
34009+ pax_open_kernel();
34010 if (pte_val(pteval))
34011 set_pte_at(&init_mm, vaddr, pte, pteval);
34012 else
34013 pte_clear(&init_mm, vaddr, pte);
34014+ pax_close_kernel();
34015
34016 /*
34017 * It's enough to flush this one mapping.
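set_pte_vaddr() now brackets the PTE update with pax_open_kernel()/pax_close_kernel(): under KERNEXEC the page tables live in read-only memory, and the bracket temporarily lifts write protection around the one legitimate write. A minimal sketch of the x86 CR0.WP flavour of that bracket -- the real PaX helpers also cover paravirt and per-CPU-PGD backends, so this is illustrative only:

    #include <linux/preempt.h>
    #include <asm/processor-flags.h>
    #include <asm/special_insns.h>

    static inline void sketch_open_kernel(void)
    {
            preempt_disable();
            barrier();
            write_cr0(read_cr0() & ~X86_CR0_WP); /* allow writes to RO pages */
    }

    static inline void sketch_close_kernel(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);  /* re-arm write protection */
            barrier();
            preempt_enable();
    }
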
34018diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34019index e666cbb..61788c45 100644
34020--- a/arch/x86/mm/physaddr.c
34021+++ b/arch/x86/mm/physaddr.c
34022@@ -10,7 +10,7 @@
34023 #ifdef CONFIG_X86_64
34024
34025 #ifdef CONFIG_DEBUG_VIRTUAL
34026-unsigned long __phys_addr(unsigned long x)
34027+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34028 {
34029 unsigned long y = x - __START_KERNEL_map;
34030
34031@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34032 #else
34033
34034 #ifdef CONFIG_DEBUG_VIRTUAL
34035-unsigned long __phys_addr(unsigned long x)
34036+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34037 {
34038 unsigned long phys_addr = x - PAGE_OFFSET;
34039 /* VMALLOC_* aren't constants */
34040diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34041index 90555bf..f5f1828 100644
34042--- a/arch/x86/mm/setup_nx.c
34043+++ b/arch/x86/mm/setup_nx.c
34044@@ -5,8 +5,10 @@
34045 #include <asm/pgtable.h>
34046 #include <asm/proto.h>
34047
34048+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34049 static int disable_nx;
34050
34051+#ifndef CONFIG_PAX_PAGEEXEC
34052 /*
34053 * noexec = on|off
34054 *
34055@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34056 return 0;
34057 }
34058 early_param("noexec", noexec_setup);
34059+#endif
34060+
34061+#endif
34062
34063 void x86_configure_nx(void)
34064 {
34065+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34066 if (cpu_has_nx && !disable_nx)
34067 __supported_pte_mask |= _PAGE_NX;
34068 else
34069+#endif
34070 __supported_pte_mask &= ~_PAGE_NX;
34071 }
34072
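The setup_nx.c hunk compiles the noexec= handling out when PAX_PAGEEXEC owns non-executable page enforcement, and compiles the whole disable_nx path out on configurations that can never have NX (32-bit non-PAE). For reference, the early_param() pattern the wrapped handler follows, sketched with a hypothetical parameter name:

    #include <linux/init.h>
    #include <linux/string.h>

    static int sketch_disable __initdata;

    static int __init sketch_setup(char *str)
    {
            if (!str)
                    return -EINVAL;
            if (!strncmp(str, "off", 3))
                    sketch_disable = 1;  /* parsed very early during boot */
            return 0;
    }
    early_param("sketchfeature", sketch_setup);
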
34073diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34074index 3250f23..7a97ba2 100644
34075--- a/arch/x86/mm/tlb.c
34076+++ b/arch/x86/mm/tlb.c
34077@@ -45,7 +45,11 @@ void leave_mm(int cpu)
34078 BUG();
34079 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34080 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34081+
34082+#ifndef CONFIG_PAX_PER_CPU_PGD
34083 load_cr3(swapper_pg_dir);
34084+#endif
34085+
34086 /*
34087 * This gets called in the idle path where RCU
34088 * functions differently. Tracing normally
34089diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34090new file mode 100644
34091index 0000000..dace51c
34092--- /dev/null
34093+++ b/arch/x86/mm/uderef_64.c
34094@@ -0,0 +1,37 @@
34095+#include <linux/mm.h>
34096+#include <asm/pgtable.h>
34097+#include <asm/uaccess.h>
34098+
34099+#ifdef CONFIG_PAX_MEMORY_UDEREF
34100+/* PaX: due to the special call convention these functions must
34101+ * - remain leaf functions under all configurations,
34102+ * - never be called directly, only dereferenced from the wrappers.
34103+ */
34104+void __pax_open_userland(void)
34105+{
34106+ unsigned int cpu;
34107+
34108+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34109+ return;
34110+
34111+ cpu = raw_get_cpu();
34112+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34113+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34114+ raw_put_cpu_no_resched();
34115+}
34116+EXPORT_SYMBOL(__pax_open_userland);
34117+
34118+void __pax_close_userland(void)
34119+{
34120+ unsigned int cpu;
34121+
34122+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34123+ return;
34124+
34125+ cpu = raw_get_cpu();
34126+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34127+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34128+ raw_put_cpu_no_resched();
34129+}
34130+EXPORT_SYMBOL(__pax_close_userland);
34131+#endif
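The new uderef_64.c implements UDEREF's userland window on x86-64 with PCIDs: each CPU keeps separate kernel and user PGDs, and opening or closing userland is a single CR3 write that switches PGD and PCID with PCID_NOFLUSH set, so no TLB flush is paid on the switch. As the comment says, callers never invoke these functions directly; they go through pax_open_userland()/pax_close_userland() wrappers defined elsewhere in this patch. Conceptually, a user access ends up bracketed like this (illustrative sketch only):

    static unsigned long sketch_copy_from_user(void *dst,
                                               const void __user *src,
                                               unsigned long n)
    {
            unsigned long ret;

            pax_open_userland();    /* CR3 -> per-CPU user PGD, PCID_USER */
            ret = __copy_from_user_inatomic(dst, src, n);
            pax_close_userland();   /* CR3 -> per-CPU kernel PGD, PCID_KERNEL */
            return ret;
    }
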
34132diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34133index 6440221..f84b5c7 100644
34134--- a/arch/x86/net/bpf_jit.S
34135+++ b/arch/x86/net/bpf_jit.S
34136@@ -9,6 +9,7 @@
34137 */
34138 #include <linux/linkage.h>
34139 #include <asm/dwarf2.h>
34140+#include <asm/alternative-asm.h>
34141
34142 /*
34143 * Calling convention :
34144@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34145 jle bpf_slow_path_word
34146 mov (SKBDATA,%rsi),%eax
34147 bswap %eax /* ntohl() */
34148+ pax_force_retaddr
34149 ret
34150
34151 sk_load_half:
34152@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34153 jle bpf_slow_path_half
34154 movzwl (SKBDATA,%rsi),%eax
34155 rol $8,%ax # ntohs()
34156+ pax_force_retaddr
34157 ret
34158
34159 sk_load_byte:
34160@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34161 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34162 jle bpf_slow_path_byte
34163 movzbl (SKBDATA,%rsi),%eax
34164+ pax_force_retaddr
34165 ret
34166
34167 /* rsi contains offset and can be scratched */
34168@@ -90,6 +94,7 @@ bpf_slow_path_word:
34169 js bpf_error
34170 mov - MAX_BPF_STACK + 32(%rbp),%eax
34171 bswap %eax
34172+ pax_force_retaddr
34173 ret
34174
34175 bpf_slow_path_half:
34176@@ -98,12 +103,14 @@ bpf_slow_path_half:
34177 mov - MAX_BPF_STACK + 32(%rbp),%ax
34178 rol $8,%ax
34179 movzwl %ax,%eax
34180+ pax_force_retaddr
34181 ret
34182
34183 bpf_slow_path_byte:
34184 bpf_slow_path_common(1)
34185 js bpf_error
34186 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34187+ pax_force_retaddr
34188 ret
34189
34190 #define sk_negative_common(SIZE) \
34191@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34192 sk_negative_common(4)
34193 mov (%rax), %eax
34194 bswap %eax
34195+ pax_force_retaddr
34196 ret
34197
34198 bpf_slow_path_half_neg:
34199@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34200 mov (%rax),%ax
34201 rol $8,%ax
34202 movzwl %ax,%eax
34203+ pax_force_retaddr
34204 ret
34205
34206 bpf_slow_path_byte_neg:
34207@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34208 .globl sk_load_byte_negative_offset
34209 sk_negative_common(1)
34210 movzbl (%rax), %eax
34211+ pax_force_retaddr
34212 ret
34213
34214 bpf_error:
34215@@ -156,4 +166,5 @@ bpf_error:
34216 mov - MAX_BPF_STACK + 16(%rbp),%r14
34217 mov - MAX_BPF_STACK + 24(%rbp),%r15
34218 leaveq
34219+ pax_force_retaddr
34220 ret
34221diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34222index 9875143..00f6656 100644
34223--- a/arch/x86/net/bpf_jit_comp.c
34224+++ b/arch/x86/net/bpf_jit_comp.c
34225@@ -13,7 +13,11 @@
34226 #include <linux/if_vlan.h>
34227 #include <asm/cacheflush.h>
34228
34229+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34230+int bpf_jit_enable __read_only;
34231+#else
34232 int bpf_jit_enable __read_mostly;
34233+#endif
34234
34235 /*
34236 * assembly code in arch/x86/net/bpf_jit.S
34237@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34238 static void jit_fill_hole(void *area, unsigned int size)
34239 {
34240 /* fill whole space with int3 instructions */
34241+ pax_open_kernel();
34242 memset(area, 0xcc, size);
34243+ pax_close_kernel();
34244 }
34245
34246 struct jit_context {
34247@@ -896,7 +902,9 @@ common_load:
34248 pr_err("bpf_jit_compile fatal error\n");
34249 return -EFAULT;
34250 }
34251+ pax_open_kernel();
34252 memcpy(image + proglen, temp, ilen);
34253+ pax_close_kernel();
34254 }
34255 proglen += ilen;
34256 addrs[i] = proglen;
34257@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34258
34259 if (image) {
34260 bpf_flush_icache(header, image + proglen);
34261- set_memory_ro((unsigned long)header, header->pages);
34262 prog->bpf_func = (void *)image;
34263 prog->jited = true;
34264 }
34265@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34266 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34267 struct bpf_binary_header *header = (void *)addr;
34268
34269- if (!fp->jited)
34270- goto free_filter;
34271+ if (fp->jited)
34272+ bpf_jit_binary_free(header);
34273
34274- set_memory_rw(addr, header->pages);
34275- bpf_jit_binary_free(header);
34276-
34277-free_filter:
34278 bpf_prog_unlock_free(fp);
34279 }
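Two related hardening moves in bpf_jit_comp.c: bpf_jit_enable becomes __read_only under GRKERNSEC_BPF_HARDEN so it cannot be flipped at runtime, and the JIT image is treated as permanently read-only -- every write into it (the int3 fill, the copy of each compiled chunk) goes through the KERNEXEC bracket, which is why the set_memory_ro()/set_memory_rw() round-trips disappear. The write pattern, reduced to a sketch:

    /* mirrors jit_fill_hole()/the memcpy above: the image stays RO for
     * everyone else, the bracket opens it just for this write */
    static void sketch_emit(void *image, const void *code, unsigned int len)
    {
            pax_open_kernel();
            memcpy(image, code, len);
            pax_close_kernel();
    }
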
34280diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34281index 5d04be5..2beeaa2 100644
34282--- a/arch/x86/oprofile/backtrace.c
34283+++ b/arch/x86/oprofile/backtrace.c
34284@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34285 struct stack_frame_ia32 *fp;
34286 unsigned long bytes;
34287
34288- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34289+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34290 if (bytes != 0)
34291 return NULL;
34292
34293- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34294+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34295
34296 oprofile_add_trace(bufhead[0].return_address);
34297
34298@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34299 struct stack_frame bufhead[2];
34300 unsigned long bytes;
34301
34302- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34303+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34304 if (bytes != 0)
34305 return NULL;
34306
34307@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34308 {
34309 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34310
34311- if (!user_mode_vm(regs)) {
34312+ if (!user_mode(regs)) {
34313 unsigned long stack = kernel_stack_pointer(regs);
34314 if (depth)
34315 dump_trace(NULL, regs, (unsigned long *)stack, 0,
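The casts feed copy_from_user_nmi() pointers that are typed as kernel pointers but actually reference user stack frames; __force_user/__force_kernel are PaX's sparse annotations for such deliberate address-space changes. A sketch of how annotations of this kind are defined under sparse (__CHECKER__) -- stand-in names, not the exact header text:

    #ifdef __CHECKER__
    # define __sk_user       __attribute__((noderef, address_space(1)))
    # define __sk_force      __attribute__((force))
    # define __sk_force_user __sk_force __sk_user
    #else
    # define __sk_user
    # define __sk_force_user
    #endif

    /* usage: retype a kernel-typed pointer that really points at userland,
     * e.g. copy_from_user_nmi(buf, (const char __sk_force_user *)head, n); */
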
34316diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34317index 1d2e639..f6ef82a 100644
34318--- a/arch/x86/oprofile/nmi_int.c
34319+++ b/arch/x86/oprofile/nmi_int.c
34320@@ -23,6 +23,7 @@
34321 #include <asm/nmi.h>
34322 #include <asm/msr.h>
34323 #include <asm/apic.h>
34324+#include <asm/pgtable.h>
34325
34326 #include "op_counter.h"
34327 #include "op_x86_model.h"
34328@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34329 if (ret)
34330 return ret;
34331
34332- if (!model->num_virt_counters)
34333- model->num_virt_counters = model->num_counters;
34334+ if (!model->num_virt_counters) {
34335+ pax_open_kernel();
34336+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34337+ pax_close_kernel();
34338+ }
34339
34340 mux_init(ops);
34341
34342diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34343index 50d86c0..7985318 100644
34344--- a/arch/x86/oprofile/op_model_amd.c
34345+++ b/arch/x86/oprofile/op_model_amd.c
34346@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34347 num_counters = AMD64_NUM_COUNTERS;
34348 }
34349
34350- op_amd_spec.num_counters = num_counters;
34351- op_amd_spec.num_controls = num_counters;
34352- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34353+ pax_open_kernel();
34354+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34355+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34356+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34357+ pax_close_kernel();
34358
34359 return 0;
34360 }
34361diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34362index d90528e..0127e2b 100644
34363--- a/arch/x86/oprofile/op_model_ppro.c
34364+++ b/arch/x86/oprofile/op_model_ppro.c
34365@@ -19,6 +19,7 @@
34366 #include <asm/msr.h>
34367 #include <asm/apic.h>
34368 #include <asm/nmi.h>
34369+#include <asm/pgtable.h>
34370
34371 #include "op_x86_model.h"
34372 #include "op_counter.h"
34373@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34374
34375 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34376
34377- op_arch_perfmon_spec.num_counters = num_counters;
34378- op_arch_perfmon_spec.num_controls = num_counters;
34379+ pax_open_kernel();
34380+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34381+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34382+ pax_close_kernel();
34383 }
34384
34385 static int arch_perfmon_init(struct oprofile_operations *ignore)
34386diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34387index 71e8a67..6a313bb 100644
34388--- a/arch/x86/oprofile/op_x86_model.h
34389+++ b/arch/x86/oprofile/op_x86_model.h
34390@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34391 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34392 struct op_msrs const * const msrs);
34393 #endif
34394-};
34395+} __do_const;
34396
34397 struct op_counter_config;
34398
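__do_const is an annotation consumed by PaX's constify gcc plugin: instances of the marked struct -- here a table of function pointers -- are forced into read-only data. That is why the earlier nmi_int.c and op_model_*.c hunks rewrite the few legitimate boot-time field updates as casts bracketed by pax_open_kernel(). The pattern, sketched with a no-op fallback for the attribute:

    #ifndef __do_const
    # define __do_const              /* real definition comes from the plugin */
    #endif

    struct sketch_ops {
            int (*setup)(void);
            unsigned int num_counters;
    } __do_const;                    /* instances land in .rodata */

    static struct sketch_ops sketch_spec = { .setup = NULL };

    static void sketch_fixup(unsigned int n)
    {
            pax_open_kernel();
            *(unsigned int *)&sketch_spec.num_counters = n; /* one-off write */
            pax_close_kernel();
    }
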
34399diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34400index 852aa4c..71613f2 100644
34401--- a/arch/x86/pci/intel_mid_pci.c
34402+++ b/arch/x86/pci/intel_mid_pci.c
34403@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34404 pci_mmcfg_late_init();
34405 pcibios_enable_irq = intel_mid_pci_irq_enable;
34406 pcibios_disable_irq = intel_mid_pci_irq_disable;
34407- pci_root_ops = intel_mid_pci_ops;
34408+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34409 pci_soc_mode = 1;
34410 /* Continue with standard init */
34411 return 1;
34412diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34413index 5dc6ca5..25c03f5 100644
34414--- a/arch/x86/pci/irq.c
34415+++ b/arch/x86/pci/irq.c
34416@@ -51,7 +51,7 @@ struct irq_router {
34417 struct irq_router_handler {
34418 u16 vendor;
34419 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34420-};
34421+} __do_const;
34422
34423 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34424 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34425@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34426 return 0;
34427 }
34428
34429-static __initdata struct irq_router_handler pirq_routers[] = {
34430+static __initconst const struct irq_router_handler pirq_routers[] = {
34431 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34432 { PCI_VENDOR_ID_AL, ali_router_probe },
34433 { PCI_VENDOR_ID_ITE, ite_router_probe },
34434@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34435 static void __init pirq_find_router(struct irq_router *r)
34436 {
34437 struct irq_routing_table *rt = pirq_table;
34438- struct irq_router_handler *h;
34439+ const struct irq_router_handler *h;
34440
34441 #ifdef CONFIG_PCI_BIOS
34442 if (!rt->signature) {
34443@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34444 return 0;
34445 }
34446
34447-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34448+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34449 {
34450 .callback = fix_broken_hp_bios_irq9,
34451 .ident = "HP Pavilion N5400 Series Laptop",
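Same constification theme in irq.c: probe and DMI tables that are only read during init move from __initdata to const __initconst -- __initconst requires the object itself to be const -- and the lookup side takes a const pointer. Minimal shape of such a table, with a hypothetical probe function:

    #include <linux/init.h>
    #include <linux/types.h>

    struct sketch_handler {
            u16 vendor;
            int (*probe)(u16 device);
    };

    static int __init sketch_probe_intel(u16 device) { return 0; }

    static const struct sketch_handler sketch_handlers[] __initconst = {
            { 0x8086, sketch_probe_intel },
    };
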
34452diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34453index 9b83b90..4112152 100644
34454--- a/arch/x86/pci/pcbios.c
34455+++ b/arch/x86/pci/pcbios.c
34456@@ -79,7 +79,7 @@ union bios32 {
34457 static struct {
34458 unsigned long address;
34459 unsigned short segment;
34460-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34461+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34462
34463 /*
34464 * Returns the entry point for the given service, NULL on error
34465@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34466 unsigned long length; /* %ecx */
34467 unsigned long entry; /* %edx */
34468 unsigned long flags;
34469+ struct desc_struct d, *gdt;
34470
34471 local_irq_save(flags);
34472- __asm__("lcall *(%%edi); cld"
34473+
34474+ gdt = get_cpu_gdt_table(smp_processor_id());
34475+
34476+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34477+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34478+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34479+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34480+
34481+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34482 : "=a" (return_code),
34483 "=b" (address),
34484 "=c" (length),
34485 "=d" (entry)
34486 : "0" (service),
34487 "1" (0),
34488- "D" (&bios32_indirect));
34489+ "D" (&bios32_indirect),
34490+ "r"(__PCIBIOS_DS)
34491+ : "memory");
34492+
34493+ pax_open_kernel();
34494+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34495+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34496+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34497+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34498+ pax_close_kernel();
34499+
34500 local_irq_restore(flags);
34501
34502 switch (return_code) {
34503- case 0:
34504- return address + entry;
34505- case 0x80: /* Not present */
34506- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34507- return 0;
34508- default: /* Shouldn't happen */
34509- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34510- service, return_code);
34511+ case 0: {
34512+ int cpu;
34513+ unsigned char flags;
34514+
34515+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34516+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34517+ printk(KERN_WARNING "bios32_service: not valid\n");
34518 return 0;
34519+ }
34520+ address = address + PAGE_OFFSET;
34521+ length += 16UL; /* some BIOSes underreport this... */
34522+ flags = 4;
34523+ if (length >= 64*1024*1024) {
34524+ length >>= PAGE_SHIFT;
34525+ flags |= 8;
34526+ }
34527+
34528+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34529+ gdt = get_cpu_gdt_table(cpu);
34530+ pack_descriptor(&d, address, length, 0x9b, flags);
34531+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34532+ pack_descriptor(&d, address, length, 0x93, flags);
34533+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34534+ }
34535+ return entry;
34536+ }
34537+ case 0x80: /* Not present */
34538+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34539+ return 0;
34540+ default: /* Shouldn't happen */
34541+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34542+ service, return_code);
34543+ return 0;
34544 }
34545 }
34546
34547 static struct {
34548 unsigned long address;
34549 unsigned short segment;
34550-} pci_indirect = { 0, __KERNEL_CS };
34551+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34552
34553-static int pci_bios_present;
34554+static int pci_bios_present __read_only;
34555
34556 static int __init check_pcibios(void)
34557 {
34558@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34559 unsigned long flags, pcibios_entry;
34560
34561 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34562- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34563+ pci_indirect.address = pcibios_entry;
34564
34565 local_irq_save(flags);
34566- __asm__(
34567- "lcall *(%%edi); cld\n\t"
34568+ __asm__("movw %w6, %%ds\n\t"
34569+ "lcall *%%ss:(%%edi); cld\n\t"
34570+ "push %%ss\n\t"
34571+ "pop %%ds\n\t"
34572 "jc 1f\n\t"
34573 "xor %%ah, %%ah\n"
34574 "1:"
34575@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34576 "=b" (ebx),
34577 "=c" (ecx)
34578 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34579- "D" (&pci_indirect)
34580+ "D" (&pci_indirect),
34581+ "r" (__PCIBIOS_DS)
34582 : "memory");
34583 local_irq_restore(flags);
34584
34585@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34586
34587 switch (len) {
34588 case 1:
34589- __asm__("lcall *(%%esi); cld\n\t"
34590+ __asm__("movw %w6, %%ds\n\t"
34591+ "lcall *%%ss:(%%esi); cld\n\t"
34592+ "push %%ss\n\t"
34593+ "pop %%ds\n\t"
34594 "jc 1f\n\t"
34595 "xor %%ah, %%ah\n"
34596 "1:"
34597@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34598 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34599 "b" (bx),
34600 "D" ((long)reg),
34601- "S" (&pci_indirect));
34602+ "S" (&pci_indirect),
34603+ "r" (__PCIBIOS_DS));
34604 /*
34605 * Zero-extend the result beyond 8 bits, do not trust the
34606 * BIOS having done it:
34607@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34608 *value &= 0xff;
34609 break;
34610 case 2:
34611- __asm__("lcall *(%%esi); cld\n\t"
34612+ __asm__("movw %w6, %%ds\n\t"
34613+ "lcall *%%ss:(%%esi); cld\n\t"
34614+ "push %%ss\n\t"
34615+ "pop %%ds\n\t"
34616 "jc 1f\n\t"
34617 "xor %%ah, %%ah\n"
34618 "1:"
34619@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34620 : "1" (PCIBIOS_READ_CONFIG_WORD),
34621 "b" (bx),
34622 "D" ((long)reg),
34623- "S" (&pci_indirect));
34624+ "S" (&pci_indirect),
34625+ "r" (__PCIBIOS_DS));
34626 /*
34627 * Zero-extend the result beyond 16 bits, do not trust the
34628 * BIOS having done it:
34629@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34630 *value &= 0xffff;
34631 break;
34632 case 4:
34633- __asm__("lcall *(%%esi); cld\n\t"
34634+ __asm__("movw %w6, %%ds\n\t"
34635+ "lcall *%%ss:(%%esi); cld\n\t"
34636+ "push %%ss\n\t"
34637+ "pop %%ds\n\t"
34638 "jc 1f\n\t"
34639 "xor %%ah, %%ah\n"
34640 "1:"
34641@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34642 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34643 "b" (bx),
34644 "D" ((long)reg),
34645- "S" (&pci_indirect));
34646+ "S" (&pci_indirect),
34647+ "r" (__PCIBIOS_DS));
34648 break;
34649 }
34650
34651@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34652
34653 switch (len) {
34654 case 1:
34655- __asm__("lcall *(%%esi); cld\n\t"
34656+ __asm__("movw %w6, %%ds\n\t"
34657+ "lcall *%%ss:(%%esi); cld\n\t"
34658+ "push %%ss\n\t"
34659+ "pop %%ds\n\t"
34660 "jc 1f\n\t"
34661 "xor %%ah, %%ah\n"
34662 "1:"
34663@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34664 "c" (value),
34665 "b" (bx),
34666 "D" ((long)reg),
34667- "S" (&pci_indirect));
34668+ "S" (&pci_indirect),
34669+ "r" (__PCIBIOS_DS));
34670 break;
34671 case 2:
34672- __asm__("lcall *(%%esi); cld\n\t"
34673+ __asm__("movw %w6, %%ds\n\t"
34674+ "lcall *%%ss:(%%esi); cld\n\t"
34675+ "push %%ss\n\t"
34676+ "pop %%ds\n\t"
34677 "jc 1f\n\t"
34678 "xor %%ah, %%ah\n"
34679 "1:"
34680@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34681 "c" (value),
34682 "b" (bx),
34683 "D" ((long)reg),
34684- "S" (&pci_indirect));
34685+ "S" (&pci_indirect),
34686+ "r" (__PCIBIOS_DS));
34687 break;
34688 case 4:
34689- __asm__("lcall *(%%esi); cld\n\t"
34690+ __asm__("movw %w6, %%ds\n\t"
34691+ "lcall *%%ss:(%%esi); cld\n\t"
34692+ "push %%ss\n\t"
34693+ "pop %%ds\n\t"
34694 "jc 1f\n\t"
34695 "xor %%ah, %%ah\n"
34696 "1:"
34697@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34698 "c" (value),
34699 "b" (bx),
34700 "D" ((long)reg),
34701- "S" (&pci_indirect));
34702+ "S" (&pci_indirect),
34703+ "r" (__PCIBIOS_DS));
34704 break;
34705 }
34706
34707@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34708
34709 DBG("PCI: Fetching IRQ routing table... ");
34710 __asm__("push %%es\n\t"
34711+ "movw %w8, %%ds\n\t"
34712 "push %%ds\n\t"
34713 "pop %%es\n\t"
34714- "lcall *(%%esi); cld\n\t"
34715+ "lcall *%%ss:(%%esi); cld\n\t"
34716 "pop %%es\n\t"
34717+ "push %%ss\n\t"
34718+ "pop %%ds\n"
34719 "jc 1f\n\t"
34720 "xor %%ah, %%ah\n"
34721 "1:"
34722@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34723 "1" (0),
34724 "D" ((long) &opt),
34725 "S" (&pci_indirect),
34726- "m" (opt)
34727+ "m" (opt),
34728+ "r" (__PCIBIOS_DS)
34729 : "memory");
34730 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34731 if (ret & 0xff00)
34732@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34733 {
34734 int ret;
34735
34736- __asm__("lcall *(%%esi); cld\n\t"
34737+ __asm__("movw %w5, %%ds\n\t"
34738+ "lcall *%%ss:(%%esi); cld\n\t"
34739+ "push %%ss\n\t"
34740+ "pop %%ds\n"
34741 "jc 1f\n\t"
34742 "xor %%ah, %%ah\n"
34743 "1:"
34744@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34745 : "0" (PCIBIOS_SET_PCI_HW_INT),
34746 "b" ((dev->bus->number << 8) | dev->devfn),
34747 "c" ((irq << 8) | (pin + 10)),
34748- "S" (&pci_indirect));
34749+ "S" (&pci_indirect),
34750+ "r" (__PCIBIOS_DS));
34751 return !(ret & 0xff00);
34752 }
34753 EXPORT_SYMBOL(pcibios_set_irq_routing);
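Instead of lcall-ing the BIOS through __KERNEL_CS with a flat address, the reworked pcbios.c gives the BIOS its own GDT slots: bios32_service() validates the base/length the BIOS returns and installs tightly-sized __PCIBIOS_CS/__PCIBIOS_DS descriptors on every CPU, and each call site switches %ds to __PCIBIOS_DS around the lcall. The descriptor install step in isolation, following the pack_descriptor()/write_gdt_entry() calls above -- a sketch:

    #include <asm/desc.h>

    static void sketch_install_bios_segs(int cpu, unsigned long base,
                                         unsigned long limit, unsigned flags)
    {
            struct desc_struct d;
            struct desc_struct *gdt = get_cpu_gdt_table(cpu);

            pack_descriptor(&d, base, limit, 0x9b /* code, RX */, flags);
            write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
            pack_descriptor(&d, base, limit, 0x93 /* data, RW */, flags);
            write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
    }
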
34754diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34755index 40e7cda..c7e6672 100644
34756--- a/arch/x86/platform/efi/efi_32.c
34757+++ b/arch/x86/platform/efi/efi_32.c
34758@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34759 {
34760 struct desc_ptr gdt_descr;
34761
34762+#ifdef CONFIG_PAX_KERNEXEC
34763+ struct desc_struct d;
34764+#endif
34765+
34766 local_irq_save(efi_rt_eflags);
34767
34768 load_cr3(initial_page_table);
34769 __flush_tlb_all();
34770
34771+#ifdef CONFIG_PAX_KERNEXEC
34772+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34773+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34774+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34775+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34776+#endif
34777+
34778 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34779 gdt_descr.size = GDT_SIZE - 1;
34780 load_gdt(&gdt_descr);
34781@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34782 {
34783 struct desc_ptr gdt_descr;
34784
34785+#ifdef CONFIG_PAX_KERNEXEC
34786+ struct desc_struct d;
34787+
34788+ memset(&d, 0, sizeof d);
34789+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34790+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34791+#endif
34792+
34793 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34794 gdt_descr.size = GDT_SIZE - 1;
34795 load_gdt(&gdt_descr);
34796
34797+#ifdef CONFIG_PAX_PER_CPU_PGD
34798+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34799+#else
34800 load_cr3(swapper_pg_dir);
34801+#endif
34802+
34803 __flush_tlb_all();
34804
34805 local_irq_restore(efi_rt_eflags);
34806diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34807index 17e80d8..9fa6e41 100644
34808--- a/arch/x86/platform/efi/efi_64.c
34809+++ b/arch/x86/platform/efi/efi_64.c
34810@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34811 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34812 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34813 }
34814+
34815+#ifdef CONFIG_PAX_PER_CPU_PGD
34816+ load_cr3(swapper_pg_dir);
34817+#endif
34818+
34819 __flush_tlb_all();
34820 }
34821
34822@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34823 for (pgd = 0; pgd < n_pgds; pgd++)
34824 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34825 kfree(save_pgd);
34826+
34827+#ifdef CONFIG_PAX_PER_CPU_PGD
34828+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34829+#endif
34830+
34831 __flush_tlb_all();
34832 local_irq_restore(efi_flags);
34833 early_code_mapping_set_exec(0);
34834@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34835 unsigned npages;
34836 pgd_t *pgd;
34837
34838- if (efi_enabled(EFI_OLD_MEMMAP))
34839+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34840+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34841+ * able to execute the EFI services.
34842+ */
34843+ if (__supported_pte_mask & _PAGE_NX) {
34844+ unsigned long addr = (unsigned long) __va(0);
34845+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34846+
34847+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34848+#ifdef CONFIG_PAX_PER_CPU_PGD
34849+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34850+#endif
34851+ set_pgd(pgd_offset_k(addr), pe);
34852+ }
34853+
34854 return 0;
34855+ }
34856
34857 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34858 pgd = __va(efi_scratch.efi_pgt);
34859diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34860index 040192b..7d3300f 100644
34861--- a/arch/x86/platform/efi/efi_stub_32.S
34862+++ b/arch/x86/platform/efi/efi_stub_32.S
34863@@ -6,7 +6,9 @@
34864 */
34865
34866 #include <linux/linkage.h>
34867+#include <linux/init.h>
34868 #include <asm/page_types.h>
34869+#include <asm/segment.h>
34870
34871 /*
34872 * efi_call_phys(void *, ...) is a function with variable parameters.
34873@@ -20,7 +22,7 @@
34874 * service functions will comply with gcc calling convention, too.
34875 */
34876
34877-.text
34878+__INIT
34879 ENTRY(efi_call_phys)
34880 /*
34881 * 0. The function can only be called in Linux kernel. So CS has been
34882@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34883 * The mapping of lower virtual memory has been created in prolog and
34884 * epilog.
34885 */
34886- movl $1f, %edx
34887- subl $__PAGE_OFFSET, %edx
34888- jmp *%edx
34889+#ifdef CONFIG_PAX_KERNEXEC
34890+ movl $(__KERNEXEC_EFI_DS), %edx
34891+ mov %edx, %ds
34892+ mov %edx, %es
34893+ mov %edx, %ss
34894+ addl $2f,(1f)
34895+ ljmp *(1f)
34896+
34897+__INITDATA
34898+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34899+.previous
34900+
34901+2:
34902+ subl $2b,(1b)
34903+#else
34904+ jmp 1f-__PAGE_OFFSET
34905 1:
34906+#endif
34907
34908 /*
34909 * 2. Now on the top of stack is the return
34910@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34911 * parameter 2, ..., param n. To make things easy, we save the return
34912 * address of efi_call_phys in a global variable.
34913 */
34914- popl %edx
34915- movl %edx, saved_return_addr
34916- /* get the function pointer into ECX*/
34917- popl %ecx
34918- movl %ecx, efi_rt_function_ptr
34919- movl $2f, %edx
34920- subl $__PAGE_OFFSET, %edx
34921- pushl %edx
34922+ popl (saved_return_addr)
34923+ popl (efi_rt_function_ptr)
34924
34925 /*
34926 * 3. Clear PG bit in %CR0.
34927@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34928 /*
34929 * 5. Call the physical function.
34930 */
34931- jmp *%ecx
34932+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34933
34934-2:
34935 /*
34936 * 6. After EFI runtime service returns, control will return to
34937 * following instruction. We'd better readjust stack pointer first.
34938@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34939 movl %cr0, %edx
34940 orl $0x80000000, %edx
34941 movl %edx, %cr0
34942- jmp 1f
34943-1:
34944+
34945 /*
34946 * 8. Now restore the virtual mode from flat mode by
34947 * adding EIP with PAGE_OFFSET.
34948 */
34949- movl $1f, %edx
34950- jmp *%edx
34951+#ifdef CONFIG_PAX_KERNEXEC
34952+ movl $(__KERNEL_DS), %edx
34953+ mov %edx, %ds
34954+ mov %edx, %es
34955+ mov %edx, %ss
34956+ ljmp $(__KERNEL_CS),$1f
34957+#else
34958+ jmp 1f+__PAGE_OFFSET
34959+#endif
34960 1:
34961
34962 /*
34963 * 9. Balance the stack. And because EAX contain the return value,
34964 * we'd better not clobber it.
34965 */
34966- leal efi_rt_function_ptr, %edx
34967- movl (%edx), %ecx
34968- pushl %ecx
34969+ pushl (efi_rt_function_ptr)
34970
34971 /*
34972- * 10. Push the saved return address onto the stack and return.
34973+ * 10. Return to the saved return address.
34974 */
34975- leal saved_return_addr, %edx
34976- movl (%edx), %ecx
34977- pushl %ecx
34978- ret
34979+ jmpl *(saved_return_addr)
34980 ENDPROC(efi_call_phys)
34981 .previous
34982
34983-.data
34984+__INITDATA
34985 saved_return_addr:
34986 .long 0
34987 efi_rt_function_ptr:
34988diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34989index 86d0f9e..6d499f4 100644
34990--- a/arch/x86/platform/efi/efi_stub_64.S
34991+++ b/arch/x86/platform/efi/efi_stub_64.S
34992@@ -11,6 +11,7 @@
34993 #include <asm/msr.h>
34994 #include <asm/processor-flags.h>
34995 #include <asm/page_types.h>
34996+#include <asm/alternative-asm.h>
34997
34998 #define SAVE_XMM \
34999 mov %rsp, %rax; \
35000@@ -88,6 +89,7 @@ ENTRY(efi_call)
35001 RESTORE_PGT
35002 addq $48, %rsp
35003 RESTORE_XMM
35004+ pax_force_retaddr 0, 1
35005 ret
35006 ENDPROC(efi_call)
35007
35008diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35009index 3005f0c..d06aeb0 100644
35010--- a/arch/x86/platform/intel-mid/intel-mid.c
35011+++ b/arch/x86/platform/intel-mid/intel-mid.c
35012@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35013 /* intel_mid_ops to store sub arch ops */
35014 struct intel_mid_ops *intel_mid_ops;
35015 /* getter function for sub arch ops*/
35016-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35017+static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35018 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35019 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35020
35021@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35022 {
35023 };
35024
35025-static void intel_mid_reboot(void)
35026+static void __noreturn intel_mid_reboot(void)
35027 {
35028 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35029+ BUG();
35030 }
35031
35032 static unsigned long __init intel_mid_calibrate_tsc(void)
35033diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35034index 3c1c386..59a68ed 100644
35035--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35036+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35037@@ -13,6 +13,6 @@
35038 /* For every CPU addition a new get_<cpuname>_ops interface needs
35039 * to be added.
35040 */
35041-extern void *get_penwell_ops(void);
35042-extern void *get_cloverview_ops(void);
35043-extern void *get_tangier_ops(void);
35044+extern const void *get_penwell_ops(void);
35045+extern const void *get_cloverview_ops(void);
35046+extern const void *get_tangier_ops(void);
35047diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35048index 23381d2..8ddc10e 100644
35049--- a/arch/x86/platform/intel-mid/mfld.c
35050+++ b/arch/x86/platform/intel-mid/mfld.c
35051@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35052 pm_power_off = mfld_power_off;
35053 }
35054
35055-void *get_penwell_ops(void)
35056+const void *get_penwell_ops(void)
35057 {
35058 return &penwell_ops;
35059 }
35060
35061-void *get_cloverview_ops(void)
35062+const void *get_cloverview_ops(void)
35063 {
35064 return &penwell_ops;
35065 }
35066diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35067index aaca917..66eadbc 100644
35068--- a/arch/x86/platform/intel-mid/mrfl.c
35069+++ b/arch/x86/platform/intel-mid/mrfl.c
35070@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35071 .arch_setup = tangier_arch_setup,
35072 };
35073
35074-void *get_tangier_ops(void)
35075+const void *get_tangier_ops(void)
35076 {
35077 return &tangier_ops;
35078 }
35079diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35080index c9a0838..fae0977 100644
35081--- a/arch/x86/platform/intel-quark/imr_selftest.c
35082+++ b/arch/x86/platform/intel-quark/imr_selftest.c
35083@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35084 */
35085 static void __init imr_self_test(void)
35086 {
35087- phys_addr_t base = virt_to_phys(&_text);
35088+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35089 size_t size = virt_to_phys(&__end_rodata) - base;
35090 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35091 int ret;
35092diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35093index d6ee929..3637cb5 100644
35094--- a/arch/x86/platform/olpc/olpc_dt.c
35095+++ b/arch/x86/platform/olpc/olpc_dt.c
35096@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35097 return res;
35098 }
35099
35100-static struct of_pdt_ops prom_olpc_ops __initdata = {
35101+static struct of_pdt_ops prom_olpc_ops __initconst = {
35102 .nextprop = olpc_dt_nextprop,
35103 .getproplen = olpc_dt_getproplen,
35104 .getproperty = olpc_dt_getproperty,
35105diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35106index 3e32ed5..cc0adc5 100644
35107--- a/arch/x86/power/cpu.c
35108+++ b/arch/x86/power/cpu.c
35109@@ -134,11 +134,8 @@ static void do_fpu_end(void)
35110 static void fix_processor_context(void)
35111 {
35112 int cpu = smp_processor_id();
35113- struct tss_struct *t = &per_cpu(init_tss, cpu);
35114-#ifdef CONFIG_X86_64
35115- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35116- tss_desc tss;
35117-#endif
35118+ struct tss_struct *t = init_tss + cpu;
35119+
35120 set_tss_desc(cpu, t); /*
35121 * This just modifies memory; should not be
35122 * necessary. But... This is necessary, because
35123@@ -147,10 +144,6 @@ static void fix_processor_context(void)
35124 */
35125
35126 #ifdef CONFIG_X86_64
35127- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35128- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35129- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35130-
35131 syscall_init(); /* This sets MSR_*STAR and related */
35132 #endif
35133 load_TR_desc(); /* This does ltr */
35134diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35135index 0b7a63d..0d0f2c2 100644
35136--- a/arch/x86/realmode/init.c
35137+++ b/arch/x86/realmode/init.c
35138@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35139 __va(real_mode_header->trampoline_header);
35140
35141 #ifdef CONFIG_X86_32
35142- trampoline_header->start = __pa_symbol(startup_32_smp);
35143+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35144+
35145+#ifdef CONFIG_PAX_KERNEXEC
35146+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35147+#endif
35148+
35149+ trampoline_header->boot_cs = __BOOT_CS;
35150 trampoline_header->gdt_limit = __BOOT_DS + 7;
35151 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35152 #else
35153@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35154 *trampoline_cr4_features = __read_cr4();
35155
35156 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35157- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35158+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35159 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35160 #endif
35161 }
35162diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35163index 2730d77..2e4cd19 100644
35164--- a/arch/x86/realmode/rm/Makefile
35165+++ b/arch/x86/realmode/rm/Makefile
35166@@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35167
35168 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35169 -I$(srctree)/arch/x86/boot
35170+ifdef CONSTIFY_PLUGIN
35171+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35172+endif
35173 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35174 GCOV_PROFILE := n
35175diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35176index a28221d..93c40f1 100644
35177--- a/arch/x86/realmode/rm/header.S
35178+++ b/arch/x86/realmode/rm/header.S
35179@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35180 #endif
35181 /* APM/BIOS reboot */
35182 .long pa_machine_real_restart_asm
35183-#ifdef CONFIG_X86_64
35184+#ifdef CONFIG_X86_32
35185+ .long __KERNEL_CS
35186+#else
35187 .long __KERNEL32_CS
35188 #endif
35189 END(real_mode_header)
35190diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35191index 48ddd76..c26749f 100644
35192--- a/arch/x86/realmode/rm/trampoline_32.S
35193+++ b/arch/x86/realmode/rm/trampoline_32.S
35194@@ -24,6 +24,12 @@
35195 #include <asm/page_types.h>
35196 #include "realmode.h"
35197
35198+#ifdef CONFIG_PAX_KERNEXEC
35199+#define ta(X) (X)
35200+#else
35201+#define ta(X) (pa_ ## X)
35202+#endif
35203+
35204 .text
35205 .code16
35206
35207@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35208
35209 cli # We should be safe anyway
35210
35211- movl tr_start, %eax # where we need to go
35212-
35213 movl $0xA5A5A5A5, trampoline_status
35214 # write marker for master knows we're running
35215
35216@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35217 movw $1, %dx # protected mode (PE) bit
35218 lmsw %dx # into protected mode
35219
35220- ljmpl $__BOOT_CS, $pa_startup_32
35221+ ljmpl *(trampoline_header)
35222
35223 .section ".text32","ax"
35224 .code32
35225@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35226 .balign 8
35227 GLOBAL(trampoline_header)
35228 tr_start: .space 4
35229- tr_gdt_pad: .space 2
35230+ tr_boot_cs: .space 2
35231 tr_gdt: .space 6
35232 END(trampoline_header)
35233
35234diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35235index dac7b20..72dbaca 100644
35236--- a/arch/x86/realmode/rm/trampoline_64.S
35237+++ b/arch/x86/realmode/rm/trampoline_64.S
35238@@ -93,6 +93,7 @@ ENTRY(startup_32)
35239 movl %edx, %gs
35240
35241 movl pa_tr_cr4, %eax
35242+ andl $~X86_CR4_PCIDE, %eax
35243 movl %eax, %cr4 # Enable PAE mode
35244
35245 # Setup trampoline 4 level pagetables
35246@@ -106,7 +107,7 @@ ENTRY(startup_32)
35247 wrmsr
35248
35249 # Enable paging and in turn activate Long Mode
35250- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35251+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35252 movl %eax, %cr0
35253
35254 /*
35255diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35256index 9e7e147..25a4158 100644
35257--- a/arch/x86/realmode/rm/wakeup_asm.S
35258+++ b/arch/x86/realmode/rm/wakeup_asm.S
35259@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35260 lgdtl pmode_gdt
35261
35262 /* This really couldn't... */
35263- movl pmode_entry, %eax
35264 movl pmode_cr0, %ecx
35265 movl %ecx, %cr0
35266- ljmpl $__KERNEL_CS, $pa_startup_32
35267- /* -> jmp *%eax in trampoline_32.S */
35268+
35269+ ljmpl *pmode_entry
35270 #else
35271 jmp trampoline_start
35272 #endif
35273diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35274index 604a37e..e49702a 100644
35275--- a/arch/x86/tools/Makefile
35276+++ b/arch/x86/tools/Makefile
35277@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35278
35279 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35280
35281-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35282+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35283 hostprogs-y += relocs
35284 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35285 PHONY += relocs
35286diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35287index 0c2fae8..88036b7 100644
35288--- a/arch/x86/tools/relocs.c
35289+++ b/arch/x86/tools/relocs.c
35290@@ -1,5 +1,7 @@
35291 /* This is included from relocs_32/64.c */
35292
35293+#include "../../../include/generated/autoconf.h"
35294+
35295 #define ElfW(type) _ElfW(ELF_BITS, type)
35296 #define _ElfW(bits, type) __ElfW(bits, type)
35297 #define __ElfW(bits, type) Elf##bits##_##type
35298@@ -11,6 +13,7 @@
35299 #define Elf_Sym ElfW(Sym)
35300
35301 static Elf_Ehdr ehdr;
35302+static Elf_Phdr *phdr;
35303
35304 struct relocs {
35305 uint32_t *offset;
35306@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35307 }
35308 }
35309
35310+static void read_phdrs(FILE *fp)
35311+{
35312+ unsigned int i;
35313+
35314+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35315+ if (!phdr) {
35316+ die("Unable to allocate %d program headers\n",
35317+ ehdr.e_phnum);
35318+ }
35319+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35320+ die("Seek to %d failed: %s\n",
35321+ ehdr.e_phoff, strerror(errno));
35322+ }
35323+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35324+ die("Cannot read ELF program headers: %s\n",
35325+ strerror(errno));
35326+ }
35327+ for(i = 0; i < ehdr.e_phnum; i++) {
35328+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35329+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35330+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35331+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35332+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35333+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35334+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35335+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35336+ }
35337+
35338+}
35339+
35340 static void read_shdrs(FILE *fp)
35341 {
35342- int i;
35343+ unsigned int i;
35344 Elf_Shdr shdr;
35345
35346 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35347@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35348
35349 static void read_strtabs(FILE *fp)
35350 {
35351- int i;
35352+ unsigned int i;
35353 for (i = 0; i < ehdr.e_shnum; i++) {
35354 struct section *sec = &secs[i];
35355 if (sec->shdr.sh_type != SHT_STRTAB) {
35356@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35357
35358 static void read_symtabs(FILE *fp)
35359 {
35360- int i,j;
35361+ unsigned int i,j;
35362 for (i = 0; i < ehdr.e_shnum; i++) {
35363 struct section *sec = &secs[i];
35364 if (sec->shdr.sh_type != SHT_SYMTAB) {
35365@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35366 }
35367
35368
35369-static void read_relocs(FILE *fp)
35370+static void read_relocs(FILE *fp, int use_real_mode)
35371 {
35372- int i,j;
35373+ unsigned int i,j;
35374+ uint32_t base;
35375+
35376 for (i = 0; i < ehdr.e_shnum; i++) {
35377 struct section *sec = &secs[i];
35378 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35379@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35380 die("Cannot read symbol table: %s\n",
35381 strerror(errno));
35382 }
35383+ base = 0;
35384+
35385+#ifdef CONFIG_X86_32
35386+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35387+ if (phdr[j].p_type != PT_LOAD )
35388+ continue;
35389+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35390+ continue;
35391+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35392+ break;
35393+ }
35394+#endif
35395+
35396 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35397 Elf_Rel *rel = &sec->reltab[j];
35398- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35399+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35400 rel->r_info = elf_xword_to_cpu(rel->r_info);
35401 #if (SHT_REL_TYPE == SHT_RELA)
35402 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35403@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35404
35405 static void print_absolute_symbols(void)
35406 {
35407- int i;
35408+ unsigned int i;
35409 const char *format;
35410
35411 if (ELF_BITS == 64)
35412@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35413 for (i = 0; i < ehdr.e_shnum; i++) {
35414 struct section *sec = &secs[i];
35415 char *sym_strtab;
35416- int j;
35417+ unsigned int j;
35418
35419 if (sec->shdr.sh_type != SHT_SYMTAB) {
35420 continue;
35421@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35422
35423 static void print_absolute_relocs(void)
35424 {
35425- int i, printed = 0;
35426+ unsigned int i, printed = 0;
35427 const char *format;
35428
35429 if (ELF_BITS == 64)
35430@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35431 struct section *sec_applies, *sec_symtab;
35432 char *sym_strtab;
35433 Elf_Sym *sh_symtab;
35434- int j;
35435+ unsigned int j;
35436 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35437 continue;
35438 }
35439@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35440 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35441 Elf_Sym *sym, const char *symname))
35442 {
35443- int i;
35444+ unsigned int i;
35445 /* Walk through the relocations */
35446 for (i = 0; i < ehdr.e_shnum; i++) {
35447 char *sym_strtab;
35448 Elf_Sym *sh_symtab;
35449 struct section *sec_applies, *sec_symtab;
35450- int j;
35451+ unsigned int j;
35452 struct section *sec = &secs[i];
35453
35454 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35455@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35456 {
35457 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35458 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35459+ char *sym_strtab = sec->link->link->strtab;
35460+
35461+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35462+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35463+ return 0;
35464+
35465+#ifdef CONFIG_PAX_KERNEXEC
35466+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35467+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35468+ return 0;
35469+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35470+ return 0;
35471+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35472+ return 0;
35473+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35474+ return 0;
35475+#endif
35476
35477 switch (r_type) {
35478 case R_386_NONE:
35479@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35480
35481 static void emit_relocs(int as_text, int use_real_mode)
35482 {
35483- int i;
35484+ unsigned int i;
35485 int (*write_reloc)(uint32_t, FILE *) = write32;
35486 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35487 const char *symname);
35488@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35489 {
35490 regex_init(use_real_mode);
35491 read_ehdr(fp);
35492+ read_phdrs(fp);
35493 read_shdrs(fp);
35494 read_strtabs(fp);
35495 read_symtabs(fp);
35496- read_relocs(fp);
35497+ read_relocs(fp, use_real_mode);
35498 if (ELF_BITS == 64)
35499 percpu_init();
35500 if (show_absolute_syms) {
35501diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35502index f40281e..92728c9 100644
35503--- a/arch/x86/um/mem_32.c
35504+++ b/arch/x86/um/mem_32.c
35505@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35506 gate_vma.vm_start = FIXADDR_USER_START;
35507 gate_vma.vm_end = FIXADDR_USER_END;
35508 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35509- gate_vma.vm_page_prot = __P101;
35510+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35511
35512 return 0;
35513 }
35514diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35515index 80ffa5b..a33bd15 100644
35516--- a/arch/x86/um/tls_32.c
35517+++ b/arch/x86/um/tls_32.c
35518@@ -260,7 +260,7 @@ out:
35519 if (unlikely(task == current &&
35520 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35521 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35522- "without flushed TLS.", current->pid);
35523+ "without flushed TLS.", task_pid_nr(current));
35524 }
35525
35526 return 0;
35527diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35528index 7b9be98..39bb57f 100644
35529--- a/arch/x86/vdso/Makefile
35530+++ b/arch/x86/vdso/Makefile
35531@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35532 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35533 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35534
35535-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35536+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35537 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35538 GCOV_PROFILE := n
35539
35540diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35541index 0224987..c7d65a5 100644
35542--- a/arch/x86/vdso/vdso2c.h
35543+++ b/arch/x86/vdso/vdso2c.h
35544@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35545 unsigned long load_size = -1; /* Work around bogus warning */
35546 unsigned long mapping_size;
35547 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35548- int i;
35549+ unsigned int i;
35550 unsigned long j;
35551 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35552 *alt_sec = NULL;
35553diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35554index e904c27..b9eaa03 100644
35555--- a/arch/x86/vdso/vdso32-setup.c
35556+++ b/arch/x86/vdso/vdso32-setup.c
35557@@ -14,6 +14,7 @@
35558 #include <asm/cpufeature.h>
35559 #include <asm/processor.h>
35560 #include <asm/vdso.h>
35561+#include <asm/mman.h>
35562
35563 #ifdef CONFIG_COMPAT_VDSO
35564 #define VDSO_DEFAULT 0
35565diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35566index 1c9f750..cfddb1a 100644
35567--- a/arch/x86/vdso/vma.c
35568+++ b/arch/x86/vdso/vma.c
35569@@ -19,10 +19,7 @@
35570 #include <asm/page.h>
35571 #include <asm/hpet.h>
35572 #include <asm/desc.h>
35573-
35574-#if defined(CONFIG_X86_64)
35575-unsigned int __read_mostly vdso64_enabled = 1;
35576-#endif
35577+#include <asm/mman.h>
35578
35579 void __init init_vdso_image(const struct vdso_image *image)
35580 {
35581@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35582 .pages = no_pages,
35583 };
35584
35585+#ifdef CONFIG_PAX_RANDMMAP
35586+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35587+ calculate_addr = false;
35588+#endif
35589+
35590 if (calculate_addr) {
35591 addr = vdso_addr(current->mm->start_stack,
35592 image->size - image->sym_vvar_start);
35593@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35594 down_write(&mm->mmap_sem);
35595
35596 addr = get_unmapped_area(NULL, addr,
35597- image->size - image->sym_vvar_start, 0, 0);
35598+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35599 if (IS_ERR_VALUE(addr)) {
35600 ret = addr;
35601 goto up_fail;
35602 }
35603
35604 text_start = addr - image->sym_vvar_start;
35605- current->mm->context.vdso = (void __user *)text_start;
35606+ mm->context.vdso = text_start;
35607
35608 /*
35609 * MAYWRITE to allow gdb to COW and set breakpoints
35610@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35611 hpet_address >> PAGE_SHIFT,
35612 PAGE_SIZE,
35613 pgprot_noncached(PAGE_READONLY));
35614-
35615- if (ret)
35616- goto up_fail;
35617 }
35618 #endif
35619
35620 up_fail:
35621 if (ret)
35622- current->mm->context.vdso = NULL;
35623+ current->mm->context.vdso = 0;
35624
35625 up_write(&mm->mmap_sem);
35626 return ret;
35627@@ -191,8 +190,8 @@ static int load_vdso32(void)
35628
35629 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35630 current_thread_info()->sysenter_return =
35631- current->mm->context.vdso +
35632- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35633+ (void __force_user *)(current->mm->context.vdso +
35634+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35635
35636 return 0;
35637 }
35638@@ -201,9 +200,6 @@ static int load_vdso32(void)
35639 #ifdef CONFIG_X86_64
35640 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35641 {
35642- if (!vdso64_enabled)
35643- return 0;
35644-
35645 return map_vdso(&vdso_image_64, true);
35646 }
35647
35648@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35649 int uses_interp)
35650 {
35651 #ifdef CONFIG_X86_X32_ABI
35652- if (test_thread_flag(TIF_X32)) {
35653- if (!vdso64_enabled)
35654- return 0;
35655-
35656+ if (test_thread_flag(TIF_X32))
35657 return map_vdso(&vdso_image_x32, true);
35658- }
35659 #endif
35660
35661 return load_vdso32();
35662@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35663 #endif
35664
35665 #ifdef CONFIG_X86_64
35666-static __init int vdso_setup(char *s)
35667-{
35668- vdso64_enabled = simple_strtoul(s, NULL, 0);
35669- return 0;
35670-}
35671-__setup("vdso=", vdso_setup);
35672-#endif
35673-
35674-#ifdef CONFIG_X86_64
35675 static void vgetcpu_cpu_init(void *arg)
35676 {
35677 int cpu = smp_processor_id();
35678diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35679index e88fda8..76ce7ce 100644
35680--- a/arch/x86/xen/Kconfig
35681+++ b/arch/x86/xen/Kconfig
35682@@ -9,6 +9,7 @@ config XEN
35683 select XEN_HAVE_PVMMU
35684 depends on X86_64 || (X86_32 && X86_PAE)
35685 depends on X86_TSC
35686+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35687 help
35688 This is the Linux Xen port. Enabling this will allow the
35689 kernel to boot in a paravirtualized environment under the
35690diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35691index 5240f56..0c12163 100644
35692--- a/arch/x86/xen/enlighten.c
35693+++ b/arch/x86/xen/enlighten.c
35694@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35695
35696 struct shared_info xen_dummy_shared_info;
35697
35698-void *xen_initial_gdt;
35699-
35700 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35701 __read_mostly int xen_have_vector_callback;
35702 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35703@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35704 {
35705 unsigned long va = dtr->address;
35706 unsigned int size = dtr->size + 1;
35707- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35708- unsigned long frames[pages];
35709+ unsigned long frames[65536 / PAGE_SIZE];
35710 int f;
35711
35712 /*
35713@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35714 {
35715 unsigned long va = dtr->address;
35716 unsigned int size = dtr->size + 1;
35717- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35718- unsigned long frames[pages];
35719+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35720 int f;
35721
35722 /*
35723@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35724 * 8-byte entries, or 16 4k pages..
35725 */
35726
35727- BUG_ON(size > 65536);
35728+ BUG_ON(size > GDT_SIZE);
35729 BUG_ON(va & ~PAGE_MASK);
35730
35731 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35732@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35733 return 0;
35734 }
35735
35736-static void set_xen_basic_apic_ops(void)
35737+static void __init set_xen_basic_apic_ops(void)
35738 {
35739 apic->read = xen_apic_read;
35740 apic->write = xen_apic_write;
35741@@ -1308,30 +1304,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35742 #endif
35743 };
35744
35745-static void xen_reboot(int reason)
35746+static __noreturn void xen_reboot(int reason)
35747 {
35748 struct sched_shutdown r = { .reason = reason };
35749
35750- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35751- BUG();
35752+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35753+ BUG();
35754 }
35755
35756-static void xen_restart(char *msg)
35757+static __noreturn void xen_restart(char *msg)
35758 {
35759 xen_reboot(SHUTDOWN_reboot);
35760 }
35761
35762-static void xen_emergency_restart(void)
35763+static __noreturn void xen_emergency_restart(void)
35764 {
35765 xen_reboot(SHUTDOWN_reboot);
35766 }
35767
35768-static void xen_machine_halt(void)
35769+static __noreturn void xen_machine_halt(void)
35770 {
35771 xen_reboot(SHUTDOWN_poweroff);
35772 }
35773
35774-static void xen_machine_power_off(void)
35775+static __noreturn void xen_machine_power_off(void)
35776 {
35777 if (pm_power_off)
35778 pm_power_off();
35779@@ -1484,8 +1480,11 @@ static void __ref xen_setup_gdt(int cpu)
35780 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35781 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35782
35783- setup_stack_canary_segment(0);
35784- switch_to_new_gdt(0);
35785+ setup_stack_canary_segment(cpu);
35786+#ifdef CONFIG_X86_64
35787+ load_percpu_segment(cpu);
35788+#endif
35789+ switch_to_new_gdt(cpu);
35790
35791 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35792 pv_cpu_ops.load_gdt = xen_load_gdt;
35793@@ -1600,7 +1599,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35794 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35795
35796 /* Work out if we support NX */
35797- x86_configure_nx();
35798+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35799+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35800+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35801+ unsigned l, h;
35802+
35803+ __supported_pte_mask |= _PAGE_NX;
35804+ rdmsr(MSR_EFER, l, h);
35805+ l |= EFER_NX;
35806+ wrmsr(MSR_EFER, l, h);
35807+ }
35808+#endif
35809
35810 /* Get mfn list */
35811 xen_build_dynamic_phys_to_machine();
35812@@ -1628,13 +1637,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35813
35814 machine_ops = xen_machine_ops;
35815
35816- /*
35817- * The only reliable way to retain the initial address of the
35818- * percpu gdt_page is to remember it here, so we can go and
35819- * mark it RW later, when the initial percpu area is freed.
35820- */
35821- xen_initial_gdt = &per_cpu(gdt_page, 0);
35822-
35823 xen_smp_init();
35824
35825 #ifdef CONFIG_ACPI_NUMA
35826diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35827index adca9e2..35d6a98 100644
35828--- a/arch/x86/xen/mmu.c
35829+++ b/arch/x86/xen/mmu.c
35830@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35831 return val;
35832 }
35833
35834-static pteval_t pte_pfn_to_mfn(pteval_t val)
35835+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35836 {
35837 if (val & _PAGE_PRESENT) {
35838 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35839@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35840 * L3_k[511] -> level2_fixmap_pgt */
35841 convert_pfn_mfn(level3_kernel_pgt);
35842
35843+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35844+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35845+ convert_pfn_mfn(level3_vmemmap_pgt);
35846 /* L3_k[511][506] -> level1_fixmap_pgt */
35847+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35848 convert_pfn_mfn(level2_fixmap_pgt);
35849 }
35850 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35851@@ -1860,11 +1864,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35852 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35853 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35854 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35855+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35856+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35857+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35858 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35859 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35860+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35861 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35862 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35863 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35864+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35865
35866 /* Pin down new L4 */
35867 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35868@@ -2048,6 +2057,7 @@ static void __init xen_post_allocator_init(void)
35869 pv_mmu_ops.set_pud = xen_set_pud;
35870 #if PAGETABLE_LEVELS == 4
35871 pv_mmu_ops.set_pgd = xen_set_pgd;
35872+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35873 #endif
35874
35875 /* This will work as long as patching hasn't happened yet
35876@@ -2126,6 +2136,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35877 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35878 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35879 .set_pgd = xen_set_pgd_hyper,
35880+ .set_pgd_batched = xen_set_pgd_hyper,
35881
35882 .alloc_pud = xen_alloc_pmd_init,
35883 .release_pud = xen_release_pmd_init,
35884diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35885index 08e8489..b1e182f 100644
35886--- a/arch/x86/xen/smp.c
35887+++ b/arch/x86/xen/smp.c
35888@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35889
35890 if (xen_pv_domain()) {
35891 if (!xen_feature(XENFEAT_writable_page_tables))
35892- /* We've switched to the "real" per-cpu gdt, so make
35893- * sure the old memory can be recycled. */
35894- make_lowmem_page_readwrite(xen_initial_gdt);
35895-
35896 #ifdef CONFIG_X86_32
35897 /*
35898 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35899 * expects __USER_DS
35900 */
35901- loadsegment(ds, __USER_DS);
35902- loadsegment(es, __USER_DS);
35903+ loadsegment(ds, __KERNEL_DS);
35904+ loadsegment(es, __KERNEL_DS);
35905 #endif
35906
35907 xen_filter_cpu_maps();
35908@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35909 #ifdef CONFIG_X86_32
35910 /* Note: PVH is not yet supported on x86_32. */
35911 ctxt->user_regs.fs = __KERNEL_PERCPU;
35912- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35913+ savesegment(gs, ctxt->user_regs.gs);
35914 #endif
35915 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35916
35917@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35918 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35919 ctxt->flags = VGCF_IN_KERNEL;
35920 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35921- ctxt->user_regs.ds = __USER_DS;
35922- ctxt->user_regs.es = __USER_DS;
35923+ ctxt->user_regs.ds = __KERNEL_DS;
35924+ ctxt->user_regs.es = __KERNEL_DS;
35925 ctxt->user_regs.ss = __KERNEL_DS;
35926
35927 xen_copy_trap_info(ctxt->trap_ctxt);
35928@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35929 int rc;
35930
35931 per_cpu(current_task, cpu) = idle;
35932+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35933 #ifdef CONFIG_X86_32
35934 irq_ctx_init(cpu);
35935 #else
35936 clear_tsk_thread_flag(idle, TIF_FORK);
35937 #endif
35938- per_cpu(kernel_stack, cpu) =
35939- (unsigned long)task_stack_page(idle) -
35940- KERNEL_STACK_OFFSET + THREAD_SIZE;
35941+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35942
35943 xen_setup_runstate_info(cpu);
35944 xen_setup_timer(cpu);
35945@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35946
35947 void __init xen_smp_init(void)
35948 {
35949- smp_ops = xen_smp_ops;
35950+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35951 xen_fill_possible_map();
35952 }
35953
35954diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35955index fd92a64..1f72641 100644
35956--- a/arch/x86/xen/xen-asm_32.S
35957+++ b/arch/x86/xen/xen-asm_32.S
35958@@ -99,7 +99,7 @@ ENTRY(xen_iret)
35959 pushw %fs
35960 movl $(__KERNEL_PERCPU), %eax
35961 movl %eax, %fs
35962- movl %fs:xen_vcpu, %eax
35963+ mov PER_CPU_VAR(xen_vcpu), %eax
35964 POP_FS
35965 #else
35966 movl %ss:xen_vcpu, %eax
35967diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35968index 674b2225..f1f5dc1 100644
35969--- a/arch/x86/xen/xen-head.S
35970+++ b/arch/x86/xen/xen-head.S
35971@@ -39,6 +39,17 @@ ENTRY(startup_xen)
35972 #ifdef CONFIG_X86_32
35973 mov %esi,xen_start_info
35974 mov $init_thread_union+THREAD_SIZE,%esp
35975+#ifdef CONFIG_SMP
35976+ movl $cpu_gdt_table,%edi
35977+ movl $__per_cpu_load,%eax
35978+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35979+ rorl $16,%eax
35980+ movb %al,__KERNEL_PERCPU + 4(%edi)
35981+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35982+ movl $__per_cpu_end - 1,%eax
35983+ subl $__per_cpu_start,%eax
35984+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35985+#endif
35986 #else
35987 mov %rsi,xen_start_info
35988 mov $init_thread_union+THREAD_SIZE,%rsp
35989diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35990index 9e195c6..523ed36 100644
35991--- a/arch/x86/xen/xen-ops.h
35992+++ b/arch/x86/xen/xen-ops.h
35993@@ -16,8 +16,6 @@ void xen_syscall_target(void);
35994 void xen_syscall32_target(void);
35995 #endif
35996
35997-extern void *xen_initial_gdt;
35998-
35999 struct trap_info;
36000 void xen_copy_trap_info(struct trap_info *traps);
36001
36002diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36003index 525bd3d..ef888b1 100644
36004--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36005+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36006@@ -119,9 +119,9 @@
36007 ----------------------------------------------------------------------*/
36008
36009 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36010-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36011 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36012 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36013+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36014
36015 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36016 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36017diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36018index 2f33760..835e50a 100644
36019--- a/arch/xtensa/variants/fsf/include/variant/core.h
36020+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36021@@ -11,6 +11,7 @@
36022 #ifndef _XTENSA_CORE_H
36023 #define _XTENSA_CORE_H
36024
36025+#include <linux/const.h>
36026
36027 /****************************************************************************
36028 Parameters Useful for Any Code, USER or PRIVILEGED
36029@@ -112,9 +113,9 @@
36030 ----------------------------------------------------------------------*/
36031
36032 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36033-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36034 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36035 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36036+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36037
36038 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36039 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36040diff --git a/block/bio.c b/block/bio.c
36041index f66a4ea..73ddf55 100644
36042--- a/block/bio.c
36043+++ b/block/bio.c
36044@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36045 /*
36046 * Overflow, abort
36047 */
36048- if (end < start)
36049+ if (end < start || end - start > INT_MAX - nr_pages)
36050 return ERR_PTR(-EINVAL);
36051
36052 nr_pages += end - start;
36053@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36054 /*
36055 * Overflow, abort
36056 */
36057- if (end < start)
36058+ if (end < start || end - start > INT_MAX - nr_pages)
36059 return ERR_PTR(-EINVAL);
36060
36061 nr_pages += end - start;
36062diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36063index 0736729..2ec3b48 100644
36064--- a/block/blk-iopoll.c
36065+++ b/block/blk-iopoll.c
36066@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36067 }
36068 EXPORT_SYMBOL(blk_iopoll_complete);
36069
36070-static void blk_iopoll_softirq(struct softirq_action *h)
36071+static __latent_entropy void blk_iopoll_softirq(void)
36072 {
36073 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36074 int rearm = 0, budget = blk_iopoll_budget;
36075diff --git a/block/blk-map.c b/block/blk-map.c
36076index b8d2725..08c52b0 100644
36077--- a/block/blk-map.c
36078+++ b/block/blk-map.c
36079@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36080 if (!len || !kbuf)
36081 return -EINVAL;
36082
36083- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36084+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36085 if (do_copy)
36086 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36087 else
36088diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36089index 53b1737..08177d2e 100644
36090--- a/block/blk-softirq.c
36091+++ b/block/blk-softirq.c
36092@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36093 * Softirq action handler - move entries to local list and loop over them
36094 * while passing them to the queue registered handler.
36095 */
36096-static void blk_done_softirq(struct softirq_action *h)
36097+static __latent_entropy void blk_done_softirq(void)
36098 {
36099 struct list_head *cpu_list, local_list;
36100
36101diff --git a/block/bsg.c b/block/bsg.c
36102index d214e92..9649863 100644
36103--- a/block/bsg.c
36104+++ b/block/bsg.c
36105@@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36106 struct sg_io_v4 *hdr, struct bsg_device *bd,
36107 fmode_t has_write_perm)
36108 {
36109+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36110+ unsigned char *cmdptr;
36111+
36112 if (hdr->request_len > BLK_MAX_CDB) {
36113 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36114 if (!rq->cmd)
36115 return -ENOMEM;
36116- }
36117+ cmdptr = rq->cmd;
36118+ } else
36119+ cmdptr = tmpcmd;
36120
36121- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36122+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36123 hdr->request_len))
36124 return -EFAULT;
36125
36126+ if (cmdptr != rq->cmd)
36127+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36128+
36129 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36130 if (blk_verify_command(rq->cmd, has_write_perm))
36131 return -EPERM;
36132diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36133index f678c73..f35aa18 100644
36134--- a/block/compat_ioctl.c
36135+++ b/block/compat_ioctl.c
36136@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36137 cgc = compat_alloc_user_space(sizeof(*cgc));
36138 cgc32 = compat_ptr(arg);
36139
36140- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36141+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36142 get_user(data, &cgc32->buffer) ||
36143 put_user(compat_ptr(data), &cgc->buffer) ||
36144 copy_in_user(&cgc->buflen, &cgc32->buflen,
36145@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36146 err |= __get_user(f->spec1, &uf->spec1);
36147 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36148 err |= __get_user(name, &uf->name);
36149- f->name = compat_ptr(name);
36150+ f->name = (void __force_kernel *)compat_ptr(name);
36151 if (err) {
36152 err = -EFAULT;
36153 goto out;
36154diff --git a/block/genhd.c b/block/genhd.c
36155index 0a536dc..b8f7aca 100644
36156--- a/block/genhd.c
36157+++ b/block/genhd.c
36158@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36159
36160 /*
36161 * Register device numbers dev..(dev+range-1)
36162- * range must be nonzero
36163+ * Noop if @range is zero.
36164 * The hash chain is sorted on range, so that subranges can override.
36165 */
36166 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36167 struct kobject *(*probe)(dev_t, int *, void *),
36168 int (*lock)(dev_t, void *), void *data)
36169 {
36170- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36171+ if (range)
36172+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36173 }
36174
36175 EXPORT_SYMBOL(blk_register_region);
36176
36177+/* undo blk_register_region(), noop if @range is zero */
36178 void blk_unregister_region(dev_t devt, unsigned long range)
36179 {
36180- kobj_unmap(bdev_map, devt, range);
36181+ if (range)
36182+ kobj_unmap(bdev_map, devt, range);
36183 }
36184
36185 EXPORT_SYMBOL(blk_unregister_region);
36186diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36187index 26cb624..a49c3a5 100644
36188--- a/block/partitions/efi.c
36189+++ b/block/partitions/efi.c
36190@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36191 if (!gpt)
36192 return NULL;
36193
36194+ if (!le32_to_cpu(gpt->num_partition_entries))
36195+ return NULL;
36196+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36197+ if (!pte)
36198+ return NULL;
36199+
36200 count = le32_to_cpu(gpt->num_partition_entries) *
36201 le32_to_cpu(gpt->sizeof_partition_entry);
36202- if (!count)
36203- return NULL;
36204- pte = kmalloc(count, GFP_KERNEL);
36205- if (!pte)
36206- return NULL;
36207-
36208 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36209 (u8 *) pte, count) < count) {
36210 kfree(pte);
36211diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36212index e1f71c3..02d295a 100644
36213--- a/block/scsi_ioctl.c
36214+++ b/block/scsi_ioctl.c
36215@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36216 return put_user(0, p);
36217 }
36218
36219-static int sg_get_timeout(struct request_queue *q)
36220+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36221 {
36222 return jiffies_to_clock_t(q->sg_timeout);
36223 }
36224@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36225 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36226 struct sg_io_hdr *hdr, fmode_t mode)
36227 {
36228- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36229+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36230+ unsigned char *cmdptr;
36231+
36232+ if (rq->cmd != rq->__cmd)
36233+ cmdptr = rq->cmd;
36234+ else
36235+ cmdptr = tmpcmd;
36236+
36237+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36238 return -EFAULT;
36239+
36240+ if (cmdptr != rq->cmd)
36241+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36242+
36243 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36244 return -EPERM;
36245
36246@@ -422,6 +434,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36247 int err;
36248 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36249 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36250+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36251+ unsigned char *cmdptr;
36252
36253 if (!sic)
36254 return -EINVAL;
36255@@ -460,9 +474,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36256 */
36257 err = -EFAULT;
36258 rq->cmd_len = cmdlen;
36259- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36260+
36261+ if (rq->cmd != rq->__cmd)
36262+ cmdptr = rq->cmd;
36263+ else
36264+ cmdptr = tmpcmd;
36265+
36266+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36267 goto error;
36268
36269+ if (rq->cmd != cmdptr)
36270+ memcpy(rq->cmd, cmdptr, cmdlen);
36271+
36272 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36273 goto error;
36274
36275diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36276index 650afac1..f3307de 100644
36277--- a/crypto/cryptd.c
36278+++ b/crypto/cryptd.c
36279@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36280
36281 struct cryptd_blkcipher_request_ctx {
36282 crypto_completion_t complete;
36283-};
36284+} __no_const;
36285
36286 struct cryptd_hash_ctx {
36287 struct crypto_shash *child;
36288@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36289
36290 struct cryptd_aead_request_ctx {
36291 crypto_completion_t complete;
36292-};
36293+} __no_const;
36294
36295 static void cryptd_queue_worker(struct work_struct *work);
36296
36297diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36298index c305d41..a96de79 100644
36299--- a/crypto/pcrypt.c
36300+++ b/crypto/pcrypt.c
36301@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36302 int ret;
36303
36304 pinst->kobj.kset = pcrypt_kset;
36305- ret = kobject_add(&pinst->kobj, NULL, name);
36306+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36307 if (!ret)
36308 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36309
36310diff --git a/crypto/zlib.c b/crypto/zlib.c
36311index 0eefa9d..0fa3d29 100644
36312--- a/crypto/zlib.c
36313+++ b/crypto/zlib.c
36314@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36315 zlib_comp_exit(ctx);
36316
36317 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36318- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36319+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36320 : MAX_WBITS;
36321 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36322- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36323+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36324 : DEF_MEM_LEVEL;
36325
36326 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36327diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36328index 3b37676..898edfa 100644
36329--- a/drivers/acpi/acpica/hwxfsleep.c
36330+++ b/drivers/acpi/acpica/hwxfsleep.c
36331@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36332 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36333
36334 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36335- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36336- acpi_hw_extended_sleep},
36337- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36338- acpi_hw_extended_wake_prep},
36339- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36340+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36341+ .extended_function = acpi_hw_extended_sleep},
36342+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36343+ .extended_function = acpi_hw_extended_wake_prep},
36344+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36345+ .extended_function = acpi_hw_extended_wake}
36346 };
36347
36348 /*
36349diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36350index 16129c7..8b675cd 100644
36351--- a/drivers/acpi/apei/apei-internal.h
36352+++ b/drivers/acpi/apei/apei-internal.h
36353@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36354 struct apei_exec_ins_type {
36355 u32 flags;
36356 apei_exec_ins_func_t run;
36357-};
36358+} __do_const;
36359
36360 struct apei_exec_context {
36361 u32 ip;
36362diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36363index e82d097..0c855c1 100644
36364--- a/drivers/acpi/apei/ghes.c
36365+++ b/drivers/acpi/apei/ghes.c
36366@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36367 const struct acpi_hest_generic *generic,
36368 const struct acpi_hest_generic_status *estatus)
36369 {
36370- static atomic_t seqno;
36371+ static atomic_unchecked_t seqno;
36372 unsigned int curr_seqno;
36373 char pfx_seq[64];
36374
36375@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36376 else
36377 pfx = KERN_ERR;
36378 }
36379- curr_seqno = atomic_inc_return(&seqno);
36380+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36381 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36382 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36383 pfx_seq, generic->header.source_id);
36384diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36385index a83e3c6..c3d617f 100644
36386--- a/drivers/acpi/bgrt.c
36387+++ b/drivers/acpi/bgrt.c
36388@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36389 if (!bgrt_image)
36390 return -ENODEV;
36391
36392- bin_attr_image.private = bgrt_image;
36393- bin_attr_image.size = bgrt_image_size;
36394+ pax_open_kernel();
36395+ *(void **)&bin_attr_image.private = bgrt_image;
36396+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36397+ pax_close_kernel();
36398
36399 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36400 if (!bgrt_kobj)
36401diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36402index 9b693d5..8953d54 100644
36403--- a/drivers/acpi/blacklist.c
36404+++ b/drivers/acpi/blacklist.c
36405@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36406 u32 is_critical_error;
36407 };
36408
36409-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36410+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36411
36412 /*
36413 * POLICY: If *anything* doesn't work, put it on the blacklist.
36414@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36415 return 0;
36416 }
36417
36418-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36419+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36420 {
36421 .callback = dmi_disable_osi_vista,
36422 .ident = "Fujitsu Siemens",
36423diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36424index 8b67bd0..b59593e 100644
36425--- a/drivers/acpi/bus.c
36426+++ b/drivers/acpi/bus.c
36427@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36428 }
36429 #endif
36430
36431-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36432+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36433 /*
36434 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36435 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36436@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36437 {}
36438 };
36439 #else
36440-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36441+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36442 {}
36443 };
36444 #endif
36445diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36446index c68e724..e863008 100644
36447--- a/drivers/acpi/custom_method.c
36448+++ b/drivers/acpi/custom_method.c
36449@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36450 struct acpi_table_header table;
36451 acpi_status status;
36452
36453+#ifdef CONFIG_GRKERNSEC_KMEM
36454+ return -EPERM;
36455+#endif
36456+
36457 if (!(*ppos)) {
36458 /* parse the table header to get the table length */
36459 if (count <= sizeof(struct acpi_table_header))
36460diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36461index 735db11..91e07ff 100644
36462--- a/drivers/acpi/device_pm.c
36463+++ b/drivers/acpi/device_pm.c
36464@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36465
36466 #endif /* CONFIG_PM_SLEEP */
36467
36468+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36469+
36470 static struct dev_pm_domain acpi_general_pm_domain = {
36471 .ops = {
36472 .runtime_suspend = acpi_subsys_runtime_suspend,
36473@@ -1041,6 +1043,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36474 .restore_early = acpi_subsys_resume_early,
36475 #endif
36476 },
36477+ .detach = acpi_dev_pm_detach
36478 };
36479
36480 /**
36481@@ -1110,7 +1113,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36482 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36483 }
36484
36485- dev->pm_domain->detach = acpi_dev_pm_detach;
36486 return 0;
36487 }
36488 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36489diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36490index a8dd2f7..e15950e 100644
36491--- a/drivers/acpi/ec.c
36492+++ b/drivers/acpi/ec.c
36493@@ -1242,7 +1242,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36494 return 0;
36495 }
36496
36497-static struct dmi_system_id ec_dmi_table[] __initdata = {
36498+static const struct dmi_system_id ec_dmi_table[] __initconst = {
36499 {
36500 ec_skip_dsdt_scan, "Compal JFL92", {
36501 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36502diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36503index 139d9e4..9a9d799 100644
36504--- a/drivers/acpi/pci_slot.c
36505+++ b/drivers/acpi/pci_slot.c
36506@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36507 return 0;
36508 }
36509
36510-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36511+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36512 /*
36513 * Fujitsu Primequest machines will return 1023 to indicate an
36514 * error if the _SUN method is evaluated on SxFy objects that
36515diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36516index d9f7158..168e742 100644
36517--- a/drivers/acpi/processor_driver.c
36518+++ b/drivers/acpi/processor_driver.c
36519@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36520 return NOTIFY_OK;
36521 }
36522
36523-static struct notifier_block __refdata acpi_cpu_notifier = {
36524+static struct notifier_block __refconst acpi_cpu_notifier = {
36525 .notifier_call = acpi_cpu_soft_notify,
36526 };
36527
36528diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36529index f98db0b..8309c83 100644
36530--- a/drivers/acpi/processor_idle.c
36531+++ b/drivers/acpi/processor_idle.c
36532@@ -912,7 +912,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36533 {
36534 int i, count = CPUIDLE_DRIVER_STATE_START;
36535 struct acpi_processor_cx *cx;
36536- struct cpuidle_state *state;
36537+ cpuidle_state_no_const *state;
36538 struct cpuidle_driver *drv = &acpi_idle_driver;
36539
36540 if (!pr->flags.power_setup_done)
36541diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36542index e5dd808..1eceed1 100644
36543--- a/drivers/acpi/processor_pdc.c
36544+++ b/drivers/acpi/processor_pdc.c
36545@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36546 return 0;
36547 }
36548
36549-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36550+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36551 {
36552 set_no_mwait, "Extensa 5220", {
36553 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36554diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36555index 7f251dd..47b262c 100644
36556--- a/drivers/acpi/sleep.c
36557+++ b/drivers/acpi/sleep.c
36558@@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36559 return 0;
36560 }
36561
36562-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36563+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36564 {
36565 .callback = init_old_suspend_ordering,
36566 .ident = "Abit KN9 (nForce4 variant)",
36567diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36568index 13e577c..cef11ee 100644
36569--- a/drivers/acpi/sysfs.c
36570+++ b/drivers/acpi/sysfs.c
36571@@ -423,11 +423,11 @@ static u32 num_counters;
36572 static struct attribute **all_attrs;
36573 static u32 acpi_gpe_count;
36574
36575-static struct attribute_group interrupt_stats_attr_group = {
36576+static attribute_group_no_const interrupt_stats_attr_group = {
36577 .name = "interrupts",
36578 };
36579
36580-static struct kobj_attribute *counter_attrs;
36581+static kobj_attribute_no_const *counter_attrs;
36582
36583 static void delete_gpe_attr_array(void)
36584 {
36585diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36586index d24fa19..782f1e6 100644
36587--- a/drivers/acpi/thermal.c
36588+++ b/drivers/acpi/thermal.c
36589@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36590 return 0;
36591 }
36592
36593-static struct dmi_system_id thermal_dmi_table[] __initdata = {
36594+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36595 /*
36596 * Award BIOS on this AOpen makes thermal control almost worthless.
36597 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36598diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36599index 26eb70c..4d66ddf 100644
36600--- a/drivers/acpi/video.c
36601+++ b/drivers/acpi/video.c
36602@@ -418,7 +418,7 @@ static int __init video_disable_native_backlight(const struct dmi_system_id *d)
36603 return 0;
36604 }
36605
36606-static struct dmi_system_id video_dmi_table[] __initdata = {
36607+static const struct dmi_system_id video_dmi_table[] __initconst = {
36608 /*
36609 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36610 */
36611diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36612index 61a9c07..ea98fa1 100644
36613--- a/drivers/ata/libahci.c
36614+++ b/drivers/ata/libahci.c
36615@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36616 }
36617 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36618
36619-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36620+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36621 struct ata_taskfile *tf, int is_cmd, u16 flags,
36622 unsigned long timeout_msec)
36623 {
36624diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36625index 23dac3b..89ada44 100644
36626--- a/drivers/ata/libata-core.c
36627+++ b/drivers/ata/libata-core.c
36628@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36629 static void ata_dev_xfermask(struct ata_device *dev);
36630 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36631
36632-atomic_t ata_print_id = ATOMIC_INIT(0);
36633+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36634
36635 struct ata_force_param {
36636 const char *name;
36637@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36638 struct ata_port *ap;
36639 unsigned int tag;
36640
36641- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36642+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36643 ap = qc->ap;
36644
36645 qc->flags = 0;
36646@@ -4797,7 +4797,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36647 struct ata_port *ap;
36648 struct ata_link *link;
36649
36650- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36651+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36652 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36653 ap = qc->ap;
36654 link = qc->dev->link;
36655@@ -5901,6 +5901,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36656 return;
36657
36658 spin_lock(&lock);
36659+ pax_open_kernel();
36660
36661 for (cur = ops->inherits; cur; cur = cur->inherits) {
36662 void **inherit = (void **)cur;
36663@@ -5914,8 +5915,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36664 if (IS_ERR(*pp))
36665 *pp = NULL;
36666
36667- ops->inherits = NULL;
36668+ *(struct ata_port_operations **)&ops->inherits = NULL;
36669
36670+ pax_close_kernel();
36671 spin_unlock(&lock);
36672 }
36673
36674@@ -6111,7 +6113,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36675
36676 /* give ports names and add SCSI hosts */
36677 for (i = 0; i < host->n_ports; i++) {
36678- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36679+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36680 host->ports[i]->local_port_no = i + 1;
36681 }
36682
36683diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36684index b061ba2..fdcd85f 100644
36685--- a/drivers/ata/libata-scsi.c
36686+++ b/drivers/ata/libata-scsi.c
36687@@ -4172,7 +4172,7 @@ int ata_sas_port_init(struct ata_port *ap)
36688
36689 if (rc)
36690 return rc;
36691- ap->print_id = atomic_inc_return(&ata_print_id);
36692+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36693 return 0;
36694 }
36695 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36696diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36697index f840ca1..edd6ef3 100644
36698--- a/drivers/ata/libata.h
36699+++ b/drivers/ata/libata.h
36700@@ -53,7 +53,7 @@ enum {
36701 ATA_DNXFER_QUIET = (1 << 31),
36702 };
36703
36704-extern atomic_t ata_print_id;
36705+extern atomic_unchecked_t ata_print_id;
36706 extern int atapi_passthru16;
36707 extern int libata_fua;
36708 extern int libata_noacpi;
36709diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36710index a9b0c82..207d97d 100644
36711--- a/drivers/ata/pata_arasan_cf.c
36712+++ b/drivers/ata/pata_arasan_cf.c
36713@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36714 /* Handle platform specific quirks */
36715 if (quirk) {
36716 if (quirk & CF_BROKEN_PIO) {
36717- ap->ops->set_piomode = NULL;
36718+ pax_open_kernel();
36719+ *(void **)&ap->ops->set_piomode = NULL;
36720+ pax_close_kernel();
36721 ap->pio_mask = 0;
36722 }
36723 if (quirk & CF_BROKEN_MWDMA)
36724diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36725index f9b983a..887b9d8 100644
36726--- a/drivers/atm/adummy.c
36727+++ b/drivers/atm/adummy.c
36728@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36729 vcc->pop(vcc, skb);
36730 else
36731 dev_kfree_skb_any(skb);
36732- atomic_inc(&vcc->stats->tx);
36733+ atomic_inc_unchecked(&vcc->stats->tx);
36734
36735 return 0;
36736 }
36737diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36738index f1a9198..f466a4a 100644
36739--- a/drivers/atm/ambassador.c
36740+++ b/drivers/atm/ambassador.c
36741@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36742 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36743
36744 // VC layer stats
36745- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36746+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36747
36748 // free the descriptor
36749 kfree (tx_descr);
36750@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36751 dump_skb ("<<<", vc, skb);
36752
36753 // VC layer stats
36754- atomic_inc(&atm_vcc->stats->rx);
36755+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36756 __net_timestamp(skb);
36757 // end of our responsibility
36758 atm_vcc->push (atm_vcc, skb);
36759@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36760 } else {
36761 PRINTK (KERN_INFO, "dropped over-size frame");
36762 // should we count this?
36763- atomic_inc(&atm_vcc->stats->rx_drop);
36764+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36765 }
36766
36767 } else {
36768@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36769 }
36770
36771 if (check_area (skb->data, skb->len)) {
36772- atomic_inc(&atm_vcc->stats->tx_err);
36773+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36774 return -ENOMEM; // ?
36775 }
36776
36777diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36778index 480fa6f..947067c 100644
36779--- a/drivers/atm/atmtcp.c
36780+++ b/drivers/atm/atmtcp.c
36781@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36782 if (vcc->pop) vcc->pop(vcc,skb);
36783 else dev_kfree_skb(skb);
36784 if (dev_data) return 0;
36785- atomic_inc(&vcc->stats->tx_err);
36786+ atomic_inc_unchecked(&vcc->stats->tx_err);
36787 return -ENOLINK;
36788 }
36789 size = skb->len+sizeof(struct atmtcp_hdr);
36790@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36791 if (!new_skb) {
36792 if (vcc->pop) vcc->pop(vcc,skb);
36793 else dev_kfree_skb(skb);
36794- atomic_inc(&vcc->stats->tx_err);
36795+ atomic_inc_unchecked(&vcc->stats->tx_err);
36796 return -ENOBUFS;
36797 }
36798 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36799@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36800 if (vcc->pop) vcc->pop(vcc,skb);
36801 else dev_kfree_skb(skb);
36802 out_vcc->push(out_vcc,new_skb);
36803- atomic_inc(&vcc->stats->tx);
36804- atomic_inc(&out_vcc->stats->rx);
36805+ atomic_inc_unchecked(&vcc->stats->tx);
36806+ atomic_inc_unchecked(&out_vcc->stats->rx);
36807 return 0;
36808 }
36809
36810@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36811 read_unlock(&vcc_sklist_lock);
36812 if (!out_vcc) {
36813 result = -EUNATCH;
36814- atomic_inc(&vcc->stats->tx_err);
36815+ atomic_inc_unchecked(&vcc->stats->tx_err);
36816 goto done;
36817 }
36818 skb_pull(skb,sizeof(struct atmtcp_hdr));
36819@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36820 __net_timestamp(new_skb);
36821 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36822 out_vcc->push(out_vcc,new_skb);
36823- atomic_inc(&vcc->stats->tx);
36824- atomic_inc(&out_vcc->stats->rx);
36825+ atomic_inc_unchecked(&vcc->stats->tx);
36826+ atomic_inc_unchecked(&out_vcc->stats->rx);
36827 done:
36828 if (vcc->pop) vcc->pop(vcc,skb);
36829 else dev_kfree_skb(skb);
36830diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36831index 6339efd..2b441d5 100644
36832--- a/drivers/atm/eni.c
36833+++ b/drivers/atm/eni.c
36834@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36835 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36836 vcc->dev->number);
36837 length = 0;
36838- atomic_inc(&vcc->stats->rx_err);
36839+ atomic_inc_unchecked(&vcc->stats->rx_err);
36840 }
36841 else {
36842 length = ATM_CELL_SIZE-1; /* no HEC */
36843@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36844 size);
36845 }
36846 eff = length = 0;
36847- atomic_inc(&vcc->stats->rx_err);
36848+ atomic_inc_unchecked(&vcc->stats->rx_err);
36849 }
36850 else {
36851 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36852@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36853 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36854 vcc->dev->number,vcc->vci,length,size << 2,descr);
36855 length = eff = 0;
36856- atomic_inc(&vcc->stats->rx_err);
36857+ atomic_inc_unchecked(&vcc->stats->rx_err);
36858 }
36859 }
36860 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36861@@ -770,7 +770,7 @@ rx_dequeued++;
36862 vcc->push(vcc,skb);
36863 pushed++;
36864 }
36865- atomic_inc(&vcc->stats->rx);
36866+ atomic_inc_unchecked(&vcc->stats->rx);
36867 }
36868 wake_up(&eni_dev->rx_wait);
36869 }
36870@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36871 DMA_TO_DEVICE);
36872 if (vcc->pop) vcc->pop(vcc,skb);
36873 else dev_kfree_skb_irq(skb);
36874- atomic_inc(&vcc->stats->tx);
36875+ atomic_inc_unchecked(&vcc->stats->tx);
36876 wake_up(&eni_dev->tx_wait);
36877 dma_complete++;
36878 }
36879diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36880index 82f2ae0..f205c02 100644
36881--- a/drivers/atm/firestream.c
36882+++ b/drivers/atm/firestream.c
36883@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36884 }
36885 }
36886
36887- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36888+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36889
36890 fs_dprintk (FS_DEBUG_TXMEM, "i");
36891 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36892@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36893 #endif
36894 skb_put (skb, qe->p1 & 0xffff);
36895 ATM_SKB(skb)->vcc = atm_vcc;
36896- atomic_inc(&atm_vcc->stats->rx);
36897+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36898 __net_timestamp(skb);
36899 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36900 atm_vcc->push (atm_vcc, skb);
36901@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36902 kfree (pe);
36903 }
36904 if (atm_vcc)
36905- atomic_inc(&atm_vcc->stats->rx_drop);
36906+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36907 break;
36908 case 0x1f: /* Reassembly abort: no buffers. */
36909 /* Silently increment error counter. */
36910 if (atm_vcc)
36911- atomic_inc(&atm_vcc->stats->rx_drop);
36912+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36913 break;
36914 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36915 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36916diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36917index 75dde90..4309ead 100644
36918--- a/drivers/atm/fore200e.c
36919+++ b/drivers/atm/fore200e.c
36920@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36921 #endif
36922 /* check error condition */
36923 if (*entry->status & STATUS_ERROR)
36924- atomic_inc(&vcc->stats->tx_err);
36925+ atomic_inc_unchecked(&vcc->stats->tx_err);
36926 else
36927- atomic_inc(&vcc->stats->tx);
36928+ atomic_inc_unchecked(&vcc->stats->tx);
36929 }
36930 }
36931
36932@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36933 if (skb == NULL) {
36934 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36935
36936- atomic_inc(&vcc->stats->rx_drop);
36937+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36938 return -ENOMEM;
36939 }
36940
36941@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36942
36943 dev_kfree_skb_any(skb);
36944
36945- atomic_inc(&vcc->stats->rx_drop);
36946+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36947 return -ENOMEM;
36948 }
36949
36950 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36951
36952 vcc->push(vcc, skb);
36953- atomic_inc(&vcc->stats->rx);
36954+ atomic_inc_unchecked(&vcc->stats->rx);
36955
36956 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36957
36958@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36959 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36960 fore200e->atm_dev->number,
36961 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36962- atomic_inc(&vcc->stats->rx_err);
36963+ atomic_inc_unchecked(&vcc->stats->rx_err);
36964 }
36965 }
36966
36967@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36968 goto retry_here;
36969 }
36970
36971- atomic_inc(&vcc->stats->tx_err);
36972+ atomic_inc_unchecked(&vcc->stats->tx_err);
36973
36974 fore200e->tx_sat++;
36975 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36976diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36977index 93dca2e..c5daa69 100644
36978--- a/drivers/atm/he.c
36979+++ b/drivers/atm/he.c
36980@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36981
36982 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36983 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36984- atomic_inc(&vcc->stats->rx_drop);
36985+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36986 goto return_host_buffers;
36987 }
36988
36989@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36990 RBRQ_LEN_ERR(he_dev->rbrq_head)
36991 ? "LEN_ERR" : "",
36992 vcc->vpi, vcc->vci);
36993- atomic_inc(&vcc->stats->rx_err);
36994+ atomic_inc_unchecked(&vcc->stats->rx_err);
36995 goto return_host_buffers;
36996 }
36997
36998@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36999 vcc->push(vcc, skb);
37000 spin_lock(&he_dev->global_lock);
37001
37002- atomic_inc(&vcc->stats->rx);
37003+ atomic_inc_unchecked(&vcc->stats->rx);
37004
37005 return_host_buffers:
37006 ++pdus_assembled;
37007@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37008 tpd->vcc->pop(tpd->vcc, tpd->skb);
37009 else
37010 dev_kfree_skb_any(tpd->skb);
37011- atomic_inc(&tpd->vcc->stats->tx_err);
37012+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37013 }
37014 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37015 return;
37016@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37017 vcc->pop(vcc, skb);
37018 else
37019 dev_kfree_skb_any(skb);
37020- atomic_inc(&vcc->stats->tx_err);
37021+ atomic_inc_unchecked(&vcc->stats->tx_err);
37022 return -EINVAL;
37023 }
37024
37025@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37026 vcc->pop(vcc, skb);
37027 else
37028 dev_kfree_skb_any(skb);
37029- atomic_inc(&vcc->stats->tx_err);
37030+ atomic_inc_unchecked(&vcc->stats->tx_err);
37031 return -EINVAL;
37032 }
37033 #endif
37034@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37035 vcc->pop(vcc, skb);
37036 else
37037 dev_kfree_skb_any(skb);
37038- atomic_inc(&vcc->stats->tx_err);
37039+ atomic_inc_unchecked(&vcc->stats->tx_err);
37040 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37041 return -ENOMEM;
37042 }
37043@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37044 vcc->pop(vcc, skb);
37045 else
37046 dev_kfree_skb_any(skb);
37047- atomic_inc(&vcc->stats->tx_err);
37048+ atomic_inc_unchecked(&vcc->stats->tx_err);
37049 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37050 return -ENOMEM;
37051 }
37052@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37053 __enqueue_tpd(he_dev, tpd, cid);
37054 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37055
37056- atomic_inc(&vcc->stats->tx);
37057+ atomic_inc_unchecked(&vcc->stats->tx);
37058
37059 return 0;
37060 }
37061diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37062index 527bbd5..96570c8 100644
37063--- a/drivers/atm/horizon.c
37064+++ b/drivers/atm/horizon.c
37065@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37066 {
37067 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37068 // VC layer stats
37069- atomic_inc(&vcc->stats->rx);
37070+ atomic_inc_unchecked(&vcc->stats->rx);
37071 __net_timestamp(skb);
37072 // end of our responsibility
37073 vcc->push (vcc, skb);
37074@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37075 dev->tx_iovec = NULL;
37076
37077 // VC layer stats
37078- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37079+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37080
37081 // free the skb
37082 hrz_kfree_skb (skb);
37083diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37084index 074616b..d6b3d5f 100644
37085--- a/drivers/atm/idt77252.c
37086+++ b/drivers/atm/idt77252.c
37087@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37088 else
37089 dev_kfree_skb(skb);
37090
37091- atomic_inc(&vcc->stats->tx);
37092+ atomic_inc_unchecked(&vcc->stats->tx);
37093 }
37094
37095 atomic_dec(&scq->used);
37096@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37097 if ((sb = dev_alloc_skb(64)) == NULL) {
37098 printk("%s: Can't allocate buffers for aal0.\n",
37099 card->name);
37100- atomic_add(i, &vcc->stats->rx_drop);
37101+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37102 break;
37103 }
37104 if (!atm_charge(vcc, sb->truesize)) {
37105 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37106 card->name);
37107- atomic_add(i - 1, &vcc->stats->rx_drop);
37108+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37109 dev_kfree_skb(sb);
37110 break;
37111 }
37112@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37113 ATM_SKB(sb)->vcc = vcc;
37114 __net_timestamp(sb);
37115 vcc->push(vcc, sb);
37116- atomic_inc(&vcc->stats->rx);
37117+ atomic_inc_unchecked(&vcc->stats->rx);
37118
37119 cell += ATM_CELL_PAYLOAD;
37120 }
37121@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37122 "(CDC: %08x)\n",
37123 card->name, len, rpp->len, readl(SAR_REG_CDC));
37124 recycle_rx_pool_skb(card, rpp);
37125- atomic_inc(&vcc->stats->rx_err);
37126+ atomic_inc_unchecked(&vcc->stats->rx_err);
37127 return;
37128 }
37129 if (stat & SAR_RSQE_CRC) {
37130 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37131 recycle_rx_pool_skb(card, rpp);
37132- atomic_inc(&vcc->stats->rx_err);
37133+ atomic_inc_unchecked(&vcc->stats->rx_err);
37134 return;
37135 }
37136 if (skb_queue_len(&rpp->queue) > 1) {
37137@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37138 RXPRINTK("%s: Can't alloc RX skb.\n",
37139 card->name);
37140 recycle_rx_pool_skb(card, rpp);
37141- atomic_inc(&vcc->stats->rx_err);
37142+ atomic_inc_unchecked(&vcc->stats->rx_err);
37143 return;
37144 }
37145 if (!atm_charge(vcc, skb->truesize)) {
37146@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37147 __net_timestamp(skb);
37148
37149 vcc->push(vcc, skb);
37150- atomic_inc(&vcc->stats->rx);
37151+ atomic_inc_unchecked(&vcc->stats->rx);
37152
37153 return;
37154 }
37155@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37156 __net_timestamp(skb);
37157
37158 vcc->push(vcc, skb);
37159- atomic_inc(&vcc->stats->rx);
37160+ atomic_inc_unchecked(&vcc->stats->rx);
37161
37162 if (skb->truesize > SAR_FB_SIZE_3)
37163 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37164@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37165 if (vcc->qos.aal != ATM_AAL0) {
37166 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37167 card->name, vpi, vci);
37168- atomic_inc(&vcc->stats->rx_drop);
37169+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37170 goto drop;
37171 }
37172
37173 if ((sb = dev_alloc_skb(64)) == NULL) {
37174 printk("%s: Can't allocate buffers for AAL0.\n",
37175 card->name);
37176- atomic_inc(&vcc->stats->rx_err);
37177+ atomic_inc_unchecked(&vcc->stats->rx_err);
37178 goto drop;
37179 }
37180
37181@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37182 ATM_SKB(sb)->vcc = vcc;
37183 __net_timestamp(sb);
37184 vcc->push(vcc, sb);
37185- atomic_inc(&vcc->stats->rx);
37186+ atomic_inc_unchecked(&vcc->stats->rx);
37187
37188 drop:
37189 skb_pull(queue, 64);
37190@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37191
37192 if (vc == NULL) {
37193 printk("%s: NULL connection in send().\n", card->name);
37194- atomic_inc(&vcc->stats->tx_err);
37195+ atomic_inc_unchecked(&vcc->stats->tx_err);
37196 dev_kfree_skb(skb);
37197 return -EINVAL;
37198 }
37199 if (!test_bit(VCF_TX, &vc->flags)) {
37200 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37201- atomic_inc(&vcc->stats->tx_err);
37202+ atomic_inc_unchecked(&vcc->stats->tx_err);
37203 dev_kfree_skb(skb);
37204 return -EINVAL;
37205 }
37206@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37207 break;
37208 default:
37209 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37210- atomic_inc(&vcc->stats->tx_err);
37211+ atomic_inc_unchecked(&vcc->stats->tx_err);
37212 dev_kfree_skb(skb);
37213 return -EINVAL;
37214 }
37215
37216 if (skb_shinfo(skb)->nr_frags != 0) {
37217 printk("%s: No scatter-gather yet.\n", card->name);
37218- atomic_inc(&vcc->stats->tx_err);
37219+ atomic_inc_unchecked(&vcc->stats->tx_err);
37220 dev_kfree_skb(skb);
37221 return -EINVAL;
37222 }
37223@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37224
37225 err = queue_skb(card, vc, skb, oam);
37226 if (err) {
37227- atomic_inc(&vcc->stats->tx_err);
37228+ atomic_inc_unchecked(&vcc->stats->tx_err);
37229 dev_kfree_skb(skb);
37230 return err;
37231 }
37232@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37233 skb = dev_alloc_skb(64);
37234 if (!skb) {
37235 printk("%s: Out of memory in send_oam().\n", card->name);
37236- atomic_inc(&vcc->stats->tx_err);
37237+ atomic_inc_unchecked(&vcc->stats->tx_err);
37238 return -ENOMEM;
37239 }
37240 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37241diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37242index 924f8e2..3375a3e 100644
37243--- a/drivers/atm/iphase.c
37244+++ b/drivers/atm/iphase.c
37245@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37246 status = (u_short) (buf_desc_ptr->desc_mode);
37247 if (status & (RX_CER | RX_PTE | RX_OFL))
37248 {
37249- atomic_inc(&vcc->stats->rx_err);
37250+ atomic_inc_unchecked(&vcc->stats->rx_err);
37251 IF_ERR(printk("IA: bad packet, dropping it");)
37252 if (status & RX_CER) {
37253 IF_ERR(printk(" cause: packet CRC error\n");)
37254@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37255 len = dma_addr - buf_addr;
37256 if (len > iadev->rx_buf_sz) {
37257 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37258- atomic_inc(&vcc->stats->rx_err);
37259+ atomic_inc_unchecked(&vcc->stats->rx_err);
37260 goto out_free_desc;
37261 }
37262
37263@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37264 ia_vcc = INPH_IA_VCC(vcc);
37265 if (ia_vcc == NULL)
37266 {
37267- atomic_inc(&vcc->stats->rx_err);
37268+ atomic_inc_unchecked(&vcc->stats->rx_err);
37269 atm_return(vcc, skb->truesize);
37270 dev_kfree_skb_any(skb);
37271 goto INCR_DLE;
37272@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37273 if ((length > iadev->rx_buf_sz) || (length >
37274 (skb->len - sizeof(struct cpcs_trailer))))
37275 {
37276- atomic_inc(&vcc->stats->rx_err);
37277+ atomic_inc_unchecked(&vcc->stats->rx_err);
37278 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37279 length, skb->len);)
37280 atm_return(vcc, skb->truesize);
37281@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37282
37283 IF_RX(printk("rx_dle_intr: skb push");)
37284 vcc->push(vcc,skb);
37285- atomic_inc(&vcc->stats->rx);
37286+ atomic_inc_unchecked(&vcc->stats->rx);
37287 iadev->rx_pkt_cnt++;
37288 }
37289 INCR_DLE:
37290@@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37291 {
37292 struct k_sonet_stats *stats;
37293 stats = &PRIV(_ia_dev[board])->sonet_stats;
37294- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37295- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37296- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37297- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37298- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37299- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37300- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37301- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37302- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37303+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37304+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37305+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37306+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37307+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37308+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37309+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37310+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37311+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37312 }
37313 ia_cmds.status = 0;
37314 break;
37315@@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37316 if ((desc == 0) || (desc > iadev->num_tx_desc))
37317 {
37318 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37319- atomic_inc(&vcc->stats->tx);
37320+ atomic_inc_unchecked(&vcc->stats->tx);
37321 if (vcc->pop)
37322 vcc->pop(vcc, skb);
37323 else
37324@@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37325 ATM_DESC(skb) = vcc->vci;
37326 skb_queue_tail(&iadev->tx_dma_q, skb);
37327
37328- atomic_inc(&vcc->stats->tx);
37329+ atomic_inc_unchecked(&vcc->stats->tx);
37330 iadev->tx_pkt_cnt++;
37331 /* Increment transaction counter */
37332 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37333
37334 #if 0
37335 /* add flow control logic */
37336- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37337+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37338 if (iavcc->vc_desc_cnt > 10) {
37339 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37340 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37341diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37342index ce43ae3..969de38 100644
37343--- a/drivers/atm/lanai.c
37344+++ b/drivers/atm/lanai.c
37345@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37346 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37347 lanai_endtx(lanai, lvcc);
37348 lanai_free_skb(lvcc->tx.atmvcc, skb);
37349- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37350+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37351 }
37352
37353 /* Try to fill the buffer - don't call unless there is backlog */
37354@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37355 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37356 __net_timestamp(skb);
37357 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37358- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37359+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37360 out:
37361 lvcc->rx.buf.ptr = end;
37362 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37363@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37364 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37365 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37366 lanai->stats.service_rxnotaal5++;
37367- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37368+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37369 return 0;
37370 }
37371 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37372@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37373 int bytes;
37374 read_unlock(&vcc_sklist_lock);
37375 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37376- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37377+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37378 lvcc->stats.x.aal5.service_trash++;
37379 bytes = (SERVICE_GET_END(s) * 16) -
37380 (((unsigned long) lvcc->rx.buf.ptr) -
37381@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37382 }
37383 if (s & SERVICE_STREAM) {
37384 read_unlock(&vcc_sklist_lock);
37385- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37386+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37387 lvcc->stats.x.aal5.service_stream++;
37388 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37389 "PDU on VCI %d!\n", lanai->number, vci);
37390@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37391 return 0;
37392 }
37393 DPRINTK("got rx crc error on vci %d\n", vci);
37394- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37395+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37396 lvcc->stats.x.aal5.service_rxcrc++;
37397 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37398 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37399diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37400index b7e1cc0..eb336bfe 100644
37401--- a/drivers/atm/nicstar.c
37402+++ b/drivers/atm/nicstar.c
37403@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37404 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37405 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37406 card->index);
37407- atomic_inc(&vcc->stats->tx_err);
37408+ atomic_inc_unchecked(&vcc->stats->tx_err);
37409 dev_kfree_skb_any(skb);
37410 return -EINVAL;
37411 }
37412@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37413 if (!vc->tx) {
37414 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37415 card->index);
37416- atomic_inc(&vcc->stats->tx_err);
37417+ atomic_inc_unchecked(&vcc->stats->tx_err);
37418 dev_kfree_skb_any(skb);
37419 return -EINVAL;
37420 }
37421@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37422 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37423 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37424 card->index);
37425- atomic_inc(&vcc->stats->tx_err);
37426+ atomic_inc_unchecked(&vcc->stats->tx_err);
37427 dev_kfree_skb_any(skb);
37428 return -EINVAL;
37429 }
37430
37431 if (skb_shinfo(skb)->nr_frags != 0) {
37432 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37433- atomic_inc(&vcc->stats->tx_err);
37434+ atomic_inc_unchecked(&vcc->stats->tx_err);
37435 dev_kfree_skb_any(skb);
37436 return -EINVAL;
37437 }
37438@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37439 }
37440
37441 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37442- atomic_inc(&vcc->stats->tx_err);
37443+ atomic_inc_unchecked(&vcc->stats->tx_err);
37444 dev_kfree_skb_any(skb);
37445 return -EIO;
37446 }
37447- atomic_inc(&vcc->stats->tx);
37448+ atomic_inc_unchecked(&vcc->stats->tx);
37449
37450 return 0;
37451 }
37452@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37453 printk
37454 ("nicstar%d: Can't allocate buffers for aal0.\n",
37455 card->index);
37456- atomic_add(i, &vcc->stats->rx_drop);
37457+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37458 break;
37459 }
37460 if (!atm_charge(vcc, sb->truesize)) {
37461 RXPRINTK
37462 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37463 card->index);
37464- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37465+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37466 dev_kfree_skb_any(sb);
37467 break;
37468 }
37469@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37470 ATM_SKB(sb)->vcc = vcc;
37471 __net_timestamp(sb);
37472 vcc->push(vcc, sb);
37473- atomic_inc(&vcc->stats->rx);
37474+ atomic_inc_unchecked(&vcc->stats->rx);
37475 cell += ATM_CELL_PAYLOAD;
37476 }
37477
37478@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37479 if (iovb == NULL) {
37480 printk("nicstar%d: Out of iovec buffers.\n",
37481 card->index);
37482- atomic_inc(&vcc->stats->rx_drop);
37483+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37484 recycle_rx_buf(card, skb);
37485 return;
37486 }
37487@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37488 small or large buffer itself. */
37489 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37490 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37491- atomic_inc(&vcc->stats->rx_err);
37492+ atomic_inc_unchecked(&vcc->stats->rx_err);
37493 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37494 NS_MAX_IOVECS);
37495 NS_PRV_IOVCNT(iovb) = 0;
37496@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37497 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37498 card->index);
37499 which_list(card, skb);
37500- atomic_inc(&vcc->stats->rx_err);
37501+ atomic_inc_unchecked(&vcc->stats->rx_err);
37502 recycle_rx_buf(card, skb);
37503 vc->rx_iov = NULL;
37504 recycle_iov_buf(card, iovb);
37505@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37506 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37507 card->index);
37508 which_list(card, skb);
37509- atomic_inc(&vcc->stats->rx_err);
37510+ atomic_inc_unchecked(&vcc->stats->rx_err);
37511 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37512 NS_PRV_IOVCNT(iovb));
37513 vc->rx_iov = NULL;
37514@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37515 printk(" - PDU size mismatch.\n");
37516 else
37517 printk(".\n");
37518- atomic_inc(&vcc->stats->rx_err);
37519+ atomic_inc_unchecked(&vcc->stats->rx_err);
37520 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37521 NS_PRV_IOVCNT(iovb));
37522 vc->rx_iov = NULL;
37523@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37524 /* skb points to a small buffer */
37525 if (!atm_charge(vcc, skb->truesize)) {
37526 push_rxbufs(card, skb);
37527- atomic_inc(&vcc->stats->rx_drop);
37528+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37529 } else {
37530 skb_put(skb, len);
37531 dequeue_sm_buf(card, skb);
37532@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37533 ATM_SKB(skb)->vcc = vcc;
37534 __net_timestamp(skb);
37535 vcc->push(vcc, skb);
37536- atomic_inc(&vcc->stats->rx);
37537+ atomic_inc_unchecked(&vcc->stats->rx);
37538 }
37539 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37540 struct sk_buff *sb;
37541@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37542 if (len <= NS_SMBUFSIZE) {
37543 if (!atm_charge(vcc, sb->truesize)) {
37544 push_rxbufs(card, sb);
37545- atomic_inc(&vcc->stats->rx_drop);
37546+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37547 } else {
37548 skb_put(sb, len);
37549 dequeue_sm_buf(card, sb);
37550@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37551 ATM_SKB(sb)->vcc = vcc;
37552 __net_timestamp(sb);
37553 vcc->push(vcc, sb);
37554- atomic_inc(&vcc->stats->rx);
37555+ atomic_inc_unchecked(&vcc->stats->rx);
37556 }
37557
37558 push_rxbufs(card, skb);
37559@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37560
37561 if (!atm_charge(vcc, skb->truesize)) {
37562 push_rxbufs(card, skb);
37563- atomic_inc(&vcc->stats->rx_drop);
37564+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37565 } else {
37566 dequeue_lg_buf(card, skb);
37567 #ifdef NS_USE_DESTRUCTORS
37568@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37569 ATM_SKB(skb)->vcc = vcc;
37570 __net_timestamp(skb);
37571 vcc->push(vcc, skb);
37572- atomic_inc(&vcc->stats->rx);
37573+ atomic_inc_unchecked(&vcc->stats->rx);
37574 }
37575
37576 push_rxbufs(card, sb);
37577@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37578 printk
37579 ("nicstar%d: Out of huge buffers.\n",
37580 card->index);
37581- atomic_inc(&vcc->stats->rx_drop);
37582+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37583 recycle_iovec_rx_bufs(card,
37584 (struct iovec *)
37585 iovb->data,
37586@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37587 card->hbpool.count++;
37588 } else
37589 dev_kfree_skb_any(hb);
37590- atomic_inc(&vcc->stats->rx_drop);
37591+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37592 } else {
37593 /* Copy the small buffer to the huge buffer */
37594 sb = (struct sk_buff *)iov->iov_base;
37595@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37596 #endif /* NS_USE_DESTRUCTORS */
37597 __net_timestamp(hb);
37598 vcc->push(vcc, hb);
37599- atomic_inc(&vcc->stats->rx);
37600+ atomic_inc_unchecked(&vcc->stats->rx);
37601 }
37602 }
37603
37604diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37605index 74e18b0..f16afa0 100644
37606--- a/drivers/atm/solos-pci.c
37607+++ b/drivers/atm/solos-pci.c
37608@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37609 }
37610 atm_charge(vcc, skb->truesize);
37611 vcc->push(vcc, skb);
37612- atomic_inc(&vcc->stats->rx);
37613+ atomic_inc_unchecked(&vcc->stats->rx);
37614 break;
37615
37616 case PKT_STATUS:
37617@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37618 vcc = SKB_CB(oldskb)->vcc;
37619
37620 if (vcc) {
37621- atomic_inc(&vcc->stats->tx);
37622+ atomic_inc_unchecked(&vcc->stats->tx);
37623 solos_pop(vcc, oldskb);
37624 } else {
37625 dev_kfree_skb_irq(oldskb);
37626diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37627index 0215934..ce9f5b1 100644
37628--- a/drivers/atm/suni.c
37629+++ b/drivers/atm/suni.c
37630@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37631
37632
37633 #define ADD_LIMITED(s,v) \
37634- atomic_add((v),&stats->s); \
37635- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37636+ atomic_add_unchecked((v),&stats->s); \
37637+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37638
37639
37640 static void suni_hz(unsigned long from_timer)
37641diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37642index 5120a96..e2572bd 100644
37643--- a/drivers/atm/uPD98402.c
37644+++ b/drivers/atm/uPD98402.c
37645@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37646 struct sonet_stats tmp;
37647 int error = 0;
37648
37649- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37650+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37651 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37652 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37653 if (zero && !error) {
37654@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37655
37656
37657 #define ADD_LIMITED(s,v) \
37658- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37659- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37660- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37661+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37662+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37663+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37664
37665
37666 static void stat_event(struct atm_dev *dev)
37667@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37668 if (reason & uPD98402_INT_PFM) stat_event(dev);
37669 if (reason & uPD98402_INT_PCO) {
37670 (void) GET(PCOCR); /* clear interrupt cause */
37671- atomic_add(GET(HECCT),
37672+ atomic_add_unchecked(GET(HECCT),
37673 &PRIV(dev)->sonet_stats.uncorr_hcs);
37674 }
37675 if ((reason & uPD98402_INT_RFO) &&
37676@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37677 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37678 uPD98402_INT_LOS),PIMR); /* enable them */
37679 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37680- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37681- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37682- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37683+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37684+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37685+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37686 return 0;
37687 }
37688
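The two SONET PHY drivers just above (suni and uPD98402) share an ADD_LIMITED macro with saturating behaviour: after the add, a negative reading means the signed counter has overflowed, so it is pinned at INT_MAX rather than left wrapped. The conversion keeps that logic but routes it through the unchecked operations, since the overflow here is anticipated and REFCOUNT would otherwise trap on it. The saturation logic in isolation (a sketch with hypothetical names; the kernel builds with -fno-strict-overflow, which makes the signed wrap well-defined):

	#include <limits.h>

	static void add_limited(int *ctr, int v)
	{
		*ctr += v;              /* may wrap negative on overflow */
		if (*ctr < 0)
			*ctr = INT_MAX; /* clamp instead of wrapping */
	}

For example, with *ctr == INT_MAX - 5 and v == 10, the sum wraps to a large negative value and is clamped to INT_MAX, so userspace never sees the tally go backwards.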
37689diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37690index cecfb94..87009ec 100644
37691--- a/drivers/atm/zatm.c
37692+++ b/drivers/atm/zatm.c
37693@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37694 }
37695 if (!size) {
37696 dev_kfree_skb_irq(skb);
37697- if (vcc) atomic_inc(&vcc->stats->rx_err);
37698+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37699 continue;
37700 }
37701 if (!atm_charge(vcc,skb->truesize)) {
37702@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37703 skb->len = size;
37704 ATM_SKB(skb)->vcc = vcc;
37705 vcc->push(vcc,skb);
37706- atomic_inc(&vcc->stats->rx);
37707+ atomic_inc_unchecked(&vcc->stats->rx);
37708 }
37709 zout(pos & 0xffff,MTA(mbx));
37710 #if 0 /* probably a stupid idea */
37711@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37712 skb_queue_head(&zatm_vcc->backlog,skb);
37713 break;
37714 }
37715- atomic_inc(&vcc->stats->tx);
37716+ atomic_inc_unchecked(&vcc->stats->tx);
37717 wake_up(&zatm_vcc->tx_wait);
37718 }
37719
37720diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37721index 876bae5..8978785 100644
37722--- a/drivers/base/bus.c
37723+++ b/drivers/base/bus.c
37724@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37725 return -EINVAL;
37726
37727 mutex_lock(&subsys->p->mutex);
37728- list_add_tail(&sif->node, &subsys->p->interfaces);
37729+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37730 if (sif->add_dev) {
37731 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37732 while ((dev = subsys_dev_iter_next(&iter)))
37733@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37734 subsys = sif->subsys;
37735
37736 mutex_lock(&subsys->p->mutex);
37737- list_del_init(&sif->node);
37738+ pax_list_del_init((struct list_head *)&sif->node);
37739 if (sif->remove_dev) {
37740 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37741 while ((dev = subsys_dev_iter_next(&iter)))
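The subsys_interface list nodes modified here live inside structures that the PaX constify plugin places in read-only memory, so a plain list_add_tail()/list_del_init() would fault when it updates the node's next/prev pointers. The pax_list_* wrappers perform the same splice but reopen the kernel write window around just those stores. Roughly (an illustrative sketch assuming pax_open_kernel()/pax_close_kernel() toggle write access, as seen in the power/domain.c hunk further down):

	#include <linux/list.h>

	static inline void pax_list_add_tail(struct list_head *new,
					     struct list_head *head)
	{
		pax_open_kernel();        /* make RO kernel data writable */
		list_add_tail(new, head);
		pax_close_kernel();       /* restore the protection */
	}

The (struct list_head *) casts at the call sites strip the const qualifier the plugin imposes on the embedded node.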
37742diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37743index 25798db..15f130e 100644
37744--- a/drivers/base/devtmpfs.c
37745+++ b/drivers/base/devtmpfs.c
37746@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37747 if (!thread)
37748 return 0;
37749
37750- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37751+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37752 if (err)
37753 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37754 else
37755@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37756 *err = sys_unshare(CLONE_NEWNS);
37757 if (*err)
37758 goto out;
37759- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37760+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37761 if (*err)
37762 goto out;
37763- sys_chdir("/.."); /* will traverse into overmounted root */
37764- sys_chroot(".");
37765+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37766+ sys_chroot((char __force_user *)".");
37767 complete(&setup_done);
37768 while (1) {
37769 spin_lock(&req_lock);
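devtmpfs calls sys_mount(), sys_chdir() and sys_chroot() directly with kernel string literals, but those syscalls are declared to take char __user * arguments. The added (char __force_user *) casts record that the address-space mismatch is intentional, which matters both for sparse and for PaX UDEREF builds where kernel and user pointers are genuinely distinct. A sketch of the annotations involved, assuming the usual compiler.h arrangement (the exact grsecurity definition may differ):

	#ifdef __CHECKER__
	# define __user  __attribute__((noderef, address_space(1)))
	# define __force __attribute__((force))
	#else
	# define __user
	# define __force
	#endif

	/* shorthand so call sites need a single annotation */
	#define __force_user  __force __user

With that, (char __force_user *)"devtmpfs" reads as: a kernel string knowingly passed through a user-pointer-typed parameter.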
37770diff --git a/drivers/base/node.c b/drivers/base/node.c
37771index 36fabe43..8cfc112 100644
37772--- a/drivers/base/node.c
37773+++ b/drivers/base/node.c
37774@@ -615,7 +615,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37775 struct node_attr {
37776 struct device_attribute attr;
37777 enum node_states state;
37778-};
37779+} __do_const;
37780
37781 static ssize_t show_node_state(struct device *dev,
37782 struct device_attribute *attr, char *buf)
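Appending __do_const to struct node_attr hands the type to the PaX constify GCC plugin: every instance is treated as const, so the embedded device_attribute, which carries function pointers, lands in read-only memory where it cannot be retargeted after boot. The attribute only exists when the plugin is active; a sketch of the usual fallback wiring (mirroring how such plugin attributes are defined in compiler.h, not quoted from this patch):

	#ifdef CONSTIFY_PLUGIN
	# define __do_const __attribute__((do_const))
	#else
	# define __do_const
	#endif

The same marker appears later in this patch on other function-pointer tables, e.g. drbd's struct data_cmd.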
37783diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37784index 45937f8..b9a342e 100644
37785--- a/drivers/base/power/domain.c
37786+++ b/drivers/base/power/domain.c
37787@@ -1698,7 +1698,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37788 {
37789 struct cpuidle_driver *cpuidle_drv;
37790 struct gpd_cpuidle_data *cpuidle_data;
37791- struct cpuidle_state *idle_state;
37792+ cpuidle_state_no_const *idle_state;
37793 int ret = 0;
37794
37795 if (IS_ERR_OR_NULL(genpd) || state < 0)
37796@@ -1766,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37797 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37798 {
37799 struct gpd_cpuidle_data *cpuidle_data;
37800- struct cpuidle_state *idle_state;
37801+ cpuidle_state_no_const *idle_state;
37802 int ret = 0;
37803
37804 if (IS_ERR_OR_NULL(genpd))
37805@@ -2195,7 +2195,10 @@ int genpd_dev_pm_attach(struct device *dev)
37806 return ret;
37807 }
37808
37809- dev->pm_domain->detach = genpd_dev_pm_detach;
37810+ pax_open_kernel();
37811+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37812+ pax_close_kernel();
37813+
37814 pm_genpd_poweron(pd);
37815
37816 return 0;
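Because dev->pm_domain points into data the constify plugin has made read-only, the detach callback cannot be set with a plain assignment. The patch brackets the single store with pax_open_kernel()/pax_close_kernel(), which lift and restore kernel write protection (on x86 by briefly clearing CR0.WP), and uses a *(void **)& cast to defeat the const qualifier for that one write. The idiom in the abstract, with placeholder names (ops/callback/new_callback stand for whatever read-only function pointer is being updated):

	pax_open_kernel();                        /* allow writes to RO data */
	*(void **)&ops->callback = new_callback;  /* the one guarded store   */
	pax_close_kernel();                       /* re-protect immediately  */

Keeping the window to a single store is deliberate: the longer the protection is down, the larger the race surface for a concurrent attacker-controlled write.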
37817diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37818index d2be3f9..0a3167a 100644
37819--- a/drivers/base/power/sysfs.c
37820+++ b/drivers/base/power/sysfs.c
37821@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37822 return -EIO;
37823 }
37824 }
37825- return sprintf(buf, p);
37826+ return sprintf(buf, "%s", p);
37827 }
37828
37829 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
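The rtpm_status_show() change is an ordinary format-string fix rather than a PaX-specific one: sprintf(buf, p) interprets any '%' inside p as a conversion specification and will read nonexistent varargs, whereas sprintf(buf, "%s", p) copies the text verbatim. The status strings here are compile-time constants, so this is defence in depth, but the safe form costs nothing. For illustration:

	const char *p = "100% done";

	sprintf(buf, p);        /* "% d" is parsed as a conversion and
	                         * reads an argument that was never passed */
	sprintf(buf, "%s", p);  /* copies the string unchanged */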
37830diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37831index aab7158..b172db2 100644
37832--- a/drivers/base/power/wakeup.c
37833+++ b/drivers/base/power/wakeup.c
37834@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37835 * They need to be modified together atomically, so it's better to use one
37836 * atomic variable to hold them both.
37837 */
37838-static atomic_t combined_event_count = ATOMIC_INIT(0);
37839+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37840
37841 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37842 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37843
37844 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37845 {
37846- unsigned int comb = atomic_read(&combined_event_count);
37847+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37848
37849 *cnt = (comb >> IN_PROGRESS_BITS);
37850 *inpr = comb & MAX_IN_PROGRESS;
37851@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37852 ws->start_prevent_time = ws->last_time;
37853
37854 /* Increment the counter of events in progress. */
37855- cec = atomic_inc_return(&combined_event_count);
37856+ cec = atomic_inc_return_unchecked(&combined_event_count);
37857
37858 trace_wakeup_source_activate(ws->name, cec);
37859 }
37860@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37861 * Increment the counter of registered wakeup events and decrement the
37862 * couter of wakeup events in progress simultaneously.
37863 */
37864- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37865+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37866 trace_wakeup_source_deactivate(ws->name, cec);
37867
37868 split_counters(&cnt, &inpr);
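combined_event_count packs two values into one atomic word: the count of registered (completed) wakeup events in the upper half and the number currently in progress in the low IN_PROGRESS_BITS. The updates above exploit the packing: adding 1 bumps the in-progress field, while adding MAX_IN_PROGRESS (all ones in the low field) performs cnt+1 and inpr-1 in a single atomic operation, because the low field borrows through into the high one. A standalone demonstration of the arithmetic (assuming 32-bit int, so IN_PROGRESS_BITS is 16):

	#include <stdio.h>

	#define IN_PROGRESS_BITS (sizeof(int) * 4)            /* 16 */
	#define MAX_IN_PROGRESS  ((1u << IN_PROGRESS_BITS) - 1)

	int main(void)
	{
		unsigned int comb = 0;

		comb += 1;               /* activate: inpr 0 -> 1       */
		comb += MAX_IN_PROGRESS; /* deactivate: cnt +1, inpr -1 */

		printf("cnt=%u inpr=%u\n",
		       comb >> IN_PROGRESS_BITS, comb & MAX_IN_PROGRESS);
		/* prints: cnt=1 inpr=0 */
		return 0;
	}

The conversion to atomic_unchecked_t follows the same rationale as the ATM counters: the packed word is expected to wrap eventually, so REFCOUNT's overflow trap must not fire on it.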
37869diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37870index 8d98a32..61d3165 100644
37871--- a/drivers/base/syscore.c
37872+++ b/drivers/base/syscore.c
37873@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37874 void register_syscore_ops(struct syscore_ops *ops)
37875 {
37876 mutex_lock(&syscore_ops_lock);
37877- list_add_tail(&ops->node, &syscore_ops_list);
37878+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37879 mutex_unlock(&syscore_ops_lock);
37880 }
37881 EXPORT_SYMBOL_GPL(register_syscore_ops);
37882@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37883 void unregister_syscore_ops(struct syscore_ops *ops)
37884 {
37885 mutex_lock(&syscore_ops_lock);
37886- list_del(&ops->node);
37887+ pax_list_del((struct list_head *)&ops->node);
37888 mutex_unlock(&syscore_ops_lock);
37889 }
37890 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37891diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37892index ff20f19..018f1da 100644
37893--- a/drivers/block/cciss.c
37894+++ b/drivers/block/cciss.c
37895@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37896 while (!list_empty(&h->reqQ)) {
37897 c = list_entry(h->reqQ.next, CommandList_struct, list);
37898 /* can't do anything if fifo is full */
37899- if ((h->access.fifo_full(h))) {
37900+ if ((h->access->fifo_full(h))) {
37901 dev_warn(&h->pdev->dev, "fifo full\n");
37902 break;
37903 }
37904@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37905 h->Qdepth--;
37906
37907 /* Tell the controller execute command */
37908- h->access.submit_command(h, c);
37909+ h->access->submit_command(h, c);
37910
37911 /* Put job onto the completed Q */
37912 addQ(&h->cmpQ, c);
37913@@ -3444,17 +3444,17 @@ startio:
37914
37915 static inline unsigned long get_next_completion(ctlr_info_t *h)
37916 {
37917- return h->access.command_completed(h);
37918+ return h->access->command_completed(h);
37919 }
37920
37921 static inline int interrupt_pending(ctlr_info_t *h)
37922 {
37923- return h->access.intr_pending(h);
37924+ return h->access->intr_pending(h);
37925 }
37926
37927 static inline long interrupt_not_for_us(ctlr_info_t *h)
37928 {
37929- return ((h->access.intr_pending(h) == 0) ||
37930+ return ((h->access->intr_pending(h) == 0) ||
37931 (h->interrupts_enabled == 0));
37932 }
37933
37934@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37935 u32 a;
37936
37937 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37938- return h->access.command_completed(h);
37939+ return h->access->command_completed(h);
37940
37941 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37942 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37943@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37944 trans_support & CFGTBL_Trans_use_short_tags);
37945
37946 /* Change the access methods to the performant access methods */
37947- h->access = SA5_performant_access;
37948+ h->access = &SA5_performant_access;
37949 h->transMethod = CFGTBL_Trans_Performant;
37950
37951 return;
37952@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37953 if (prod_index < 0)
37954 return -ENODEV;
37955 h->product_name = products[prod_index].product_name;
37956- h->access = *(products[prod_index].access);
37957+ h->access = products[prod_index].access;
37958
37959 if (cciss_board_disabled(h)) {
37960 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37961@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37962 }
37963
37964 /* make sure the board interrupts are off */
37965- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37966+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37967 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37968 if (rc)
37969 goto clean2;
37970@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37971 * fake ones to scoop up any residual completions.
37972 */
37973 spin_lock_irqsave(&h->lock, flags);
37974- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37975+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37976 spin_unlock_irqrestore(&h->lock, flags);
37977 free_irq(h->intr[h->intr_mode], h);
37978 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37979@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37980 dev_info(&h->pdev->dev, "Board READY.\n");
37981 dev_info(&h->pdev->dev,
37982 "Waiting for stale completions to drain.\n");
37983- h->access.set_intr_mask(h, CCISS_INTR_ON);
37984+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37985 msleep(10000);
37986- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37987+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37988
37989 rc = controller_reset_failed(h->cfgtable);
37990 if (rc)
37991@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37992 cciss_scsi_setup(h);
37993
37994 /* Turn the interrupts on so we can service requests */
37995- h->access.set_intr_mask(h, CCISS_INTR_ON);
37996+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37997
37998 /* Get the firmware version */
37999 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38000@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38001 kfree(flush_buf);
38002 if (return_code != IO_OK)
38003 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38004- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38005+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38006 free_irq(h->intr[h->intr_mode], h);
38007 }
38008
38009diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38010index 7fda30e..2f27946 100644
38011--- a/drivers/block/cciss.h
38012+++ b/drivers/block/cciss.h
38013@@ -101,7 +101,7 @@ struct ctlr_info
38014 /* information about each logical volume */
38015 drive_info_struct *drv[CISS_MAX_LUN];
38016
38017- struct access_method access;
38018+ struct access_method *access;
38019
38020 /* queue and queue Info */
38021 struct list_head reqQ;
38022@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38023 }
38024
38025 static struct access_method SA5_access = {
38026- SA5_submit_command,
38027- SA5_intr_mask,
38028- SA5_fifo_full,
38029- SA5_intr_pending,
38030- SA5_completed,
38031+ .submit_command = SA5_submit_command,
38032+ .set_intr_mask = SA5_intr_mask,
38033+ .fifo_full = SA5_fifo_full,
38034+ .intr_pending = SA5_intr_pending,
38035+ .command_completed = SA5_completed,
38036 };
38037
38038 static struct access_method SA5B_access = {
38039- SA5_submit_command,
38040- SA5B_intr_mask,
38041- SA5_fifo_full,
38042- SA5B_intr_pending,
38043- SA5_completed,
38044+ .submit_command = SA5_submit_command,
38045+ .set_intr_mask = SA5B_intr_mask,
38046+ .fifo_full = SA5_fifo_full,
38047+ .intr_pending = SA5B_intr_pending,
38048+ .command_completed = SA5_completed,
38049 };
38050
38051 static struct access_method SA5_performant_access = {
38052- SA5_submit_command,
38053- SA5_performant_intr_mask,
38054- SA5_fifo_full,
38055- SA5_performant_intr_pending,
38056- SA5_performant_completed,
38057+ .submit_command = SA5_submit_command,
38058+ .set_intr_mask = SA5_performant_intr_mask,
38059+ .fifo_full = SA5_fifo_full,
38060+ .intr_pending = SA5_performant_intr_pending,
38061+ .command_completed = SA5_performant_completed,
38062 };
38063
38064 struct board_type {
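Two related changes run through cciss here and cpqarray below. First, struct ctlr_info keeps a pointer to its access_method table rather than a by-value copy, so one shared table serves every board and can live in read-only memory; every h->access.fn(...) call accordingly becomes h->access->fn(...). Second, the static tables move from positional to designated initializers, which keeps them correct regardless of field order and matches the uniform style this patch uses for constified function-pointer structs. A self-contained illustration of the initializer change (the two-field struct and dummy handlers are placeholders; the driver's real table has the five handlers shown above):

	struct access_method {
		void (*submit_command)(void *h, void *c);
		void (*set_intr_mask)(void *h, unsigned long mask);
	};

	static void submit_fn(void *h, void *c)          { (void)h; (void)c; }
	static void mask_fn(void *h, unsigned long mask) { (void)h; (void)mask; }

	/* positional: meaning silently depends on declaration order */
	static struct access_method a = { submit_fn, mask_fn };

	/* designated: each slot is named and order-independent */
	static struct access_method b = {
		.submit_command = submit_fn,
		.set_intr_mask  = mask_fn,
	};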
38065diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38066index 2b94403..fd6ad1f 100644
38067--- a/drivers/block/cpqarray.c
38068+++ b/drivers/block/cpqarray.c
38069@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38070 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38071 goto Enomem4;
38072 }
38073- hba[i]->access.set_intr_mask(hba[i], 0);
38074+ hba[i]->access->set_intr_mask(hba[i], 0);
38075 if (request_irq(hba[i]->intr, do_ida_intr,
38076 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38077 {
38078@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38079 add_timer(&hba[i]->timer);
38080
38081 /* Enable IRQ now that spinlock and rate limit timer are set up */
38082- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38083+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38084
38085 for(j=0; j<NWD; j++) {
38086 struct gendisk *disk = ida_gendisk[i][j];
38087@@ -694,7 +694,7 @@ DBGINFO(
38088 for(i=0; i<NR_PRODUCTS; i++) {
38089 if (board_id == products[i].board_id) {
38090 c->product_name = products[i].product_name;
38091- c->access = *(products[i].access);
38092+ c->access = products[i].access;
38093 break;
38094 }
38095 }
38096@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38097 hba[ctlr]->intr = intr;
38098 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38099 hba[ctlr]->product_name = products[j].product_name;
38100- hba[ctlr]->access = *(products[j].access);
38101+ hba[ctlr]->access = products[j].access;
38102 hba[ctlr]->ctlr = ctlr;
38103 hba[ctlr]->board_id = board_id;
38104 hba[ctlr]->pci_dev = NULL; /* not PCI */
38105@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38106
38107 while((c = h->reqQ) != NULL) {
38108 /* Can't do anything if we're busy */
38109- if (h->access.fifo_full(h) == 0)
38110+ if (h->access->fifo_full(h) == 0)
38111 return;
38112
38113 /* Get the first entry from the request Q */
38114@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38115 h->Qdepth--;
38116
38117 /* Tell the controller to do our bidding */
38118- h->access.submit_command(h, c);
38119+ h->access->submit_command(h, c);
38120
38121 /* Get onto the completion Q */
38122 addQ(&h->cmpQ, c);
38123@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38124 unsigned long flags;
38125 __u32 a,a1;
38126
38127- istat = h->access.intr_pending(h);
38128+ istat = h->access->intr_pending(h);
38129 /* Is this interrupt for us? */
38130 if (istat == 0)
38131 return IRQ_NONE;
38132@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38133 */
38134 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38135 if (istat & FIFO_NOT_EMPTY) {
38136- while((a = h->access.command_completed(h))) {
38137+ while((a = h->access->command_completed(h))) {
38138 a1 = a; a &= ~3;
38139 if ((c = h->cmpQ) == NULL)
38140 {
38141@@ -1448,11 +1448,11 @@ static int sendcmd(
38142 /*
38143 * Disable interrupt
38144 */
38145- info_p->access.set_intr_mask(info_p, 0);
38146+ info_p->access->set_intr_mask(info_p, 0);
38147 /* Make sure there is room in the command FIFO */
38148 /* Actually it should be completely empty at this time. */
38149 for (i = 200000; i > 0; i--) {
38150- temp = info_p->access.fifo_full(info_p);
38151+ temp = info_p->access->fifo_full(info_p);
38152 if (temp != 0) {
38153 break;
38154 }
38155@@ -1465,7 +1465,7 @@ DBG(
38156 /*
38157 * Send the cmd
38158 */
38159- info_p->access.submit_command(info_p, c);
38160+ info_p->access->submit_command(info_p, c);
38161 complete = pollcomplete(ctlr);
38162
38163 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38164@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38165 * we check the new geometry. Then turn interrupts back on when
38166 * we're done.
38167 */
38168- host->access.set_intr_mask(host, 0);
38169+ host->access->set_intr_mask(host, 0);
38170 getgeometry(ctlr);
38171- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38172+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38173
38174 for(i=0; i<NWD; i++) {
38175 struct gendisk *disk = ida_gendisk[ctlr][i];
38176@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38177 /* Wait (up to 2 seconds) for a command to complete */
38178
38179 for (i = 200000; i > 0; i--) {
38180- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38181+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38182 if (done == 0) {
38183 udelay(10); /* a short fixed delay */
38184 } else
38185diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38186index be73e9d..7fbf140 100644
38187--- a/drivers/block/cpqarray.h
38188+++ b/drivers/block/cpqarray.h
38189@@ -99,7 +99,7 @@ struct ctlr_info {
38190 drv_info_t drv[NWD];
38191 struct proc_dir_entry *proc;
38192
38193- struct access_method access;
38194+ struct access_method *access;
38195
38196 cmdlist_t *reqQ;
38197 cmdlist_t *cmpQ;
38198diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38199index 434c77d..6d3219a 100644
38200--- a/drivers/block/drbd/drbd_bitmap.c
38201+++ b/drivers/block/drbd/drbd_bitmap.c
38202@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38203 submit_bio(rw, bio);
38204 /* this should not count as user activity and cause the
38205 * resync to throttle -- see drbd_rs_should_slow_down(). */
38206- atomic_add(len >> 9, &device->rs_sect_ev);
38207+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38208 }
38209 }
38210
38211diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38212index b905e98..0812ed8 100644
38213--- a/drivers/block/drbd/drbd_int.h
38214+++ b/drivers/block/drbd/drbd_int.h
38215@@ -385,7 +385,7 @@ struct drbd_epoch {
38216 struct drbd_connection *connection;
38217 struct list_head list;
38218 unsigned int barrier_nr;
38219- atomic_t epoch_size; /* increased on every request added. */
38220+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38221 atomic_t active; /* increased on every req. added, and dec on every finished. */
38222 unsigned long flags;
38223 };
38224@@ -946,7 +946,7 @@ struct drbd_device {
38225 unsigned int al_tr_number;
38226 int al_tr_cycle;
38227 wait_queue_head_t seq_wait;
38228- atomic_t packet_seq;
38229+ atomic_unchecked_t packet_seq;
38230 unsigned int peer_seq;
38231 spinlock_t peer_seq_lock;
38232 unsigned long comm_bm_set; /* communicated number of set bits. */
38233@@ -955,8 +955,8 @@ struct drbd_device {
38234 struct mutex own_state_mutex;
38235 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38236 char congestion_reason; /* Why we where congested... */
38237- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38238- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38239+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38240+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38241 int rs_last_sect_ev; /* counter to compare with */
38242 int rs_last_events; /* counter of read or write "events" (unit sectors)
38243 * on the lower level device when we last looked. */
38244diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38245index 1fc8342..7e7742b 100644
38246--- a/drivers/block/drbd/drbd_main.c
38247+++ b/drivers/block/drbd/drbd_main.c
38248@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38249 p->sector = sector;
38250 p->block_id = block_id;
38251 p->blksize = blksize;
38252- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38253+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38254 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38255 }
38256
38257@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38258 return -EIO;
38259 p->sector = cpu_to_be64(req->i.sector);
38260 p->block_id = (unsigned long)req;
38261- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38262+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38263 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38264 if (device->state.conn >= C_SYNC_SOURCE &&
38265 device->state.conn <= C_PAUSED_SYNC_T)
38266@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38267 atomic_set(&device->unacked_cnt, 0);
38268 atomic_set(&device->local_cnt, 0);
38269 atomic_set(&device->pp_in_use_by_net, 0);
38270- atomic_set(&device->rs_sect_in, 0);
38271- atomic_set(&device->rs_sect_ev, 0);
38272+ atomic_set_unchecked(&device->rs_sect_in, 0);
38273+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38274 atomic_set(&device->ap_in_flight, 0);
38275 atomic_set(&device->md_io.in_use, 0);
38276
38277@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38278 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38279 struct drbd_resource *resource = connection->resource;
38280
38281- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38282- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38283+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38284+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38285 kfree(connection->current_epoch);
38286
38287 idr_destroy(&connection->peer_devices);
38288diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38289index 74df8cf..e41fc24 100644
38290--- a/drivers/block/drbd/drbd_nl.c
38291+++ b/drivers/block/drbd/drbd_nl.c
38292@@ -3637,13 +3637,13 @@ finish:
38293
38294 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38295 {
38296- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38297+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38298 struct sk_buff *msg;
38299 struct drbd_genlmsghdr *d_out;
38300 unsigned seq;
38301 int err = -ENOMEM;
38302
38303- seq = atomic_inc_return(&drbd_genl_seq);
38304+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38305 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38306 if (!msg)
38307 goto failed;
38308diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38309index cee2035..22f66bd 100644
38310--- a/drivers/block/drbd/drbd_receiver.c
38311+++ b/drivers/block/drbd/drbd_receiver.c
38312@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38313 struct drbd_device *device = peer_device->device;
38314 int err;
38315
38316- atomic_set(&device->packet_seq, 0);
38317+ atomic_set_unchecked(&device->packet_seq, 0);
38318 device->peer_seq = 0;
38319
38320 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38321@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38322 do {
38323 next_epoch = NULL;
38324
38325- epoch_size = atomic_read(&epoch->epoch_size);
38326+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38327
38328 switch (ev & ~EV_CLEANUP) {
38329 case EV_PUT:
38330@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38331 rv = FE_DESTROYED;
38332 } else {
38333 epoch->flags = 0;
38334- atomic_set(&epoch->epoch_size, 0);
38335+ atomic_set_unchecked(&epoch->epoch_size, 0);
38336 /* atomic_set(&epoch->active, 0); is already zero */
38337 if (rv == FE_STILL_LIVE)
38338 rv = FE_RECYCLED;
38339@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38340 conn_wait_active_ee_empty(connection);
38341 drbd_flush(connection);
38342
38343- if (atomic_read(&connection->current_epoch->epoch_size)) {
38344+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38345 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38346 if (epoch)
38347 break;
38348@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38349 }
38350
38351 epoch->flags = 0;
38352- atomic_set(&epoch->epoch_size, 0);
38353+ atomic_set_unchecked(&epoch->epoch_size, 0);
38354 atomic_set(&epoch->active, 0);
38355
38356 spin_lock(&connection->epoch_lock);
38357- if (atomic_read(&connection->current_epoch->epoch_size)) {
38358+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38359 list_add(&epoch->list, &connection->current_epoch->list);
38360 connection->current_epoch = epoch;
38361 connection->epochs++;
38362@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38363 list_add_tail(&peer_req->w.list, &device->sync_ee);
38364 spin_unlock_irq(&device->resource->req_lock);
38365
38366- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38367+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38368 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38369 return 0;
38370
38371@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38372 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38373 }
38374
38375- atomic_add(pi->size >> 9, &device->rs_sect_in);
38376+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38377
38378 return err;
38379 }
38380@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38381
38382 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38383 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38384- atomic_inc(&connection->current_epoch->epoch_size);
38385+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38386 err2 = drbd_drain_block(peer_device, pi->size);
38387 if (!err)
38388 err = err2;
38389@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38390
38391 spin_lock(&connection->epoch_lock);
38392 peer_req->epoch = connection->current_epoch;
38393- atomic_inc(&peer_req->epoch->epoch_size);
38394+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38395 atomic_inc(&peer_req->epoch->active);
38396 spin_unlock(&connection->epoch_lock);
38397
38398@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38399
38400 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38401 (int)part_stat_read(&disk->part0, sectors[1]) -
38402- atomic_read(&device->rs_sect_ev);
38403+ atomic_read_unchecked(&device->rs_sect_ev);
38404
38405 if (atomic_read(&device->ap_actlog_cnt)
38406 || curr_events - device->rs_last_events > 64) {
38407@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38408 device->use_csums = true;
38409 } else if (pi->cmd == P_OV_REPLY) {
38410 /* track progress, we may need to throttle */
38411- atomic_add(size >> 9, &device->rs_sect_in);
38412+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38413 peer_req->w.cb = w_e_end_ov_reply;
38414 dec_rs_pending(device);
38415 /* drbd_rs_begin_io done when we sent this request,
38416@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38417 goto out_free_e;
38418
38419 submit_for_resync:
38420- atomic_add(size >> 9, &device->rs_sect_ev);
38421+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38422
38423 submit:
38424 update_receiver_timing_details(connection, drbd_submit_peer_request);
38425@@ -4564,7 +4564,7 @@ struct data_cmd {
38426 int expect_payload;
38427 size_t pkt_size;
38428 int (*fn)(struct drbd_connection *, struct packet_info *);
38429-};
38430+} __do_const;
38431
38432 static struct data_cmd drbd_cmd_handler[] = {
38433 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38434@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38435 if (!list_empty(&connection->current_epoch->list))
38436 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38437 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38438- atomic_set(&connection->current_epoch->epoch_size, 0);
38439+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38440 connection->send.seen_any_write_yet = false;
38441
38442 drbd_info(connection, "Connection closed\n");
38443@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38444 put_ldev(device);
38445 }
38446 dec_rs_pending(device);
38447- atomic_add(blksize >> 9, &device->rs_sect_in);
38448+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38449
38450 return 0;
38451 }
38452@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38453 struct asender_cmd {
38454 size_t pkt_size;
38455 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38456-};
38457+} __do_const;
38458
38459 static struct asender_cmd asender_tbl[] = {
38460 [P_PING] = { 0, got_Ping },
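[annotation] Tagging struct data_cmd and struct asender_cmd with __do_const feeds the constify GCC plugin: a struct that holds only function pointers and set-once data is forced const, so dispatch tables like drbd_cmd_handler[] land in read-only memory and cannot be redirected by a runtime write. Without the plugin, a hand-written const gives the same effect for a single table; a self-contained sketch (handler_tbl and echo are illustrative names):

    struct handler_tbl {
            int (*fn)(int);
    };

    static int echo(int x) { return x; }

    /* const places the table in .rodata: "tbl[0].fn = evil;" is rejected
       at compile time and would fault at run time */
    static const struct handler_tbl tbl[] = {
            { .fn = echo },
    };

    int call0(int x) { return tbl[0].fn(x); }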
38461diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38462index d0fae55..4469096 100644
38463--- a/drivers/block/drbd/drbd_worker.c
38464+++ b/drivers/block/drbd/drbd_worker.c
38465@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38466 list_add_tail(&peer_req->w.list, &device->read_ee);
38467 spin_unlock_irq(&device->resource->req_lock);
38468
38469- atomic_add(size >> 9, &device->rs_sect_ev);
38470+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38471 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38472 return 0;
38473
38474@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38475 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38476 int number, mxb;
38477
38478- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38479+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38480 device->rs_in_flight -= sect_in;
38481
38482 rcu_read_lock();
38483@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38484 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38485 struct fifo_buffer *plan;
38486
38487- atomic_set(&device->rs_sect_in, 0);
38488- atomic_set(&device->rs_sect_ev, 0);
38489+ atomic_set_unchecked(&device->rs_sect_in, 0);
38490+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38491 device->rs_in_flight = 0;
38492 device->rs_last_events =
38493 (int)part_stat_read(&disk->part0, sectors[0]) +
38494diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38495index d1f168b..8f8cc52 100644
38496--- a/drivers/block/loop.c
38497+++ b/drivers/block/loop.c
38498@@ -234,7 +234,7 @@ static int __do_lo_send_write(struct file *file,
38499
38500 file_start_write(file);
38501 set_fs(get_ds());
38502- bw = file->f_op->write(file, buf, len, &pos);
38503+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38504 set_fs(old_fs);
38505 file_end_write(file);
38506 if (likely(bw == len))
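[annotation] The __force_user cast in loop.c documents an intentional address-space violation for the checker: under PaX's user/kernel pointer separation (and for sparse), user pointers carry a distinct address space, but this call site has already widened the segment limit with set_fs(get_ds()) so a kernel buffer may legitimately be handed to f_op->write(). A sketch of the pattern, assuming the usual sparse-style definitions (grsecurity appears to spell the combined annotation __force_user):

    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user  __force __user  /* "yes, this cast is deliberate" */

    /* in-kernel pattern (not standalone): */
    old_fs = get_fs();
    set_fs(get_ds());   /* kernel addresses now pass the access_ok() limit */
    bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
    set_fs(old_fs);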
38507diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38508index 09e628da..7607aaa 100644
38509--- a/drivers/block/pktcdvd.c
38510+++ b/drivers/block/pktcdvd.c
38511@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38512
38513 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38514 {
38515- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38516+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38517 }
38518
38519 /*
38520@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38521 return -EROFS;
38522 }
38523 pd->settings.fp = ti.fp;
38524- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38525+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38526
38527 if (ti.nwa_v) {
38528 pd->nwa = be32_to_cpu(ti.next_writable);
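[annotation] The "- 1" to "- 1UL" edits in pktcdvd.c most plausibly exist for the size_overflow plugin: pd->settings.size is a 32-bit field, and "size - 1" wraps in 32-bit arithmetic when size is 0, which the plugin flags; subtracting 1UL performs the wrap-prone step at unsigned long width instead. For any nonzero power-of-two size the two forms agree, as this standalone check shows:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    int main(void)
    {
            uint32_t size = 32;                    /* stand-in for pd->settings.size */
            sector_t a = ~(sector_t)(size - 1);    /* subtract in 32 bits, then widen */
            sector_t b = ~(sector_t)(size - 1UL);  /* subtract at 64-bit width */
            printf("%llx\n%llx\n", (unsigned long long)a, (unsigned long long)b);
            return 0;                              /* both print ffffffffffffffe0 */
    }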
38529diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38530index b40af32..5fa825d 100644
38531--- a/drivers/block/rbd.c
38532+++ b/drivers/block/rbd.c
38533@@ -64,7 +64,7 @@
38534 * If the counter is already at its maximum value returns
38535 * -EINVAL without updating it.
38536 */
38537-static int atomic_inc_return_safe(atomic_t *v)
38538+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38539 {
38540 unsigned int counter;
38541
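[annotation] __intentional_overflow(-1) is an annotation for the same size_overflow plugin; by its convention as used throughout this patch, -1 designates the function's return value, telling the plugin not to instrument it because overflow is already handled explicitly -- atomic_inc_return_safe() caps the counter and returns -EINVAL itself:

    /* "overflow of the return value is intentional / checked by hand" */
    static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v);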
38542diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38543index e5565fb..71be10b4 100644
38544--- a/drivers/block/smart1,2.h
38545+++ b/drivers/block/smart1,2.h
38546@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38547 }
38548
38549 static struct access_method smart4_access = {
38550- smart4_submit_command,
38551- smart4_intr_mask,
38552- smart4_fifo_full,
38553- smart4_intr_pending,
38554- smart4_completed,
38555+ .submit_command = smart4_submit_command,
38556+ .set_intr_mask = smart4_intr_mask,
38557+ .fifo_full = smart4_fifo_full,
38558+ .intr_pending = smart4_intr_pending,
38559+ .command_completed = smart4_completed,
38560 };
38561
38562 /*
38563@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38564 }
38565
38566 static struct access_method smart2_access = {
38567- smart2_submit_command,
38568- smart2_intr_mask,
38569- smart2_fifo_full,
38570- smart2_intr_pending,
38571- smart2_completed,
38572+ .submit_command = smart2_submit_command,
38573+ .set_intr_mask = smart2_intr_mask,
38574+ .fifo_full = smart2_fifo_full,
38575+ .intr_pending = smart2_intr_pending,
38576+ .command_completed = smart2_completed,
38577 };
38578
38579 /*
38580@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38581 }
38582
38583 static struct access_method smart2e_access = {
38584- smart2e_submit_command,
38585- smart2e_intr_mask,
38586- smart2e_fifo_full,
38587- smart2e_intr_pending,
38588- smart2e_completed,
38589+ .submit_command = smart2e_submit_command,
38590+ .set_intr_mask = smart2e_intr_mask,
38591+ .fifo_full = smart2e_fifo_full,
38592+ .intr_pending = smart2e_intr_pending,
38593+ .command_completed = smart2e_completed,
38594 };
38595
38596 /*
38597@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38598 }
38599
38600 static struct access_method smart1_access = {
38601- smart1_submit_command,
38602- smart1_intr_mask,
38603- smart1_fifo_full,
38604- smart1_intr_pending,
38605- smart1_completed,
38606+ .submit_command = smart1_submit_command,
38607+ .set_intr_mask = smart1_intr_mask,
38608+ .fifo_full = smart1_fifo_full,
38609+ .intr_pending = smart1_intr_pending,
38610+ .command_completed = smart1_completed,
38611 };
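[annotation] Rewriting the smart1/2/4 access_method tables with designated initializers is not cosmetic: positional initializers bind by field order, which silently breaks under grsecurity's structure-layout randomization (randstruct), while designated initializers are layout-independent. A self-contained illustration (sub and msk are stand-in handlers):

    struct access_method {
            void (*submit_command)(void);
            void (*set_intr_mask)(void);
    };

    static void sub(void) { }
    static void msk(void) { }

    /* positional: depends on the declaration order of the fields */
    static struct access_method a = { sub, msk };

    /* designated: stays correct however the fields are (re)ordered */
    static struct access_method b = {
            .submit_command = sub,
            .set_intr_mask  = msk,
    };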
38612diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38613index 55c135b..9f8d60c 100644
38614--- a/drivers/bluetooth/btwilink.c
38615+++ b/drivers/bluetooth/btwilink.c
38616@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38617
38618 static int bt_ti_probe(struct platform_device *pdev)
38619 {
38620- static struct ti_st *hst;
38621+ struct ti_st *hst;
38622 struct hci_dev *hdev;
38623 int err;
38624
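[annotation] The btwilink change is a plain bug fix rather than hardening: "static struct ti_st *hst;" inside the probe function is a single slot shared by every probed device, with its value persisting across probe calls, where an ordinary automatic variable was clearly intended:

    static int bt_ti_probe(struct platform_device *pdev)
    {
            struct ti_st *hst;   /* per-invocation, as the allocation below expects */
            ...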
38625diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38626index 5d28a45..a538f90 100644
38627--- a/drivers/cdrom/cdrom.c
38628+++ b/drivers/cdrom/cdrom.c
38629@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38630 ENSURE(reset, CDC_RESET);
38631 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38632 cdi->mc_flags = 0;
38633- cdo->n_minors = 0;
38634 cdi->options = CDO_USE_FFLAGS;
38635
38636 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38637@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38638 else
38639 cdi->cdda_method = CDDA_OLD;
38640
38641- if (!cdo->generic_packet)
38642- cdo->generic_packet = cdrom_dummy_generic_packet;
38643+ if (!cdo->generic_packet) {
38644+ pax_open_kernel();
38645+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38646+ pax_close_kernel();
38647+ }
38648
38649 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38650 mutex_lock(&cdrom_mutex);
38651@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38652 if (cdi->exit)
38653 cdi->exit(cdi);
38654
38655- cdi->ops->n_minors--;
38656 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38657 }
38658
38659@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38660 */
38661 nr = nframes;
38662 do {
38663- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38664+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38665 if (cgc.buffer)
38666 break;
38667
38668@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38669 struct cdrom_device_info *cdi;
38670 int ret;
38671
38672- ret = scnprintf(info + *pos, max_size - *pos, header);
38673+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38674 if (!ret)
38675 return 1;
38676
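[annotation] Two independent fixes in cdrom.c. First, with ops structures constified, the one-time installation of a default generic_packet handler must briefly lift the kernel's write protection; pax_open_kernel()/pax_close_kernel() bracket exactly that window:

    if (!cdo->generic_packet) {
            pax_open_kernel();      /* rodata temporarily writable */
            *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
            pax_close_kernel();     /* protection restored */
    }

Second, scnprintf(info + *pos, max_size - *pos, header) used a non-literal as the format string, so any '%' inside header would be interpreted as a conversion; routing it through "%s" makes the call injection-proof. The n_minors removals drop writes into the shared ops struct, which would also fault once that struct is read-only.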
38677diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38678index 584bc31..e64a12c 100644
38679--- a/drivers/cdrom/gdrom.c
38680+++ b/drivers/cdrom/gdrom.c
38681@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38682 .audio_ioctl = gdrom_audio_ioctl,
38683 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38684 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38685- .n_minors = 1,
38686 };
38687
38688 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38689diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38690index a4af822..ed58cd1 100644
38691--- a/drivers/char/Kconfig
38692+++ b/drivers/char/Kconfig
38693@@ -17,7 +17,8 @@ config DEVMEM
38694
38695 config DEVKMEM
38696 bool "/dev/kmem virtual device support"
38697- default y
38698+ default n
38699+ depends on !GRKERNSEC_KMEM
38700 help
38701 Say Y here if you want to support the /dev/kmem device. The
38702 /dev/kmem device is rarely used, but can be used for certain
38703@@ -586,6 +587,7 @@ config DEVPORT
38704 bool
38705 depends on !M68K
38706 depends on ISA || PCI
38707+ depends on !GRKERNSEC_KMEM
38708 default y
38709
38710 source "drivers/s390/char/Kconfig"
38711diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38712index a48e05b..6bac831 100644
38713--- a/drivers/char/agp/compat_ioctl.c
38714+++ b/drivers/char/agp/compat_ioctl.c
38715@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38716 return -ENOMEM;
38717 }
38718
38719- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38720+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38721 sizeof(*usegment) * ureserve.seg_count)) {
38722 kfree(usegment);
38723 kfree(ksegment);
38724diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38725index 09f17eb..8531d2f 100644
38726--- a/drivers/char/agp/frontend.c
38727+++ b/drivers/char/agp/frontend.c
38728@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38729 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38730 return -EFAULT;
38731
38732- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38733+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38734 return -EFAULT;
38735
38736 client = agp_find_client_by_pid(reserve.pid);
38737@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38738 if (segment == NULL)
38739 return -ENOMEM;
38740
38741- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38742+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38743 sizeof(struct agp_segment) * reserve.seg_count)) {
38744 kfree(segment);
38745 return -EFAULT;
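[annotation] The agpioc_reserve_wrap() guard now divides by sizeof(struct agp_segment_priv): the multiplication it protects is the kernel-side allocation, which in the surrounding kernel code is sized by seg_count times the private segment struct, so bounding seg_count by the smaller user-visible struct agp_segment left headroom for the allocation size to overflow. The general pattern, standalone (struct elem is illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    struct elem { uint64_t a, b; };

    static void *alloc_array(uint32_t count)
    {
            /* reject counts where count * sizeof(struct elem) would wrap;
               the sizeof must be the type actually being allocated */
            if (count >= ~0U / sizeof(struct elem))
                    return NULL;
            return calloc(count, sizeof(struct elem));
    }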
38746diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38747index 4f94375..413694e 100644
38748--- a/drivers/char/genrtc.c
38749+++ b/drivers/char/genrtc.c
38750@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38751 switch (cmd) {
38752
38753 case RTC_PLL_GET:
38754+ memset(&pll, 0, sizeof(pll));
38755 if (get_rtc_pll(&pll))
38756 return -EINVAL;
38757 else
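[annotation] The genrtc fix closes a kernel stack infoleak: struct rtc_pll_info pll is a stack variable that is later copied wholesale to user space in this ioctl, so any field or padding byte that get_rtc_pll() leaves untouched would leak stale stack contents. Zeroing first is the standard pattern:

    struct rtc_pll_info pll;

    memset(&pll, 0, sizeof(pll));       /* clears fields *and* padding */
    if (get_rtc_pll(&pll))
            return -EINVAL;
    if (copy_to_user(argp, &pll, sizeof(pll)))   /* later in the same ioctl */
            return -EFAULT;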
38758diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38759index 5c0baa9..44011b1 100644
38760--- a/drivers/char/hpet.c
38761+++ b/drivers/char/hpet.c
38762@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38763 }
38764
38765 static int
38766-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38767+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38768 struct hpet_info *info)
38769 {
38770 struct hpet_timer __iomem *timer;
38771diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
38772index 24cc4ed..f9807cf 100644
38773--- a/drivers/char/i8k.c
38774+++ b/drivers/char/i8k.c
38775@@ -788,7 +788,7 @@ static const struct i8k_config_data i8k_config_data[] = {
38776 },
38777 };
38778
38779-static struct dmi_system_id i8k_dmi_table[] __initdata = {
38780+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
38781 {
38782 .ident = "Dell Inspiron",
38783 .matches = {
38784diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38785index 9bb5928..57a7801 100644
38786--- a/drivers/char/ipmi/ipmi_msghandler.c
38787+++ b/drivers/char/ipmi/ipmi_msghandler.c
38788@@ -436,7 +436,7 @@ struct ipmi_smi {
38789 struct proc_dir_entry *proc_dir;
38790 char proc_dir_name[10];
38791
38792- atomic_t stats[IPMI_NUM_STATS];
38793+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38794
38795 /*
38796 * run_to_completion duplicate of smb_info, smi_info
38797@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38798 static DEFINE_MUTEX(smi_watchers_mutex);
38799
38800 #define ipmi_inc_stat(intf, stat) \
38801- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38802+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38803 #define ipmi_get_stat(intf, stat) \
38804- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38805+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38806
38807 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38808 "ACPI", "SMBIOS", "PCI",
38809@@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38810 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38811 init_waitqueue_head(&intf->waitq);
38812 for (i = 0; i < IPMI_NUM_STATS; i++)
38813- atomic_set(&intf->stats[i], 0);
38814+ atomic_set_unchecked(&intf->stats[i], 0);
38815
38816 intf->proc_dir = NULL;
38817
38818diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38819index 518585c..6c985cef 100644
38820--- a/drivers/char/ipmi/ipmi_si_intf.c
38821+++ b/drivers/char/ipmi/ipmi_si_intf.c
38822@@ -289,7 +289,7 @@ struct smi_info {
38823 unsigned char slave_addr;
38824
38825 /* Counters and things for the proc filesystem. */
38826- atomic_t stats[SI_NUM_STATS];
38827+ atomic_unchecked_t stats[SI_NUM_STATS];
38828
38829 struct task_struct *thread;
38830
38831@@ -298,9 +298,9 @@ struct smi_info {
38832 };
38833
38834 #define smi_inc_stat(smi, stat) \
38835- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38836+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38837 #define smi_get_stat(smi, stat) \
38838- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38839+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38840
38841 #define SI_MAX_PARMS 4
38842
38843@@ -3498,7 +3498,7 @@ static int try_smi_init(struct smi_info *new_smi)
38844 atomic_set(&new_smi->req_events, 0);
38845 new_smi->run_to_completion = false;
38846 for (i = 0; i < SI_NUM_STATS; i++)
38847- atomic_set(&new_smi->stats[i], 0);
38848+ atomic_set_unchecked(&new_smi->stats[i], 0);
38849
38850 new_smi->interrupt_disabled = true;
38851 atomic_set(&new_smi->need_watch, 0);
38852diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38853index 297110c..3f69b43 100644
38854--- a/drivers/char/mem.c
38855+++ b/drivers/char/mem.c
38856@@ -18,6 +18,7 @@
38857 #include <linux/raw.h>
38858 #include <linux/tty.h>
38859 #include <linux/capability.h>
38860+#include <linux/security.h>
38861 #include <linux/ptrace.h>
38862 #include <linux/device.h>
38863 #include <linux/highmem.h>
38864@@ -36,6 +37,10 @@
38865
38866 #define DEVPORT_MINOR 4
38867
38868+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38869+extern const struct file_operations grsec_fops;
38870+#endif
38871+
38872 static inline unsigned long size_inside_page(unsigned long start,
38873 unsigned long size)
38874 {
38875@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38876
38877 while (cursor < to) {
38878 if (!devmem_is_allowed(pfn)) {
38879+#ifdef CONFIG_GRKERNSEC_KMEM
38880+ gr_handle_mem_readwrite(from, to);
38881+#else
38882 printk(KERN_INFO
38883 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38884 current->comm, from, to);
38885+#endif
38886 return 0;
38887 }
38888 cursor += PAGE_SIZE;
38889@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38890 }
38891 return 1;
38892 }
38893+#elif defined(CONFIG_GRKERNSEC_KMEM)
38894+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38895+{
38896+ return 0;
38897+}
38898 #else
38899 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38900 {
38901@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38902 #endif
38903
38904 while (count > 0) {
38905- unsigned long remaining;
38906+ unsigned long remaining = 0;
38907+ char *temp;
38908
38909 sz = size_inside_page(p, count);
38910
38911@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38912 if (!ptr)
38913 return -EFAULT;
38914
38915- remaining = copy_to_user(buf, ptr, sz);
38916+#ifdef CONFIG_PAX_USERCOPY
38917+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38918+ if (!temp) {
38919+ unxlate_dev_mem_ptr(p, ptr);
38920+ return -ENOMEM;
38921+ }
38922+ remaining = probe_kernel_read(temp, ptr, sz);
38923+#else
38924+ temp = ptr;
38925+#endif
38926+
38927+ if (!remaining)
38928+ remaining = copy_to_user(buf, temp, sz);
38929+
38930+#ifdef CONFIG_PAX_USERCOPY
38931+ kfree(temp);
38932+#endif
38933+
38934 unxlate_dev_mem_ptr(p, ptr);
38935 if (remaining)
38936 return -EFAULT;
38937@@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38938 size_t count, loff_t *ppos)
38939 {
38940 unsigned long p = *ppos;
38941- ssize_t low_count, read, sz;
38942+ ssize_t low_count, read, sz, err = 0;
38943 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38944- int err = 0;
38945
38946 read = 0;
38947 if (p < (unsigned long) high_memory) {
38948@@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38949 }
38950 #endif
38951 while (low_count > 0) {
38952+ char *temp;
38953+
38954 sz = size_inside_page(p, low_count);
38955
38956 /*
38957@@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38958 */
38959 kbuf = xlate_dev_kmem_ptr((void *)p);
38960
38961- if (copy_to_user(buf, kbuf, sz))
38962+#ifdef CONFIG_PAX_USERCOPY
38963+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38964+ if (!temp)
38965+ return -ENOMEM;
38966+ err = probe_kernel_read(temp, kbuf, sz);
38967+#else
38968+ temp = kbuf;
38969+#endif
38970+
38971+ if (!err)
38972+ err = copy_to_user(buf, temp, sz);
38973+
38974+#ifdef CONFIG_PAX_USERCOPY
38975+ kfree(temp);
38976+#endif
38977+
38978+ if (err)
38979 return -EFAULT;
38980 buf += sz;
38981 p += sz;
38982@@ -804,6 +853,9 @@ static const struct memdev {
38983 #ifdef CONFIG_PRINTK
38984 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
38985 #endif
38986+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38987+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
38988+#endif
38989 };
38990
38991 static int memory_open(struct inode *inode, struct file *filp)
38992@@ -865,7 +917,7 @@ static int __init chr_dev_init(void)
38993 continue;
38994
38995 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38996- NULL, devlist[minor].name);
38997+ NULL, "%s", devlist[minor].name);
38998 }
38999
39000 return tty_init();
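[annotation] The PAX_USERCOPY hunks in mem.c stage /dev/mem and /dev/kmem reads through a bounce buffer: the usercopy checker validates every copy_to_user() source against a known object's bounds, and an arbitrary physical or kernel address is not such an object. probe_kernel_read() into a correctly sized kmalloc object both satisfies the checker and converts faults on bad source addresses into a clean error. Condensed sketch of the read path added above:

    char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);  /* bounce buffer */
    if (!temp)
            return -ENOMEM;
    remaining = probe_kernel_read(temp, ptr, sz);     /* may fail, never oopses */
    if (!remaining)
            remaining = copy_to_user(buf, temp, sz);  /* source is now a
                                                         bounded heap object */
    kfree(temp);

The GRKERNSEC_KMEM branch in range_is_allowed() and the /dev/grsec entry in the devlist are grsecurity's own access-control hooks rather than generic fixes.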
39001diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39002index 9df78e2..01ba9ae 100644
39003--- a/drivers/char/nvram.c
39004+++ b/drivers/char/nvram.c
39005@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39006
39007 spin_unlock_irq(&rtc_lock);
39008
39009- if (copy_to_user(buf, contents, tmp - contents))
39010+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39011 return -EFAULT;
39012
39013 *ppos = i;
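[annotation] The nvram_read() change is a defensive bound: tmp - contents is the number of bytes the preceding loop produced into the on-stack contents[] buffer, and checking it against sizeof(contents) before copy_to_user() ensures a miscounted loop can never leak adjacent stack memory:

    if (tmp - contents > sizeof(contents) ||
        copy_to_user(buf, contents, tmp - contents))
            return -EFAULT;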
39014diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39015index 0ea9986..e7b07e4 100644
39016--- a/drivers/char/pcmcia/synclink_cs.c
39017+++ b/drivers/char/pcmcia/synclink_cs.c
39018@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39019
39020 if (debug_level >= DEBUG_LEVEL_INFO)
39021 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39022- __FILE__, __LINE__, info->device_name, port->count);
39023+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39024
39025 if (tty_port_close_start(port, tty, filp) == 0)
39026 goto cleanup;
39027@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39028 cleanup:
39029 if (debug_level >= DEBUG_LEVEL_INFO)
39030 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39031- tty->driver->name, port->count);
39032+ tty->driver->name, atomic_read(&port->count));
39033 }
39034
39035 /* Wait until the transmitter is empty.
39036@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39037
39038 if (debug_level >= DEBUG_LEVEL_INFO)
39039 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39040- __FILE__, __LINE__, tty->driver->name, port->count);
39041+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39042
39043 /* If port is closing, signal caller to try again */
39044 if (port->flags & ASYNC_CLOSING){
39045@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39046 goto cleanup;
39047 }
39048 spin_lock(&port->lock);
39049- port->count++;
39050+ atomic_inc(&port->count);
39051 spin_unlock(&port->lock);
39052 spin_unlock_irqrestore(&info->netlock, flags);
39053
39054- if (port->count == 1) {
39055+ if (atomic_read(&port->count) == 1) {
39056 /* 1st open on this device, init hardware */
39057 retval = startup(info, tty);
39058 if (retval < 0)
39059@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39060 unsigned short new_crctype;
39061
39062 /* return error if TTY interface open */
39063- if (info->port.count)
39064+ if (atomic_read(&info->port.count))
39065 return -EBUSY;
39066
39067 switch (encoding)
39068@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39069
39070 /* arbitrate between network and tty opens */
39071 spin_lock_irqsave(&info->netlock, flags);
39072- if (info->port.count != 0 || info->netcount != 0) {
39073+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39074 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39075 spin_unlock_irqrestore(&info->netlock, flags);
39076 return -EBUSY;
39077@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39078 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39079
39080 /* return error if TTY interface open */
39081- if (info->port.count)
39082+ if (atomic_read(&info->port.count))
39083 return -EBUSY;
39084
39085 if (cmd != SIOCWANDEV)
39086diff --git a/drivers/char/random.c b/drivers/char/random.c
39087index 9cd6968..6416f00 100644
39088--- a/drivers/char/random.c
39089+++ b/drivers/char/random.c
39090@@ -289,9 +289,6 @@
39091 /*
39092 * To allow fractional bits to be tracked, the entropy_count field is
39093 * denominated in units of 1/8th bits.
39094- *
39095- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39096- * credit_entropy_bits() needs to be 64 bits wide.
39097 */
39098 #define ENTROPY_SHIFT 3
39099 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39100@@ -439,9 +436,9 @@ struct entropy_store {
39101 };
39102
39103 static void push_to_pool(struct work_struct *work);
39104-static __u32 input_pool_data[INPUT_POOL_WORDS];
39105-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39106-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39107+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39108+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39109+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39110
39111 static struct entropy_store input_pool = {
39112 .poolinfo = &poolinfo_table[0],
39113@@ -635,7 +632,7 @@ retry:
39114 /* The +2 corresponds to the /4 in the denominator */
39115
39116 do {
39117- unsigned int anfrac = min(pnfrac, pool_size/2);
39118+ u64 anfrac = min(pnfrac, pool_size/2);
39119 unsigned int add =
39120 ((pool_size - entropy_count)*anfrac*3) >> s;
39121
39122@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39123
39124 extract_buf(r, tmp);
39125 i = min_t(int, nbytes, EXTRACT_SIZE);
39126- if (copy_to_user(buf, tmp, i)) {
39127+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39128 ret = -EFAULT;
39129 break;
39130 }
39131@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39132 static int proc_do_uuid(struct ctl_table *table, int write,
39133 void __user *buffer, size_t *lenp, loff_t *ppos)
39134 {
39135- struct ctl_table fake_table;
39136+ ctl_table_no_const fake_table;
39137 unsigned char buf[64], tmp_uuid[16], *uuid;
39138
39139 uuid = table->data;
39140@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39141 static int proc_do_entropy(struct ctl_table *table, int write,
39142 void __user *buffer, size_t *lenp, loff_t *ppos)
39143 {
39144- struct ctl_table fake_table;
39145+ ctl_table_no_const fake_table;
39146 int entropy_count;
39147
39148 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
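[annotation] Three separate changes in random.c. The pool arrays gain __latent_entropy, which the latent_entropy GCC plugin uses to seed them with build-time randomness so the pools never start fully predictable. fake_table becomes ctl_table_no_const because it is a stack copy that must stay writable while the real ctl_table is constified. And widening anfrac to u64 is what allowed deleting the old invariant comment:

    /* pool_size and pnfrac are in 1/8-bit units; for the 4096-bit input
       pool, pool_size = 4096 << 3 = 32768 and anfrac <= pool_size / 2.
       In 32-bit math the product
               (pool_size - entropy_count) * anfrac * 3
       is bounded by 2^15 * 2^14 * 3 < 2^31 -- exactly the deleted rule
       2*(ENTROPY_SHIFT + log2(poolbits)) <= 31.  With anfrac as u64 the
       multiply is performed in 64 bits, so the fragile invariant (and
       the comment policing it) can go. */
    u64 anfrac = min(pnfrac, pool_size/2);
    unsigned int add = ((pool_size - entropy_count) * anfrac * 3) >> s;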
39149diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39150index e496dae..3db53b6 100644
39151--- a/drivers/char/sonypi.c
39152+++ b/drivers/char/sonypi.c
39153@@ -54,6 +54,7 @@
39154
39155 #include <asm/uaccess.h>
39156 #include <asm/io.h>
39157+#include <asm/local.h>
39158
39159 #include <linux/sonypi.h>
39160
39161@@ -490,7 +491,7 @@ static struct sonypi_device {
39162 spinlock_t fifo_lock;
39163 wait_queue_head_t fifo_proc_list;
39164 struct fasync_struct *fifo_async;
39165- int open_count;
39166+ local_t open_count;
39167 int model;
39168 struct input_dev *input_jog_dev;
39169 struct input_dev *input_key_dev;
39170@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39171 static int sonypi_misc_release(struct inode *inode, struct file *file)
39172 {
39173 mutex_lock(&sonypi_device.lock);
39174- sonypi_device.open_count--;
39175+ local_dec(&sonypi_device.open_count);
39176 mutex_unlock(&sonypi_device.lock);
39177 return 0;
39178 }
39179@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39180 {
39181 mutex_lock(&sonypi_device.lock);
39182 /* Flush input queue on first open */
39183- if (!sonypi_device.open_count)
39184+ if (!local_read(&sonypi_device.open_count))
39185 kfifo_reset(&sonypi_device.fifo);
39186- sonypi_device.open_count++;
39187+ local_inc(&sonypi_device.open_count);
39188 mutex_unlock(&sonypi_device.lock);
39189
39190 return 0;
39191@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39192
39193 static struct platform_device *sonypi_platform_device;
39194
39195-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39196+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39197 {
39198 .ident = "Sony Vaio",
39199 .matches = {
39200diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39201index 565a947..dcdc06e 100644
39202--- a/drivers/char/tpm/tpm_acpi.c
39203+++ b/drivers/char/tpm/tpm_acpi.c
39204@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39205 virt = acpi_os_map_iomem(start, len);
39206 if (!virt) {
39207 kfree(log->bios_event_log);
39208+ log->bios_event_log = NULL;
39209 printk("%s: ERROR - Unable to map memory\n", __func__);
39210 return -EIO;
39211 }
39212
39213- memcpy_fromio(log->bios_event_log, virt, len);
39214+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39215
39216 acpi_os_unmap_iomem(virt, len);
39217 return 0;
39218diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39219index 3a56a13..f8cbd25 100644
39220--- a/drivers/char/tpm/tpm_eventlog.c
39221+++ b/drivers/char/tpm/tpm_eventlog.c
39222@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39223 event = addr;
39224
39225 if ((event->event_type == 0 && event->event_size == 0) ||
39226- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39227+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39228 return NULL;
39229
39230 return addr;
39231@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39232 return NULL;
39233
39234 if ((event->event_type == 0 && event->event_size == 0) ||
39235- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39236+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39237 return NULL;
39238
39239 (*pos)++;
39240@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39241 int i;
39242
39243 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39244- seq_putc(m, data[i]);
39245+ if (!seq_putc(m, data[i]))
39246+ return -EFAULT;
39247
39248 return 0;
39249 }
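[annotation] The tpm_eventlog rewrite is the classic overflow-safe bounds check: event->event_size comes from the (potentially attacker-written) event log, and in "addr + sizeof(struct tcpa_event) + event_size >= limit" a huge event_size wraps the sum around so the check passes; moving the variable to the left side removes every addition that can wrap. A 32-bit demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t addr = 0x1000, limit = 0x2000, hdr = 32;
            uint32_t evsz = UINT32_MAX - 16;            /* hostile length */

            /* old form: the sum wraps to 0x100f, which is < limit: prints 0
               (hostile size accepted) */
            printf("old rejects: %d\n", addr + hdr + evsz >= limit);
            /* new form: no wrap possible: prints 1 (hostile size rejected) */
            printf("new rejects: %d\n", evsz >= limit - addr - hdr);
            return 0;
    }

The precondition is that addr plus the header size is already known to be within limit, which the callers establish; the seq_putc() hunk additionally stops discarding that function's return value.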
39250diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39251index 72d7028..1586601 100644
39252--- a/drivers/char/virtio_console.c
39253+++ b/drivers/char/virtio_console.c
39254@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39255 if (to_user) {
39256 ssize_t ret;
39257
39258- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39259+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39260 if (ret)
39261 return -EFAULT;
39262 } else {
39263@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39264 if (!port_has_data(port) && !port->host_connected)
39265 return 0;
39266
39267- return fill_readbuf(port, ubuf, count, true);
39268+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39269 }
39270
39271 static int wait_port_writable(struct port *port, bool nonblock)
39272diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39273index 956b7e5..b655045 100644
39274--- a/drivers/clk/clk-composite.c
39275+++ b/drivers/clk/clk-composite.c
39276@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39277 struct clk *clk;
39278 struct clk_init_data init;
39279 struct clk_composite *composite;
39280- struct clk_ops *clk_composite_ops;
39281+ clk_ops_no_const *clk_composite_ops;
39282
39283 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39284 if (!composite) {
39285diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39286index dd3a78c..386d49c 100644
39287--- a/drivers/clk/socfpga/clk-gate.c
39288+++ b/drivers/clk/socfpga/clk-gate.c
39289@@ -22,6 +22,7 @@
39290 #include <linux/mfd/syscon.h>
39291 #include <linux/of.h>
39292 #include <linux/regmap.h>
39293+#include <asm/pgtable.h>
39294
39295 #include "clk.h"
39296
39297@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39298 return 0;
39299 }
39300
39301-static struct clk_ops gateclk_ops = {
39302+static clk_ops_no_const gateclk_ops __read_only = {
39303 .prepare = socfpga_clk_prepare,
39304 .recalc_rate = socfpga_clk_recalc_rate,
39305 .get_parent = socfpga_clk_get_parent,
39306@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39307 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39308 socfpga_clk->hw.bit_idx = clk_gate[1];
39309
39310- gateclk_ops.enable = clk_gate_ops.enable;
39311- gateclk_ops.disable = clk_gate_ops.disable;
39312+ pax_open_kernel();
39313+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39314+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39315+ pax_close_kernel();
39316 }
39317
39318 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39319diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39320index de6da95..c98278b 100644
39321--- a/drivers/clk/socfpga/clk-pll.c
39322+++ b/drivers/clk/socfpga/clk-pll.c
39323@@ -21,6 +21,7 @@
39324 #include <linux/io.h>
39325 #include <linux/of.h>
39326 #include <linux/of_address.h>
39327+#include <asm/pgtable.h>
39328
39329 #include "clk.h"
39330
39331@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39332 CLK_MGR_PLL_CLK_SRC_MASK;
39333 }
39334
39335-static struct clk_ops clk_pll_ops = {
39336+static clk_ops_no_const clk_pll_ops __read_only = {
39337 .recalc_rate = clk_pll_recalc_rate,
39338 .get_parent = clk_pll_get_parent,
39339 };
39340@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39341 pll_clk->hw.hw.init = &init;
39342
39343 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39344- clk_pll_ops.enable = clk_gate_ops.enable;
39345- clk_pll_ops.disable = clk_gate_ops.disable;
39346+ pax_open_kernel();
39347+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39348+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39349+ pax_close_kernel();
39350
39351 clk = clk_register(NULL, &pll_clk->hw.hw);
39352 if (WARN_ON(IS_ERR(clk))) {
39353diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39354index b0c18ed..1713a80 100644
39355--- a/drivers/cpufreq/acpi-cpufreq.c
39356+++ b/drivers/cpufreq/acpi-cpufreq.c
39357@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39358 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39359 per_cpu(acfreq_data, cpu) = data;
39360
39361- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39362- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39363+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39364+ pax_open_kernel();
39365+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39366+ pax_close_kernel();
39367+ }
39368
39369 result = acpi_processor_register_performance(data->acpi_data, cpu);
39370 if (result)
39371@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39372 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39373 break;
39374 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39375- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39376+ pax_open_kernel();
39377+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39378+ pax_close_kernel();
39379 break;
39380 default:
39381 break;
39382@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39383 if (!msrs)
39384 return;
39385
39386- acpi_cpufreq_driver.boost_supported = true;
39387- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39388+ pax_open_kernel();
39389+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39390+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39391+ pax_close_kernel();
39392
39393 cpu_notifier_register_begin();
39394
39395diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39396index bab67db..91af7e3 100644
39397--- a/drivers/cpufreq/cpufreq-dt.c
39398+++ b/drivers/cpufreq/cpufreq-dt.c
39399@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39400 if (!IS_ERR(cpu_reg))
39401 regulator_put(cpu_reg);
39402
39403- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39404+ pax_open_kernel();
39405+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39406+ pax_close_kernel();
39407
39408 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39409 if (ret)
39410diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39411index 8ae655c..3141442 100644
39412--- a/drivers/cpufreq/cpufreq.c
39413+++ b/drivers/cpufreq/cpufreq.c
39414@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39415 }
39416
39417 mutex_lock(&cpufreq_governor_mutex);
39418- list_del(&governor->governor_list);
39419+ pax_list_del(&governor->governor_list);
39420 mutex_unlock(&cpufreq_governor_mutex);
39421 return;
39422 }
39423@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39424 return NOTIFY_OK;
39425 }
39426
39427-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39428+static struct notifier_block cpufreq_cpu_notifier = {
39429 .notifier_call = cpufreq_cpu_callback,
39430 };
39431
39432@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39433 return 0;
39434
39435 write_lock_irqsave(&cpufreq_driver_lock, flags);
39436- cpufreq_driver->boost_enabled = state;
39437+ pax_open_kernel();
39438+ *(bool *)&cpufreq_driver->boost_enabled = state;
39439+ pax_close_kernel();
39440 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39441
39442 ret = cpufreq_driver->set_boost(state);
39443 if (ret) {
39444 write_lock_irqsave(&cpufreq_driver_lock, flags);
39445- cpufreq_driver->boost_enabled = !state;
39446+ pax_open_kernel();
39447+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39448+ pax_close_kernel();
39449 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39450
39451 pr_err("%s: Cannot %s BOOST\n",
39452@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39453 cpufreq_driver = driver_data;
39454 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39455
39456- if (driver_data->setpolicy)
39457- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39458+ if (driver_data->setpolicy) {
39459+ pax_open_kernel();
39460+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39461+ pax_close_kernel();
39462+ }
39463
39464 if (cpufreq_boost_supported()) {
39465 /*
39466 * Check if driver provides function to enable boost -
39467 * if not, use cpufreq_boost_set_sw as default
39468 */
39469- if (!cpufreq_driver->set_boost)
39470- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39471+ if (!cpufreq_driver->set_boost) {
39472+ pax_open_kernel();
39473+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39474+ pax_close_kernel();
39475+ }
39476
39477 ret = cpufreq_sysfs_create_file(&boost.attr);
39478 if (ret) {
39479diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39480index 1b44496..b80ff5e 100644
39481--- a/drivers/cpufreq/cpufreq_governor.c
39482+++ b/drivers/cpufreq/cpufreq_governor.c
39483@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39484 struct dbs_data *dbs_data;
39485 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39486 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39487- struct od_ops *od_ops = NULL;
39488+ const struct od_ops *od_ops = NULL;
39489 struct od_dbs_tuners *od_tuners = NULL;
39490 struct cs_dbs_tuners *cs_tuners = NULL;
39491 struct cpu_dbs_common_info *cpu_cdbs;
39492@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39493
39494 if ((cdata->governor == GOV_CONSERVATIVE) &&
39495 (!policy->governor->initialized)) {
39496- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39497+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39498
39499 cpufreq_register_notifier(cs_ops->notifier_block,
39500 CPUFREQ_TRANSITION_NOTIFIER);
39501@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39502
39503 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39504 (policy->governor->initialized == 1)) {
39505- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39506+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39507
39508 cpufreq_unregister_notifier(cs_ops->notifier_block,
39509 CPUFREQ_TRANSITION_NOTIFIER);
39510diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39511index cc401d1..8197340 100644
39512--- a/drivers/cpufreq/cpufreq_governor.h
39513+++ b/drivers/cpufreq/cpufreq_governor.h
39514@@ -212,7 +212,7 @@ struct common_dbs_data {
39515 void (*exit)(struct dbs_data *dbs_data);
39516
39517 /* Governor specific ops, see below */
39518- void *gov_ops;
39519+ const void *gov_ops;
39520 };
39521
39522 /* Governor Per policy data */
39523@@ -232,7 +232,7 @@ struct od_ops {
39524 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39525 unsigned int freq_next, unsigned int relation);
39526 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39527-};
39528+} __no_const;
39529
39530 struct cs_ops {
39531 struct notifier_block *notifier_block;
39532diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39533index ad3f38f..8f086cd 100644
39534--- a/drivers/cpufreq/cpufreq_ondemand.c
39535+++ b/drivers/cpufreq/cpufreq_ondemand.c
39536@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39537
39538 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39539
39540-static struct od_ops od_ops = {
39541+static struct od_ops od_ops __read_only = {
39542 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39543 .powersave_bias_target = generic_powersave_bias_target,
39544 .freq_increase = dbs_freq_increase,
39545@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39546 (struct cpufreq_policy *, unsigned int, unsigned int),
39547 unsigned int powersave_bias)
39548 {
39549- od_ops.powersave_bias_target = f;
39550+ pax_open_kernel();
39551+ *(void **)&od_ops.powersave_bias_target = f;
39552+ pax_close_kernel();
39553 od_set_powersave_bias(powersave_bias);
39554 }
39555 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39556
39557 void od_unregister_powersave_bias_handler(void)
39558 {
39559- od_ops.powersave_bias_target = generic_powersave_bias_target;
39560+ pax_open_kernel();
39561+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39562+ pax_close_kernel();
39563 od_set_powersave_bias(0);
39564 }
39565 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39566diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39567index 872c577..5fb3c20 100644
39568--- a/drivers/cpufreq/intel_pstate.c
39569+++ b/drivers/cpufreq/intel_pstate.c
39570@@ -133,10 +133,10 @@ struct pstate_funcs {
39571 struct cpu_defaults {
39572 struct pstate_adjust_policy pid_policy;
39573 struct pstate_funcs funcs;
39574-};
39575+} __do_const;
39576
39577 static struct pstate_adjust_policy pid_params;
39578-static struct pstate_funcs pstate_funcs;
39579+static struct pstate_funcs *pstate_funcs;
39580 static int hwp_active;
39581
39582 struct perf_limits {
39583@@ -690,18 +690,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39584
39585 cpu->pstate.current_pstate = pstate;
39586
39587- pstate_funcs.set(cpu, pstate);
39588+ pstate_funcs->set(cpu, pstate);
39589 }
39590
39591 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39592 {
39593- cpu->pstate.min_pstate = pstate_funcs.get_min();
39594- cpu->pstate.max_pstate = pstate_funcs.get_max();
39595- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39596- cpu->pstate.scaling = pstate_funcs.get_scaling();
39597+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39598+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39599+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39600+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39601
39602- if (pstate_funcs.get_vid)
39603- pstate_funcs.get_vid(cpu);
39604+ if (pstate_funcs->get_vid)
39605+ pstate_funcs->get_vid(cpu);
39606 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39607 }
39608
39609@@ -1030,9 +1030,9 @@ static int intel_pstate_msrs_not_valid(void)
39610 rdmsrl(MSR_IA32_APERF, aperf);
39611 rdmsrl(MSR_IA32_MPERF, mperf);
39612
39613- if (!pstate_funcs.get_max() ||
39614- !pstate_funcs.get_min() ||
39615- !pstate_funcs.get_turbo())
39616+ if (!pstate_funcs->get_max() ||
39617+ !pstate_funcs->get_min() ||
39618+ !pstate_funcs->get_turbo())
39619 return -ENODEV;
39620
39621 rdmsrl(MSR_IA32_APERF, tmp);
39622@@ -1046,7 +1046,7 @@ static int intel_pstate_msrs_not_valid(void)
39623 return 0;
39624 }
39625
39626-static void copy_pid_params(struct pstate_adjust_policy *policy)
39627+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39628 {
39629 pid_params.sample_rate_ms = policy->sample_rate_ms;
39630 pid_params.p_gain_pct = policy->p_gain_pct;
39631@@ -1058,12 +1058,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39632
39633 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39634 {
39635- pstate_funcs.get_max = funcs->get_max;
39636- pstate_funcs.get_min = funcs->get_min;
39637- pstate_funcs.get_turbo = funcs->get_turbo;
39638- pstate_funcs.get_scaling = funcs->get_scaling;
39639- pstate_funcs.set = funcs->set;
39640- pstate_funcs.get_vid = funcs->get_vid;
39641+ pstate_funcs = funcs;
39642 }
39643
39644 #if IS_ENABLED(CONFIG_ACPI)
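[annotation] The intel_pstate change removes a writable function-pointer sink: instead of copying the per-CPU defaults field by field into a mutable static struct pstate_funcs, the driver now keeps a pointer to the (now __do_const, hence read-only) table, so the pointers that select the MSR accessors can no longer be overwritten in place:

    static struct pstate_funcs *pstate_funcs;   /* points into read-only data */

    static void copy_cpu_funcs(struct pstate_funcs *funcs)
    {
            pstate_funcs = funcs;   /* one pointer store replaces six copies */
    }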
39645diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39646index 529cfd9..0e28fff 100644
39647--- a/drivers/cpufreq/p4-clockmod.c
39648+++ b/drivers/cpufreq/p4-clockmod.c
39649@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39650 case 0x0F: /* Core Duo */
39651 case 0x16: /* Celeron Core */
39652 case 0x1C: /* Atom */
39653- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39654+ pax_open_kernel();
39655+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39656+ pax_close_kernel();
39657 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39658 case 0x0D: /* Pentium M (Dothan) */
39659- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39660+ pax_open_kernel();
39661+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39662+ pax_close_kernel();
39663 /* fall through */
39664 case 0x09: /* Pentium M (Banias) */
39665 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39666@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39667
39668 /* on P-4s, the TSC runs with constant frequency independent whether
39669 * throttling is active or not. */
39670- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39671+ pax_open_kernel();
39672+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39673+ pax_close_kernel();
39674
39675 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39676 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39677diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39678index 9bb42ba..b01b4a2 100644
39679--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39680+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39681@@ -18,14 +18,12 @@
39682 #include <asm/head.h>
39683 #include <asm/timer.h>
39684
39685-static struct cpufreq_driver *cpufreq_us3_driver;
39686-
39687 struct us3_freq_percpu_info {
39688 struct cpufreq_frequency_table table[4];
39689 };
39690
39691 /* Indexed by cpu number. */
39692-static struct us3_freq_percpu_info *us3_freq_table;
39693+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39694
39695 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39696 * in the Safari config register.
39697@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39698
39699 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39700 {
39701- if (cpufreq_us3_driver)
39702- us3_freq_target(policy, 0);
39703+ us3_freq_target(policy, 0);
39704
39705 return 0;
39706 }
39707
39708+static int __init us3_freq_init(void);
39709+static void __exit us3_freq_exit(void);
39710+
39711+static struct cpufreq_driver cpufreq_us3_driver = {
39712+ .init = us3_freq_cpu_init,
39713+ .verify = cpufreq_generic_frequency_table_verify,
39714+ .target_index = us3_freq_target,
39715+ .get = us3_freq_get,
39716+ .exit = us3_freq_cpu_exit,
39717+ .name = "UltraSPARC-III",
39718+
39719+};
39720+
39721 static int __init us3_freq_init(void)
39722 {
39723 unsigned long manuf, impl, ver;
39724- int ret;
39725
39726 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39727 return -ENODEV;
39728@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39729 (impl == CHEETAH_IMPL ||
39730 impl == CHEETAH_PLUS_IMPL ||
39731 impl == JAGUAR_IMPL ||
39732- impl == PANTHER_IMPL)) {
39733- struct cpufreq_driver *driver;
39734-
39735- ret = -ENOMEM;
39736- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39737- if (!driver)
39738- goto err_out;
39739-
39740- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39741- GFP_KERNEL);
39742- if (!us3_freq_table)
39743- goto err_out;
39744-
39745- driver->init = us3_freq_cpu_init;
39746- driver->verify = cpufreq_generic_frequency_table_verify;
39747- driver->target_index = us3_freq_target;
39748- driver->get = us3_freq_get;
39749- driver->exit = us3_freq_cpu_exit;
39750- strcpy(driver->name, "UltraSPARC-III");
39751-
39752- cpufreq_us3_driver = driver;
39753- ret = cpufreq_register_driver(driver);
39754- if (ret)
39755- goto err_out;
39756-
39757- return 0;
39758-
39759-err_out:
39760- if (driver) {
39761- kfree(driver);
39762- cpufreq_us3_driver = NULL;
39763- }
39764- kfree(us3_freq_table);
39765- us3_freq_table = NULL;
39766- return ret;
39767- }
39768+ impl == PANTHER_IMPL))
39769+ return cpufreq_register_driver(&cpufreq_us3_driver);
39770
39771 return -ENODEV;
39772 }
39773
39774 static void __exit us3_freq_exit(void)
39775 {
39776- if (cpufreq_us3_driver) {
39777- cpufreq_unregister_driver(cpufreq_us3_driver);
39778- kfree(cpufreq_us3_driver);
39779- cpufreq_us3_driver = NULL;
39780- kfree(us3_freq_table);
39781- us3_freq_table = NULL;
39782- }
39783+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39784 }
39785
39786 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
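[annotation] The sparc-us3 rewrite applies the same constification logic at file scope: a kzalloc'ed cpufreq_driver filled in at runtime is a heap object full of function pointers that can never be made read-only, whereas a statically defined driver can be, and the error handling for the two allocations disappears along with them. us3_freq_table likewise becomes a fixed NR_CPUS array instead of a runtime allocation:

    static struct cpufreq_driver cpufreq_us3_driver = {
            .init         = us3_freq_cpu_init,
            .verify       = cpufreq_generic_frequency_table_verify,
            .target_index = us3_freq_target,
            .get          = us3_freq_get,
            .exit         = us3_freq_cpu_exit,
            .name         = "UltraSPARC-III",
    };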
39787diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39788index 7d4a315..21bb886 100644
39789--- a/drivers/cpufreq/speedstep-centrino.c
39790+++ b/drivers/cpufreq/speedstep-centrino.c
39791@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39792 !cpu_has(cpu, X86_FEATURE_EST))
39793 return -ENODEV;
39794
39795- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39796- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39797+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39798+ pax_open_kernel();
39799+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39800+ pax_close_kernel();
39801+ }
39802
39803 if (policy->cpu != 0)
39804 return -ENODEV;
39805diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39806index 2697e87..c32476c 100644
39807--- a/drivers/cpuidle/driver.c
39808+++ b/drivers/cpuidle/driver.c
39809@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39810
39811 static void poll_idle_init(struct cpuidle_driver *drv)
39812 {
39813- struct cpuidle_state *state = &drv->states[0];
39814+ cpuidle_state_no_const *state = &drv->states[0];
39815
39816 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39817 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39818diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39819index fb9f511..213e6cc 100644
39820--- a/drivers/cpuidle/governor.c
39821+++ b/drivers/cpuidle/governor.c
39822@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39823 mutex_lock(&cpuidle_lock);
39824 if (__cpuidle_find_governor(gov->name) == NULL) {
39825 ret = 0;
39826- list_add_tail(&gov->governor_list, &cpuidle_governors);
39827+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39828 if (!cpuidle_curr_governor ||
39829 cpuidle_curr_governor->rating < gov->rating)
39830 cpuidle_switch_governor(gov);
39831diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39832index 832a2c3..1794080 100644
39833--- a/drivers/cpuidle/sysfs.c
39834+++ b/drivers/cpuidle/sysfs.c
39835@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39836 NULL
39837 };
39838
39839-static struct attribute_group cpuidle_attr_group = {
39840+static attribute_group_no_const cpuidle_attr_group = {
39841 .attrs = cpuidle_default_attrs,
39842 .name = "cpuidle",
39843 };
39844diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39845index 8d2a772..33826c9 100644
39846--- a/drivers/crypto/hifn_795x.c
39847+++ b/drivers/crypto/hifn_795x.c
39848@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39849 MODULE_PARM_DESC(hifn_pll_ref,
39850 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39851
39852-static atomic_t hifn_dev_number;
39853+static atomic_unchecked_t hifn_dev_number;
39854
39855 #define ACRYPTO_OP_DECRYPT 0
39856 #define ACRYPTO_OP_ENCRYPT 1
39857@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39858 goto err_out_disable_pci_device;
39859
39860 snprintf(name, sizeof(name), "hifn%d",
39861- atomic_inc_return(&hifn_dev_number)-1);
39862+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39863
39864 err = pci_request_regions(pdev, name);
39865 if (err)
39866diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39867index 30b538d8..1610d75 100644
39868--- a/drivers/devfreq/devfreq.c
39869+++ b/drivers/devfreq/devfreq.c
39870@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39871 goto err_out;
39872 }
39873
39874- list_add(&governor->node, &devfreq_governor_list);
39875+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39876
39877 list_for_each_entry(devfreq, &devfreq_list, node) {
39878 int ret = 0;
39879@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39880 }
39881 }
39882
39883- list_del(&governor->node);
39884+ pax_list_del((struct list_head *)&governor->node);
39885 err_out:
39886 mutex_unlock(&devfreq_list_lock);
39887
39888diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39889index 8ee383d..736b5de 100644
39890--- a/drivers/dma/sh/shdma-base.c
39891+++ b/drivers/dma/sh/shdma-base.c
39892@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39893 schan->slave_id = -EINVAL;
39894 }
39895
39896- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39897- sdev->desc_size, GFP_KERNEL);
39898+ schan->desc = kcalloc(sdev->desc_size,
39899+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39900 if (!schan->desc) {
39901 ret = -ENOMEM;
39902 goto edescalloc;
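[annotation] The argument swap above does not change the allocation: kcalloc() overflow-checks the product of its first two arguments either way, so n * size is identical; presumably the reordering just puts the compile-time constant into the slot that the size_overflow plugin treats as the element size. For reference, a sketch of the overflow check kcalloc performs in mainline kernels of this era:

	static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;			/* n * size would overflow */
		return kmalloc(n * size, flags | __GFP_ZERO);
	}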
39903diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39904index 9f1d4c7..fceff78 100644
39905--- a/drivers/dma/sh/shdmac.c
39906+++ b/drivers/dma/sh/shdmac.c
39907@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39908 return ret;
39909 }
39910
39911-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39912+static struct notifier_block sh_dmae_nmi_notifier = {
39913 .notifier_call = sh_dmae_nmi_handler,
39914
39915 /* Run before NMI debug handler and KGDB */
39916diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39917index 592af5f..bb1d583 100644
39918--- a/drivers/edac/edac_device.c
39919+++ b/drivers/edac/edac_device.c
39920@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39921 */
39922 int edac_device_alloc_index(void)
39923 {
39924- static atomic_t device_indexes = ATOMIC_INIT(0);
39925+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39926
39927- return atomic_inc_return(&device_indexes) - 1;
39928+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39929 }
39930 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39931
39932diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39933index c84eecb..4d7381d 100644
39934--- a/drivers/edac/edac_mc_sysfs.c
39935+++ b/drivers/edac/edac_mc_sysfs.c
39936@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39937 struct dev_ch_attribute {
39938 struct device_attribute attr;
39939 int channel;
39940-};
39941+} __do_const;
39942
39943 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39944 static struct dev_ch_attribute dev_attr_legacy_##_name = \
39945@@ -1009,15 +1009,17 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39946 }
39947
39948 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39949+ pax_open_kernel();
39950 if (mci->get_sdram_scrub_rate) {
39951- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39952- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39953+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39954+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39955 }
39956
39957 if (mci->set_sdram_scrub_rate) {
39958- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39959- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39960+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39961+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39962 }
39963+ pax_close_kernel();
39964
39965 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
39966 if (err) {
39967diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39968index 2cf44b4d..6dd2dc7 100644
39969--- a/drivers/edac/edac_pci.c
39970+++ b/drivers/edac/edac_pci.c
39971@@ -29,7 +29,7 @@
39972
39973 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39974 static LIST_HEAD(edac_pci_list);
39975-static atomic_t pci_indexes = ATOMIC_INIT(0);
39976+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39977
39978 /*
39979 * edac_pci_alloc_ctl_info
39980@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39981 */
39982 int edac_pci_alloc_index(void)
39983 {
39984- return atomic_inc_return(&pci_indexes) - 1;
39985+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39986 }
39987 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39988
39989diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39990index 24d877f..4e30133 100644
39991--- a/drivers/edac/edac_pci_sysfs.c
39992+++ b/drivers/edac/edac_pci_sysfs.c
39993@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39994 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39995 static int edac_pci_poll_msec = 1000; /* one second workq period */
39996
39997-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39998-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39999+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40000+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40001
40002 static struct kobject *edac_pci_top_main_kobj;
40003 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40004@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40005 void *value;
40006 ssize_t(*show) (void *, char *);
40007 ssize_t(*store) (void *, const char *, size_t);
40008-};
40009+} __do_const;
40010
40011 /* Set of show/store abstract level functions for PCI Parity object */
40012 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40013@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40014 edac_printk(KERN_CRIT, EDAC_PCI,
40015 "Signaled System Error on %s\n",
40016 pci_name(dev));
40017- atomic_inc(&pci_nonparity_count);
40018+ atomic_inc_unchecked(&pci_nonparity_count);
40019 }
40020
40021 if (status & (PCI_STATUS_PARITY)) {
40022@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40023 "Master Data Parity Error on %s\n",
40024 pci_name(dev));
40025
40026- atomic_inc(&pci_parity_count);
40027+ atomic_inc_unchecked(&pci_parity_count);
40028 }
40029
40030 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40031@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40032 "Detected Parity Error on %s\n",
40033 pci_name(dev));
40034
40035- atomic_inc(&pci_parity_count);
40036+ atomic_inc_unchecked(&pci_parity_count);
40037 }
40038 }
40039
40040@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40041 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40042 "Signaled System Error on %s\n",
40043 pci_name(dev));
40044- atomic_inc(&pci_nonparity_count);
40045+ atomic_inc_unchecked(&pci_nonparity_count);
40046 }
40047
40048 if (status & (PCI_STATUS_PARITY)) {
40049@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40050 "Master Data Parity Error on "
40051 "%s\n", pci_name(dev));
40052
40053- atomic_inc(&pci_parity_count);
40054+ atomic_inc_unchecked(&pci_parity_count);
40055 }
40056
40057 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40058@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40059 "Detected Parity Error on %s\n",
40060 pci_name(dev));
40061
40062- atomic_inc(&pci_parity_count);
40063+ atomic_inc_unchecked(&pci_parity_count);
40064 }
40065 }
40066 }
40067@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40068 if (!check_pci_errors)
40069 return;
40070
40071- before_count = atomic_read(&pci_parity_count);
40072+ before_count = atomic_read_unchecked(&pci_parity_count);
40073
40074 /* scan all PCI devices looking for a Parity Error on devices and
40075 * bridges.
40076@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40077 /* Only if operator has selected panic on PCI Error */
40078 if (edac_pci_get_panic_on_pe()) {
40079 /* If the count is different 'after' from 'before' */
40080- if (before_count != atomic_read(&pci_parity_count))
40081+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40082 panic("EDAC: PCI Parity Error");
40083 }
40084 }
40085diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40086index c2359a1..8bd119d 100644
40087--- a/drivers/edac/mce_amd.h
40088+++ b/drivers/edac/mce_amd.h
40089@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40090 bool (*mc0_mce)(u16, u8);
40091 bool (*mc1_mce)(u16, u8);
40092 bool (*mc2_mce)(u16, u8);
40093-};
40094+} __no_const;
40095
40096 void amd_report_gart_errors(bool);
40097 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
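[annotation] __do_const and __no_const are markers consumed by the PaX constify GCC plugin: the former forces every instance of a structure (typically one holding only function pointers plus attribute metadata, like dev_ch_attribute above) into read-only memory, while the latter exempts an ops structure that is legitimately written at runtime, as amd_decoder_ops is when the decoder hooks are chosen per CPU family. A sketch of how the markers are wired up, assuming the usual compiler.h arrangement from this patch series:

	#ifdef CONSTIFY_PLUGIN
	# define __do_const __attribute__((do_const))	/* constify all instances */
	# define __no_const __attribute__((no_const))	/* keep instances writable */
	#else
	# define __do_const
	# define __no_const
	#endif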
40098diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40099index 57ea7f4..af06b76 100644
40100--- a/drivers/firewire/core-card.c
40101+++ b/drivers/firewire/core-card.c
40102@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40103 const struct fw_card_driver *driver,
40104 struct device *device)
40105 {
40106- static atomic_t index = ATOMIC_INIT(-1);
40107+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40108
40109- card->index = atomic_inc_return(&index);
40110+ card->index = atomic_inc_return_unchecked(&index);
40111 card->driver = driver;
40112 card->device = device;
40113 card->current_tlabel = 0;
40114@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40115
40116 void fw_core_remove_card(struct fw_card *card)
40117 {
40118- struct fw_card_driver dummy_driver = dummy_driver_template;
40119+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40120
40121 card->driver->update_phy_reg(card, 4,
40122 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40123diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40124index f9e3aee..269dbdb 100644
40125--- a/drivers/firewire/core-device.c
40126+++ b/drivers/firewire/core-device.c
40127@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40128 struct config_rom_attribute {
40129 struct device_attribute attr;
40130 u32 key;
40131-};
40132+} __do_const;
40133
40134 static ssize_t show_immediate(struct device *dev,
40135 struct device_attribute *dattr, char *buf)
40136diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40137index d6a09b9..18e90dd 100644
40138--- a/drivers/firewire/core-transaction.c
40139+++ b/drivers/firewire/core-transaction.c
40140@@ -38,6 +38,7 @@
40141 #include <linux/timer.h>
40142 #include <linux/types.h>
40143 #include <linux/workqueue.h>
40144+#include <linux/sched.h>
40145
40146 #include <asm/byteorder.h>
40147
40148diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40149index e1480ff6..1a429bd 100644
40150--- a/drivers/firewire/core.h
40151+++ b/drivers/firewire/core.h
40152@@ -111,6 +111,7 @@ struct fw_card_driver {
40153
40154 int (*stop_iso)(struct fw_iso_context *ctx);
40155 };
40156+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40157
40158 void fw_card_initialize(struct fw_card *card,
40159 const struct fw_card_driver *driver, struct device *device);
40160diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40161index f51d376..b118e40 100644
40162--- a/drivers/firewire/ohci.c
40163+++ b/drivers/firewire/ohci.c
40164@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40165 be32_to_cpu(ohci->next_header));
40166 }
40167
40168+#ifndef CONFIG_GRKERNSEC
40169 if (param_remote_dma) {
40170 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40171 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40172 }
40173+#endif
40174
40175 spin_unlock_irq(&ohci->lock);
40176
40177@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40178 unsigned long flags;
40179 int n, ret = 0;
40180
40181+#ifndef CONFIG_GRKERNSEC
40182 if (param_remote_dma)
40183 return 0;
40184+#endif
40185
40186 /*
40187 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40188diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40189index 94a58a0..f5eba42 100644
40190--- a/drivers/firmware/dmi-id.c
40191+++ b/drivers/firmware/dmi-id.c
40192@@ -16,7 +16,7 @@
40193 struct dmi_device_attribute{
40194 struct device_attribute dev_attr;
40195 int field;
40196-};
40197+} __do_const;
40198 #define to_dmi_dev_attr(_dev_attr) \
40199 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40200
40201diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40202index 2eebd28b..4261350 100644
40203--- a/drivers/firmware/dmi_scan.c
40204+++ b/drivers/firmware/dmi_scan.c
40205@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40206 if (buf == NULL)
40207 return -1;
40208
40209- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40210+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40211
40212 dmi_unmap(buf);
40213 return 0;
40214diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40215index 4fd9961..52d60ce 100644
40216--- a/drivers/firmware/efi/cper.c
40217+++ b/drivers/firmware/efi/cper.c
40218@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40219 */
40220 u64 cper_next_record_id(void)
40221 {
40222- static atomic64_t seq;
40223+ static atomic64_unchecked_t seq;
40224
40225- if (!atomic64_read(&seq))
40226- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40227+ if (!atomic64_read_unchecked(&seq))
40228+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40229
40230- return atomic64_inc_return(&seq);
40231+ return atomic64_inc_return_unchecked(&seq);
40232 }
40233 EXPORT_SYMBOL_GPL(cper_next_record_id);
40234
40235diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40236index 3061bb8..92b5fcc 100644
40237--- a/drivers/firmware/efi/efi.c
40238+++ b/drivers/firmware/efi/efi.c
40239@@ -160,14 +160,16 @@ static struct attribute_group efi_subsys_attr_group = {
40240 };
40241
40242 static struct efivars generic_efivars;
40243-static struct efivar_operations generic_ops;
40244+static efivar_operations_no_const generic_ops __read_only;
40245
40246 static int generic_ops_register(void)
40247 {
40248- generic_ops.get_variable = efi.get_variable;
40249- generic_ops.set_variable = efi.set_variable;
40250- generic_ops.get_next_variable = efi.get_next_variable;
40251- generic_ops.query_variable_store = efi_query_variable_store;
40252+ pax_open_kernel();
40253+ *(void **)&generic_ops.get_variable = efi.get_variable;
40254+ *(void **)&generic_ops.set_variable = efi.set_variable;
40255+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40256+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40257+ pax_close_kernel();
40258
40259 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40260 }
40261diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40262index 7b2e049..a253334 100644
40263--- a/drivers/firmware/efi/efivars.c
40264+++ b/drivers/firmware/efi/efivars.c
40265@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40266 static int
40267 create_efivars_bin_attributes(void)
40268 {
40269- struct bin_attribute *attr;
40270+ bin_attribute_no_const *attr;
40271 int error;
40272
40273 /* new_var */
40274diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40275index 87b8e3b..c4afb35 100644
40276--- a/drivers/firmware/efi/runtime-map.c
40277+++ b/drivers/firmware/efi/runtime-map.c
40278@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40279 kfree(entry);
40280 }
40281
40282-static struct kobj_type __refdata map_ktype = {
40283+static const struct kobj_type __refconst map_ktype = {
40284 .sysfs_ops = &map_attr_ops,
40285 .default_attrs = def_attrs,
40286 .release = map_release,
40287diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40288index f1ab05e..ab51228 100644
40289--- a/drivers/firmware/google/gsmi.c
40290+++ b/drivers/firmware/google/gsmi.c
40291@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40292 return local_hash_64(input, 32);
40293 }
40294
40295-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40296+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40297 {
40298 .ident = "Google Board",
40299 .matches = {
40300diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40301index 2f569aa..26e4f39 100644
40302--- a/drivers/firmware/google/memconsole.c
40303+++ b/drivers/firmware/google/memconsole.c
40304@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40305 return false;
40306 }
40307
40308-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40309+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40310 {
40311 .ident = "Google Board",
40312 .matches = {
40313@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40314 if (!found_memconsole())
40315 return -ENODEV;
40316
40317- memconsole_bin_attr.size = memconsole_length;
40318+ pax_open_kernel();
40319+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40320+ pax_close_kernel();
40321+
40322 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40323 }
40324
40325diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40326index cc016c61..d35279e 100644
40327--- a/drivers/firmware/memmap.c
40328+++ b/drivers/firmware/memmap.c
40329@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40330 kfree(entry);
40331 }
40332
40333-static struct kobj_type __refdata memmap_ktype = {
40334+static const struct kobj_type __refconst memmap_ktype = {
40335 .release = release_firmware_map_entry,
40336 .sysfs_ops = &memmap_attr_ops,
40337 .default_attrs = def_attrs,
40338diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40339index 3cfcfc6..09d6f117 100644
40340--- a/drivers/gpio/gpio-em.c
40341+++ b/drivers/gpio/gpio-em.c
40342@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40343 struct em_gio_priv *p;
40344 struct resource *io[2], *irq[2];
40345 struct gpio_chip *gpio_chip;
40346- struct irq_chip *irq_chip;
40347+ irq_chip_no_const *irq_chip;
40348 const char *name = dev_name(&pdev->dev);
40349 int ret;
40350
40351diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40352index 7818cd1..1be40e5 100644
40353--- a/drivers/gpio/gpio-ich.c
40354+++ b/drivers/gpio/gpio-ich.c
40355@@ -94,7 +94,7 @@ struct ichx_desc {
40356 * this option allows driver caching written output values
40357 */
40358 bool use_outlvl_cache;
40359-};
40360+} __do_const;
40361
40362 static struct {
40363 spinlock_t lock;
40364diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40365index f476ae2..05e1bdd 100644
40366--- a/drivers/gpio/gpio-omap.c
40367+++ b/drivers/gpio/gpio-omap.c
40368@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40369 const struct omap_gpio_platform_data *pdata;
40370 struct resource *res;
40371 struct gpio_bank *bank;
40372- struct irq_chip *irqc;
40373+ irq_chip_no_const *irqc;
40374 int ret;
40375
40376 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40377diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40378index c49522e..9a7ee54 100644
40379--- a/drivers/gpio/gpio-rcar.c
40380+++ b/drivers/gpio/gpio-rcar.c
40381@@ -348,7 +348,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40382 struct gpio_rcar_priv *p;
40383 struct resource *io, *irq;
40384 struct gpio_chip *gpio_chip;
40385- struct irq_chip *irq_chip;
40386+ irq_chip_no_const *irq_chip;
40387 struct device *dev = &pdev->dev;
40388 const char *name = dev_name(dev);
40389 int ret;
40390diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40391index c1caa45..f0f97d2 100644
40392--- a/drivers/gpio/gpio-vr41xx.c
40393+++ b/drivers/gpio/gpio-vr41xx.c
40394@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40395 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40396 maskl, pendl, maskh, pendh);
40397
40398- atomic_inc(&irq_err_count);
40399+ atomic_inc_unchecked(&irq_err_count);
40400
40401 return -EINVAL;
40402 }
40403diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40404index 1ca9295..9f3d481 100644
40405--- a/drivers/gpio/gpiolib.c
40406+++ b/drivers/gpio/gpiolib.c
40407@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40408 }
40409
40410 if (gpiochip->irqchip) {
40411- gpiochip->irqchip->irq_request_resources = NULL;
40412- gpiochip->irqchip->irq_release_resources = NULL;
40413+ pax_open_kernel();
40414+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40415+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40416+ pax_close_kernel();
40417 gpiochip->irqchip = NULL;
40418 }
40419 }
40420@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40421 gpiochip->irqchip = NULL;
40422 return -EINVAL;
40423 }
40424- irqchip->irq_request_resources = gpiochip_irq_reqres;
40425- irqchip->irq_release_resources = gpiochip_irq_relres;
40426+
40427+ pax_open_kernel();
40428+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40429+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40430+ pax_close_kernel();
40431
40432 /*
40433 * Prepare the mapping since the irqchip shall be orthogonal to
40434diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40435index 488f51d..301d462 100644
40436--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40437+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40438@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40439 enum cache_policy alternate_policy,
40440 void __user *alternate_aperture_base,
40441 uint64_t alternate_aperture_size);
40442-};
40443+} __no_const;
40444
40445 /**
40446 * struct device_queue_manager
40447diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40448index 5940531..a75b0e5 100644
40449--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40450+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40451@@ -62,7 +62,7 @@ struct kernel_queue_ops {
40452
40453 void (*submit_packet)(struct kernel_queue *kq);
40454 void (*rollback_packet)(struct kernel_queue *kq);
40455-};
40456+} __no_const;
40457
40458 struct kernel_queue {
40459 struct kernel_queue_ops ops;
40460diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40461index b6f076b..2918de2 100644
40462--- a/drivers/gpu/drm/drm_crtc.c
40463+++ b/drivers/gpu/drm/drm_crtc.c
40464@@ -4118,7 +4118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40465 goto done;
40466 }
40467
40468- if (copy_to_user(&enum_ptr[copied].name,
40469+ if (copy_to_user(enum_ptr[copied].name,
40470 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40471 ret = -EFAULT;
40472 goto done;
40473diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40474index d512134..046f258 100644
40475--- a/drivers/gpu/drm/drm_drv.c
40476+++ b/drivers/gpu/drm/drm_drv.c
40477@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40478
40479 drm_device_set_unplugged(dev);
40480
40481- if (dev->open_count == 0) {
40482+ if (local_read(&dev->open_count) == 0) {
40483 drm_put_dev(dev);
40484 }
40485 mutex_unlock(&drm_global_mutex);
40486diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40487index 076dd60..e4a4ba7 100644
40488--- a/drivers/gpu/drm/drm_fops.c
40489+++ b/drivers/gpu/drm/drm_fops.c
40490@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40491 return PTR_ERR(minor);
40492
40493 dev = minor->dev;
40494- if (!dev->open_count++)
40495+ if (local_inc_return(&dev->open_count) == 1)
40496 need_setup = 1;
40497
40498 /* share address_space across all char-devs of a single device */
40499@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40500 return 0;
40501
40502 err_undo:
40503- dev->open_count--;
40504+ local_dec(&dev->open_count);
40505 drm_minor_release(minor);
40506 return retcode;
40507 }
40508@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40509
40510 mutex_lock(&drm_global_mutex);
40511
40512- DRM_DEBUG("open_count = %d\n", dev->open_count);
40513+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40514
40515 mutex_lock(&dev->struct_mutex);
40516 list_del(&file_priv->lhead);
40517@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40518 * Begin inline drm_release
40519 */
40520
40521- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40522+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40523 task_pid_nr(current),
40524 (long)old_encode_dev(file_priv->minor->kdev->devt),
40525- dev->open_count);
40526+ local_read(&dev->open_count));
40527
40528 /* Release any auth tokens that might point to this file_priv,
40529 (do that under the drm_global_mutex) */
40530@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40531 * End inline drm_release
40532 */
40533
40534- if (!--dev->open_count) {
40535+ if (local_dec_and_test(&dev->open_count)) {
40536 retcode = drm_lastclose(dev);
40537 if (drm_device_is_unplugged(dev))
40538 drm_put_dev(dev);
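[annotation] These hunks depend on a companion change elsewhere in this patch that turns drm_device.open_count from a plain int into a local_t, hence the switch to local_inc_return()/local_dec_and_test() and the %d to %ld format updates. A usage sketch of the same first-opener/last-closer logic, with hypothetical names (demo_dev, demo_open, demo_release):

	#include <asm/local.h>

	struct demo_dev {
		local_t open_count;	/* was: int open_count; */
	};

	static int demo_open(struct demo_dev *d)
	{
		/* the first opener sees 1 and performs one-time setup */
		return local_inc_return(&d->open_count) == 1;
	}

	static int demo_release(struct demo_dev *d)
	{
		/* the last closer tears the device down */
		return local_dec_and_test(&d->open_count);
	}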
40539diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40540index 3d2e91c..d31c4c9 100644
40541--- a/drivers/gpu/drm/drm_global.c
40542+++ b/drivers/gpu/drm/drm_global.c
40543@@ -36,7 +36,7 @@
40544 struct drm_global_item {
40545 struct mutex mutex;
40546 void *object;
40547- int refcount;
40548+ atomic_t refcount;
40549 };
40550
40551 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40552@@ -49,7 +49,7 @@ void drm_global_init(void)
40553 struct drm_global_item *item = &glob[i];
40554 mutex_init(&item->mutex);
40555 item->object = NULL;
40556- item->refcount = 0;
40557+ atomic_set(&item->refcount, 0);
40558 }
40559 }
40560
40561@@ -59,7 +59,7 @@ void drm_global_release(void)
40562 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40563 struct drm_global_item *item = &glob[i];
40564 BUG_ON(item->object != NULL);
40565- BUG_ON(item->refcount != 0);
40566+ BUG_ON(atomic_read(&item->refcount) != 0);
40567 }
40568 }
40569
40570@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40571 struct drm_global_item *item = &glob[ref->global_type];
40572
40573 mutex_lock(&item->mutex);
40574- if (item->refcount == 0) {
40575+ if (atomic_read(&item->refcount) == 0) {
40576 item->object = kzalloc(ref->size, GFP_KERNEL);
40577 if (unlikely(item->object == NULL)) {
40578 ret = -ENOMEM;
40579@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40580 goto out_err;
40581
40582 }
40583- ++item->refcount;
40584+ atomic_inc(&item->refcount);
40585 ref->object = item->object;
40586 mutex_unlock(&item->mutex);
40587 return 0;
40588@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40589 struct drm_global_item *item = &glob[ref->global_type];
40590
40591 mutex_lock(&item->mutex);
40592- BUG_ON(item->refcount == 0);
40593+ BUG_ON(atomic_read(&item->refcount) == 0);
40594 BUG_ON(ref->object != item->object);
40595- if (--item->refcount == 0) {
40596+ if (atomic_dec_and_test(&item->refcount)) {
40597 ref->release(ref);
40598 item->object = NULL;
40599 }
40600diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40601index f1b32f9..394f791 100644
40602--- a/drivers/gpu/drm/drm_info.c
40603+++ b/drivers/gpu/drm/drm_info.c
40604@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40605 struct drm_local_map *map;
40606 struct drm_map_list *r_list;
40607
40608- /* Hardcoded from _DRM_FRAME_BUFFER,
40609- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40610- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40611- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40612+ static const char * const types[] = {
40613+ [_DRM_FRAME_BUFFER] = "FB",
40614+ [_DRM_REGISTERS] = "REG",
40615+ [_DRM_SHM] = "SHM",
40616+ [_DRM_AGP] = "AGP",
40617+ [_DRM_SCATTER_GATHER] = "SG",
40618+ [_DRM_CONSISTENT] = "PCI"};
40619 const char *type;
40620 int i;
40621
40622@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40623 map = r_list->map;
40624 if (!map)
40625 continue;
40626- if (map->type < 0 || map->type > 5)
40627+ if (map->type >= ARRAY_SIZE(types))
40628 type = "??";
40629 else
40630 type = types[map->type];
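[annotation] The rewrite keys the name table by the _DRM_* enumerators instead of relying on their accidental 0..5 ordering, and replaces the hardcoded "type > 5" test with ARRAY_SIZE(types), so the bound tracks the table automatically; the comparison against the unsigned size also rejects negative values, which is why the separate "type < 0" check could be dropped. The designated-index idiom in isolation:

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	enum demo_type { DEMO_A, DEMO_B, DEMO_C };

	static const char * const demo_names[] = {
		[DEMO_A] = "A",
		[DEMO_B] = "B",
		[DEMO_C] = "C",
	};

	static const char *demo_name(enum demo_type t)
	{
		/* unsigned comparison: one bound covers the whole table */
		return (unsigned int)t < ARRAY_SIZE(demo_names) ? demo_names[t] : "??";
	}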
40631diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40632index 2f4c4343..dd12cd2 100644
40633--- a/drivers/gpu/drm/drm_ioc32.c
40634+++ b/drivers/gpu/drm/drm_ioc32.c
40635@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40636 request = compat_alloc_user_space(nbytes);
40637 if (!access_ok(VERIFY_WRITE, request, nbytes))
40638 return -EFAULT;
40639- list = (struct drm_buf_desc *) (request + 1);
40640+ list = (struct drm_buf_desc __user *) (request + 1);
40641
40642 if (__put_user(count, &request->count)
40643 || __put_user(list, &request->list))
40644@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40645 request = compat_alloc_user_space(nbytes);
40646 if (!access_ok(VERIFY_WRITE, request, nbytes))
40647 return -EFAULT;
40648- list = (struct drm_buf_pub *) (request + 1);
40649+ list = (struct drm_buf_pub __user *) (request + 1);
40650
40651 if (__put_user(count, &request->count)
40652 || __put_user(list, &request->list))
40653@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40654 return 0;
40655 }
40656
40657-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40658+drm_ioctl_compat_t drm_compat_ioctls[] = {
40659 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40660 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40661 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40662@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40663 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40664 {
40665 unsigned int nr = DRM_IOCTL_NR(cmd);
40666- drm_ioctl_compat_t *fn;
40667 int ret;
40668
40669 /* Assume that ioctls without an explicit compat routine will just
40670@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40671 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40672 return drm_ioctl(filp, cmd, arg);
40673
40674- fn = drm_compat_ioctls[nr];
40675-
40676- if (fn != NULL)
40677- ret = (*fn) (filp, cmd, arg);
40678+ if (drm_compat_ioctls[nr] != NULL)
40679+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40680 else
40681 ret = drm_ioctl(filp, cmd, arg);
40682
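[annotation] drm_compat_ioctls[] changes from an array of pointers-to-drm_ioctl_compat_t to an array of drm_ioctl_compat_t outright; this only works because a companion change elsewhere in this patch (assumed here, not shown in this section) redefines the typedef itself as a pointer type, which in turn lets the tables be constified and removes the writable fn temporary:

	/* before (mainline): a function type, so tables hold 'drm_ioctl_compat_t *' */
	/*   typedef int drm_ioctl_compat_t(struct file *, unsigned int, unsigned long); */

	/* after (assumed companion change in include/drm/drmP.h): a pointer type */
	typedef int (*drm_ioctl_compat_t)(struct file *, unsigned int, unsigned long);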
40683diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40684index 3785d66..1c489ef 100644
40685--- a/drivers/gpu/drm/drm_ioctl.c
40686+++ b/drivers/gpu/drm/drm_ioctl.c
40687@@ -655,7 +655,7 @@ long drm_ioctl(struct file *filp,
40688 struct drm_file *file_priv = filp->private_data;
40689 struct drm_device *dev;
40690 const struct drm_ioctl_desc *ioctl = NULL;
40691- drm_ioctl_t *func;
40692+ drm_ioctl_no_const_t func;
40693 unsigned int nr = DRM_IOCTL_NR(cmd);
40694 int retcode = -EINVAL;
40695 char stack_kdata[128];
40696diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40697index d4813e0..6c1ab4d 100644
40698--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40699+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40700@@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
40701 u32 pipeconf_reg = PIPEACONF;
40702 u32 dspcntr_reg = DSPACNTR;
40703
40704- u32 pipeconf = dev_priv->pipeconf[pipe];
40705- u32 dspcntr = dev_priv->dspcntr[pipe];
40706+ u32 pipeconf;
40707+ u32 dspcntr;
40708 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
40709
40710+ if (pipe == -1)
40711+ return;
40712+
40713+ pipeconf = dev_priv->pipeconf[pipe];
40714+ dspcntr = dev_priv->dspcntr[pipe];
40715+
40716 if (pipe) {
40717 pipeconf_reg = PIPECCONF;
40718 dspcntr_reg = DSPCCNTR;
40719diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40720index 93ec5dc..82acbaf 100644
40721--- a/drivers/gpu/drm/i810/i810_drv.h
40722+++ b/drivers/gpu/drm/i810/i810_drv.h
40723@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40724 int page_flipping;
40725
40726 wait_queue_head_t irq_queue;
40727- atomic_t irq_received;
40728- atomic_t irq_emitted;
40729+ atomic_unchecked_t irq_received;
40730+ atomic_unchecked_t irq_emitted;
40731
40732 int front_offset;
40733 } drm_i810_private_t;
40734diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40735index 1a46787..0ff2ff4 100644
40736--- a/drivers/gpu/drm/i915/i915_dma.c
40737+++ b/drivers/gpu/drm/i915/i915_dma.c
40738@@ -362,7 +362,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40739 * locking inversion with the driver load path. And the access here is
40740 * completely racy anyway. So don't bother with locking for now.
40741 */
40742- return dev->open_count == 0;
40743+ return local_read(&dev->open_count) == 0;
40744 }
40745
40746 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40747diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40748index 38a7425..5322b16 100644
40749--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40750+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40751@@ -872,12 +872,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40752 static int
40753 validate_exec_list(struct drm_device *dev,
40754 struct drm_i915_gem_exec_object2 *exec,
40755- int count)
40756+ unsigned int count)
40757 {
40758 unsigned relocs_total = 0;
40759 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40760 unsigned invalid_flags;
40761- int i;
40762+ unsigned int i;
40763
40764 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40765 if (USES_FULL_PPGTT(dev))
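[annotation] count comes from the userspace-supplied buffer_count (a u32); declaring it and the loop index unsigned removes the signed/unsigned mismatch in which a value above INT_MAX reads as negative and skips the validation loop entirely. The classic shape of that bug, with hypothetical names (validate, check_one):

	static void validate(unsigned int nbufs_from_user, const u32 *bufs)
	{
		int count = nbufs_from_user;	/* values above INT_MAX turn negative */
		int i;

		for (i = 0; i < count; i++)	/* with count < 0 the loop never runs... */
			check_one(bufs[i]);
		/* ...yet later code may still trust the original huge value */
	}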
40766diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40767index 176de63..b50b66a 100644
40768--- a/drivers/gpu/drm/i915/i915_ioc32.c
40769+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40770@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
40771 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
40772 || __put_user(batchbuffer32.num_cliprects,
40773 &batchbuffer->num_cliprects)
40774- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
40775+ || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
40776 &batchbuffer->cliprects))
40777 return -EFAULT;
40778
40779@@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
40780
40781 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
40782 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
40783- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
40784+ || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
40785 &cmdbuffer->buf)
40786 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
40787 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
40788 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
40789 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
40790- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
40791+ || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
40792 &cmdbuffer->cliprects))
40793 return -EFAULT;
40794
40795@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40796 (unsigned long)request);
40797 }
40798
40799-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40800+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40801 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40802 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40803 [DRM_I915_GETPARAM] = compat_i915_getparam,
40804@@ -201,17 +201,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40805 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40806 {
40807 unsigned int nr = DRM_IOCTL_NR(cmd);
40808- drm_ioctl_compat_t *fn = NULL;
40809 int ret;
40810
40811 if (nr < DRM_COMMAND_BASE)
40812 return drm_compat_ioctl(filp, cmd, arg);
40813
40814- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40815- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40816-
40817- if (fn != NULL)
40818- ret = (*fn) (filp, cmd, arg);
40819+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
40820+ ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
40821 else
40822 ret = drm_ioctl(filp, cmd, arg);
40823
40824diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40825index f75173c..f283e45 100644
40826--- a/drivers/gpu/drm/i915/intel_display.c
40827+++ b/drivers/gpu/drm/i915/intel_display.c
40828@@ -13056,13 +13056,13 @@ struct intel_quirk {
40829 int subsystem_vendor;
40830 int subsystem_device;
40831 void (*hook)(struct drm_device *dev);
40832-};
40833+} __do_const;
40834
40835 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40836 struct intel_dmi_quirk {
40837 void (*hook)(struct drm_device *dev);
40838 const struct dmi_system_id (*dmi_id_list)[];
40839-};
40840+} __do_const;
40841
40842 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40843 {
40844@@ -13070,18 +13070,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40845 return 1;
40846 }
40847
40848-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40849+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40850 {
40851- .dmi_id_list = &(const struct dmi_system_id[]) {
40852- {
40853- .callback = intel_dmi_reverse_brightness,
40854- .ident = "NCR Corporation",
40855- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40856- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40857- },
40858- },
40859- { } /* terminating entry */
40860+ .callback = intel_dmi_reverse_brightness,
40861+ .ident = "NCR Corporation",
40862+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40863+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40864 },
40865+ },
40866+ { } /* terminating entry */
40867+};
40868+
40869+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40870+ {
40871+ .dmi_id_list = &intel_dmi_quirks_table,
40872 .hook = quirk_invert_brightness,
40873 },
40874 };
40875diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40876index a002f53..0d60514 100644
40877--- a/drivers/gpu/drm/imx/imx-drm-core.c
40878+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40879@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40880 if (imxdrm->pipes >= MAX_CRTC)
40881 return -EINVAL;
40882
40883- if (imxdrm->drm->open_count)
40884+ if (local_read(&imxdrm->drm->open_count))
40885 return -EBUSY;
40886
40887 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40888diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40889index b4a20149..219ab78 100644
40890--- a/drivers/gpu/drm/mga/mga_drv.h
40891+++ b/drivers/gpu/drm/mga/mga_drv.h
40892@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40893 u32 clear_cmd;
40894 u32 maccess;
40895
40896- atomic_t vbl_received; /**< Number of vblanks received. */
40897+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40898 wait_queue_head_t fence_queue;
40899- atomic_t last_fence_retired;
40900+ atomic_unchecked_t last_fence_retired;
40901 u32 next_fence_to_post;
40902
40903 unsigned int fb_cpp;
40904diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40905index 729bfd5..14bae78 100644
40906--- a/drivers/gpu/drm/mga/mga_ioc32.c
40907+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40908@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40909 return 0;
40910 }
40911
40912-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40913+drm_ioctl_compat_t mga_compat_ioctls[] = {
40914 [DRM_MGA_INIT] = compat_mga_init,
40915 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40916 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40917@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40918 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40919 {
40920 unsigned int nr = DRM_IOCTL_NR(cmd);
40921- drm_ioctl_compat_t *fn = NULL;
40922 int ret;
40923
40924 if (nr < DRM_COMMAND_BASE)
40925 return drm_compat_ioctl(filp, cmd, arg);
40926
40927- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40928- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40929-
40930- if (fn != NULL)
40931- ret = (*fn) (filp, cmd, arg);
40932+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
40933+ ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
40934 else
40935 ret = drm_ioctl(filp, cmd, arg);
40936
40937diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40938index 1b071b8..de8601a 100644
40939--- a/drivers/gpu/drm/mga/mga_irq.c
40940+++ b/drivers/gpu/drm/mga/mga_irq.c
40941@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40942 if (crtc != 0)
40943 return 0;
40944
40945- return atomic_read(&dev_priv->vbl_received);
40946+ return atomic_read_unchecked(&dev_priv->vbl_received);
40947 }
40948
40949
40950@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40951 /* VBLANK interrupt */
40952 if (status & MGA_VLINEPEN) {
40953 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40954- atomic_inc(&dev_priv->vbl_received);
40955+ atomic_inc_unchecked(&dev_priv->vbl_received);
40956 drm_handle_vblank(dev, 0);
40957 handled = 1;
40958 }
40959@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40960 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40961 MGA_WRITE(MGA_PRIMEND, prim_end);
40962
40963- atomic_inc(&dev_priv->last_fence_retired);
40964+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40965 wake_up(&dev_priv->fence_queue);
40966 handled = 1;
40967 }
40968@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40969 * using fences.
40970 */
40971 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40972- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40973+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40974 - *sequence) <= (1 << 23)));
40975
40976 *sequence = cur_fence;
40977diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40978index 0190b69..60c3eaf 100644
40979--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40980+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40981@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40982 struct bit_table {
40983 const char id;
40984 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40985-};
40986+} __no_const;
40987
40988 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40989
40990diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40991index fc68f09..0511d71 100644
40992--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40993+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40994@@ -121,7 +121,6 @@ struct nouveau_drm {
40995 struct drm_global_reference mem_global_ref;
40996 struct ttm_bo_global_ref bo_global_ref;
40997 struct ttm_bo_device bdev;
40998- atomic_t validate_sequence;
40999 int (*move)(struct nouveau_channel *,
41000 struct ttm_buffer_object *,
41001 struct ttm_mem_reg *, struct ttm_mem_reg *);
41002diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41003index 462679a..88e32a7 100644
41004--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41005+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41006@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41007 unsigned long arg)
41008 {
41009 unsigned int nr = DRM_IOCTL_NR(cmd);
41010- drm_ioctl_compat_t *fn = NULL;
41011+ drm_ioctl_compat_t fn = NULL;
41012 int ret;
41013
41014 if (nr < DRM_COMMAND_BASE)
41015diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41016index 273e501..3b6c0a2 100644
41017--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41018+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41019@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41020 }
41021
41022 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41023- nouveau_vram_manager_init,
41024- nouveau_vram_manager_fini,
41025- nouveau_vram_manager_new,
41026- nouveau_vram_manager_del,
41027- nouveau_vram_manager_debug
41028+ .init = nouveau_vram_manager_init,
41029+ .takedown = nouveau_vram_manager_fini,
41030+ .get_node = nouveau_vram_manager_new,
41031+ .put_node = nouveau_vram_manager_del,
41032+ .debug = nouveau_vram_manager_debug
41033 };
41034
41035 static int
41036@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41037 }
41038
41039 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41040- nouveau_gart_manager_init,
41041- nouveau_gart_manager_fini,
41042- nouveau_gart_manager_new,
41043- nouveau_gart_manager_del,
41044- nouveau_gart_manager_debug
41045+ .init = nouveau_gart_manager_init,
41046+ .takedown = nouveau_gart_manager_fini,
41047+ .get_node = nouveau_gart_manager_new,
41048+ .put_node = nouveau_gart_manager_del,
41049+ .debug = nouveau_gart_manager_debug
41050 };
41051
41052 /*XXX*/
41053@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41054 }
41055
41056 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41057- nv04_gart_manager_init,
41058- nv04_gart_manager_fini,
41059- nv04_gart_manager_new,
41060- nv04_gart_manager_del,
41061- nv04_gart_manager_debug
41062+ .init = nv04_gart_manager_init,
41063+ .takedown = nv04_gart_manager_fini,
41064+ .get_node = nv04_gart_manager_new,
41065+ .put_node = nv04_gart_manager_del,
41066+ .debug = nv04_gart_manager_debug
41067 };
41068
41069 int
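[annotation] The three manager definitions switch from positional to designated initializers. Behavior is identical today, but positional initialization silently depends on field order, which the grsecurity RANDSTRUCT plugin may change for randomized structures; designated initializers stay correct regardless of layout. The two forms side by side, with hypothetical names:

	struct demo_funcs {
		int  (*init)(void);
		void (*takedown)(void);
	};

	static int  demo_init(void)     { return 0; }
	static void demo_takedown(void) { }

	/* positional: breaks if the fields are ever reordered */
	static const struct demo_funcs demo_pos = { demo_init, demo_takedown };

	/* designated: layout-independent */
	static const struct demo_funcs demo_desig = {
		.init     = demo_init,
		.takedown = demo_takedown,
	};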
41070diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41071index c7592ec..dd45ebc 100644
41072--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41073+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41074@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41075 * locking inversion with the driver load path. And the access here is
41076 * completely racy anyway. So don't bother with locking for now.
41077 */
41078- return dev->open_count == 0;
41079+ return local_read(&dev->open_count) == 0;
41080 }
41081
41082 static const struct vga_switcheroo_client_ops
41083diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41084index 9782364..89bd954 100644
41085--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41086+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41087@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41088 int ret;
41089
41090 mutex_lock(&qdev->async_io_mutex);
41091- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41092+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41093 if (qdev->last_sent_io_cmd > irq_num) {
41094 if (intr)
41095 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41096- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41097+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41098 else
41099 ret = wait_event_timeout(qdev->io_cmd_event,
41100- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41101+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41102 /* 0 is timeout, just bail the "hw" has gone away */
41103 if (ret <= 0)
41104 goto out;
41105- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41106+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41107 }
41108 outb(val, addr);
41109 qdev->last_sent_io_cmd = irq_num + 1;
41110 if (intr)
41111 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41112- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41113+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41114 else
41115 ret = wait_event_timeout(qdev->io_cmd_event,
41116- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41117+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41118 out:
41119 if (ret > 0)
41120 ret = 0;
41121diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41122index 6911b8c..89d6867 100644
41123--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41124+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41125@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41126 struct drm_info_node *node = (struct drm_info_node *) m->private;
41127 struct qxl_device *qdev = node->minor->dev->dev_private;
41128
41129- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41130- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41131- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41132- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41133+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41134+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41135+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41136+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41137 seq_printf(m, "%d\n", qdev->irq_received_error);
41138 return 0;
41139 }
41140diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41141index 7c6cafe..460f542 100644
41142--- a/drivers/gpu/drm/qxl/qxl_drv.h
41143+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41144@@ -290,10 +290,10 @@ struct qxl_device {
41145 unsigned int last_sent_io_cmd;
41146
41147 /* interrupt handling */
41148- atomic_t irq_received;
41149- atomic_t irq_received_display;
41150- atomic_t irq_received_cursor;
41151- atomic_t irq_received_io_cmd;
41152+ atomic_unchecked_t irq_received;
41153+ atomic_unchecked_t irq_received_display;
41154+ atomic_unchecked_t irq_received_cursor;
41155+ atomic_unchecked_t irq_received_io_cmd;
41156 unsigned irq_received_error;
41157 wait_queue_head_t display_event;
41158 wait_queue_head_t cursor_event;
41159diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41160index b110883..dd06418 100644
41161--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41162+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41163@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41164
41165 /* TODO copy slow path code from i915 */
41166 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41167- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41168+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41169
41170 {
41171 struct qxl_drawable *draw = fb_cmd;
41172@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41173 struct drm_qxl_reloc reloc;
41174
41175 if (copy_from_user(&reloc,
41176- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41177+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41178 sizeof(reloc))) {
41179 ret = -EFAULT;
41180 goto out_free_bos;
41181@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41182
41183 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41184
41185- struct drm_qxl_command *commands =
41186- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41187+ struct drm_qxl_command __user *commands =
41188+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41189
41190- if (copy_from_user(&user_cmd, &commands[cmd_num],
41191+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41192 sizeof(user_cmd)))
41193 return -EFAULT;
41194
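[annotation] The __force_user casts mark deliberate crossings between kernel and user address spaces for Sparse: under PaX UDEREF the two spaces are genuinely disjoint, so every userspace pointer must carry the __user annotation and intentional reinterpretations must say so explicitly. A sketch of the annotations involved, assuming the usual compiler.h definitions plus the PaX-side shorthand:

	#ifdef __CHECKER__
	# define __user        __attribute__((noderef, address_space(1)))
	# define __force       __attribute__((force))
	# define __force_user  __force __user	/* assumed PaX addition */
	#else
	# define __user
	# define __force
	# define __force_user
	#endif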
41195diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41196index 0bf1e20..42a7310 100644
41197--- a/drivers/gpu/drm/qxl/qxl_irq.c
41198+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41199@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41200 if (!pending)
41201 return IRQ_NONE;
41202
41203- atomic_inc(&qdev->irq_received);
41204+ atomic_inc_unchecked(&qdev->irq_received);
41205
41206 if (pending & QXL_INTERRUPT_DISPLAY) {
41207- atomic_inc(&qdev->irq_received_display);
41208+ atomic_inc_unchecked(&qdev->irq_received_display);
41209 wake_up_all(&qdev->display_event);
41210 qxl_queue_garbage_collect(qdev, false);
41211 }
41212 if (pending & QXL_INTERRUPT_CURSOR) {
41213- atomic_inc(&qdev->irq_received_cursor);
41214+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41215 wake_up_all(&qdev->cursor_event);
41216 }
41217 if (pending & QXL_INTERRUPT_IO_CMD) {
41218- atomic_inc(&qdev->irq_received_io_cmd);
41219+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41220 wake_up_all(&qdev->io_cmd_event);
41221 }
41222 if (pending & QXL_INTERRUPT_ERROR) {
41223@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41224 init_waitqueue_head(&qdev->io_cmd_event);
41225 INIT_WORK(&qdev->client_monitors_config_work,
41226 qxl_client_monitors_config_work_func);
41227- atomic_set(&qdev->irq_received, 0);
41228- atomic_set(&qdev->irq_received_display, 0);
41229- atomic_set(&qdev->irq_received_cursor, 0);
41230- atomic_set(&qdev->irq_received_io_cmd, 0);
41231+ atomic_set_unchecked(&qdev->irq_received, 0);
41232+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41233+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41234+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41235 qdev->irq_received_error = 0;
41236 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41237 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41238diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41239index 0cbc4c9..0e46686 100644
41240--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41241+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41242@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41243 }
41244 }
41245
41246-static struct vm_operations_struct qxl_ttm_vm_ops;
41247+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41248 static const struct vm_operations_struct *ttm_vm_ops;
41249
41250 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41251@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41252 return r;
41253 if (unlikely(ttm_vm_ops == NULL)) {
41254 ttm_vm_ops = vma->vm_ops;
41255+ pax_open_kernel();
41256 qxl_ttm_vm_ops = *ttm_vm_ops;
41257 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41258+ pax_close_kernel();
41259 }
41260 vma->vm_ops = &qxl_ttm_vm_ops;
41261 return 0;
41262@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41263 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41264 {
41265 #if defined(CONFIG_DEBUG_FS)
41266- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41267- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41268- unsigned i;
41269+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41270+ {
41271+ .name = "qxl_mem_mm",
41272+ .show = &qxl_mm_dump_table,
41273+ },
41274+ {
41275+ .name = "qxl_surf_mm",
41276+ .show = &qxl_mm_dump_table,
41277+ }
41278+ };
41279
41280- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41281- if (i == 0)
41282- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41283- else
41284- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41285- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41286- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41287- qxl_mem_types_list[i].driver_features = 0;
41288- if (i == 0)
41289- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41290- else
41291- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41292+ pax_open_kernel();
41293+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41294+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41295+ pax_close_kernel();
41296
41297- }
41298- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41299+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41300 #else
41301 return 0;
41302 #endif
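[annotation] qxl_mmap() clones TTM's vm_operations_struct once and overrides only .fault; since vm_ops tables are constified, the clone is declared with the writable vm_operations_struct_no_const typedef, kept in __read_only data, and patched inside the open-kernel window. The clone-and-override pattern in isolation, with hypothetical names (my_vm_ops, my_fault, hook_vm_ops):

	static struct vm_operations_struct my_vm_ops;		/* writable copy */
	static const struct vm_operations_struct *orig_vm_ops;	/* captured original */

	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		/* ...driver-specific pre-processing... */
		return orig_vm_ops->fault(vma, vmf);	/* delegate to the original hook */
	}

	static void hook_vm_ops(struct vm_area_struct *vma)
	{
		if (!orig_vm_ops) {			/* one-time capture */
			orig_vm_ops = vma->vm_ops;
			my_vm_ops = *orig_vm_ops;	/* struct copy */
			my_vm_ops.fault = my_fault;
		}
		vma->vm_ops = &my_vm_ops;
	}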
41303diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41304index 2c45ac9..5d740f8 100644
41305--- a/drivers/gpu/drm/r128/r128_cce.c
41306+++ b/drivers/gpu/drm/r128/r128_cce.c
41307@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41308
41309 /* GH: Simple idle check.
41310 */
41311- atomic_set(&dev_priv->idle_count, 0);
41312+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41313
41314 /* We don't support anything other than bus-mastering ring mode,
41315 * but the ring can be in either AGP or PCI space for the ring
41316diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41317index 723e5d6..102dbaf 100644
41318--- a/drivers/gpu/drm/r128/r128_drv.h
41319+++ b/drivers/gpu/drm/r128/r128_drv.h
41320@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41321 int is_pci;
41322 unsigned long cce_buffers_offset;
41323
41324- atomic_t idle_count;
41325+ atomic_unchecked_t idle_count;
41326
41327 int page_flipping;
41328 int current_page;
41329 u32 crtc_offset;
41330 u32 crtc_offset_cntl;
41331
41332- atomic_t vbl_received;
41333+ atomic_unchecked_t vbl_received;
41334
41335 u32 color_fmt;
41336 unsigned int front_offset;
41337diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41338index 663f38c..ec159a1 100644
41339--- a/drivers/gpu/drm/r128/r128_ioc32.c
41340+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41341@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41342 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41343 }
41344
41345-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41346+drm_ioctl_compat_t r128_compat_ioctls[] = {
41347 [DRM_R128_INIT] = compat_r128_init,
41348 [DRM_R128_DEPTH] = compat_r128_depth,
41349 [DRM_R128_STIPPLE] = compat_r128_stipple,
41350@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41351 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41352 {
41353 unsigned int nr = DRM_IOCTL_NR(cmd);
41354- drm_ioctl_compat_t *fn = NULL;
41355 int ret;
41356
41357 if (nr < DRM_COMMAND_BASE)
41358 return drm_compat_ioctl(filp, cmd, arg);
41359
41360- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41361- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41362-
41363- if (fn != NULL)
41364- ret = (*fn) (filp, cmd, arg);
41365+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
41366+ ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41367 else
41368 ret = drm_ioctl(filp, cmd, arg);
41369
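/*
 * The compat-ioctl dispatch above is tightened in two ways: the handler
 * table loses a level of indirection (drm_ioctl_compat_t is presumably
 * re-typedef'd as a function-pointer type elsewhere in this patch, which
 * lets the constify plugin place the table itself in read-only memory),
 * and the call now goes through the table directly instead of a writable
 * local function pointer.  The resulting dispatch shape, sketched:
 */
long compat_ioctl_sketch(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);

	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) &&
	    r128_compat_ioctls[nr - DRM_COMMAND_BASE])
		return r128_compat_ioctls[nr - DRM_COMMAND_BASE](filp, cmd, arg);

	return drm_ioctl(filp, cmd, arg);
}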
41370diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41371index c2ae496..30b5993 100644
41372--- a/drivers/gpu/drm/r128/r128_irq.c
41373+++ b/drivers/gpu/drm/r128/r128_irq.c
41374@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41375 if (crtc != 0)
41376 return 0;
41377
41378- return atomic_read(&dev_priv->vbl_received);
41379+ return atomic_read_unchecked(&dev_priv->vbl_received);
41380 }
41381
41382 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41383@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41384 /* VBLANK interrupt */
41385 if (status & R128_CRTC_VBLANK_INT) {
41386 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41387- atomic_inc(&dev_priv->vbl_received);
41388+ atomic_inc_unchecked(&dev_priv->vbl_received);
41389 drm_handle_vblank(dev, 0);
41390 return IRQ_HANDLED;
41391 }
41392diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41393index 8fd2d9f..18c9660 100644
41394--- a/drivers/gpu/drm/r128/r128_state.c
41395+++ b/drivers/gpu/drm/r128/r128_state.c
41396@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41397
41398 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41399 {
41400- if (atomic_read(&dev_priv->idle_count) == 0)
41401+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41402 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41403 else
41404- atomic_set(&dev_priv->idle_count, 0);
41405+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41406 }
41407
41408 #endif
41409diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41410index b928c17..e5d9400 100644
41411--- a/drivers/gpu/drm/radeon/mkregtable.c
41412+++ b/drivers/gpu/drm/radeon/mkregtable.c
41413@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41414 regex_t mask_rex;
41415 regmatch_t match[4];
41416 char buf[1024];
41417- size_t end;
41418+ long end;
41419 int len;
41420 int done = 0;
41421 int r;
41422 unsigned o;
41423 struct offset *offset;
41424 char last_reg_s[10];
41425- int last_reg;
41426+ unsigned long last_reg;
41427
41428 if (regcomp
41429 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
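/*
 * mkregtable is a host-side build tool, and these look like plain
 * signedness fixes: `end` goes from the unsigned size_t to a signed long
 * so arithmetic on it can go negative safely, and last_reg becomes
 * unsigned long to match what strtoul() actually returns, so a large
 * register offset is not truncated through int.  The pitfalls being
 * avoided, sketched with hypothetical values in host userspace:
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t end = 0;
	long   fixed = 0;

	end--;		/* wraps to SIZE_MAX: ">= 0" style tests always pass */
	fixed--;	/* stays -1 and can be tested for */

	unsigned long reg = strtoul("0xffffffff", NULL, 0);
	int truncated = reg;	/* implementation-defined on 64-bit hosts */

	printf("%zu %ld %lu %d\n", end, fixed, reg, truncated);
	return 0;
}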
41430diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41431index bd7519f..e1c2cd95 100644
41432--- a/drivers/gpu/drm/radeon/radeon_device.c
41433+++ b/drivers/gpu/drm/radeon/radeon_device.c
41434@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41435 * locking inversion with the driver load path. And the access here is
41436 * completely racy anyway. So don't bother with locking for now.
41437 */
41438- return dev->open_count == 0;
41439+ return local_read(&dev->open_count) == 0;
41440 }
41441
41442 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
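/*
 * dev->open_count is presumably converted from int to local_t in struct
 * drm_device elsewhere in this patch, so every reader has to go through
 * the accessor; local_read() boils down to a plain load of the embedded
 * counter, along the lines of:
 *
 *	typedef struct { atomic_long_t a; } local_t;
 *	#define local_read(l)	atomic_long_read(&(l)->a)
 *
 * The comment above the hunk still applies: the can_switch check stays
 * racy by design, the conversion only keeps the field's accessors
 * consistent.
 */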
41443diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41444index 46bd393..6ae4719 100644
41445--- a/drivers/gpu/drm/radeon/radeon_drv.h
41446+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41447@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41448
41449 /* SW interrupt */
41450 wait_queue_head_t swi_queue;
41451- atomic_t swi_emitted;
41452+ atomic_unchecked_t swi_emitted;
41453 int vblank_crtc;
41454 uint32_t irq_enable_reg;
41455 uint32_t r500_disp_irq_reg;
41456diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41457index 0b98ea1..a3c770f 100644
41458--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41459+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41460@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41461 request = compat_alloc_user_space(sizeof(*request));
41462 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41463 || __put_user(req32.param, &request->param)
41464- || __put_user((void __user *)(unsigned long)req32.value,
41465+ || __put_user((unsigned long)req32.value,
41466 &request->value))
41467 return -EFAULT;
41468
41469@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41470 #define compat_radeon_cp_setparam NULL
41471 #endif /* X86_64 || IA64 */
41472
41473-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41474+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41475 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41476 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41477 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41478@@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41479 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41480 {
41481 unsigned int nr = DRM_IOCTL_NR(cmd);
41482- drm_ioctl_compat_t *fn = NULL;
41483 int ret;
41484
41485 if (nr < DRM_COMMAND_BASE)
41486 return drm_compat_ioctl(filp, cmd, arg);
41487
41488- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41489- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41490-
41491- if (fn != NULL)
41492- ret = (*fn) (filp, cmd, arg);
41493+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
41494+ ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41495 else
41496 ret = drm_ioctl(filp, cmd, arg);
41497
41498diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41499index 244b19b..c19226d 100644
41500--- a/drivers/gpu/drm/radeon/radeon_irq.c
41501+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41502@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41503 unsigned int ret;
41504 RING_LOCALS;
41505
41506- atomic_inc(&dev_priv->swi_emitted);
41507- ret = atomic_read(&dev_priv->swi_emitted);
41508+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41509+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41510
41511 BEGIN_RING(4);
41512 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41513@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41514 drm_radeon_private_t *dev_priv =
41515 (drm_radeon_private_t *) dev->dev_private;
41516
41517- atomic_set(&dev_priv->swi_emitted, 0);
41518+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41519 init_waitqueue_head(&dev_priv->swi_queue);
41520
41521 dev->max_vblank_count = 0x001fffff;
41522diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41523index 15aee72..cda326e 100644
41524--- a/drivers/gpu/drm/radeon/radeon_state.c
41525+++ b/drivers/gpu/drm/radeon/radeon_state.c
41526@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41527 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41528 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41529
41530- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41531+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41532 sarea_priv->nbox * sizeof(depth_boxes[0])))
41533 return -EFAULT;
41534
41535@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41536 {
41537 drm_radeon_private_t *dev_priv = dev->dev_private;
41538 drm_radeon_getparam_t *param = data;
41539- int value;
41540+ int value = 0;
41541
41542 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41543
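/*
 * Two hardening tweaks above.  The depth_boxes copy re-validates nbox at
 * the point of use: sarea_priv lives in the SAREA, which userspace can
 * scribble on concurrently, so the value clamped two lines earlier may
 * have changed by the time it is used as a copy size (a classic
 * double-fetch hazard).  And `value` in radeon_cp_getparam() is
 * zero-initialized because it is later copied out to userspace; a request
 * matching no switch case would otherwise leak uninitialized kernel
 * stack.  The infoleak pattern, reduced to a hypothetical handler:
 */
static int getparam_sketch(unsigned int param, int __user *out)
{
	int value;			/* stack garbage until assigned */

	switch (param) {
	case 1:
		value = 42;
		break;
	default:
		break;			/* value never written */
	}
	/* without `int value = 0;` this can expose stack contents */
	return copy_to_user(out, &value, sizeof(value)) ? -EFAULT : 0;
}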
41544diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41545index b292aca..4e338b5 100644
41546--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41547+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41548@@ -963,7 +963,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41549 man->size = size >> PAGE_SHIFT;
41550 }
41551
41552-static struct vm_operations_struct radeon_ttm_vm_ops;
41553+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41554 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41555
41556 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41557@@ -1004,8 +1004,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41558 }
41559 if (unlikely(ttm_vm_ops == NULL)) {
41560 ttm_vm_ops = vma->vm_ops;
41561+ pax_open_kernel();
41562 radeon_ttm_vm_ops = *ttm_vm_ops;
41563 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41564+ pax_close_kernel();
41565 }
41566 vma->vm_ops = &radeon_ttm_vm_ops;
41567 return 0;
41568diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41569index 1a52522..8e78043 100644
41570--- a/drivers/gpu/drm/tegra/dc.c
41571+++ b/drivers/gpu/drm/tegra/dc.c
41572@@ -1585,7 +1585,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41573 }
41574
41575 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41576- dc->debugfs_files[i].data = dc;
41577+ *(void **)&dc->debugfs_files[i].data = dc;
41578
41579 err = drm_debugfs_create_files(dc->debugfs_files,
41580 ARRAY_SIZE(debugfs_files),
41581diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41582index ed970f6..4eeea42 100644
41583--- a/drivers/gpu/drm/tegra/dsi.c
41584+++ b/drivers/gpu/drm/tegra/dsi.c
41585@@ -62,7 +62,7 @@ struct tegra_dsi {
41586 struct clk *clk_lp;
41587 struct clk *clk;
41588
41589- struct drm_info_list *debugfs_files;
41590+ drm_info_list_no_const *debugfs_files;
41591 struct drm_minor *minor;
41592 struct dentry *debugfs;
41593
41594diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41595index 7eaaee74..cc2bc04 100644
41596--- a/drivers/gpu/drm/tegra/hdmi.c
41597+++ b/drivers/gpu/drm/tegra/hdmi.c
41598@@ -64,7 +64,7 @@ struct tegra_hdmi {
41599 bool stereo;
41600 bool dvi;
41601
41602- struct drm_info_list *debugfs_files;
41603+ drm_info_list_no_const *debugfs_files;
41604 struct drm_minor *minor;
41605 struct dentry *debugfs;
41606 };
41607diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41608index aa0bd054..aea6a01 100644
41609--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41610+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41611@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41612 }
41613
41614 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41615- ttm_bo_man_init,
41616- ttm_bo_man_takedown,
41617- ttm_bo_man_get_node,
41618- ttm_bo_man_put_node,
41619- ttm_bo_man_debug
41620+ .init = ttm_bo_man_init,
41621+ .takedown = ttm_bo_man_takedown,
41622+ .get_node = ttm_bo_man_get_node,
41623+ .put_node = ttm_bo_man_put_node,
41624+ .debug = ttm_bo_man_debug
41625 };
41626 EXPORT_SYMBOL(ttm_bo_manager_func);
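/*
 * Positional struct initializers are rewritten as designated ones
 * throughout this patch.  The functional content is identical; the point
 * is that the constify plugin (and plain robustness) benefit when each
 * function pointer is tied to its field by name, so a reordering of
 * struct ttm_mem_type_manager_func can never silently re-wire the ops.
 * The general pattern:
 */
struct ops_sketch {
	int  (*init)(void);
	void (*takedown)(void);
};

static int  sketch_init(void)     { return 0; }
static void sketch_takedown(void) { }

static const struct ops_sketch sketch_ops = {
	.init     = sketch_init,	/* robust against field reordering */
	.takedown = sketch_takedown,
};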
41627diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41628index a1803fb..c53f6b0 100644
41629--- a/drivers/gpu/drm/ttm/ttm_memory.c
41630+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41631@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41632 zone->glob = glob;
41633 glob->zone_kernel = zone;
41634 ret = kobject_init_and_add(
41635- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41636+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41637 if (unlikely(ret != 0)) {
41638 kobject_put(&zone->kobj);
41639 return ret;
41640@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41641 zone->glob = glob;
41642 glob->zone_dma32 = zone;
41643 ret = kobject_init_and_add(
41644- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41645+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41646 if (unlikely(ret != 0)) {
41647 kobject_put(&zone->kobj);
41648 return ret;
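/*
 * kobject_init_and_add() takes a printf-style format string, so passing a
 * name buffer directly means any '%' in it would be parsed as a
 * conversion specifier.  The zone names here are fixed kernel strings
 * ("kernel", "dma32"), so this is defensive rather than an exploitable
 * hole, but the idiom is the standard format-string fix:
 *
 *	kobject_init_and_add(kobj, ktype, parent, zone->name);       // risky
 *	kobject_init_and_add(kobj, ktype, parent, "%s", zone->name); // safe
 */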
41649diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41650index 025c429..314062f 100644
41651--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41652+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41653@@ -54,7 +54,7 @@
41654
41655 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41656 #define SMALL_ALLOCATION 16
41657-#define FREE_ALL_PAGES (~0U)
41658+#define FREE_ALL_PAGES (~0UL)
41659 /* times are in msecs */
41660 #define PAGE_FREE_INTERVAL 1000
41661
41662@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41663 * @free_all: If set to true will free all pages in pool
41664 * @use_static: Safe to use static buffer
41665 **/
41666-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41667+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41668 bool use_static)
41669 {
41670 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41671 unsigned long irq_flags;
41672 struct page *p;
41673 struct page **pages_to_free;
41674- unsigned freed_pages = 0,
41675- npages_to_free = nr_free;
41676+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41677
41678 if (NUM_PAGES_TO_ALLOC < nr_free)
41679 npages_to_free = NUM_PAGES_TO_ALLOC;
41680@@ -371,7 +370,8 @@ restart:
41681 __list_del(&p->lru, &pool->list);
41682
41683 ttm_pool_update_free_locked(pool, freed_pages);
41684- nr_free -= freed_pages;
41685+ if (likely(nr_free != FREE_ALL_PAGES))
41686+ nr_free -= freed_pages;
41687 }
41688
41689 spin_unlock_irqrestore(&pool->lock, irq_flags);
41690@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41691 unsigned i;
41692 unsigned pool_offset;
41693 struct ttm_page_pool *pool;
41694- int shrink_pages = sc->nr_to_scan;
41695+ unsigned long shrink_pages = sc->nr_to_scan;
41696 unsigned long freed = 0;
41697
41698 if (!mutex_trylock(&lock))
41699@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41700 pool_offset = ++start_pool % NUM_POOLS;
41701 /* select start pool in round robin fashion */
41702 for (i = 0; i < NUM_POOLS; ++i) {
41703- unsigned nr_free = shrink_pages;
41704+ unsigned long nr_free = shrink_pages;
41705 if (shrink_pages == 0)
41706 break;
41707 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41708@@ -673,7 +673,7 @@ out:
41709 }
41710
41711 /* Put all pages in pages list to correct pool to wait for reuse */
41712-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41713+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41714 enum ttm_caching_state cstate)
41715 {
41716 unsigned long irq_flags;
41717@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41718 struct list_head plist;
41719 struct page *p = NULL;
41720 gfp_t gfp_flags = GFP_USER;
41721- unsigned count;
41722+ unsigned long count;
41723 int r;
41724
41725 /* set zero flag for page allocation if required */
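/*
 * The widening to unsigned long and the FREE_ALL_PAGES guard above go
 * together: ~0U used as a sentinel stops matching once the surrounding
 * arithmetic is done in unsigned long, and decrementing the sentinel by
 * freed_pages would quietly turn "free everything" into a finite (and
 * wrong) remaining count on the next loop iteration.  Reduced to its
 * core:
 */
#define FREE_ALL_PAGES_SKETCH (~0UL)

static unsigned long account_freed(unsigned long nr_free,
				   unsigned long freed_pages)
{
	/* never decrement the sentinel -- it must survive the whole loop */
	if (nr_free != FREE_ALL_PAGES_SKETCH)
		nr_free -= freed_pages;
	return nr_free;
}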
41726diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41727index 01e1d27..aaa018a 100644
41728--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41729+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41730@@ -56,7 +56,7 @@
41731
41732 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41733 #define SMALL_ALLOCATION 4
41734-#define FREE_ALL_PAGES (~0U)
41735+#define FREE_ALL_PAGES (~0UL)
41736 /* times are in msecs */
41737 #define IS_UNDEFINED (0)
41738 #define IS_WC (1<<1)
41739@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41740 * @nr_free: If set to true will free all pages in pool
41741 * @use_static: Safe to use static buffer
41742 **/
41743-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41744+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41745 bool use_static)
41746 {
41747 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41748@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41749 struct dma_page *dma_p, *tmp;
41750 struct page **pages_to_free;
41751 struct list_head d_pages;
41752- unsigned freed_pages = 0,
41753- npages_to_free = nr_free;
41754+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41755
41756 if (NUM_PAGES_TO_ALLOC < nr_free)
41757 npages_to_free = NUM_PAGES_TO_ALLOC;
41758@@ -499,7 +498,8 @@ restart:
41759 /* remove range of pages from the pool */
41760 if (freed_pages) {
41761 ttm_pool_update_free_locked(pool, freed_pages);
41762- nr_free -= freed_pages;
41763+ if (likely(nr_free != FREE_ALL_PAGES))
41764+ nr_free -= freed_pages;
41765 }
41766
41767 spin_unlock_irqrestore(&pool->lock, irq_flags);
41768@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41769 struct dma_page *d_page, *next;
41770 enum pool_type type;
41771 bool is_cached = false;
41772- unsigned count = 0, i, npages = 0;
41773+ unsigned long count = 0, i, npages = 0;
41774 unsigned long irq_flags;
41775
41776 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41777@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41778 static unsigned start_pool;
41779 unsigned idx = 0;
41780 unsigned pool_offset;
41781- unsigned shrink_pages = sc->nr_to_scan;
41782+ unsigned long shrink_pages = sc->nr_to_scan;
41783 struct device_pools *p;
41784 unsigned long freed = 0;
41785
41786@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41787 goto out;
41788 pool_offset = ++start_pool % _manager->npools;
41789 list_for_each_entry(p, &_manager->pools, pools) {
41790- unsigned nr_free;
41791+ unsigned long nr_free;
41792
41793 if (!p->dev)
41794 continue;
41795@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41796 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41797 freed += nr_free - shrink_pages;
41798
41799- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41800+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41801 p->pool->dev_name, p->pool->name, current->pid,
41802 nr_free, shrink_pages);
41803 }
41804diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41805index 5fc16ce..1bd84ec 100644
41806--- a/drivers/gpu/drm/udl/udl_fb.c
41807+++ b/drivers/gpu/drm/udl/udl_fb.c
41808@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41809 fb_deferred_io_cleanup(info);
41810 kfree(info->fbdefio);
41811 info->fbdefio = NULL;
41812- info->fbops->fb_mmap = udl_fb_mmap;
41813 }
41814
41815 pr_warn("released /dev/fb%d user=%d count=%d\n",
41816diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41817index ef8c500..01030c8 100644
41818--- a/drivers/gpu/drm/via/via_drv.h
41819+++ b/drivers/gpu/drm/via/via_drv.h
41820@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41821 typedef uint32_t maskarray_t[5];
41822
41823 typedef struct drm_via_irq {
41824- atomic_t irq_received;
41825+ atomic_unchecked_t irq_received;
41826 uint32_t pending_mask;
41827 uint32_t enable_mask;
41828 wait_queue_head_t irq_queue;
41829@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41830 struct timeval last_vblank;
41831 int last_vblank_valid;
41832 unsigned usec_per_vblank;
41833- atomic_t vbl_received;
41834+ atomic_unchecked_t vbl_received;
41835 drm_via_state_t hc_state;
41836 char pci_buf[VIA_PCI_BUF_SIZE];
41837 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41838diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41839index 1319433..a993b0c 100644
41840--- a/drivers/gpu/drm/via/via_irq.c
41841+++ b/drivers/gpu/drm/via/via_irq.c
41842@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41843 if (crtc != 0)
41844 return 0;
41845
41846- return atomic_read(&dev_priv->vbl_received);
41847+ return atomic_read_unchecked(&dev_priv->vbl_received);
41848 }
41849
41850 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41851@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41852
41853 status = VIA_READ(VIA_REG_INTERRUPT);
41854 if (status & VIA_IRQ_VBLANK_PENDING) {
41855- atomic_inc(&dev_priv->vbl_received);
41856- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41857+ atomic_inc_unchecked(&dev_priv->vbl_received);
41858+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41859 do_gettimeofday(&cur_vblank);
41860 if (dev_priv->last_vblank_valid) {
41861 dev_priv->usec_per_vblank =
41862@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41863 dev_priv->last_vblank = cur_vblank;
41864 dev_priv->last_vblank_valid = 1;
41865 }
41866- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41867+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41868 DRM_DEBUG("US per vblank is: %u\n",
41869 dev_priv->usec_per_vblank);
41870 }
41871@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41872
41873 for (i = 0; i < dev_priv->num_irqs; ++i) {
41874 if (status & cur_irq->pending_mask) {
41875- atomic_inc(&cur_irq->irq_received);
41876+ atomic_inc_unchecked(&cur_irq->irq_received);
41877 wake_up(&cur_irq->irq_queue);
41878 handled = 1;
41879 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41880@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41881 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41882 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41883 masks[irq][4]));
41884- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41885+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41886 } else {
41887 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41888 (((cur_irq_sequence =
41889- atomic_read(&cur_irq->irq_received)) -
41890+ atomic_read_unchecked(&cur_irq->irq_received)) -
41891 *sequence) <= (1 << 23)));
41892 }
41893 *sequence = cur_irq_sequence;
41894@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41895 }
41896
41897 for (i = 0; i < dev_priv->num_irqs; ++i) {
41898- atomic_set(&cur_irq->irq_received, 0);
41899+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41900 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41901 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41902 init_waitqueue_head(&cur_irq->irq_queue);
41903@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41904 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41905 case VIA_IRQ_RELATIVE:
41906 irqwait->request.sequence +=
41907- atomic_read(&cur_irq->irq_received);
41908+ atomic_read_unchecked(&cur_irq->irq_received);
41909 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41910 case VIA_IRQ_ABSOLUTE:
41911 break;
41912diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41913index d26a6da..5fa41ed 100644
41914--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41915+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41916@@ -447,7 +447,7 @@ struct vmw_private {
41917 * Fencing and IRQs.
41918 */
41919
41920- atomic_t marker_seq;
41921+ atomic_unchecked_t marker_seq;
41922 wait_queue_head_t fence_queue;
41923 wait_queue_head_t fifo_queue;
41924 spinlock_t waiter_lock;
41925diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41926index 39f2b03..d1b0a64 100644
41927--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41928+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41929@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41930 (unsigned int) min,
41931 (unsigned int) fifo->capabilities);
41932
41933- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41934+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41935 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41936 vmw_marker_queue_init(&fifo->marker_queue);
41937 return vmw_fifo_send_fence(dev_priv, &dummy);
41938@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41939 if (reserveable)
41940 iowrite32(bytes, fifo_mem +
41941 SVGA_FIFO_RESERVED);
41942- return fifo_mem + (next_cmd >> 2);
41943+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41944 } else {
41945 need_bounce = true;
41946 }
41947@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41948
41949 fm = vmw_fifo_reserve(dev_priv, bytes);
41950 if (unlikely(fm == NULL)) {
41951- *seqno = atomic_read(&dev_priv->marker_seq);
41952+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41953 ret = -ENOMEM;
41954 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41955 false, 3*HZ);
41956@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41957 }
41958
41959 do {
41960- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41961+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41962 } while (*seqno == 0);
41963
41964 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41965diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41966index 170b61b..fec7348 100644
41967--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41968+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41969@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41970 }
41971
41972 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41973- vmw_gmrid_man_init,
41974- vmw_gmrid_man_takedown,
41975- vmw_gmrid_man_get_node,
41976- vmw_gmrid_man_put_node,
41977- vmw_gmrid_man_debug
41978+ .init = vmw_gmrid_man_init,
41979+ .takedown = vmw_gmrid_man_takedown,
41980+ .get_node = vmw_gmrid_man_get_node,
41981+ .put_node = vmw_gmrid_man_put_node,
41982+ .debug = vmw_gmrid_man_debug
41983 };
41984diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41985index 69c8ce2..cacb0ab 100644
41986--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41987+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41988@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41989 int ret;
41990
41991 num_clips = arg->num_clips;
41992- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41993+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41994
41995 if (unlikely(num_clips == 0))
41996 return 0;
41997@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41998 int ret;
41999
42000 num_clips = arg->num_clips;
42001- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42002+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42003
42004 if (unlikely(num_clips == 0))
42005 return 0;
42006diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42007index 9fe9827..0aa2fc0 100644
42008--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42009+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42010@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42011 * emitted. Then the fence is stale and signaled.
42012 */
42013
42014- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42015+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42016 > VMW_FENCE_WRAP);
42017
42018 return ret;
42019@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42020
42021 if (fifo_idle)
42022 down_read(&fifo_state->rwsem);
42023- signal_seq = atomic_read(&dev_priv->marker_seq);
42024+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42025 ret = 0;
42026
42027 for (;;) {
42028diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42029index efd1ffd..0ae13ca 100644
42030--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42031+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42032@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42033 while (!vmw_lag_lt(queue, us)) {
42034 spin_lock(&queue->lock);
42035 if (list_empty(&queue->head))
42036- seqno = atomic_read(&dev_priv->marker_seq);
42037+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42038 else {
42039 marker = list_first_entry(&queue->head,
42040 struct vmw_marker, head);
42041diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42042index 37ac7b5..d52a5c9 100644
42043--- a/drivers/gpu/vga/vga_switcheroo.c
42044+++ b/drivers/gpu/vga/vga_switcheroo.c
42045@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42046
42047 /* this version is for the case where the power switch is separate
42048 to the device being powered down. */
42049-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42050+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42051 {
42052 /* copy over all the bus versions */
42053 if (dev->bus && dev->bus->pm) {
42054@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42055 return ret;
42056 }
42057
42058-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42059+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42060 {
42061 /* copy over all the bus versions */
42062 if (dev->bus && dev->bus->pm) {
42063diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42064index 56ce8c2..32ce524 100644
42065--- a/drivers/hid/hid-core.c
42066+++ b/drivers/hid/hid-core.c
42067@@ -2531,7 +2531,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42068
42069 int hid_add_device(struct hid_device *hdev)
42070 {
42071- static atomic_t id = ATOMIC_INIT(0);
42072+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42073 int ret;
42074
42075 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42076@@ -2574,7 +2574,7 @@ int hid_add_device(struct hid_device *hdev)
42077 /* XXX hack, any other cleaner solution after the driver core
42078 * is converted to allow more than 20 bytes as the device name? */
42079 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42080- hdev->vendor, hdev->product, atomic_inc_return(&id));
42081+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42082
42083 hid_debug_register(hdev, dev_name(&hdev->dev));
42084 ret = device_add(&hdev->dev);
42085diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42086index c13fb5b..55a3802 100644
42087--- a/drivers/hid/hid-wiimote-debug.c
42088+++ b/drivers/hid/hid-wiimote-debug.c
42089@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42090 else if (size == 0)
42091 return -EIO;
42092
42093- if (copy_to_user(u, buf, size))
42094+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42095 return -EFAULT;
42096
42097 *off += size;
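/*
 * The added check is the standard bounded-copy idiom: `size` is derived
 * from the request, `buf` is a fixed stack buffer, and copy_to_user()
 * would happily read past it if size were ever larger.  Clamping against
 * sizeof(buf) at the copy makes a stack disclosure impossible even if the
 * earlier length logic regresses:
 *
 *	if (size > sizeof(buf) || copy_to_user(u, buf, size))
 *		return -EFAULT;
 */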
42098diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42099index 2978f5e..ac3a23c 100644
42100--- a/drivers/hv/channel.c
42101+++ b/drivers/hv/channel.c
42102@@ -367,7 +367,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42103 int ret = 0;
42104
42105 next_gpadl_handle =
42106- (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42107+ (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42108
42109 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42110 if (ret)
42111diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42112index 50e51a5..b0bfd78 100644
42113--- a/drivers/hv/hv.c
42114+++ b/drivers/hv/hv.c
42115@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42116 u64 output_address = (output) ? virt_to_phys(output) : 0;
42117 u32 output_address_hi = output_address >> 32;
42118 u32 output_address_lo = output_address & 0xFFFFFFFF;
42119- void *hypercall_page = hv_context.hypercall_page;
42120+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42121
42122 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42123 "=a"(hv_status_lo) : "d" (control_hi),
42124@@ -164,7 +164,7 @@ int hv_init(void)
42125 /* See if the hypercall page is already set */
42126 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42127
42128- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42129+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42130
42131 if (!virtaddr)
42132 goto cleanup;
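/*
 * Both Hyper-V hunks serve PaX's W^X policy (KERNEXEC): the hypercall
 * page is allocated read-execute (PAGE_KERNEL_RX) instead of
 * writable+executable -- its contents are installed by the hypervisor
 * when the HV_X64_MSR_HYPERCALL MSR is written, so the guest never needs
 * write access -- and the indirect call goes through ktva_ktla(), a PaX
 * helper that appears here to translate the address between the split
 * text/data views KERNEXEC maintains, so the call targets the executable
 * mapping.  Net effect: no page is ever simultaneously writable and
 * executable, hypercall trampoline included.
 */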
42133diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42134index ff16938..e60879c 100644
42135--- a/drivers/hv/hv_balloon.c
42136+++ b/drivers/hv/hv_balloon.c
42137@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42138
42139 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42140 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42141-static atomic_t trans_id = ATOMIC_INIT(0);
42142+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42143
42144 static int dm_ring_size = (5 * PAGE_SIZE);
42145
42146@@ -947,7 +947,7 @@ static void hot_add_req(struct work_struct *dummy)
42147 pr_info("Memory hot add failed\n");
42148
42149 dm->state = DM_INITIALIZED;
42150- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42151+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42152 vmbus_sendpacket(dm->dev->channel, &resp,
42153 sizeof(struct dm_hot_add_response),
42154 (unsigned long)NULL,
42155@@ -1028,7 +1028,7 @@ static void post_status(struct hv_dynmem_device *dm)
42156 memset(&status, 0, sizeof(struct dm_status));
42157 status.hdr.type = DM_STATUS_REPORT;
42158 status.hdr.size = sizeof(struct dm_status);
42159- status.hdr.trans_id = atomic_inc_return(&trans_id);
42160+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42161
42162 /*
42163 * The host expects the guest to report free memory.
42164@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
42165 * send the status. This can happen if we were interrupted
42166 * after we picked our transaction ID.
42167 */
42168- if (status.hdr.trans_id != atomic_read(&trans_id))
42169+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42170 return;
42171
42172 /*
42173@@ -1188,7 +1188,7 @@ static void balloon_up(struct work_struct *dummy)
42174 */
42175
42176 do {
42177- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42178+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42179 ret = vmbus_sendpacket(dm_device.dev->channel,
42180 bl_resp,
42181 bl_resp->hdr.size,
42182@@ -1234,7 +1234,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42183
42184 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42185 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42186- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42187+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42188 resp.hdr.size = sizeof(struct dm_unballoon_response);
42189
42190 vmbus_sendpacket(dm_device.dev->channel, &resp,
42191@@ -1295,7 +1295,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42192 memset(&version_req, 0, sizeof(struct dm_version_request));
42193 version_req.hdr.type = DM_VERSION_REQUEST;
42194 version_req.hdr.size = sizeof(struct dm_version_request);
42195- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42196+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42197 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42198 version_req.is_last_attempt = 1;
42199
42200@@ -1468,7 +1468,7 @@ static int balloon_probe(struct hv_device *dev,
42201 memset(&version_req, 0, sizeof(struct dm_version_request));
42202 version_req.hdr.type = DM_VERSION_REQUEST;
42203 version_req.hdr.size = sizeof(struct dm_version_request);
42204- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42205+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42206 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42207 version_req.is_last_attempt = 0;
42208
42209@@ -1499,7 +1499,7 @@ static int balloon_probe(struct hv_device *dev,
42210 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42211 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42212 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42213- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42214+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42215
42216 cap_msg.caps.cap_bits.balloon = 1;
42217 cap_msg.caps.cap_bits.hot_add = 1;
42218diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42219index 44b1c94..6dccc2c 100644
42220--- a/drivers/hv/hyperv_vmbus.h
42221+++ b/drivers/hv/hyperv_vmbus.h
42222@@ -632,7 +632,7 @@ enum vmbus_connect_state {
42223 struct vmbus_connection {
42224 enum vmbus_connect_state conn_state;
42225
42226- atomic_t next_gpadl_handle;
42227+ atomic_unchecked_t next_gpadl_handle;
42228
42229 /*
42230 * Represents channel interrupts. Each bit position represents a
42231diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42232index f518b8d7..4bc0b64 100644
42233--- a/drivers/hv/vmbus_drv.c
42234+++ b/drivers/hv/vmbus_drv.c
42235@@ -840,10 +840,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42236 {
42237 int ret = 0;
42238
42239- static atomic_t device_num = ATOMIC_INIT(0);
42240+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42241
42242 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42243- atomic_inc_return(&device_num));
42244+ atomic_inc_return_unchecked(&device_num));
42245
42246 child_device_obj->device.bus = &hv_bus;
42247 child_device_obj->device.parent = &hv_acpi_dev->dev;
42248diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42249index 579bdf9..0dac21d5 100644
42250--- a/drivers/hwmon/acpi_power_meter.c
42251+++ b/drivers/hwmon/acpi_power_meter.c
42252@@ -116,7 +116,7 @@ struct sensor_template {
42253 struct device_attribute *devattr,
42254 const char *buf, size_t count);
42255 int index;
42256-};
42257+} __do_const;
42258
42259 /* Averaging interval */
42260 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42261@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42262 struct sensor_template *attrs)
42263 {
42264 struct device *dev = &resource->acpi_dev->dev;
42265- struct sensor_device_attribute *sensors =
42266+ sensor_device_attribute_no_const *sensors =
42267 &resource->sensors[resource->num_sensors];
42268 int res = 0;
42269
42270@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42271 return 0;
42272 }
42273
42274-static struct dmi_system_id __initdata pm_dmi_table[] = {
42275+static const struct dmi_system_id __initconst pm_dmi_table[] = {
42276 {
42277 enable_cap_knobs, "IBM Active Energy Manager",
42278 {
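/*
 * DMI quirk tables are scanned once at boot, so the patch moves them from
 * __initdata to const plus __initconst: the same discarded-after-init
 * lifetime, but read-only while mapped, and compatible with the constify
 * plugin.  The annotation pairing matters -- const data belongs in
 * .init.rodata, not .init.data:
 */
static const struct dmi_system_id __initconst example_dmi_table[] = {
	{ /* quirk entries as above */ },
	{ }	/* terminator */
};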
42279diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42280index 0af63da..05a183a 100644
42281--- a/drivers/hwmon/applesmc.c
42282+++ b/drivers/hwmon/applesmc.c
42283@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42284 {
42285 struct applesmc_node_group *grp;
42286 struct applesmc_dev_attr *node;
42287- struct attribute *attr;
42288+ attribute_no_const *attr;
42289 int ret, i;
42290
42291 for (grp = groups; grp->format; grp++) {
42292diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42293index cccef87..06ce8ec 100644
42294--- a/drivers/hwmon/asus_atk0110.c
42295+++ b/drivers/hwmon/asus_atk0110.c
42296@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42297 struct atk_sensor_data {
42298 struct list_head list;
42299 struct atk_data *data;
42300- struct device_attribute label_attr;
42301- struct device_attribute input_attr;
42302- struct device_attribute limit1_attr;
42303- struct device_attribute limit2_attr;
42304+ device_attribute_no_const label_attr;
42305+ device_attribute_no_const input_attr;
42306+ device_attribute_no_const limit1_attr;
42307+ device_attribute_no_const limit2_attr;
42308 char label_attr_name[ATTR_NAME_SIZE];
42309 char input_attr_name[ATTR_NAME_SIZE];
42310 char limit1_attr_name[ATTR_NAME_SIZE];
42311@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42312 static struct device_attribute atk_name_attr =
42313 __ATTR(name, 0444, atk_name_show, NULL);
42314
42315-static void atk_init_attribute(struct device_attribute *attr, char *name,
42316+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42317 sysfs_show_func show)
42318 {
42319 sysfs_attr_init(&attr->attr);
42320diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42321index 5b7fec8..05c957a 100644
42322--- a/drivers/hwmon/coretemp.c
42323+++ b/drivers/hwmon/coretemp.c
42324@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42325 return NOTIFY_OK;
42326 }
42327
42328-static struct notifier_block coretemp_cpu_notifier __refdata = {
42329+static struct notifier_block coretemp_cpu_notifier = {
42330 .notifier_call = coretemp_cpu_callback,
42331 };
42332
42333diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42334index 7a8a6fb..015c1fd 100644
42335--- a/drivers/hwmon/ibmaem.c
42336+++ b/drivers/hwmon/ibmaem.c
42337@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42338 struct aem_rw_sensor_template *rw)
42339 {
42340 struct device *dev = &data->pdev->dev;
42341- struct sensor_device_attribute *sensors = data->sensors;
42342+ sensor_device_attribute_no_const *sensors = data->sensors;
42343 int err;
42344
42345 /* Set up read-only sensors */
42346diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42347index 17ae2eb..21b71dd 100644
42348--- a/drivers/hwmon/iio_hwmon.c
42349+++ b/drivers/hwmon/iio_hwmon.c
42350@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42351 {
42352 struct device *dev = &pdev->dev;
42353 struct iio_hwmon_state *st;
42354- struct sensor_device_attribute *a;
42355+ sensor_device_attribute_no_const *a;
42356 int ret, i;
42357 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42358 enum iio_chan_type type;
42359diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42360index f3830db..9f4d6d5 100644
42361--- a/drivers/hwmon/nct6683.c
42362+++ b/drivers/hwmon/nct6683.c
42363@@ -397,11 +397,11 @@ static struct attribute_group *
42364 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42365 int repeat)
42366 {
42367- struct sensor_device_attribute_2 *a2;
42368- struct sensor_device_attribute *a;
42369+ sensor_device_attribute_2_no_const *a2;
42370+ sensor_device_attribute_no_const *a;
42371 struct sensor_device_template **t;
42372 struct sensor_device_attr_u *su;
42373- struct attribute_group *group;
42374+ attribute_group_no_const *group;
42375 struct attribute **attrs;
42376 int i, j, count;
42377
42378diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42379index 1be4117..88ae1e1 100644
42380--- a/drivers/hwmon/nct6775.c
42381+++ b/drivers/hwmon/nct6775.c
42382@@ -952,10 +952,10 @@ static struct attribute_group *
42383 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42384 int repeat)
42385 {
42386- struct attribute_group *group;
42387+ attribute_group_no_const *group;
42388 struct sensor_device_attr_u *su;
42389- struct sensor_device_attribute *a;
42390- struct sensor_device_attribute_2 *a2;
42391+ sensor_device_attribute_no_const *a;
42392+ sensor_device_attribute_2_no_const *a2;
42393 struct attribute **attrs;
42394 struct sensor_device_template **t;
42395 int i, count;
42396diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42397index f2e47c7..45d7941 100644
42398--- a/drivers/hwmon/pmbus/pmbus_core.c
42399+++ b/drivers/hwmon/pmbus/pmbus_core.c
42400@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42401 return 0;
42402 }
42403
42404-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42405+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42406 const char *name,
42407 umode_t mode,
42408 ssize_t (*show)(struct device *dev,
42409@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42410 dev_attr->store = store;
42411 }
42412
42413-static void pmbus_attr_init(struct sensor_device_attribute *a,
42414+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42415 const char *name,
42416 umode_t mode,
42417 ssize_t (*show)(struct device *dev,
42418@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42419 u16 reg, u8 mask)
42420 {
42421 struct pmbus_boolean *boolean;
42422- struct sensor_device_attribute *a;
42423+ sensor_device_attribute_no_const *a;
42424
42425 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42426 if (!boolean)
42427@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42428 bool update, bool readonly)
42429 {
42430 struct pmbus_sensor *sensor;
42431- struct device_attribute *a;
42432+ device_attribute_no_const *a;
42433
42434 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42435 if (!sensor)
42436@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42437 const char *lstring, int index)
42438 {
42439 struct pmbus_label *label;
42440- struct device_attribute *a;
42441+ device_attribute_no_const *a;
42442
42443 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42444 if (!label)
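/*
 * The *_no_const types running through these hwmon hunks are the escape
 * hatch for grsecurity's constify gcc plugin: the plugin makes structs
 * consisting of function pointers (device_attribute,
 * sensor_device_attribute, attribute_group, ...) const by default, and
 * objects that genuinely must be filled in at runtime -- like these
 * dynamically built sysfs attributes -- are declared through a generated
 * _no_const typedef instead.  Conceptually, per affected struct:
 *
 *	typedef struct device_attribute __no_const device_attribute_no_const;
 *
 * so ordinary instances land in read-only memory while the handful of
 * runtime-initialized ones opt out explicitly.
 */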
42445diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42446index d4f0935..7420593 100644
42447--- a/drivers/hwmon/sht15.c
42448+++ b/drivers/hwmon/sht15.c
42449@@ -169,7 +169,7 @@ struct sht15_data {
42450 int supply_uv;
42451 bool supply_uv_valid;
42452 struct work_struct update_supply_work;
42453- atomic_t interrupt_handled;
42454+ atomic_unchecked_t interrupt_handled;
42455 };
42456
42457 /**
42458@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42459 ret = gpio_direction_input(data->pdata->gpio_data);
42460 if (ret)
42461 return ret;
42462- atomic_set(&data->interrupt_handled, 0);
42463+ atomic_set_unchecked(&data->interrupt_handled, 0);
42464
42465 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42466 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42467 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42468 /* Only relevant if the interrupt hasn't occurred. */
42469- if (!atomic_read(&data->interrupt_handled))
42470+ if (!atomic_read_unchecked(&data->interrupt_handled))
42471 schedule_work(&data->read_work);
42472 }
42473 ret = wait_event_timeout(data->wait_queue,
42474@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42475
42476 /* First disable the interrupt */
42477 disable_irq_nosync(irq);
42478- atomic_inc(&data->interrupt_handled);
42479+ atomic_inc_unchecked(&data->interrupt_handled);
42480 /* Then schedule a reading work struct */
42481 if (data->state != SHT15_READING_NOTHING)
42482 schedule_work(&data->read_work);
42483@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42484 * If not, then start the interrupt again - care here as could
42485 * have gone low in meantime so verify it hasn't!
42486 */
42487- atomic_set(&data->interrupt_handled, 0);
42488+ atomic_set_unchecked(&data->interrupt_handled, 0);
42489 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42490 /* If still not occurred or another handler was scheduled */
42491 if (gpio_get_value(data->pdata->gpio_data)
42492- || atomic_read(&data->interrupt_handled))
42493+ || atomic_read_unchecked(&data->interrupt_handled))
42494 return;
42495 }
42496
42497diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42498index ac91c07..8e69663 100644
42499--- a/drivers/hwmon/via-cputemp.c
42500+++ b/drivers/hwmon/via-cputemp.c
42501@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42502 return NOTIFY_OK;
42503 }
42504
42505-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42506+static struct notifier_block via_cputemp_cpu_notifier = {
42507 .notifier_call = via_cputemp_cpu_callback,
42508 };
42509
42510diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42511index 65e3240..e6c511d 100644
42512--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42513+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42514@@ -39,7 +39,7 @@
42515 extern struct i2c_adapter amd756_smbus;
42516
42517 static struct i2c_adapter *s4882_adapter;
42518-static struct i2c_algorithm *s4882_algo;
42519+static i2c_algorithm_no_const *s4882_algo;
42520
42521 /* Wrapper access functions for multiplexed SMBus */
42522 static DEFINE_MUTEX(amd756_lock);
42523diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42524index b19a310..d6eece0 100644
42525--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42526+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42527@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42528 /* usb layer */
42529
42530 /* Send command to device, and get response. */
42531-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42532+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42533 {
42534 int ret = 0;
42535 int actual;
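/*
 * __intentional_overflow(-1) is an annotation consumed by the
 * size_overflow gcc plugin shipped with this patch: the plugin
 * instruments integer arithmetic feeding allocation and copy sizes and
 * traps on overflow, and this marker (with -1 apparently covering the
 * function as a whole) tells it that diolan_usb_transfer() may
 * legitimately wrap, exempting it from instrumentation rather than
 * generating false positives.
 */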
42536diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42537index 88eda09..cf40434 100644
42538--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42539+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42540@@ -37,7 +37,7 @@
42541 extern struct i2c_adapter *nforce2_smbus;
42542
42543 static struct i2c_adapter *s4985_adapter;
42544-static struct i2c_algorithm *s4985_algo;
42545+static i2c_algorithm_no_const *s4985_algo;
42546
42547 /* Wrapper access functions for multiplexed SMBus */
42548 static DEFINE_MUTEX(nforce2_lock);
42549diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42550index 71c7a39..71dd3e0 100644
42551--- a/drivers/i2c/i2c-dev.c
42552+++ b/drivers/i2c/i2c-dev.c
42553@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42554 break;
42555 }
42556
42557- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42558+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42559 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42560 if (IS_ERR(rdwr_pa[i].buf)) {
42561 res = PTR_ERR(rdwr_pa[i].buf);
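/*
 * __force_user is a PaX sparse annotation: rdwr_pa[i].buf is a
 * kernel-pointer field that briefly holds a userspace address here, and
 * the cast documents that while silencing sparse's address-space
 * checking, much like the stock kernel's (__force ... __user) spelling.
 * It changes nothing at runtime; memdup_user() still performs the actual
 * access-checked copy.
 */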
42562diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42563index 0b510ba..4fbb5085 100644
42564--- a/drivers/ide/ide-cd.c
42565+++ b/drivers/ide/ide-cd.c
42566@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42567 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42568 if ((unsigned long)buf & alignment
42569 || blk_rq_bytes(rq) & q->dma_pad_mask
42570- || object_is_on_stack(buf))
42571+ || object_starts_on_stack(buf))
42572 drive->dma = 0;
42573 }
42574 }
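/*
 * object_starts_on_stack() is PaX's variant of object_is_on_stack(),
 * swapped in patch-wide; it appears to test where the object begins,
 * which matters once grsecurity's stack changes (e.g. vmapped kernel
 * stacks under GRKERNSEC_KSTACKOVERFLOW) are in play.  The call site's
 * intent is unchanged: buffers on the kernel stack are not safe DMA
 * targets, so such requests fall back to PIO via drive->dma = 0.
 */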
42575diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42576index 4df97f6..c751151 100644
42577--- a/drivers/iio/industrialio-core.c
42578+++ b/drivers/iio/industrialio-core.c
42579@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42580 }
42581
42582 static
42583-int __iio_device_attr_init(struct device_attribute *dev_attr,
42584+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42585 const char *postfix,
42586 struct iio_chan_spec const *chan,
42587 ssize_t (*readfunc)(struct device *dev,
42588diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42589index e28a494..f7c2671 100644
42590--- a/drivers/infiniband/core/cm.c
42591+++ b/drivers/infiniband/core/cm.c
42592@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42593
42594 struct cm_counter_group {
42595 struct kobject obj;
42596- atomic_long_t counter[CM_ATTR_COUNT];
42597+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42598 };
42599
42600 struct cm_counter_attribute {
42601@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42602 struct ib_mad_send_buf *msg = NULL;
42603 int ret;
42604
42605- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42606+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42607 counter[CM_REQ_COUNTER]);
42608
42609 /* Quick state check to discard duplicate REQs. */
42610@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42611 if (!cm_id_priv)
42612 return;
42613
42614- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42615+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42616 counter[CM_REP_COUNTER]);
42617 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42618 if (ret)
42619@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42620 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42621 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42622 spin_unlock_irq(&cm_id_priv->lock);
42623- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42624+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42625 counter[CM_RTU_COUNTER]);
42626 goto out;
42627 }
42628@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42629 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42630 dreq_msg->local_comm_id);
42631 if (!cm_id_priv) {
42632- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42633+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42634 counter[CM_DREQ_COUNTER]);
42635 cm_issue_drep(work->port, work->mad_recv_wc);
42636 return -EINVAL;
42637@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42638 case IB_CM_MRA_REP_RCVD:
42639 break;
42640 case IB_CM_TIMEWAIT:
42641- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42642+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42643 counter[CM_DREQ_COUNTER]);
42644 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42645 goto unlock;
42646@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42647 cm_free_msg(msg);
42648 goto deref;
42649 case IB_CM_DREQ_RCVD:
42650- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42651+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42652 counter[CM_DREQ_COUNTER]);
42653 goto unlock;
42654 default:
42655@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42656 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42657 cm_id_priv->msg, timeout)) {
42658 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42659- atomic_long_inc(&work->port->
42660+ atomic_long_inc_unchecked(&work->port->
42661 counter_group[CM_RECV_DUPLICATES].
42662 counter[CM_MRA_COUNTER]);
42663 goto out;
42664@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42665 break;
42666 case IB_CM_MRA_REQ_RCVD:
42667 case IB_CM_MRA_REP_RCVD:
42668- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42669+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42670 counter[CM_MRA_COUNTER]);
42671 /* fall through */
42672 default:
42673@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42674 case IB_CM_LAP_IDLE:
42675 break;
42676 case IB_CM_MRA_LAP_SENT:
42677- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42678+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42679 counter[CM_LAP_COUNTER]);
42680 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42681 goto unlock;
42682@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42683 cm_free_msg(msg);
42684 goto deref;
42685 case IB_CM_LAP_RCVD:
42686- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42687+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42688 counter[CM_LAP_COUNTER]);
42689 goto unlock;
42690 default:
42691@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42692 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42693 if (cur_cm_id_priv) {
42694 spin_unlock_irq(&cm.lock);
42695- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42696+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42697 counter[CM_SIDR_REQ_COUNTER]);
42698 goto out; /* Duplicate message. */
42699 }
42700@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42701 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42702 msg->retries = 1;
42703
42704- atomic_long_add(1 + msg->retries,
42705+ atomic_long_add_unchecked(1 + msg->retries,
42706 &port->counter_group[CM_XMIT].counter[attr_index]);
42707 if (msg->retries)
42708- atomic_long_add(msg->retries,
42709+ atomic_long_add_unchecked(msg->retries,
42710 &port->counter_group[CM_XMIT_RETRIES].
42711 counter[attr_index]);
42712
42713@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42714 }
42715
42716 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42717- atomic_long_inc(&port->counter_group[CM_RECV].
42718+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42719 counter[attr_id - CM_ATTR_ID_OFFSET]);
42720
42721 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42722@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42723 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42724
42725 return sprintf(buf, "%ld\n",
42726- atomic_long_read(&group->counter[cm_attr->index]));
42727+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42728 }
42729
42730 static const struct sysfs_ops cm_counter_ops = {
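
Every counter touched in cm.c is a free-running MAD statistic, not an object
reference count, so each moves to atomic_long_unchecked_t. Under the REFCOUNT
hardening in this patch, the checked atomic types detect signed overflow so a
leaked reference can never be wrapped around into a use-after-free; counters
that may legitimately wrap opt out through the *_unchecked variants, which
behave like plain atomics. A rough, deliberately non-atomic sketch of the
distinction (the real implementation is arch-specific assembly):

    #include <limits.h>
    #include <stdio.h>

    typedef struct { long counter; } along_sketch_t;

    /* checked flavour: refuses to wrap past LONG_MAX */
    static void along_inc_checked(along_sketch_t *v)
    {
            if (v->counter == LONG_MAX) {
                    fprintf(stderr, "refcount overflow caught\n");
                    return;   /* the kernel would report and saturate */
            }
            v->counter++;
    }

    /* unchecked flavour: free-running statistic, wrapping is harmless */
    static void along_inc_unchecked(along_sketch_t *v)
    {
            v->counter++;
    }

    int main(void)
    {
            along_sketch_t refs = { LONG_MAX }, stats = { 0 };

            along_inc_checked(&refs);    /* detected, not wrapped */
            along_inc_unchecked(&stats); /* plain increment */
            printf("%ld %ld\n", refs.counter, stats.counter);
            return 0;
    }
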
42731diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42732index 9f5ad7c..588cd84 100644
42733--- a/drivers/infiniband/core/fmr_pool.c
42734+++ b/drivers/infiniband/core/fmr_pool.c
42735@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42736
42737 struct task_struct *thread;
42738
42739- atomic_t req_ser;
42740- atomic_t flush_ser;
42741+ atomic_unchecked_t req_ser;
42742+ atomic_unchecked_t flush_ser;
42743
42744 wait_queue_head_t force_wait;
42745 };
42746@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42747 struct ib_fmr_pool *pool = pool_ptr;
42748
42749 do {
42750- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42751+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42752 ib_fmr_batch_release(pool);
42753
42754- atomic_inc(&pool->flush_ser);
42755+ atomic_inc_unchecked(&pool->flush_ser);
42756 wake_up_interruptible(&pool->force_wait);
42757
42758 if (pool->flush_function)
42759@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42760 }
42761
42762 set_current_state(TASK_INTERRUPTIBLE);
42763- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42764+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42765 !kthread_should_stop())
42766 schedule();
42767 __set_current_state(TASK_RUNNING);
42768@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42769 pool->dirty_watermark = params->dirty_watermark;
42770 pool->dirty_len = 0;
42771 spin_lock_init(&pool->pool_lock);
42772- atomic_set(&pool->req_ser, 0);
42773- atomic_set(&pool->flush_ser, 0);
42774+ atomic_set_unchecked(&pool->req_ser, 0);
42775+ atomic_set_unchecked(&pool->flush_ser, 0);
42776 init_waitqueue_head(&pool->force_wait);
42777
42778 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42779@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42780 }
42781 spin_unlock_irq(&pool->pool_lock);
42782
42783- serial = atomic_inc_return(&pool->req_ser);
42784+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42785 wake_up_process(pool->thread);
42786
42787 if (wait_event_interruptible(pool->force_wait,
42788- atomic_read(&pool->flush_ser) - serial >= 0))
42789+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42790 return -EINTR;
42791
42792 return 0;
42793@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42794 } else {
42795 list_add_tail(&fmr->list, &pool->dirty_list);
42796 if (++pool->dirty_len >= pool->dirty_watermark) {
42797- atomic_inc(&pool->req_ser);
42798+ atomic_inc_unchecked(&pool->req_ser);
42799 wake_up_process(pool->thread);
42800 }
42801 }
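
fmr_pool's req_ser/flush_ser form a request/acknowledge serial pair that is
compared by signed subtraction, so wraparound is expected and harmless; that
is why the pair becomes atomic_unchecked_t instead of gaining overflow traps.
The comparison idiom, shown standalone (the cast relies on two's-complement
conversion, as kernel code does):

    #include <stdio.h>

    /* true when a is older than b, valid across wraparound as long as
     * the two serials stay within half the counter range of each other */
    static int serial_before(unsigned int a, unsigned int b)
    {
            return (int)(a - b) < 0;
    }

    int main(void)
    {
            unsigned int flush = 0xffffffffu;   /* about to wrap */
            unsigned int req   = flush + 2;     /* wrapped past zero */

            printf("%d\n", serial_before(flush, req));   /* prints 1 */
            return 0;
    }
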
42802diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
42803index a9f0489..27a161b 100644
42804--- a/drivers/infiniband/core/uverbs_cmd.c
42805+++ b/drivers/infiniband/core/uverbs_cmd.c
42806@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
42807 if (copy_from_user(&cmd, buf, sizeof cmd))
42808 return -EFAULT;
42809
42810+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
42811+ return -EFAULT;
42812+
42813 INIT_UDATA(&udata, buf + sizeof cmd,
42814 (unsigned long) cmd.response + sizeof resp,
42815 in_len - sizeof cmd, out_len - sizeof resp);
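
The access_ok_noprefault() call added to ib_uverbs_reg_mr() validates the
user-supplied (start, length) pair of a memory-registration request before
anything dereferences it. access_ok_noprefault is a helper specific to this
patch; the sketch below only shows the shape of such a range check, with
USER_TOP_SKETCH standing in for the per-arch user-space limit:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define USER_TOP_SKETCH 0x00007ffffffff000ULL   /* hypothetical limit */

    static bool user_range_ok(uint64_t start, uint64_t length)
    {
            uint64_t end = start + length;

            if (end < start)                /* start + length overflowed */
                    return false;
            return end <= USER_TOP_SKETCH;  /* never reaches kernel space */
    }

    int main(void)
    {
            printf("%d\n", user_range_ok(0x1000, 0x2000));        /* 1 */
            printf("%d\n", user_range_ok(UINT64_MAX - 16, 4096)); /* 0 */
            return 0;
    }
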
42816diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42817index 6791fd1..78bdcdf 100644
42818--- a/drivers/infiniband/hw/cxgb4/mem.c
42819+++ b/drivers/infiniband/hw/cxgb4/mem.c
42820@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42821 int err;
42822 struct fw_ri_tpte tpt;
42823 u32 stag_idx;
42824- static atomic_t key;
42825+ static atomic_unchecked_t key;
42826
42827 if (c4iw_fatal_error(rdev))
42828 return -EIO;
42829@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42830 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42831 rdev->stats.stag.max = rdev->stats.stag.cur;
42832 mutex_unlock(&rdev->stats.lock);
42833- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42834+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42835 }
42836 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42837 __func__, stag_state, type, pdid, stag_idx);
42838diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42839index 79b3dbc..96e5fcc 100644
42840--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42841+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42842@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42843 struct ib_atomic_eth *ateth;
42844 struct ipath_ack_entry *e;
42845 u64 vaddr;
42846- atomic64_t *maddr;
42847+ atomic64_unchecked_t *maddr;
42848 u64 sdata;
42849 u32 rkey;
42850 u8 next;
42851@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42852 IB_ACCESS_REMOTE_ATOMIC)))
42853 goto nack_acc_unlck;
42854 /* Perform atomic OP and save result. */
42855- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42856+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42857 sdata = be64_to_cpu(ateth->swap_data);
42858 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42859 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42860- (u64) atomic64_add_return(sdata, maddr) - sdata :
42861+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42862 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42863 be64_to_cpu(ateth->compare_data),
42864 sdata);
42865diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42866index 1f95bba..9530f87 100644
42867--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42868+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42869@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42870 unsigned long flags;
42871 struct ib_wc wc;
42872 u64 sdata;
42873- atomic64_t *maddr;
42874+ atomic64_unchecked_t *maddr;
42875 enum ib_wc_status send_status;
42876
42877 /*
42878@@ -382,11 +382,11 @@ again:
42879 IB_ACCESS_REMOTE_ATOMIC)))
42880 goto acc_err;
42881 /* Perform atomic OP and save result. */
42882- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42883+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42884 sdata = wqe->wr.wr.atomic.compare_add;
42885 *(u64 *) sqp->s_sge.sge.vaddr =
42886 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42887- (u64) atomic64_add_return(sdata, maddr) - sdata :
42888+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42889 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42890 sdata, wqe->wr.wr.atomic.swap);
42891 goto send_comp;
42892diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42893index 5904026..f1c30e5 100644
42894--- a/drivers/infiniband/hw/mlx4/mad.c
42895+++ b/drivers/infiniband/hw/mlx4/mad.c
42896@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42897
42898 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42899 {
42900- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42901+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42902 cpu_to_be64(0xff00000000000000LL);
42903 }
42904
42905diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42906index ed327e6..ca1739e0 100644
42907--- a/drivers/infiniband/hw/mlx4/mcg.c
42908+++ b/drivers/infiniband/hw/mlx4/mcg.c
42909@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42910 {
42911 char name[20];
42912
42913- atomic_set(&ctx->tid, 0);
42914+ atomic_set_unchecked(&ctx->tid, 0);
42915 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42916 ctx->mcg_wq = create_singlethread_workqueue(name);
42917 if (!ctx->mcg_wq)
42918diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42919index f829fd9..1a8d436 100644
42920--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42921+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42922@@ -439,7 +439,7 @@ struct mlx4_ib_demux_ctx {
42923 struct list_head mcg_mgid0_list;
42924 struct workqueue_struct *mcg_wq;
42925 struct mlx4_ib_demux_pv_ctx **tun;
42926- atomic_t tid;
42927+ atomic_unchecked_t tid;
42928 int flushing; /* flushing the work queue */
42929 };
42930
42931diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42932index 9d3e5c1..6f166df 100644
42933--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42934+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42935@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42936 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42937 }
42938
42939-int mthca_QUERY_FW(struct mthca_dev *dev)
42940+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42941 {
42942 struct mthca_mailbox *mailbox;
42943 u32 *outbox;
42944@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42945 CMD_TIME_CLASS_B);
42946 }
42947
42948-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42949+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42950 int num_mtt)
42951 {
42952 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42953@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42954 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42955 }
42956
42957-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42958+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42959 int eq_num)
42960 {
42961 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42962@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42963 CMD_TIME_CLASS_B);
42964 }
42965
42966-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42967+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42968 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42969 void *in_mad, void *response_mad)
42970 {
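
The __intentional_overflow(-1) annotations sprinkled through the mthca files
are markers for the size_overflow GCC plugin, which instruments integer
expressions feeding size computations and kills the task on unexpected
wraparound; functions whose arithmetic wraps by design are exempted. A guess
at the usual shape of such a macro (the real definition lives in the patch's
compiler headers, and the attribute spelling here is illustrative):

    #ifdef SIZE_OVERFLOW_PLUGIN_SKETCH
    # define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)   /* vanishes without the plugin */
    #endif

    /* annotated like the mthca functions above: the multiply may wrap
     * and the instrumentation is told to stand down */
    static int __intentional_overflow(-1) mix_sketch(int x)
    {
            return (int)(x * 2654435761u);   /* hash-style mix, wraps by design */
    }
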
42971diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42972index ded76c1..0cf0a08 100644
42973--- a/drivers/infiniband/hw/mthca/mthca_main.c
42974+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42975@@ -692,7 +692,7 @@ err_close:
42976 return err;
42977 }
42978
42979-static int mthca_setup_hca(struct mthca_dev *dev)
42980+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42981 {
42982 int err;
42983
42984diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42985index ed9a989..6aa5dc2 100644
42986--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42987+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42988@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42989 * through the bitmaps)
42990 */
42991
42992-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42993+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42994 {
42995 int o;
42996 int m;
42997@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42998 return key;
42999 }
43000
43001-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43002+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43003 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43004 {
43005 struct mthca_mailbox *mailbox;
43006@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43007 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43008 }
43009
43010-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43011+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43012 u64 *buffer_list, int buffer_size_shift,
43013 int list_len, u64 iova, u64 total_size,
43014 u32 access, struct mthca_mr *mr)
43015diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43016index 415f8e1..e34214e 100644
43017--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43018+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43019@@ -764,7 +764,7 @@ unlock:
43020 return 0;
43021 }
43022
43023-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43024+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43025 {
43026 struct mthca_dev *dev = to_mdev(ibcq->device);
43027 struct mthca_cq *cq = to_mcq(ibcq);
43028diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43029index 3b2a6dc..bce26ff 100644
43030--- a/drivers/infiniband/hw/nes/nes.c
43031+++ b/drivers/infiniband/hw/nes/nes.c
43032@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43033 LIST_HEAD(nes_adapter_list);
43034 static LIST_HEAD(nes_dev_list);
43035
43036-atomic_t qps_destroyed;
43037+atomic_unchecked_t qps_destroyed;
43038
43039 static unsigned int ee_flsh_adapter;
43040 static unsigned int sysfs_nonidx_addr;
43041@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43042 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43043 struct nes_adapter *nesadapter = nesdev->nesadapter;
43044
43045- atomic_inc(&qps_destroyed);
43046+ atomic_inc_unchecked(&qps_destroyed);
43047
43048 /* Free the control structures */
43049
43050diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43051index bd9d132..70d84f4 100644
43052--- a/drivers/infiniband/hw/nes/nes.h
43053+++ b/drivers/infiniband/hw/nes/nes.h
43054@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43055 extern unsigned int wqm_quanta;
43056 extern struct list_head nes_adapter_list;
43057
43058-extern atomic_t cm_connects;
43059-extern atomic_t cm_accepts;
43060-extern atomic_t cm_disconnects;
43061-extern atomic_t cm_closes;
43062-extern atomic_t cm_connecteds;
43063-extern atomic_t cm_connect_reqs;
43064-extern atomic_t cm_rejects;
43065-extern atomic_t mod_qp_timouts;
43066-extern atomic_t qps_created;
43067-extern atomic_t qps_destroyed;
43068-extern atomic_t sw_qps_destroyed;
43069+extern atomic_unchecked_t cm_connects;
43070+extern atomic_unchecked_t cm_accepts;
43071+extern atomic_unchecked_t cm_disconnects;
43072+extern atomic_unchecked_t cm_closes;
43073+extern atomic_unchecked_t cm_connecteds;
43074+extern atomic_unchecked_t cm_connect_reqs;
43075+extern atomic_unchecked_t cm_rejects;
43076+extern atomic_unchecked_t mod_qp_timouts;
43077+extern atomic_unchecked_t qps_created;
43078+extern atomic_unchecked_t qps_destroyed;
43079+extern atomic_unchecked_t sw_qps_destroyed;
43080 extern u32 mh_detected;
43081 extern u32 mh_pauses_sent;
43082 extern u32 cm_packets_sent;
43083@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43084 extern u32 cm_packets_received;
43085 extern u32 cm_packets_dropped;
43086 extern u32 cm_packets_retrans;
43087-extern atomic_t cm_listens_created;
43088-extern atomic_t cm_listens_destroyed;
43089+extern atomic_unchecked_t cm_listens_created;
43090+extern atomic_unchecked_t cm_listens_destroyed;
43091 extern u32 cm_backlog_drops;
43092-extern atomic_t cm_loopbacks;
43093-extern atomic_t cm_nodes_created;
43094-extern atomic_t cm_nodes_destroyed;
43095-extern atomic_t cm_accel_dropped_pkts;
43096-extern atomic_t cm_resets_recvd;
43097-extern atomic_t pau_qps_created;
43098-extern atomic_t pau_qps_destroyed;
43099+extern atomic_unchecked_t cm_loopbacks;
43100+extern atomic_unchecked_t cm_nodes_created;
43101+extern atomic_unchecked_t cm_nodes_destroyed;
43102+extern atomic_unchecked_t cm_accel_dropped_pkts;
43103+extern atomic_unchecked_t cm_resets_recvd;
43104+extern atomic_unchecked_t pau_qps_created;
43105+extern atomic_unchecked_t pau_qps_destroyed;
43106
43107 extern u32 int_mod_timer_init;
43108 extern u32 int_mod_cq_depth_256;
43109diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43110index 6f09a72..cf4399d 100644
43111--- a/drivers/infiniband/hw/nes/nes_cm.c
43112+++ b/drivers/infiniband/hw/nes/nes_cm.c
43113@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43114 u32 cm_packets_retrans;
43115 u32 cm_packets_created;
43116 u32 cm_packets_received;
43117-atomic_t cm_listens_created;
43118-atomic_t cm_listens_destroyed;
43119+atomic_unchecked_t cm_listens_created;
43120+atomic_unchecked_t cm_listens_destroyed;
43121 u32 cm_backlog_drops;
43122-atomic_t cm_loopbacks;
43123-atomic_t cm_nodes_created;
43124-atomic_t cm_nodes_destroyed;
43125-atomic_t cm_accel_dropped_pkts;
43126-atomic_t cm_resets_recvd;
43127+atomic_unchecked_t cm_loopbacks;
43128+atomic_unchecked_t cm_nodes_created;
43129+atomic_unchecked_t cm_nodes_destroyed;
43130+atomic_unchecked_t cm_accel_dropped_pkts;
43131+atomic_unchecked_t cm_resets_recvd;
43132
43133 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43134 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43135@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43136 /* instance of function pointers for client API */
43137 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43138 static struct nes_cm_ops nes_cm_api = {
43139- mini_cm_accelerated,
43140- mini_cm_listen,
43141- mini_cm_del_listen,
43142- mini_cm_connect,
43143- mini_cm_close,
43144- mini_cm_accept,
43145- mini_cm_reject,
43146- mini_cm_recv_pkt,
43147- mini_cm_dealloc_core,
43148- mini_cm_get,
43149- mini_cm_set
43150+ .accelerated = mini_cm_accelerated,
43151+ .listen = mini_cm_listen,
43152+ .stop_listener = mini_cm_del_listen,
43153+ .connect = mini_cm_connect,
43154+ .close = mini_cm_close,
43155+ .accept = mini_cm_accept,
43156+ .reject = mini_cm_reject,
43157+ .recv_pkt = mini_cm_recv_pkt,
43158+ .destroy_cm_core = mini_cm_dealloc_core,
43159+ .get = mini_cm_get,
43160+ .set = mini_cm_set
43161 };
43162
43163 static struct nes_cm_core *g_cm_core;
43164
43165-atomic_t cm_connects;
43166-atomic_t cm_accepts;
43167-atomic_t cm_disconnects;
43168-atomic_t cm_closes;
43169-atomic_t cm_connecteds;
43170-atomic_t cm_connect_reqs;
43171-atomic_t cm_rejects;
43172+atomic_unchecked_t cm_connects;
43173+atomic_unchecked_t cm_accepts;
43174+atomic_unchecked_t cm_disconnects;
43175+atomic_unchecked_t cm_closes;
43176+atomic_unchecked_t cm_connecteds;
43177+atomic_unchecked_t cm_connect_reqs;
43178+atomic_unchecked_t cm_rejects;
43179
43180 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43181 {
43182@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43183 kfree(listener);
43184 listener = NULL;
43185 ret = 0;
43186- atomic_inc(&cm_listens_destroyed);
43187+ atomic_inc_unchecked(&cm_listens_destroyed);
43188 } else {
43189 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43190 }
43191@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43192 cm_node->rem_mac);
43193
43194 add_hte_node(cm_core, cm_node);
43195- atomic_inc(&cm_nodes_created);
43196+ atomic_inc_unchecked(&cm_nodes_created);
43197
43198 return cm_node;
43199 }
43200@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43201 }
43202
43203 atomic_dec(&cm_core->node_cnt);
43204- atomic_inc(&cm_nodes_destroyed);
43205+ atomic_inc_unchecked(&cm_nodes_destroyed);
43206 nesqp = cm_node->nesqp;
43207 if (nesqp) {
43208 nesqp->cm_node = NULL;
43209@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43210
43211 static void drop_packet(struct sk_buff *skb)
43212 {
43213- atomic_inc(&cm_accel_dropped_pkts);
43214+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43215 dev_kfree_skb_any(skb);
43216 }
43217
43218@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43219 {
43220
43221 int reset = 0; /* whether to send reset in case of err.. */
43222- atomic_inc(&cm_resets_recvd);
43223+ atomic_inc_unchecked(&cm_resets_recvd);
43224 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43225 " refcnt=%d\n", cm_node, cm_node->state,
43226 atomic_read(&cm_node->ref_count));
43227@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43228 rem_ref_cm_node(cm_node->cm_core, cm_node);
43229 return NULL;
43230 }
43231- atomic_inc(&cm_loopbacks);
43232+ atomic_inc_unchecked(&cm_loopbacks);
43233 loopbackremotenode->loopbackpartner = cm_node;
43234 loopbackremotenode->tcp_cntxt.rcv_wscale =
43235 NES_CM_DEFAULT_RCV_WND_SCALE;
43236@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43237 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43238 else {
43239 rem_ref_cm_node(cm_core, cm_node);
43240- atomic_inc(&cm_accel_dropped_pkts);
43241+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43242 dev_kfree_skb_any(skb);
43243 }
43244 break;
43245@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43246
43247 if ((cm_id) && (cm_id->event_handler)) {
43248 if (issue_disconn) {
43249- atomic_inc(&cm_disconnects);
43250+ atomic_inc_unchecked(&cm_disconnects);
43251 cm_event.event = IW_CM_EVENT_DISCONNECT;
43252 cm_event.status = disconn_status;
43253 cm_event.local_addr = cm_id->local_addr;
43254@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43255 }
43256
43257 if (issue_close) {
43258- atomic_inc(&cm_closes);
43259+ atomic_inc_unchecked(&cm_closes);
43260 nes_disconnect(nesqp, 1);
43261
43262 cm_id->provider_data = nesqp;
43263@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43264
43265 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43266 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43267- atomic_inc(&cm_accepts);
43268+ atomic_inc_unchecked(&cm_accepts);
43269
43270 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43271 netdev_refcnt_read(nesvnic->netdev));
43272@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43273 struct nes_cm_core *cm_core;
43274 u8 *start_buff;
43275
43276- atomic_inc(&cm_rejects);
43277+ atomic_inc_unchecked(&cm_rejects);
43278 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43279 loopback = cm_node->loopbackpartner;
43280 cm_core = cm_node->cm_core;
43281@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43282 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43283 ntohs(laddr->sin_port));
43284
43285- atomic_inc(&cm_connects);
43286+ atomic_inc_unchecked(&cm_connects);
43287 nesqp->active_conn = 1;
43288
43289 /* cache the cm_id in the qp */
43290@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43291 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43292 return err;
43293 }
43294- atomic_inc(&cm_listens_created);
43295+ atomic_inc_unchecked(&cm_listens_created);
43296 }
43297
43298 cm_id->add_ref(cm_id);
43299@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43300
43301 if (nesqp->destroyed)
43302 return;
43303- atomic_inc(&cm_connecteds);
43304+ atomic_inc_unchecked(&cm_connecteds);
43305 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43306 " local port 0x%04X. jiffies = %lu.\n",
43307 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43308@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43309
43310 cm_id->add_ref(cm_id);
43311 ret = cm_id->event_handler(cm_id, &cm_event);
43312- atomic_inc(&cm_closes);
43313+ atomic_inc_unchecked(&cm_closes);
43314 cm_event.event = IW_CM_EVENT_CLOSE;
43315 cm_event.status = 0;
43316 cm_event.provider_data = cm_id->provider_data;
43317@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43318 return;
43319 cm_id = cm_node->cm_id;
43320
43321- atomic_inc(&cm_connect_reqs);
43322+ atomic_inc_unchecked(&cm_connect_reqs);
43323 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43324 cm_node, cm_id, jiffies);
43325
43326@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43327 return;
43328 cm_id = cm_node->cm_id;
43329
43330- atomic_inc(&cm_connect_reqs);
43331+ atomic_inc_unchecked(&cm_connect_reqs);
43332 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43333 cm_node, cm_id, jiffies);
43334
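
Besides the counter conversions, this nes_cm.c hunk rewrites the nes_cm_ops
table from positional to designated initializers. By-name initialization is
what makes structure layout randomization (another plugin in this patch)
workable, and it is safer in general: a silent field reordering can no longer
misbind the handlers. Minimal illustration with a made-up ops table:

    struct demo_ops {
            int  (*open)(void);
            void (*close)(void);
    };

    static int  demo_open(void)  { return 0; }
    static void demo_close(void) { }

    /* a positional { demo_open, demo_close } would silently misbind if
     * the fields above were ever reordered; by-name binding cannot */
    static const struct demo_ops demo_ops_table = {
            .open  = demo_open,
            .close = demo_close,
    };
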
43335diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43336index 4166452..fc952c3 100644
43337--- a/drivers/infiniband/hw/nes/nes_mgt.c
43338+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43339@@ -40,8 +40,8 @@
43340 #include "nes.h"
43341 #include "nes_mgt.h"
43342
43343-atomic_t pau_qps_created;
43344-atomic_t pau_qps_destroyed;
43345+atomic_unchecked_t pau_qps_created;
43346+atomic_unchecked_t pau_qps_destroyed;
43347
43348 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43349 {
43350@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43351 {
43352 struct sk_buff *skb;
43353 unsigned long flags;
43354- atomic_inc(&pau_qps_destroyed);
43355+ atomic_inc_unchecked(&pau_qps_destroyed);
43356
43357 /* Free packets that have not yet been forwarded */
43358 /* Lock is acquired by skb_dequeue when removing the skb */
43359@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43360 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43361 skb_queue_head_init(&nesqp->pau_list);
43362 spin_lock_init(&nesqp->pau_lock);
43363- atomic_inc(&pau_qps_created);
43364+ atomic_inc_unchecked(&pau_qps_created);
43365 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43366 }
43367
43368diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43369index 70acda9..a96de9d 100644
43370--- a/drivers/infiniband/hw/nes/nes_nic.c
43371+++ b/drivers/infiniband/hw/nes/nes_nic.c
43372@@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43373 target_stat_values[++index] = mh_detected;
43374 target_stat_values[++index] = mh_pauses_sent;
43375 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43376- target_stat_values[++index] = atomic_read(&cm_connects);
43377- target_stat_values[++index] = atomic_read(&cm_accepts);
43378- target_stat_values[++index] = atomic_read(&cm_disconnects);
43379- target_stat_values[++index] = atomic_read(&cm_connecteds);
43380- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43381- target_stat_values[++index] = atomic_read(&cm_rejects);
43382- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43383- target_stat_values[++index] = atomic_read(&qps_created);
43384- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43385- target_stat_values[++index] = atomic_read(&qps_destroyed);
43386- target_stat_values[++index] = atomic_read(&cm_closes);
43387+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43388+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43389+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43390+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43391+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43392+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43393+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43394+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43395+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43396+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43397+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43398 target_stat_values[++index] = cm_packets_sent;
43399 target_stat_values[++index] = cm_packets_bounced;
43400 target_stat_values[++index] = cm_packets_created;
43401 target_stat_values[++index] = cm_packets_received;
43402 target_stat_values[++index] = cm_packets_dropped;
43403 target_stat_values[++index] = cm_packets_retrans;
43404- target_stat_values[++index] = atomic_read(&cm_listens_created);
43405- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43406+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43407+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43408 target_stat_values[++index] = cm_backlog_drops;
43409- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43410- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43411- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43412- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43413- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43414+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43415+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43416+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43417+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43418+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43419 target_stat_values[++index] = nesadapter->free_4kpbl;
43420 target_stat_values[++index] = nesadapter->free_256pbl;
43421 target_stat_values[++index] = int_mod_timer_init;
43422 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43423 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43424 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43425- target_stat_values[++index] = atomic_read(&pau_qps_created);
43426- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43427+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43428+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43429 }
43430
43431 /**
43432diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43433index c0d0296..3185f57 100644
43434--- a/drivers/infiniband/hw/nes/nes_verbs.c
43435+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43436@@ -46,9 +46,9 @@
43437
43438 #include <rdma/ib_umem.h>
43439
43440-atomic_t mod_qp_timouts;
43441-atomic_t qps_created;
43442-atomic_t sw_qps_destroyed;
43443+atomic_unchecked_t mod_qp_timouts;
43444+atomic_unchecked_t qps_created;
43445+atomic_unchecked_t sw_qps_destroyed;
43446
43447 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43448
43449@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43450 if (init_attr->create_flags)
43451 return ERR_PTR(-EINVAL);
43452
43453- atomic_inc(&qps_created);
43454+ atomic_inc_unchecked(&qps_created);
43455 switch (init_attr->qp_type) {
43456 case IB_QPT_RC:
43457 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43458@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43459 struct iw_cm_event cm_event;
43460 int ret = 0;
43461
43462- atomic_inc(&sw_qps_destroyed);
43463+ atomic_inc_unchecked(&sw_qps_destroyed);
43464 nesqp->destroyed = 1;
43465
43466 /* Blow away the connection if it exists. */
43467diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43468index ffd48bf..83cdb56 100644
43469--- a/drivers/infiniband/hw/qib/qib.h
43470+++ b/drivers/infiniband/hw/qib/qib.h
43471@@ -52,6 +52,7 @@
43472 #include <linux/kref.h>
43473 #include <linux/sched.h>
43474 #include <linux/kthread.h>
43475+#include <linux/slab.h>
43476
43477 #include "qib_common.h"
43478 #include "qib_verbs.h"
43479diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43480index cdc7df4..a2fdfdb 100644
43481--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43482+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43483@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43484 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43485 }
43486
43487-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43488+static struct rtnl_link_ops ipoib_link_ops = {
43489 .kind = "ipoib",
43490 .maxtype = IFLA_IPOIB_MAX,
43491 .policy = ipoib_policy,
43492diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43493index e853a21..56fc5a8 100644
43494--- a/drivers/input/gameport/gameport.c
43495+++ b/drivers/input/gameport/gameport.c
43496@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43497 */
43498 static void gameport_init_port(struct gameport *gameport)
43499 {
43500- static atomic_t gameport_no = ATOMIC_INIT(-1);
43501+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43502
43503 __module_get(THIS_MODULE);
43504
43505 mutex_init(&gameport->drv_mutex);
43506 device_initialize(&gameport->dev);
43507 dev_set_name(&gameport->dev, "gameport%lu",
43508- (unsigned long)atomic_inc_return(&gameport_no));
43509+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43510 gameport->dev.bus = &gameport_bus;
43511 gameport->dev.release = gameport_release_port;
43512 if (gameport->parent)
43513diff --git a/drivers/input/input.c b/drivers/input/input.c
43514index cc357f1..ee42fbc 100644
43515--- a/drivers/input/input.c
43516+++ b/drivers/input/input.c
43517@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
43518 */
43519 struct input_dev *input_allocate_device(void)
43520 {
43521- static atomic_t input_no = ATOMIC_INIT(-1);
43522+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43523 struct input_dev *dev;
43524
43525 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43526@@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
43527 INIT_LIST_HEAD(&dev->node);
43528
43529 dev_set_name(&dev->dev, "input%lu",
43530- (unsigned long)atomic_inc_return(&input_no));
43531+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43532
43533 __module_get(THIS_MODULE);
43534 }
43535diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43536index 4a95b22..874c182 100644
43537--- a/drivers/input/joystick/sidewinder.c
43538+++ b/drivers/input/joystick/sidewinder.c
43539@@ -30,6 +30,7 @@
43540 #include <linux/kernel.h>
43541 #include <linux/module.h>
43542 #include <linux/slab.h>
43543+#include <linux/sched.h>
43544 #include <linux/input.h>
43545 #include <linux/gameport.h>
43546 #include <linux/jiffies.h>
43547diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43548index 3aa2f3f..53c00ea 100644
43549--- a/drivers/input/joystick/xpad.c
43550+++ b/drivers/input/joystick/xpad.c
43551@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43552
43553 static int xpad_led_probe(struct usb_xpad *xpad)
43554 {
43555- static atomic_t led_seq = ATOMIC_INIT(-1);
43556+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43557 unsigned long led_no;
43558 struct xpad_led *led;
43559 struct led_classdev *led_cdev;
43560@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43561 if (!led)
43562 return -ENOMEM;
43563
43564- led_no = atomic_inc_return(&led_seq);
43565+ led_no = atomic_inc_return_unchecked(&led_seq);
43566
43567 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43568 led->xpad = xpad;
43569diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43570index ac1fa5f..5f7502c 100644
43571--- a/drivers/input/misc/ims-pcu.c
43572+++ b/drivers/input/misc/ims-pcu.c
43573@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43574
43575 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43576 {
43577- static atomic_t device_no = ATOMIC_INIT(-1);
43578+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43579
43580 const struct ims_pcu_device_info *info;
43581 int error;
43582@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43583 }
43584
43585 /* Device appears to be operable, complete initialization */
43586- pcu->device_no = atomic_inc_return(&device_no);
43587+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43588
43589 /*
43590 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43591diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43592index d02e1bd..d719719 100644
43593--- a/drivers/input/mouse/psmouse.h
43594+++ b/drivers/input/mouse/psmouse.h
43595@@ -124,7 +124,7 @@ struct psmouse_attribute {
43596 ssize_t (*set)(struct psmouse *psmouse, void *data,
43597 const char *buf, size_t count);
43598 bool protect;
43599-};
43600+} __do_const;
43601 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43602
43603 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43604diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43605index b604564..3f14ae4 100644
43606--- a/drivers/input/mousedev.c
43607+++ b/drivers/input/mousedev.c
43608@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43609
43610 spin_unlock_irq(&client->packet_lock);
43611
43612- if (copy_to_user(buffer, data, count))
43613+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43614 return -EFAULT;
43615
43616 return count;
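
mousedev_read() gains a clamp on the caller-controlled count before
copy_to_user(), closing a potential over-read of whatever sits on the kernel
stack next to the packet buffer. The same guard in standalone form, with
PACKET_MAX standing in for sizeof(data) and memcpy() for copy_to_user():

    #include <string.h>

    enum { PACKET_MAX = 32 };   /* size of the kernel-side scratch buffer */

    /* returns bytes copied, or -1 when the request exceeds the buffer
     * (which would otherwise have leaked adjacent stack memory) */
    static long read_packet_sketch(char *dst, const char src[PACKET_MAX],
                                   unsigned long count)
    {
            if (count > PACKET_MAX)
                    return -1;           /* -EFAULT in the patch */
            memcpy(dst, src, count);     /* stands in for copy_to_user() */
            return (long)count;
    }
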
43617diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43618index a05a517..323a2fd 100644
43619--- a/drivers/input/serio/serio.c
43620+++ b/drivers/input/serio/serio.c
43621@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43622 */
43623 static void serio_init_port(struct serio *serio)
43624 {
43625- static atomic_t serio_no = ATOMIC_INIT(-1);
43626+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43627
43628 __module_get(THIS_MODULE);
43629
43630@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43631 mutex_init(&serio->drv_mutex);
43632 device_initialize(&serio->dev);
43633 dev_set_name(&serio->dev, "serio%lu",
43634- (unsigned long)atomic_inc_return(&serio_no));
43635+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43636 serio->dev.bus = &serio_bus;
43637 serio->dev.release = serio_release_port;
43638 serio->dev.groups = serio_device_attr_groups;
43639diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43640index 71ef5d6..93380a9 100644
43641--- a/drivers/input/serio/serio_raw.c
43642+++ b/drivers/input/serio/serio_raw.c
43643@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43644
43645 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43646 {
43647- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43648+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43649 struct serio_raw *serio_raw;
43650 int err;
43651
43652@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43653 }
43654
43655 snprintf(serio_raw->name, sizeof(serio_raw->name),
43656- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43657+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43658 kref_init(&serio_raw->kref);
43659 INIT_LIST_HEAD(&serio_raw->client_list);
43660 init_waitqueue_head(&serio_raw->wait);
43661diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
43662index 92e2243..8fd9092 100644
43663--- a/drivers/input/touchscreen/htcpen.c
43664+++ b/drivers/input/touchscreen/htcpen.c
43665@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
43666 }
43667 };
43668
43669-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
43670+static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
43671 {
43672 .ident = "Shift",
43673 .matches = {
43674diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43675index 48882c1..93e0987 100644
43676--- a/drivers/iommu/amd_iommu.c
43677+++ b/drivers/iommu/amd_iommu.c
43678@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43679
43680 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43681 {
43682+ phys_addr_t physaddr;
43683 WARN_ON(address & 0x7ULL);
43684
43685 memset(cmd, 0, sizeof(*cmd));
43686- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43687- cmd->data[1] = upper_32_bits(__pa(address));
43688+
43689+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43690+ if (object_starts_on_stack((void *)address)) {
43691+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43692+ physaddr = __pa((u64)adjbuf);
43693+ } else
43694+#endif
43695+ physaddr = __pa(address);
43696+
43697+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43698+ cmd->data[1] = upper_32_bits(physaddr);
43699 cmd->data[2] = 1;
43700 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43701 }
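
The amd_iommu change accounts for GRKERNSEC_KSTACKOVERFLOW, under which
kernel stacks are vmalloc-backed: __pa() is only valid on direct-mapped
addresses, so a completion-wait semaphore living on the stack is first
rebased onto the task's linear-mapped stack alias (current->lowmem_stack)
before its physical address is handed to the IOMMU. A sketch of just the
address arithmetic, with pa_sketch() standing in for __pa():

    /* stand-in for __pa(): only meaningful for direct-mapped addresses */
    static unsigned long pa_sketch(void *p)
    {
            return (unsigned long)p;   /* identity, for illustration */
    }

    /* rebase a pointer into a vmalloc-backed stack onto its lowmem alias,
     * mirroring the current->stack / current->lowmem_stack arithmetic */
    static unsigned long stack_object_pa(void *addr, void *vmap_stack,
                                         void *lowmem_alias)
    {
            void *adj = (char *)lowmem_alias +
                        ((char *)addr - (char *)vmap_stack);

            return pa_sketch(adj);
    }
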
43702diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43703index 72e683d..c9db262 100644
43704--- a/drivers/iommu/iommu.c
43705+++ b/drivers/iommu/iommu.c
43706@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43707 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43708 {
43709 int err;
43710- struct notifier_block *nb;
43711+ notifier_block_no_const *nb;
43712 struct iommu_callback_data cb = {
43713 .ops = ops,
43714 };
43715diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43716index 390079e..1da9d6c 100644
43717--- a/drivers/iommu/irq_remapping.c
43718+++ b/drivers/iommu/irq_remapping.c
43719@@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43720 void panic_if_irq_remap(const char *msg)
43721 {
43722 if (irq_remapping_enabled)
43723- panic(msg);
43724+ panic("%s", msg);
43725 }
43726
43727 static void ir_ack_apic_edge(struct irq_data *data)
43728@@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43729
43730 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43731 {
43732- chip->irq_print_chip = ir_print_prefix;
43733- chip->irq_ack = ir_ack_apic_edge;
43734- chip->irq_eoi = ir_ack_apic_level;
43735- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43736+ pax_open_kernel();
43737+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43738+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43739+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43740+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43741+ pax_close_kernel();
43742 }
43743
43744 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
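
Two separate fixes sit in irq_remapping.c. First, panic(msg) becomes
panic("%s", msg), so a caller-supplied string can never be parsed as a format
string. Second, irq_remap_modify_chip_defaults() brackets its writes with
pax_open_kernel()/pax_close_kernel(): with the constify/KERNEXEC hardening in
this patch the irq_chip tables are read-only, and the few legitimate writers
must lift protection and cast the const away. The write pattern, sketched
with stand-in protection toggles:

    struct chip_sketch {
            void (*ack)(unsigned int irq);
    };

    static void open_ro_sketch(void)  { /* e.g. clear CR0.WP on x86 */ }
    static void close_ro_sketch(void) { /* restore CR0.WP */ }

    /* write a handler into a normally read-only ops structure: lift
     * protection, poke through a const-stripping cast, re-protect */
    static void set_ack_sketch(const struct chip_sketch *chip,
                               void (*ack)(unsigned int))
    {
            open_ro_sketch();
            *(void **)&chip->ack = (void *)ack;
            close_ro_sketch();
    }
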
43745diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43746index 471e1cd..b53b870 100644
43747--- a/drivers/irqchip/irq-gic.c
43748+++ b/drivers/irqchip/irq-gic.c
43749@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43750 * Supported arch specific GIC irq extension.
43751 * Default make them NULL.
43752 */
43753-struct irq_chip gic_arch_extn = {
43754+irq_chip_no_const gic_arch_extn = {
43755 .irq_eoi = NULL,
43756 .irq_mask = NULL,
43757 .irq_unmask = NULL,
43758@@ -318,7 +318,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43759 chained_irq_exit(chip, desc);
43760 }
43761
43762-static struct irq_chip gic_chip = {
43763+static irq_chip_no_const gic_chip __read_only = {
43764 .name = "GIC",
43765 .irq_mask = gic_mask_irq,
43766 .irq_unmask = gic_unmask_irq,
43767diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43768index 9a0767b..5e5f86f 100644
43769--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43770+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43771@@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43772 struct intc_irqpin_iomem *i;
43773 struct resource *io[INTC_IRQPIN_REG_NR];
43774 struct resource *irq;
43775- struct irq_chip *irq_chip;
43776+ irq_chip_no_const *irq_chip;
43777 void (*enable_fn)(struct irq_data *d);
43778 void (*disable_fn)(struct irq_data *d);
43779 const char *name = dev_name(dev);
43780diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43781index 384e6ed..7a771b2 100644
43782--- a/drivers/irqchip/irq-renesas-irqc.c
43783+++ b/drivers/irqchip/irq-renesas-irqc.c
43784@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43785 struct irqc_priv *p;
43786 struct resource *io;
43787 struct resource *irq;
43788- struct irq_chip *irq_chip;
43789+ irq_chip_no_const *irq_chip;
43790 const char *name = dev_name(&pdev->dev);
43791 int ret;
43792 int k;
43793diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43794index 6a2df32..dc962f1 100644
43795--- a/drivers/isdn/capi/capi.c
43796+++ b/drivers/isdn/capi/capi.c
43797@@ -81,8 +81,8 @@ struct capiminor {
43798
43799 struct capi20_appl *ap;
43800 u32 ncci;
43801- atomic_t datahandle;
43802- atomic_t msgid;
43803+ atomic_unchecked_t datahandle;
43804+ atomic_unchecked_t msgid;
43805
43806 struct tty_port port;
43807 int ttyinstop;
43808@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43809 capimsg_setu16(s, 2, mp->ap->applid);
43810 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43811 capimsg_setu8 (s, 5, CAPI_RESP);
43812- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43813+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43814 capimsg_setu32(s, 8, mp->ncci);
43815 capimsg_setu16(s, 12, datahandle);
43816 }
43817@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43818 mp->outbytes -= len;
43819 spin_unlock_bh(&mp->outlock);
43820
43821- datahandle = atomic_inc_return(&mp->datahandle);
43822+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43823 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43824 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43825 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43826 capimsg_setu16(skb->data, 2, mp->ap->applid);
43827 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43828 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43829- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43830+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43831 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43832 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43833 capimsg_setu16(skb->data, 16, len); /* Data length */
43834diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43835index aecec6d..11e13c5 100644
43836--- a/drivers/isdn/gigaset/bas-gigaset.c
43837+++ b/drivers/isdn/gigaset/bas-gigaset.c
43838@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43839
43840
43841 static const struct gigaset_ops gigops = {
43842- gigaset_write_cmd,
43843- gigaset_write_room,
43844- gigaset_chars_in_buffer,
43845- gigaset_brkchars,
43846- gigaset_init_bchannel,
43847- gigaset_close_bchannel,
43848- gigaset_initbcshw,
43849- gigaset_freebcshw,
43850- gigaset_reinitbcshw,
43851- gigaset_initcshw,
43852- gigaset_freecshw,
43853- gigaset_set_modem_ctrl,
43854- gigaset_baud_rate,
43855- gigaset_set_line_ctrl,
43856- gigaset_isoc_send_skb,
43857- gigaset_isoc_input,
43858+ .write_cmd = gigaset_write_cmd,
43859+ .write_room = gigaset_write_room,
43860+ .chars_in_buffer = gigaset_chars_in_buffer,
43861+ .brkchars = gigaset_brkchars,
43862+ .init_bchannel = gigaset_init_bchannel,
43863+ .close_bchannel = gigaset_close_bchannel,
43864+ .initbcshw = gigaset_initbcshw,
43865+ .freebcshw = gigaset_freebcshw,
43866+ .reinitbcshw = gigaset_reinitbcshw,
43867+ .initcshw = gigaset_initcshw,
43868+ .freecshw = gigaset_freecshw,
43869+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43870+ .baud_rate = gigaset_baud_rate,
43871+ .set_line_ctrl = gigaset_set_line_ctrl,
43872+ .send_skb = gigaset_isoc_send_skb,
43873+ .handle_input = gigaset_isoc_input,
43874 };
43875
43876 /* bas_gigaset_init
43877diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43878index 600c79b..3752bab 100644
43879--- a/drivers/isdn/gigaset/interface.c
43880+++ b/drivers/isdn/gigaset/interface.c
43881@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43882 }
43883 tty->driver_data = cs;
43884
43885- ++cs->port.count;
43886+ atomic_inc(&cs->port.count);
43887
43888- if (cs->port.count == 1) {
43889+ if (atomic_read(&cs->port.count) == 1) {
43890 tty_port_tty_set(&cs->port, tty);
43891 cs->port.low_latency = 1;
43892 }
43893@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43894
43895 if (!cs->connected)
43896 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43897- else if (!cs->port.count)
43898+ else if (!atomic_read(&cs->port.count))
43899 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43900- else if (!--cs->port.count)
43901+ else if (!atomic_dec_return(&cs->port.count))
43902 tty_port_tty_set(&cs->port, NULL);
43903
43904 mutex_unlock(&cs->mutex);
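
The interface.c hunks belong to a tree-wide change in this patch that turns
struct tty_port's open count into an atomic_t, so the open/close bookkeeping
cannot be corrupted by racing increments and decrements. The first-open /
last-close logic in miniature, using C11 atomics in place of the kernel's
atomic_t (an illustration of the pattern, not the tty layer itself):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;

    static void port_open_sketch(void)
    {
            if (atomic_fetch_add(&port_count, 1) + 1 == 1)
                    puts("first open: attach tty");   /* as in if_open() */
    }

    static void port_close_sketch(void)
    {
            if (atomic_fetch_sub(&port_count, 1) - 1 == 0)
                    puts("last close: detach tty");   /* as in if_close() */
    }

    int main(void)
    {
            port_open_sketch();
            port_close_sketch();
            return 0;
    }
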
43905diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43906index 8c91fd5..14f13ce 100644
43907--- a/drivers/isdn/gigaset/ser-gigaset.c
43908+++ b/drivers/isdn/gigaset/ser-gigaset.c
43909@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43910 }
43911
43912 static const struct gigaset_ops ops = {
43913- gigaset_write_cmd,
43914- gigaset_write_room,
43915- gigaset_chars_in_buffer,
43916- gigaset_brkchars,
43917- gigaset_init_bchannel,
43918- gigaset_close_bchannel,
43919- gigaset_initbcshw,
43920- gigaset_freebcshw,
43921- gigaset_reinitbcshw,
43922- gigaset_initcshw,
43923- gigaset_freecshw,
43924- gigaset_set_modem_ctrl,
43925- gigaset_baud_rate,
43926- gigaset_set_line_ctrl,
43927- gigaset_m10x_send_skb, /* asyncdata.c */
43928- gigaset_m10x_input, /* asyncdata.c */
43929+ .write_cmd = gigaset_write_cmd,
43930+ .write_room = gigaset_write_room,
43931+ .chars_in_buffer = gigaset_chars_in_buffer,
43932+ .brkchars = gigaset_brkchars,
43933+ .init_bchannel = gigaset_init_bchannel,
43934+ .close_bchannel = gigaset_close_bchannel,
43935+ .initbcshw = gigaset_initbcshw,
43936+ .freebcshw = gigaset_freebcshw,
43937+ .reinitbcshw = gigaset_reinitbcshw,
43938+ .initcshw = gigaset_initcshw,
43939+ .freecshw = gigaset_freecshw,
43940+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43941+ .baud_rate = gigaset_baud_rate,
43942+ .set_line_ctrl = gigaset_set_line_ctrl,
43943+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43944+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43945 };
43946
43947
43948diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43949index 5f306e2..5342f88 100644
43950--- a/drivers/isdn/gigaset/usb-gigaset.c
43951+++ b/drivers/isdn/gigaset/usb-gigaset.c
43952@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43953 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43954 memcpy(cs->hw.usb->bchars, buf, 6);
43955 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43956- 0, 0, &buf, 6, 2000);
43957+ 0, 0, buf, 6, 2000);
43958 }
43959
43960 static void gigaset_freebcshw(struct bc_state *bcs)
43961@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43962 }
43963
43964 static const struct gigaset_ops ops = {
43965- gigaset_write_cmd,
43966- gigaset_write_room,
43967- gigaset_chars_in_buffer,
43968- gigaset_brkchars,
43969- gigaset_init_bchannel,
43970- gigaset_close_bchannel,
43971- gigaset_initbcshw,
43972- gigaset_freebcshw,
43973- gigaset_reinitbcshw,
43974- gigaset_initcshw,
43975- gigaset_freecshw,
43976- gigaset_set_modem_ctrl,
43977- gigaset_baud_rate,
43978- gigaset_set_line_ctrl,
43979- gigaset_m10x_send_skb,
43980- gigaset_m10x_input,
43981+ .write_cmd = gigaset_write_cmd,
43982+ .write_room = gigaset_write_room,
43983+ .chars_in_buffer = gigaset_chars_in_buffer,
43984+ .brkchars = gigaset_brkchars,
43985+ .init_bchannel = gigaset_init_bchannel,
43986+ .close_bchannel = gigaset_close_bchannel,
43987+ .initbcshw = gigaset_initbcshw,
43988+ .freebcshw = gigaset_freebcshw,
43989+ .reinitbcshw = gigaset_reinitbcshw,
43990+ .initcshw = gigaset_initcshw,
43991+ .freecshw = gigaset_freecshw,
43992+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43993+ .baud_rate = gigaset_baud_rate,
43994+ .set_line_ctrl = gigaset_set_line_ctrl,
43995+ .send_skb = gigaset_m10x_send_skb,
43996+ .handle_input = gigaset_m10x_input,
43997 };
43998
43999 /*
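
Note the one-character fix in gigaset_brkchars(): a parameter declared as
"const unsigned char buf[6]" is really a pointer, so passing &buf handed
usb_control_msg() the address of the pointer variable instead of the six
break characters. That one is a plain bug fix, independent of the hardening
changes. Demonstrated standalone:

    #include <stdio.h>

    static void show(const void *p) { printf("%p\n", p); }

    static void send_brkchars_sketch(const unsigned char buf[6])
    {
            show(&buf);   /* wrong: where the pointer variable lives */
            show(buf);    /* right: where the six bytes live */
    }

    int main(void)
    {
            const unsigned char chars[6] = { 0 };
            send_brkchars_sketch(chars);
            return 0;
    }
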
44000diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
44001index 4d9b195..455075c 100644
44002--- a/drivers/isdn/hardware/avm/b1.c
44003+++ b/drivers/isdn/hardware/avm/b1.c
44004@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
44005 }
44006 if (left) {
44007 if (t4file->user) {
44008- if (copy_from_user(buf, dp, left))
44009+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44010 return -EFAULT;
44011 } else {
44012 memcpy(buf, dp, left);
44013@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
44014 }
44015 if (left) {
44016 if (config->user) {
44017- if (copy_from_user(buf, dp, left))
44018+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44019 return -EFAULT;
44020 } else {
44021 memcpy(buf, dp, left);
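
b1.c gains the mirror image of the mousedev check: a length derived from a
user-supplied firmware image is clamped against the fixed on-stack staging
buffer before copy_from_user(), turning a potential kernel stack overflow
into -EFAULT. The guard in isolation, with memcpy() standing in for
copy_from_user() and FW_CHUNK for sizeof buf:

    #include <string.h>

    enum { FW_CHUNK = 256 };   /* hypothetical on-stack staging buffer size */

    static int load_chunk_sketch(unsigned char buf[FW_CHUNK],
                                 const unsigned char *src, unsigned long left)
    {
            if (left > FW_CHUNK)     /* would smash the stack buffer */
                    return -1;       /* -EFAULT in the patch */
            memcpy(buf, src, left);  /* stands in for copy_from_user() */
            return 0;
    }
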
44022diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
44023index 9b856e1..fa03c92 100644
44024--- a/drivers/isdn/i4l/isdn_common.c
44025+++ b/drivers/isdn/i4l/isdn_common.c
44026@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
44027 } else
44028 return -EINVAL;
44029 case IIOCDBGVAR:
44030+ if (!capable(CAP_SYS_RAWIO))
44031+ return -EPERM;
44032 if (arg) {
44033 if (copy_to_user(argp, &dev, sizeof(ulong)))
44034 return -EFAULT;
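
IIOCDBGVAR copies the kernel address of the global isdn dev structure out to
user space, handing an unprivileged caller the location of a kernel object,
so the patch gates the ioctl behind capable(CAP_SYS_RAWIO). The pattern
reduced to a sketch, with a flag standing in for the capability check:

    static int caller_is_rawio;   /* stands in for capable(CAP_SYS_RAWIO) */

    /* an ioctl that discloses a kernel pointer is an address-leak oracle;
     * check privilege before the copy-out */
    static long dbgvar_ioctl_sketch(unsigned long *uptr, unsigned long kaddr)
    {
            if (!caller_is_rawio)
                    return -1;    /* -EPERM in the patch */
            *uptr = kaddr;        /* stands in for copy_to_user() */
            return 0;
    }
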
44035diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
44036index 91d5730..336523e 100644
44037--- a/drivers/isdn/i4l/isdn_concap.c
44038+++ b/drivers/isdn/i4l/isdn_concap.c
44039@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
44040 }
44041
44042 struct concap_device_ops isdn_concap_reliable_dl_dops = {
44043- &isdn_concap_dl_data_req,
44044- &isdn_concap_dl_connect_req,
44045- &isdn_concap_dl_disconn_req
44046+ .data_req = &isdn_concap_dl_data_req,
44047+ .connect_req = &isdn_concap_dl_connect_req,
44048+ .disconn_req = &isdn_concap_dl_disconn_req
44049 };
44050
44051 /* The following should better go into a dedicated source file such that
44052diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
44053index bc91261..2ef7e36 100644
44054--- a/drivers/isdn/i4l/isdn_tty.c
44055+++ b/drivers/isdn/i4l/isdn_tty.c
44056@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
44057
44058 #ifdef ISDN_DEBUG_MODEM_OPEN
44059 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
44060- port->count);
44061+ atomic_read(&port->count));
44062 #endif
44063- port->count++;
44064+ atomic_inc(&port->count);
44065 port->tty = tty;
44066 /*
44067 * Start up serial port
44068@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44069 #endif
44070 return;
44071 }
44072- if ((tty->count == 1) && (port->count != 1)) {
44073+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
44074 /*
44075 * Uh, oh. tty->count is 1, which means that the tty
44076 * structure will be freed. Info->count should always
44077@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44078 * serial port won't be shutdown.
44079 */
44080 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
44081- "info->count is %d\n", port->count);
44082- port->count = 1;
44083+ "info->count is %d\n", atomic_read(&port->count));
44084+ atomic_set(&port->count, 1);
44085 }
44086- if (--port->count < 0) {
44087+ if (atomic_dec_return(&port->count) < 0) {
44088 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
44089- info->line, port->count);
44090- port->count = 0;
44091+ info->line, atomic_read(&port->count));
44092+ atomic_set(&port->count, 0);
44093 }
44094- if (port->count) {
44095+ if (atomic_read(&port->count)) {
44096 #ifdef ISDN_DEBUG_MODEM_OPEN
44097 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
44098 #endif
44099@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
44100 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
44101 return;
44102 isdn_tty_shutdown(info);
44103- port->count = 0;
44104+ atomic_set(&port->count, 0);
44105 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44106 port->tty = NULL;
44107 wake_up_interruptible(&port->open_wait);
44108@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
44109 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
44110 modem_info *info = &dev->mdm.info[i];
44111
44112- if (info->port.count == 0)
44113+ if (atomic_read(&info->port.count) == 0)
44114 continue;
44115 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
44116 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
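The isdn_tty hunks convert the plain-int port->count into an atomic counter so concurrent open/close paths cannot lose an increment or decrement in a read-modify-write race. A userspace analog using C11 atomics, with atomic_int standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void port_open(void)
{
	atomic_fetch_add(&port_count, 1);	/* was: port->count++ */
}

static void port_close(void)
{
	/* atomic_dec_return() equivalent: the new value is old - 1. */
	if (atomic_fetch_sub(&port_count, 1) - 1 < 0) {
		fprintf(stderr, "bad port count\n");
		atomic_store(&port_count, 0);
	}
}

int main(void)
{
	port_open();
	port_close();
	printf("count = %d\n", atomic_load(&port_count));
	return 0;
}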
44117diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
44118index e2d4e58..40cd045 100644
44119--- a/drivers/isdn/i4l/isdn_x25iface.c
44120+++ b/drivers/isdn/i4l/isdn_x25iface.c
44121@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
44122
44123
44124 static struct concap_proto_ops ix25_pops = {
44125- &isdn_x25iface_proto_new,
44126- &isdn_x25iface_proto_del,
44127- &isdn_x25iface_proto_restart,
44128- &isdn_x25iface_proto_close,
44129- &isdn_x25iface_xmit,
44130- &isdn_x25iface_receive,
44131- &isdn_x25iface_connect_ind,
44132- &isdn_x25iface_disconn_ind
44133+ .proto_new = &isdn_x25iface_proto_new,
44134+ .proto_del = &isdn_x25iface_proto_del,
44135+ .restart = &isdn_x25iface_proto_restart,
44136+ .close = &isdn_x25iface_proto_close,
44137+ .encap_and_xmit = &isdn_x25iface_xmit,
44138+ .data_ind = &isdn_x25iface_receive,
44139+ .connect_ind = &isdn_x25iface_connect_ind,
44140+ .disconn_ind = &isdn_x25iface_disconn_ind
44141 };
44142
44143 /* error message helper function */
44144diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44145index 358a574..b4987ea 100644
44146--- a/drivers/isdn/icn/icn.c
44147+++ b/drivers/isdn/icn/icn.c
44148@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
44149 if (count > len)
44150 count = len;
44151 if (user) {
44152- if (copy_from_user(msg, buf, count))
44153+ if (count > sizeof msg || copy_from_user(msg, buf, count))
44154 return -EFAULT;
44155 } else
44156 memcpy(msg, buf, count);
44157diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
44158index 87f7dff..7300125 100644
44159--- a/drivers/isdn/mISDN/dsp_cmx.c
44160+++ b/drivers/isdn/mISDN/dsp_cmx.c
44161@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
44162 static u16 dsp_count; /* last sample count */
44163 static int dsp_count_valid; /* if we have last sample count */
44164
44165-void
44166+void __intentional_overflow(-1)
44167 dsp_cmx_send(void *arg)
44168 {
44169 struct dsp_conf *conf;
44170diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44171index 0f9ed1e..2715d6f 100644
44172--- a/drivers/leds/leds-clevo-mail.c
44173+++ b/drivers/leds/leds-clevo-mail.c
44174@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44175 * detected as working, but in reality it is not) as low as
44176 * possible.
44177 */
44178-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44179+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44180 {
44181 .callback = clevo_mail_led_dmi_callback,
44182 .ident = "Clevo D410J",
44183diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44184index 046cb70..6b20d39 100644
44185--- a/drivers/leds/leds-ss4200.c
44186+++ b/drivers/leds/leds-ss4200.c
44187@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44188 * detected as working, but in reality it is not) as low as
44189 * possible.
44190 */
44191-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44192+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44193 {
44194 .callback = ss4200_led_dmi_callback,
44195 .ident = "Intel SS4200-E",
44196diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44197index 7dc93aa..8272379 100644
44198--- a/drivers/lguest/core.c
44199+++ b/drivers/lguest/core.c
44200@@ -96,9 +96,17 @@ static __init int map_switcher(void)
44201 * The end address needs +1 because __get_vm_area allocates an
44202 * extra guard page, so we need space for that.
44203 */
44204+
44205+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44206+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44207+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44208+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44209+#else
44210 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44211 VM_ALLOC, switcher_addr, switcher_addr
44212 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44213+#endif
44214+
44215 if (!switcher_vma) {
44216 err = -ENOMEM;
44217 printk("lguest: could not map switcher pages high\n");
44218@@ -121,7 +129,7 @@ static __init int map_switcher(void)
44219 * Now the Switcher is mapped at the right address, we can't fail!
44220 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44221 */
44222- memcpy(switcher_vma->addr, start_switcher_text,
44223+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44224 end_switcher_text - start_switcher_text);
44225
44226 printk(KERN_INFO "lguest: mapped switcher at %p\n",
44227diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44228index e3abebc9..6a35328 100644
44229--- a/drivers/lguest/page_tables.c
44230+++ b/drivers/lguest/page_tables.c
44231@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44232 /*:*/
44233
44234 #ifdef CONFIG_X86_PAE
44235-static void release_pmd(pmd_t *spmd)
44236+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44237 {
44238 /* If the entry's not present, there's nothing to release. */
44239 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44240diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44241index 30f2aef..391c748 100644
44242--- a/drivers/lguest/x86/core.c
44243+++ b/drivers/lguest/x86/core.c
44244@@ -60,7 +60,7 @@ static struct {
44245 /* Offset from where switcher.S was compiled to where we've copied it */
44246 static unsigned long switcher_offset(void)
44247 {
44248- return switcher_addr - (unsigned long)start_switcher_text;
44249+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44250 }
44251
44252 /* This cpu's struct lguest_pages (after the Switcher text page) */
44253@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44254 * These copies are pretty cheap, so we do them unconditionally: */
44255 /* Save the current Host top-level page directory.
44256 */
44257+
44258+#ifdef CONFIG_PAX_PER_CPU_PGD
44259+ pages->state.host_cr3 = read_cr3();
44260+#else
44261 pages->state.host_cr3 = __pa(current->mm->pgd);
44262+#endif
44263+
44264 /*
44265 * Set up the Guest's page tables to see this CPU's pages (and no
44266 * other CPU's pages).
44267@@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
44268 * compiled-in switcher code and the high-mapped copy we just made.
44269 */
44270 for (i = 0; i < IDT_ENTRIES; i++)
44271- default_idt_entries[i] += switcher_offset();
44272+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44273
44274 /*
44275 * Set up the Switcher's per-cpu areas.
44276@@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
44277 * it will be undisturbed when we switch. To change %cs and jump we
44278 * need this structure to feed to Intel's "lcall" instruction.
44279 */
44280- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44281+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44282 lguest_entry.segment = LGUEST_CS;
44283
44284 /*
44285diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44286index 40634b0..4f5855e 100644
44287--- a/drivers/lguest/x86/switcher_32.S
44288+++ b/drivers/lguest/x86/switcher_32.S
44289@@ -87,6 +87,7 @@
44290 #include <asm/page.h>
44291 #include <asm/segment.h>
44292 #include <asm/lguest.h>
44293+#include <asm/processor-flags.h>
44294
44295 // We mark the start of the code to copy
44296 // It's placed in .text tho it's never run here
44297@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44298 // Changes type when we load it: damn Intel!
44299 // For after we switch over our page tables
44300 // That entry will be read-only: we'd crash.
44301+
44302+#ifdef CONFIG_PAX_KERNEXEC
44303+ mov %cr0, %edx
44304+ xor $X86_CR0_WP, %edx
44305+ mov %edx, %cr0
44306+#endif
44307+
44308 movl $(GDT_ENTRY_TSS*8), %edx
44309 ltr %dx
44310
44311@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44312 // Let's clear it again for our return.
44313 // The GDT descriptor of the Host
44314 // Points to the table after two "size" bytes
44315- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44316+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44317 // Clear "used" from type field (byte 5, bit 2)
44318- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44319+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44320+
44321+#ifdef CONFIG_PAX_KERNEXEC
44322+ mov %cr0, %eax
44323+ xor $X86_CR0_WP, %eax
44324+ mov %eax, %cr0
44325+#endif
44326
44327 // Once our page table's switched, the Guest is live!
44328 // The Host fades as we run this final step.
44329@@ -295,13 +309,12 @@ deliver_to_host:
44330 // I consulted gcc, and it gave
44331 // These instructions, which I gladly credit:
44332 leal (%edx,%ebx,8), %eax
44333- movzwl (%eax),%edx
44334- movl 4(%eax), %eax
44335- xorw %ax, %ax
44336- orl %eax, %edx
44337+ movl 4(%eax), %edx
44338+ movw (%eax), %dx
44339 // Now the address of the handler's in %edx
44340 // We call it now: its "iret" drops us home.
44341- jmp *%edx
44342+ ljmp $__KERNEL_CS, $1f
44343+1: jmp *%edx
44344
44345 // Every interrupt can come to us here
44346 // But we must truly tell each apart.
44347diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44348index a08e3ee..df8ade2 100644
44349--- a/drivers/md/bcache/closure.h
44350+++ b/drivers/md/bcache/closure.h
44351@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44352 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44353 struct workqueue_struct *wq)
44354 {
44355- BUG_ON(object_is_on_stack(cl));
44356+ BUG_ON(object_starts_on_stack(cl));
44357 closure_set_ip(cl);
44358 cl->fn = fn;
44359 cl->wq = wq;
44360diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44361index 3a57679..c58cdaf 100644
44362--- a/drivers/md/bitmap.c
44363+++ b/drivers/md/bitmap.c
44364@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44365 chunk_kb ? "KB" : "B");
44366 if (bitmap->storage.file) {
44367 seq_printf(seq, ", file: ");
44368- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44369+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44370 }
44371
44372 seq_printf(seq, "\n");
44373diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44374index c8a18e4..0ab43e5 100644
44375--- a/drivers/md/dm-ioctl.c
44376+++ b/drivers/md/dm-ioctl.c
44377@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44378 cmd == DM_LIST_VERSIONS_CMD)
44379 return 0;
44380
44381- if ((cmd == DM_DEV_CREATE_CMD)) {
44382+ if (cmd == DM_DEV_CREATE_CMD) {
44383 if (!*param->name) {
44384 DMWARN("name not supplied when creating device");
44385 return -EINVAL;
44386diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44387index 089d627..ef7352e 100644
44388--- a/drivers/md/dm-raid1.c
44389+++ b/drivers/md/dm-raid1.c
44390@@ -40,7 +40,7 @@ enum dm_raid1_error {
44391
44392 struct mirror {
44393 struct mirror_set *ms;
44394- atomic_t error_count;
44395+ atomic_unchecked_t error_count;
44396 unsigned long error_type;
44397 struct dm_dev *dev;
44398 sector_t offset;
44399@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44400 struct mirror *m;
44401
44402 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44403- if (!atomic_read(&m->error_count))
44404+ if (!atomic_read_unchecked(&m->error_count))
44405 return m;
44406
44407 return NULL;
44408@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44409 * simple way to tell if a device has encountered
44410 * errors.
44411 */
44412- atomic_inc(&m->error_count);
44413+ atomic_inc_unchecked(&m->error_count);
44414
44415 if (test_and_set_bit(error_type, &m->error_type))
44416 return;
44417@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44418 struct mirror *m = get_default_mirror(ms);
44419
44420 do {
44421- if (likely(!atomic_read(&m->error_count)))
44422+ if (likely(!atomic_read_unchecked(&m->error_count)))
44423 return m;
44424
44425 if (m-- == ms->mirror)
44426@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44427 {
44428 struct mirror *default_mirror = get_default_mirror(m->ms);
44429
44430- return !atomic_read(&default_mirror->error_count);
44431+ return !atomic_read_unchecked(&default_mirror->error_count);
44432 }
44433
44434 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44435@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44436 */
44437 if (likely(region_in_sync(ms, region, 1)))
44438 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44439- else if (m && atomic_read(&m->error_count))
44440+ else if (m && atomic_read_unchecked(&m->error_count))
44441 m = NULL;
44442
44443 if (likely(m))
44444@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44445 }
44446
44447 ms->mirror[mirror].ms = ms;
44448- atomic_set(&(ms->mirror[mirror].error_count), 0);
44449+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44450 ms->mirror[mirror].error_type = 0;
44451 ms->mirror[mirror].offset = offset;
44452
44453@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44454 */
44455 static char device_status_char(struct mirror *m)
44456 {
44457- if (!atomic_read(&(m->error_count)))
44458+ if (!atomic_read_unchecked(&(m->error_count)))
44459 return 'A';
44460
44461 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
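dm-raid1's error_count is a statistic, not a reference count, which is presumably why it moves to atomic_unchecked_t: under PaX's refcount-overflow protection, plain atomic_t operations trap on overflow, and the *_unchecked variants opt pure counters out of that check. A toy userspace model of the policy split (the real instrumentation lives in arch code; this only mimics the idea):

#include <limits.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_int v; } checked_t;	/* like atomic_t */
typedef struct { atomic_int v; } unchecked_t;	/* like atomic_unchecked_t */

static void checked_inc(checked_t *a)
{
	if (atomic_fetch_add(&a->v, 1) == INT_MAX)
		abort();	/* refcount overflow: trap instead of wrapping */
}

static void unchecked_inc(unchecked_t *a)
{
	atomic_fetch_add(&a->v, 1);	/* statistics may wrap harmlessly */
}

int main(void)
{
	checked_t refs = { 0 };
	unchecked_t stats = { 0 };

	checked_inc(&refs);
	unchecked_inc(&stats);
	return 0;
}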
44462diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44463index f478a4c..4b8e5ef 100644
44464--- a/drivers/md/dm-stats.c
44465+++ b/drivers/md/dm-stats.c
44466@@ -382,7 +382,7 @@ do_sync_free:
44467 synchronize_rcu_expedited();
44468 dm_stat_free(&s->rcu_head);
44469 } else {
44470- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44471+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44472 call_rcu(&s->rcu_head, dm_stat_free);
44473 }
44474 return 0;
44475@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44476 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44477 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44478 ));
44479- ACCESS_ONCE(last->last_sector) = end_sector;
44480- ACCESS_ONCE(last->last_rw) = bi_rw;
44481+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44482+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44483 }
44484
44485 rcu_read_lock();
44486diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44487index f8b37d4..5c5cafd 100644
44488--- a/drivers/md/dm-stripe.c
44489+++ b/drivers/md/dm-stripe.c
44490@@ -21,7 +21,7 @@ struct stripe {
44491 struct dm_dev *dev;
44492 sector_t physical_start;
44493
44494- atomic_t error_count;
44495+ atomic_unchecked_t error_count;
44496 };
44497
44498 struct stripe_c {
44499@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44500 kfree(sc);
44501 return r;
44502 }
44503- atomic_set(&(sc->stripe[i].error_count), 0);
44504+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44505 }
44506
44507 ti->private = sc;
44508@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44509 DMEMIT("%d ", sc->stripes);
44510 for (i = 0; i < sc->stripes; i++) {
44511 DMEMIT("%s ", sc->stripe[i].dev->name);
44512- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44513+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44514 'D' : 'A';
44515 }
44516 buffer[i] = '\0';
44517@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44518 */
44519 for (i = 0; i < sc->stripes; i++)
44520 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44521- atomic_inc(&(sc->stripe[i].error_count));
44522- if (atomic_read(&(sc->stripe[i].error_count)) <
44523+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44524+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44525 DM_IO_ERROR_THRESHOLD)
44526 schedule_work(&sc->trigger_event);
44527 }
44528diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44529index 6554d91..b0221c2 100644
44530--- a/drivers/md/dm-table.c
44531+++ b/drivers/md/dm-table.c
44532@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44533 if (!dev_size)
44534 return 0;
44535
44536- if ((start >= dev_size) || (start + len > dev_size)) {
44537+ if ((start >= dev_size) || (len > dev_size - start)) {
44538 DMWARN("%s: %s too small for target: "
44539 "start=%llu, len=%llu, dev_size=%llu",
44540 dm_device_name(ti->table->md), bdevname(bdev, b),
44541diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44542index 79f6941..b33b4e0 100644
44543--- a/drivers/md/dm-thin-metadata.c
44544+++ b/drivers/md/dm-thin-metadata.c
44545@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44546 {
44547 pmd->info.tm = pmd->tm;
44548 pmd->info.levels = 2;
44549- pmd->info.value_type.context = pmd->data_sm;
44550+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44551 pmd->info.value_type.size = sizeof(__le64);
44552 pmd->info.value_type.inc = data_block_inc;
44553 pmd->info.value_type.dec = data_block_dec;
44554@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44555
44556 pmd->bl_info.tm = pmd->tm;
44557 pmd->bl_info.levels = 1;
44558- pmd->bl_info.value_type.context = pmd->data_sm;
44559+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44560 pmd->bl_info.value_type.size = sizeof(__le64);
44561 pmd->bl_info.value_type.inc = data_block_inc;
44562 pmd->bl_info.value_type.dec = data_block_dec;
44563diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44564index 8001fe9..abdd0d0 100644
44565--- a/drivers/md/dm.c
44566+++ b/drivers/md/dm.c
44567@@ -188,9 +188,9 @@ struct mapped_device {
44568 /*
44569 * Event handling.
44570 */
44571- atomic_t event_nr;
44572+ atomic_unchecked_t event_nr;
44573 wait_queue_head_t eventq;
44574- atomic_t uevent_seq;
44575+ atomic_unchecked_t uevent_seq;
44576 struct list_head uevent_list;
44577 spinlock_t uevent_lock; /* Protect access to uevent_list */
44578
44579@@ -2163,8 +2163,8 @@ static struct mapped_device *alloc_dev(int minor)
44580 spin_lock_init(&md->deferred_lock);
44581 atomic_set(&md->holders, 1);
44582 atomic_set(&md->open_count, 0);
44583- atomic_set(&md->event_nr, 0);
44584- atomic_set(&md->uevent_seq, 0);
44585+ atomic_set_unchecked(&md->event_nr, 0);
44586+ atomic_set_unchecked(&md->uevent_seq, 0);
44587 INIT_LIST_HEAD(&md->uevent_list);
44588 INIT_LIST_HEAD(&md->table_devices);
44589 spin_lock_init(&md->uevent_lock);
44590@@ -2329,7 +2329,7 @@ static void event_callback(void *context)
44591
44592 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44593
44594- atomic_inc(&md->event_nr);
44595+ atomic_inc_unchecked(&md->event_nr);
44596 wake_up(&md->eventq);
44597 }
44598
44599@@ -3175,18 +3175,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44600
44601 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44602 {
44603- return atomic_add_return(1, &md->uevent_seq);
44604+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44605 }
44606
44607 uint32_t dm_get_event_nr(struct mapped_device *md)
44608 {
44609- return atomic_read(&md->event_nr);
44610+ return atomic_read_unchecked(&md->event_nr);
44611 }
44612
44613 int dm_wait_event(struct mapped_device *md, int event_nr)
44614 {
44615 return wait_event_interruptible(md->eventq,
44616- (event_nr != atomic_read(&md->event_nr)));
44617+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44618 }
44619
44620 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44621diff --git a/drivers/md/md.c b/drivers/md/md.c
44622index 717daad..6dd103f 100644
44623--- a/drivers/md/md.c
44624+++ b/drivers/md/md.c
44625@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44626 * start build, activate spare
44627 */
44628 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44629-static atomic_t md_event_count;
44630+static atomic_unchecked_t md_event_count;
44631 void md_new_event(struct mddev *mddev)
44632 {
44633- atomic_inc(&md_event_count);
44634+ atomic_inc_unchecked(&md_event_count);
44635 wake_up(&md_event_waiters);
44636 }
44637 EXPORT_SYMBOL_GPL(md_new_event);
44638@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44639 */
44640 static void md_new_event_inintr(struct mddev *mddev)
44641 {
44642- atomic_inc(&md_event_count);
44643+ atomic_inc_unchecked(&md_event_count);
44644 wake_up(&md_event_waiters);
44645 }
44646
44647@@ -1438,7 +1438,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44648 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44649 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44650 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44651- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44652+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44653
44654 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44655 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44656@@ -1689,7 +1689,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44657 else
44658 sb->resync_offset = cpu_to_le64(0);
44659
44660- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44661+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44662
44663 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44664 sb->size = cpu_to_le64(mddev->dev_sectors);
44665@@ -2560,7 +2560,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
44666 static ssize_t
44667 errors_show(struct md_rdev *rdev, char *page)
44668 {
44669- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44670+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44671 }
44672
44673 static ssize_t
44674@@ -2569,7 +2569,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44675 char *e;
44676 unsigned long n = simple_strtoul(buf, &e, 10);
44677 if (*buf && (*e == 0 || *e == '\n')) {
44678- atomic_set(&rdev->corrected_errors, n);
44679+ atomic_set_unchecked(&rdev->corrected_errors, n);
44680 return len;
44681 }
44682 return -EINVAL;
44683@@ -3005,8 +3005,8 @@ int md_rdev_init(struct md_rdev *rdev)
44684 rdev->sb_loaded = 0;
44685 rdev->bb_page = NULL;
44686 atomic_set(&rdev->nr_pending, 0);
44687- atomic_set(&rdev->read_errors, 0);
44688- atomic_set(&rdev->corrected_errors, 0);
44689+ atomic_set_unchecked(&rdev->read_errors, 0);
44690+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44691
44692 INIT_LIST_HEAD(&rdev->same_set);
44693 init_waitqueue_head(&rdev->blocked_wait);
44694@@ -7079,7 +7079,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44695
44696 spin_unlock(&pers_lock);
44697 seq_printf(seq, "\n");
44698- seq->poll_event = atomic_read(&md_event_count);
44699+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44700 return 0;
44701 }
44702 if (v == (void*)2) {
44703@@ -7182,7 +7182,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44704 return error;
44705
44706 seq = file->private_data;
44707- seq->poll_event = atomic_read(&md_event_count);
44708+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44709 return error;
44710 }
44711
44712@@ -7199,7 +7199,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44713 /* always allow read */
44714 mask = POLLIN | POLLRDNORM;
44715
44716- if (seq->poll_event != atomic_read(&md_event_count))
44717+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44718 mask |= POLLERR | POLLPRI;
44719 return mask;
44720 }
44721@@ -7246,7 +7246,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44722 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44723 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44724 (int)part_stat_read(&disk->part0, sectors[1]) -
44725- atomic_read(&disk->sync_io);
44726+ atomic_read_unchecked(&disk->sync_io);
44727 /* sync IO will cause sync_io to increase before the disk_stats
44728 * as sync_io is counted when a request starts, and
44729 * disk_stats is counted when it completes.
44730diff --git a/drivers/md/md.h b/drivers/md/md.h
44731index 318ca8f..31e4478 100644
44732--- a/drivers/md/md.h
44733+++ b/drivers/md/md.h
44734@@ -94,13 +94,13 @@ struct md_rdev {
44735 * only maintained for arrays that
44736 * support hot removal
44737 */
44738- atomic_t read_errors; /* number of consecutive read errors that
44739+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44740 * we have tried to ignore.
44741 */
44742 struct timespec last_read_error; /* monotonic time since our
44743 * last read error
44744 */
44745- atomic_t corrected_errors; /* number of corrected read errors,
44746+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44747 * for reporting to userspace and storing
44748 * in superblock.
44749 */
44750@@ -476,7 +476,7 @@ extern void mddev_unlock(struct mddev *mddev);
44751
44752 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44753 {
44754- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44755+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44756 }
44757
44758 struct md_personality
44759diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44760index e8a9042..35bd145 100644
44761--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44762+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44763@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44764 * Flick into a mode where all blocks get allocated in the new area.
44765 */
44766 smm->begin = old_len;
44767- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44768+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44769
44770 /*
44771 * Extend.
44772@@ -714,7 +714,7 @@ out:
44773 /*
44774 * Switch back to normal behaviour.
44775 */
44776- memcpy(sm, &ops, sizeof(*sm));
44777+ memcpy((void *)sm, &ops, sizeof(*sm));
44778 return r;
44779 }
44780
44781diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44782index 3e6d115..ffecdeb 100644
44783--- a/drivers/md/persistent-data/dm-space-map.h
44784+++ b/drivers/md/persistent-data/dm-space-map.h
44785@@ -71,6 +71,7 @@ struct dm_space_map {
44786 dm_sm_threshold_fn fn,
44787 void *context);
44788 };
44789+typedef struct dm_space_map __no_const dm_space_map_no_const;
44790
44791 /*----------------------------------------------------------------*/
44792
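The __no_const typedef is the escape hatch for the constify plugin: ops structures become read-only wholesale, but dm_space_map is legitimately rewritten at runtime (the memcpy((void *)sm, ...) calls in sm_metadata_extend() above), so a writable alias of the type is declared for the callers that need it. A sketch of the idea, with the plugin modelled as an explicit const:

struct dm_ops {
	int (*extend)(void *sm, unsigned long extra);
};

typedef struct dm_ops dm_ops_no_const;	/* stays writable */

static int bootstrap_extend(void *sm, unsigned long extra)
{
	(void)sm; (void)extra;
	return 0;
}

static int normal_extend(void *sm, unsigned long extra)
{
	(void)sm; (void)extra;
	return 0;
}

static const struct dm_ops bootstrap_ops = { .extend = bootstrap_extend };
static const struct dm_ops normal_ops = { .extend = normal_extend };

static dm_ops_no_const live_ops;

/* The one legitimate writer swaps the whole table, as
 * sm_metadata_extend() does with its memcpy() above. */
static void enter_bootstrap(void) { live_ops = bootstrap_ops; }
static void leave_bootstrap(void) { live_ops = normal_ops; }

int main(void)
{
	enter_bootstrap();
	leave_bootstrap();
	return live_ops.extend(&live_ops, 0);
}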
44793diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44794index d34e238..34f8d98 100644
44795--- a/drivers/md/raid1.c
44796+++ b/drivers/md/raid1.c
44797@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44798 if (r1_sync_page_io(rdev, sect, s,
44799 bio->bi_io_vec[idx].bv_page,
44800 READ) != 0)
44801- atomic_add(s, &rdev->corrected_errors);
44802+ atomic_add_unchecked(s, &rdev->corrected_errors);
44803 }
44804 sectors -= s;
44805 sect += s;
44806@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44807 !test_bit(Faulty, &rdev->flags)) {
44808 if (r1_sync_page_io(rdev, sect, s,
44809 conf->tmppage, READ)) {
44810- atomic_add(s, &rdev->corrected_errors);
44811+ atomic_add_unchecked(s, &rdev->corrected_errors);
44812 printk(KERN_INFO
44813 "md/raid1:%s: read error corrected "
44814 "(%d sectors at %llu on %s)\n",
44815diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44816index a7196c4..439f012 100644
44817--- a/drivers/md/raid10.c
44818+++ b/drivers/md/raid10.c
44819@@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
44820 /* The write handler will notice the lack of
44821 * R10BIO_Uptodate and record any errors etc
44822 */
44823- atomic_add(r10_bio->sectors,
44824+ atomic_add_unchecked(r10_bio->sectors,
44825 &conf->mirrors[d].rdev->corrected_errors);
44826
44827 /* for reconstruct, we always reschedule after a read.
44828@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44829 {
44830 struct timespec cur_time_mon;
44831 unsigned long hours_since_last;
44832- unsigned int read_errors = atomic_read(&rdev->read_errors);
44833+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44834
44835 ktime_get_ts(&cur_time_mon);
44836
44837@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44838 * overflowing the shift of read_errors by hours_since_last.
44839 */
44840 if (hours_since_last >= 8 * sizeof(read_errors))
44841- atomic_set(&rdev->read_errors, 0);
44842+ atomic_set_unchecked(&rdev->read_errors, 0);
44843 else
44844- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44845+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44846 }
44847
44848 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44849@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44850 return;
44851
44852 check_decay_read_errors(mddev, rdev);
44853- atomic_inc(&rdev->read_errors);
44854- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44855+ atomic_inc_unchecked(&rdev->read_errors);
44856+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44857 char b[BDEVNAME_SIZE];
44858 bdevname(rdev->bdev, b);
44859
44860@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44861 "md/raid10:%s: %s: Raid device exceeded "
44862 "read_error threshold [cur %d:max %d]\n",
44863 mdname(mddev), b,
44864- atomic_read(&rdev->read_errors), max_read_errors);
44865+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44866 printk(KERN_NOTICE
44867 "md/raid10:%s: %s: Failing raid device\n",
44868 mdname(mddev), b);
44869@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44870 sect +
44871 choose_data_offset(r10_bio, rdev)),
44872 bdevname(rdev->bdev, b));
44873- atomic_add(s, &rdev->corrected_errors);
44874+ atomic_add_unchecked(s, &rdev->corrected_errors);
44875 }
44876
44877 rdev_dec_pending(rdev, mddev);
44878diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44879index cd2f96b..3876e63 100644
44880--- a/drivers/md/raid5.c
44881+++ b/drivers/md/raid5.c
44882@@ -947,23 +947,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
44883 struct bio_vec bvl;
44884 struct bvec_iter iter;
44885 struct page *bio_page;
44886- int page_offset;
44887+ s64 page_offset;
44888 struct async_submit_ctl submit;
44889 enum async_tx_flags flags = 0;
44890
44891 if (bio->bi_iter.bi_sector >= sector)
44892- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
44893+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
44894 else
44895- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
44896+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
44897
44898 if (frombio)
44899 flags |= ASYNC_TX_FENCE;
44900 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
44901
44902 bio_for_each_segment(bvl, bio, iter) {
44903- int len = bvl.bv_len;
44904- int clen;
44905- int b_offset = 0;
44906+ s64 len = bvl.bv_len;
44907+ s64 clen;
44908+ s64 b_offset = 0;
44909
44910 if (page_offset < 0) {
44911 b_offset = -page_offset;
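The async_copy_data() hunk widens page_offset and friends from int to s64: the sector delta is multiplied by 512, and with large bios a 32-bit intermediate can overflow before the result is ever used. A minimal model of the widened arithmetic, using int64_t/uint64_t where the kernel has s64/sector_t:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int64_t byte_offset(uint64_t bio_sector, uint64_t sector)
{
	/* Cast before the "* 512" so the multiply happens in 64 bits. */
	if (bio_sector >= sector)
		return (int64_t)(bio_sector - sector) * 512;
	return (int64_t)(sector - bio_sector) * -512;
}

int main(void)
{
	/* A delta of 2^33 sectors would overflow a 32-bit product. */
	printf("%" PRId64 "\n", byte_offset(1ULL << 33, 0));
	return 0;
}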
44912@@ -1727,6 +1727,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44913 return 1;
44914 }
44915
44916+#ifdef CONFIG_GRKERNSEC_HIDESYM
44917+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44918+#endif
44919+
44920 static int grow_stripes(struct r5conf *conf, int num)
44921 {
44922 struct kmem_cache *sc;
44923@@ -1738,7 +1742,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44924 "raid%d-%s", conf->level, mdname(conf->mddev));
44925 else
44926 sprintf(conf->cache_name[0],
44927+#ifdef CONFIG_GRKERNSEC_HIDESYM
44928+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44929+#else
44930 "raid%d-%p", conf->level, conf->mddev);
44931+#endif
44932 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44933
44934 conf->active_name = 0;
44935@@ -2014,21 +2022,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44936 mdname(conf->mddev), STRIPE_SECTORS,
44937 (unsigned long long)s,
44938 bdevname(rdev->bdev, b));
44939- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44940+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44941 clear_bit(R5_ReadError, &sh->dev[i].flags);
44942 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44943 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44944 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44945
44946- if (atomic_read(&rdev->read_errors))
44947- atomic_set(&rdev->read_errors, 0);
44948+ if (atomic_read_unchecked(&rdev->read_errors))
44949+ atomic_set_unchecked(&rdev->read_errors, 0);
44950 } else {
44951 const char *bdn = bdevname(rdev->bdev, b);
44952 int retry = 0;
44953 int set_bad = 0;
44954
44955 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44956- atomic_inc(&rdev->read_errors);
44957+ atomic_inc_unchecked(&rdev->read_errors);
44958 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44959 printk_ratelimited(
44960 KERN_WARNING
44961@@ -2056,7 +2064,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44962 mdname(conf->mddev),
44963 (unsigned long long)s,
44964 bdn);
44965- } else if (atomic_read(&rdev->read_errors)
44966+ } else if (atomic_read_unchecked(&rdev->read_errors)
44967 > conf->max_nr_stripes)
44968 printk(KERN_WARNING
44969 "md/raid:%s: Too many read errors, failing device %s.\n",
44970diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44971index 983db75..ef9248c 100644
44972--- a/drivers/media/dvb-core/dvbdev.c
44973+++ b/drivers/media/dvb-core/dvbdev.c
44974@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44975 const struct dvb_device *template, void *priv, int type)
44976 {
44977 struct dvb_device *dvbdev;
44978- struct file_operations *dvbdevfops;
44979+ file_operations_no_const *dvbdevfops;
44980 struct device *clsdev;
44981 int minor;
44982 int id;
44983diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44984index 6ad22b6..6e90e2a 100644
44985--- a/drivers/media/dvb-frontends/af9033.h
44986+++ b/drivers/media/dvb-frontends/af9033.h
44987@@ -96,6 +96,6 @@ struct af9033_ops {
44988 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44989 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44990 int onoff);
44991-};
44992+} __no_const;
44993
44994 #endif /* AF9033_H */
44995diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44996index 9b6c3bb..baeb5c7 100644
44997--- a/drivers/media/dvb-frontends/dib3000.h
44998+++ b/drivers/media/dvb-frontends/dib3000.h
44999@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
45000 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
45001 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
45002 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
45003-};
45004+} __no_const;
45005
45006 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
45007 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
45008diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
45009index 1fea0e9..321ce8f 100644
45010--- a/drivers/media/dvb-frontends/dib7000p.h
45011+++ b/drivers/media/dvb-frontends/dib7000p.h
45012@@ -64,7 +64,7 @@ struct dib7000p_ops {
45013 int (*get_adc_power)(struct dvb_frontend *fe);
45014 int (*slave_reset)(struct dvb_frontend *fe);
45015 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
45016-};
45017+} __no_const;
45018
45019 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
45020 void *dib7000p_attach(struct dib7000p_ops *ops);
45021diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
45022index 84cc103..5780c54 100644
45023--- a/drivers/media/dvb-frontends/dib8000.h
45024+++ b/drivers/media/dvb-frontends/dib8000.h
45025@@ -61,7 +61,7 @@ struct dib8000_ops {
45026 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
45027 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
45028 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
45029-};
45030+} __no_const;
45031
45032 #if IS_ENABLED(CONFIG_DVB_DIB8000)
45033 void *dib8000_attach(struct dib8000_ops *ops);
45034diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
45035index 860c98fc..497fa25 100644
45036--- a/drivers/media/pci/cx88/cx88-video.c
45037+++ b/drivers/media/pci/cx88/cx88-video.c
45038@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
45039
45040 /* ------------------------------------------------------------------ */
45041
45042-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45043-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45044-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45045+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45046+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45047+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45048
45049 module_param_array(video_nr, int, NULL, 0444);
45050 module_param_array(vbi_nr, int, NULL, 0444);
45051diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
45052index 802642d..5534900 100644
45053--- a/drivers/media/pci/ivtv/ivtv-driver.c
45054+++ b/drivers/media/pci/ivtv/ivtv-driver.c
45055@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
45056 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
45057
45058 /* ivtv instance counter */
45059-static atomic_t ivtv_instance = ATOMIC_INIT(0);
45060+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
45061
45062 /* Parameter declarations */
45063 static int cardtype[IVTV_MAX_CARDS];
45064diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
45065index 570d119..ed25830 100644
45066--- a/drivers/media/pci/solo6x10/solo6x10-core.c
45067+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
45068@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
45069
45070 static int solo_sysfs_init(struct solo_dev *solo_dev)
45071 {
45072- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45073+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45074 struct device *dev = &solo_dev->dev;
45075 const char *driver;
45076 int i;
45077diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
45078index 7ddc767..1c24361 100644
45079--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
45080+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
45081@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
45082
45083 int solo_g723_init(struct solo_dev *solo_dev)
45084 {
45085- static struct snd_device_ops ops = { NULL };
45086+ static struct snd_device_ops ops = { };
45087 struct snd_card *card;
45088 struct snd_kcontrol_new kctl;
45089 char name[32];
45090diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45091index 8c84846..27b4f83 100644
45092--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
45093+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45094@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
45095
45096 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
45097 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
45098- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
45099+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
45100 if (p2m_id < 0)
45101 p2m_id = -p2m_id;
45102 }
45103diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
45104index 1ca54b0..7d7cb9a 100644
45105--- a/drivers/media/pci/solo6x10/solo6x10.h
45106+++ b/drivers/media/pci/solo6x10/solo6x10.h
45107@@ -218,7 +218,7 @@ struct solo_dev {
45108
45109 /* P2M DMA Engine */
45110 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
45111- atomic_t p2m_count;
45112+ atomic_unchecked_t p2m_count;
45113 int p2m_jiffies;
45114 unsigned int p2m_timeouts;
45115
45116diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
45117index c135165..dc69499 100644
45118--- a/drivers/media/pci/tw68/tw68-core.c
45119+++ b/drivers/media/pci/tw68/tw68-core.c
45120@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
45121 module_param_array(card, int, NULL, 0444);
45122 MODULE_PARM_DESC(card, "card type");
45123
45124-static atomic_t tw68_instance = ATOMIC_INIT(0);
45125+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
45126
45127 /* ------------------------------------------------------------------ */
45128
45129diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45130index ba2d8f9..1566684 100644
45131--- a/drivers/media/platform/omap/omap_vout.c
45132+++ b/drivers/media/platform/omap/omap_vout.c
45133@@ -63,7 +63,6 @@ enum omap_vout_channels {
45134 OMAP_VIDEO2,
45135 };
45136
45137-static struct videobuf_queue_ops video_vbq_ops;
45138 /* Variables configurable through module params*/
45139 static u32 video1_numbuffers = 3;
45140 static u32 video2_numbuffers = 3;
45141@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
45142 {
45143 struct videobuf_queue *q;
45144 struct omap_vout_device *vout = NULL;
45145+ static struct videobuf_queue_ops video_vbq_ops = {
45146+ .buf_setup = omap_vout_buffer_setup,
45147+ .buf_prepare = omap_vout_buffer_prepare,
45148+ .buf_release = omap_vout_buffer_release,
45149+ .buf_queue = omap_vout_buffer_queue,
45150+ };
45151
45152 vout = video_drvdata(file);
45153 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
45154@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
45155 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
45156
45157 q = &vout->vbq;
45158- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
45159- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
45160- video_vbq_ops.buf_release = omap_vout_buffer_release;
45161- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
45162 spin_lock_init(&vout->vbq_lock);
45163
45164 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
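The omap_vout change replaces a file-scope ops table whose fields were assigned at open() time with a function-local static that is fully initialized at compile time; with no runtime writes left, a constification plugin can move the table to read-only memory (the patch relies on the plugin rather than writing const explicitly). The shape of the transformation, with hypothetical names:

#include <stdio.h>

struct vbq_ops {
	void (*buf_setup)(void);
	void (*buf_queue)(void);
};

static void setup(void) { puts("setup"); }
static void queue(void) { puts("queue"); }

static void open_device(void)
{
	/* Initialized once, at compile time; nothing ever writes it. */
	static const struct vbq_ops ops = {
		.buf_setup = setup,
		.buf_queue = queue,
	};

	ops.buf_setup();
	ops.buf_queue();
}

int main(void)
{
	open_device();
	return 0;
}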
45165diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
45166index fb2acc5..a2fcbdc4 100644
45167--- a/drivers/media/platform/s5p-tv/mixer.h
45168+++ b/drivers/media/platform/s5p-tv/mixer.h
45169@@ -156,7 +156,7 @@ struct mxr_layer {
45170 /** layer index (unique identifier) */
45171 int idx;
45172 /** callbacks for layer methods */
45173- struct mxr_layer_ops ops;
45174+ struct mxr_layer_ops *ops;
45175 /** format array */
45176 const struct mxr_format **fmt_array;
45177 /** size of format array */
45178diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45179index 74344c7..a39e70e 100644
45180--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45181+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45182@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
45183 {
45184 struct mxr_layer *layer;
45185 int ret;
45186- struct mxr_layer_ops ops = {
45187+ static struct mxr_layer_ops ops = {
45188 .release = mxr_graph_layer_release,
45189 .buffer_set = mxr_graph_buffer_set,
45190 .stream_set = mxr_graph_stream_set,
45191diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
45192index b713403..53cb5ad 100644
45193--- a/drivers/media/platform/s5p-tv/mixer_reg.c
45194+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
45195@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
45196 layer->update_buf = next;
45197 }
45198
45199- layer->ops.buffer_set(layer, layer->update_buf);
45200+ layer->ops->buffer_set(layer, layer->update_buf);
45201
45202 if (done && done != layer->shadow_buf)
45203 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
45204diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
45205index 72d4f2e..4b2ea0d 100644
45206--- a/drivers/media/platform/s5p-tv/mixer_video.c
45207+++ b/drivers/media/platform/s5p-tv/mixer_video.c
45208@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
45209 layer->geo.src.height = layer->geo.src.full_height;
45210
45211 mxr_geometry_dump(mdev, &layer->geo);
45212- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45213+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45214 mxr_geometry_dump(mdev, &layer->geo);
45215 }
45216
45217@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
45218 layer->geo.dst.full_width = mbus_fmt.width;
45219 layer->geo.dst.full_height = mbus_fmt.height;
45220 layer->geo.dst.field = mbus_fmt.field;
45221- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45222+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45223
45224 mxr_geometry_dump(mdev, &layer->geo);
45225 }
45226@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
45227 /* set source size to highest accepted value */
45228 geo->src.full_width = max(geo->dst.full_width, pix->width);
45229 geo->src.full_height = max(geo->dst.full_height, pix->height);
45230- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45231+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45232 mxr_geometry_dump(mdev, &layer->geo);
45233 /* set cropping to total visible screen */
45234 geo->src.width = pix->width;
45235@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
45236 geo->src.x_offset = 0;
45237 geo->src.y_offset = 0;
45238 /* assure consistency of geometry */
45239- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45240+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45241 mxr_geometry_dump(mdev, &layer->geo);
45242 /* set full size to lowest possible value */
45243 geo->src.full_width = 0;
45244 geo->src.full_height = 0;
45245- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45246+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45247 mxr_geometry_dump(mdev, &layer->geo);
45248
45249 /* returning results */
45250@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
45251 target->width = s->r.width;
45252 target->height = s->r.height;
45253
45254- layer->ops.fix_geometry(layer, stage, s->flags);
45255+ layer->ops->fix_geometry(layer, stage, s->flags);
45256
45257 /* retrieve update selection rectangle */
45258 res.left = target->x_offset;
45259@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45260 mxr_output_get(mdev);
45261
45262 mxr_layer_update_output(layer);
45263- layer->ops.format_set(layer);
45264+ layer->ops->format_set(layer);
45265 /* enabling layer in hardware */
45266 spin_lock_irqsave(&layer->enq_slock, flags);
45267 layer->state = MXR_LAYER_STREAMING;
45268 spin_unlock_irqrestore(&layer->enq_slock, flags);
45269
45270- layer->ops.stream_set(layer, MXR_ENABLE);
45271+ layer->ops->stream_set(layer, MXR_ENABLE);
45272 mxr_streamer_get(mdev);
45273
45274 return 0;
45275@@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
45276 spin_unlock_irqrestore(&layer->enq_slock, flags);
45277
45278 /* disabling layer in hardware */
45279- layer->ops.stream_set(layer, MXR_DISABLE);
45280+ layer->ops->stream_set(layer, MXR_DISABLE);
45281 /* remove one streamer */
45282 mxr_streamer_put(mdev);
45283 /* allow changes in output configuration */
45284@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45285
45286 void mxr_layer_release(struct mxr_layer *layer)
45287 {
45288- if (layer->ops.release)
45289- layer->ops.release(layer);
45290+ if (layer->ops->release)
45291+ layer->ops->release(layer);
45292 }
45293
45294 void mxr_base_layer_release(struct mxr_layer *layer)
45295@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45296
45297 layer->mdev = mdev;
45298 layer->idx = idx;
45299- layer->ops = *ops;
45300+ layer->ops = ops;
45301
45302 spin_lock_init(&layer->enq_slock);
45303 INIT_LIST_HEAD(&layer->enq_list);
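The s5p-tv mixer hunks change struct mxr_layer from embedding a writable copy of mxr_layer_ops to holding a pointer to one shared table, with every call site rewritten from layer->ops.fn() to layer->ops->fn(). A single table in read-only memory then serves all layers. A compressed sketch of that refactor:

#include <stdio.h>

struct layer_ops {
	void (*stream_set)(int on);
};

struct layer {
	const struct layer_ops *ops;	/* was: struct layer_ops ops */
};

static void vp_stream_set(int on) { printf("stream %d\n", on); }

static const struct layer_ops vp_ops = { .stream_set = vp_stream_set };

int main(void)
{
	struct layer l = { .ops = &vp_ops };	/* was: l.ops = *ops */
	l.ops->stream_set(1);			/* was: l.ops.stream_set(1) */
	return 0;
}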
45304diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45305index c9388c4..ce71ece 100644
45306--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45307+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45308@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45309 {
45310 struct mxr_layer *layer;
45311 int ret;
45312- struct mxr_layer_ops ops = {
45313+ static struct mxr_layer_ops ops = {
45314 .release = mxr_vp_layer_release,
45315 .buffer_set = mxr_vp_buffer_set,
45316 .stream_set = mxr_vp_stream_set,
45317diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45318index 82affae..42833ec 100644
45319--- a/drivers/media/radio/radio-cadet.c
45320+++ b/drivers/media/radio/radio-cadet.c
45321@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45322 unsigned char readbuf[RDS_BUFFER];
45323 int i = 0;
45324
45325+ if (count > RDS_BUFFER)
45326+ return -EFAULT;
45327 mutex_lock(&dev->lock);
45328 if (dev->rdsstat == 0)
45329 cadet_start_rds(dev);
45330@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45331 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45332 mutex_unlock(&dev->lock);
45333
45334- if (i && copy_to_user(data, readbuf, i))
45335- return -EFAULT;
45336+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45337+ i = -EFAULT;
45338+
45339 return i;
45340 }
45341
45342diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45343index 5236035..c622c74 100644
45344--- a/drivers/media/radio/radio-maxiradio.c
45345+++ b/drivers/media/radio/radio-maxiradio.c
45346@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45347 /* TEA5757 pin mappings */
45348 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45349
45350-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45351+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45352
45353 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45354 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45355diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45356index 050b3bb..79f62b9 100644
45357--- a/drivers/media/radio/radio-shark.c
45358+++ b/drivers/media/radio/radio-shark.c
45359@@ -79,7 +79,7 @@ struct shark_device {
45360 u32 last_val;
45361 };
45362
45363-static atomic_t shark_instance = ATOMIC_INIT(0);
45364+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45365
45366 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45367 {
45368diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45369index 8654e0d..0608a64 100644
45370--- a/drivers/media/radio/radio-shark2.c
45371+++ b/drivers/media/radio/radio-shark2.c
45372@@ -74,7 +74,7 @@ struct shark_device {
45373 u8 *transfer_buffer;
45374 };
45375
45376-static atomic_t shark_instance = ATOMIC_INIT(0);
45377+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45378
45379 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45380 {
45381diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45382index dccf586..d5db411 100644
45383--- a/drivers/media/radio/radio-si476x.c
45384+++ b/drivers/media/radio/radio-si476x.c
45385@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45386 struct si476x_radio *radio;
45387 struct v4l2_ctrl *ctrl;
45388
45389- static atomic_t instance = ATOMIC_INIT(0);
45390+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45391
45392 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45393 if (!radio)
45394diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45395index 704397f..4d05977 100644
45396--- a/drivers/media/radio/wl128x/fmdrv_common.c
45397+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45398@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45399 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45400
45401 /* Radio Nr */
45402-static u32 radio_nr = -1;
45403+static int radio_nr = -1;
45404 module_param(radio_nr, int, 0444);
45405 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45406
45407diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45408index 9fd1527..8927230 100644
45409--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45410+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45411@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45412
45413 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45414 {
45415- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45416- char result[64];
45417- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45418- sizeof(result), 0);
45419+ char *buf;
45420+ char *result;
45421+ int retval;
45422+
45423+ buf = kmalloc(2, GFP_KERNEL);
45424+ if (buf == NULL)
45425+ return -ENOMEM;
45426+ result = kmalloc(64, GFP_KERNEL);
45427+ if (result == NULL) {
45428+ kfree(buf);
45429+ return -ENOMEM;
45430+ }
45431+
45432+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45433+ buf[1] = enable ? 1 : 0;
45434+
45435+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45436+
45437+ kfree(buf);
45438+ kfree(result);
45439+ return retval;
45440 }
45441
45442 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45443 {
45444- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45445- char state[3];
45446- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45447+ char *buf;
45448+ char *state;
45449+ int retval;
45450+
45451+ buf = kmalloc(2, GFP_KERNEL);
45452+ if (buf == NULL)
45453+ return -ENOMEM;
45454+ state = kmalloc(3, GFP_KERNEL);
45455+ if (state == NULL) {
45456+ kfree(buf);
45457+ return -ENOMEM;
45458+ }
45459+
45460+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45461+	buf[1] = enable ? 0 : 1;
45462+
45463+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45464+
45465+ kfree(buf);
45466+ kfree(state);
45467+ return retval;
45468 }
45469
45470 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45471 {
45472- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45473- char state[3];
45474+ char *query;
45475+ char *state;
45476 int ret;
45477+ query = kmalloc(1, GFP_KERNEL);
45478+ if (query == NULL)
45479+ return -ENOMEM;
45480+ state = kmalloc(3, GFP_KERNEL);
45481+ if (state == NULL) {
45482+ kfree(query);
45483+ return -ENOMEM;
45484+ }
45485+
45486+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45487
45488 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45489
45490- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45491- sizeof(state), 0);
45492+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45493 if (ret < 0) {
45494 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45495 "state info\n");
45496@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45497
45498 /* Copy this pointer as we are gonna need it in the release phase */
45499 cinergyt2_usb_device = adap->dev;
45500-
45501+ kfree(query);
45502+ kfree(state);
45503 return 0;
45504 }
45505
45506@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45507 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45508 {
45509 struct cinergyt2_state *st = d->priv;
45510- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45511+ u8 *key, *cmd;
45512 int i;
45513
45514+ cmd = kmalloc(1, GFP_KERNEL);
45515+ if (cmd == NULL)
45516+		return -ENOMEM;
45517+ key = kzalloc(5, GFP_KERNEL);
45518+ if (key == NULL) {
45519+ kfree(cmd);
45520+		return -ENOMEM;
45521+ }
45522+
45523+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45524+
45525 *state = REMOTE_NO_KEY_PRESSED;
45526
45527- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45528+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45529 if (key[4] == 0xff) {
45530 /* key repeat */
45531 st->rc_counter++;
45532@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45533 *event = d->last_event;
45534 deb_rc("repeat key, event %x\n",
45535 *event);
45536- return 0;
45537+ goto out;
45538 }
45539 }
45540 deb_rc("repeated key (non repeatable)\n");
45541 }
45542- return 0;
45543+ goto out;
45544 }
45545
45546 /* hack to pass checksum on the custom field */
45547@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45548
45549 deb_rc("key: %*ph\n", 5, key);
45550 }
45551+out:
45552+ kfree(cmd);
45553+ kfree(key);
45554 return 0;
45555 }
45556
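
All of the cinergyT2-core conversions above follow one template: these buffers end up in dvb_usb_generic_rw(), i.e. in usb_bulk_msg(), and USB transfer buffers must be heap memory (on-stack buffers are not reliably DMA-able), so each automatic array becomes a kmalloc()/kfree() pair with the sizes spelled out as constants. The template, condensed, with do_transfer() as a hypothetical stand-in for dvb_usb_generic_rw():

	#include <linux/slab.h>

	static int heap_msg(struct dvb_usb_device *d, u8 op, u8 arg)
	{
		u8 *buf, *reply;
		int ret;

		buf = kmalloc(2, GFP_KERNEL);	/* heap: safe to hand to DMA */
		if (buf == NULL)
			return -ENOMEM;
		reply = kmalloc(64, GFP_KERNEL);
		if (reply == NULL) {
			kfree(buf);		/* unwind the first allocation */
			return -ENOMEM;
		}

		buf[0] = op;
		buf[1] = arg;
		ret = do_transfer(d, buf, 2, reply, 64);

		kfree(buf);
		kfree(reply);
		return ret;
	}
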
45557diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45558index c890fe4..f9b2ae6 100644
45559--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45560+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45561@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45562 fe_status_t *status)
45563 {
45564 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45565- struct dvbt_get_status_msg result;
45566- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45567+ struct dvbt_get_status_msg *result;
45568+ u8 *cmd;
45569 int ret;
45570
45571- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45572- sizeof(result), 0);
45573+ cmd = kmalloc(1, GFP_KERNEL);
45574+ if (cmd == NULL)
45575+ return -ENOMEM;
45576+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45577+ if (result == NULL) {
45578+ kfree(cmd);
45579+ return -ENOMEM;
45580+ }
45581+
45582+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45583+
45584+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45585+ sizeof(*result), 0);
45586 if (ret < 0)
45587- return ret;
45588+ goto out;
45589
45590 *status = 0;
45591
45592- if (0xffff - le16_to_cpu(result.gain) > 30)
45593+ if (0xffff - le16_to_cpu(result->gain) > 30)
45594 *status |= FE_HAS_SIGNAL;
45595- if (result.lock_bits & (1 << 6))
45596+ if (result->lock_bits & (1 << 6))
45597 *status |= FE_HAS_LOCK;
45598- if (result.lock_bits & (1 << 5))
45599+ if (result->lock_bits & (1 << 5))
45600 *status |= FE_HAS_SYNC;
45601- if (result.lock_bits & (1 << 4))
45602+ if (result->lock_bits & (1 << 4))
45603 *status |= FE_HAS_CARRIER;
45604- if (result.lock_bits & (1 << 1))
45605+ if (result->lock_bits & (1 << 1))
45606 *status |= FE_HAS_VITERBI;
45607
45608 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45609 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45610 *status &= ~FE_HAS_LOCK;
45611
45612- return 0;
45613+out:
45614+ kfree(cmd);
45615+ kfree(result);
45616+ return ret;
45617 }
45618
45619 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45620 {
45621 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45622- struct dvbt_get_status_msg status;
45623- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45624+ struct dvbt_get_status_msg *status;
45625+ char *cmd;
45626 int ret;
45627
45628- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45629- sizeof(status), 0);
45630+ cmd = kmalloc(1, GFP_KERNEL);
45631+ if (cmd == NULL)
45632+ return -ENOMEM;
45633+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45634+ if (status == NULL) {
45635+ kfree(cmd);
45636+ return -ENOMEM;
45637+ }
45638+
45639+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45640+
45641+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45642+ sizeof(*status), 0);
45643 if (ret < 0)
45644- return ret;
45645+ goto out;
45646
45647- *ber = le32_to_cpu(status.viterbi_error_rate);
45648+ *ber = le32_to_cpu(status->viterbi_error_rate);
45649+out:
45650+ kfree(cmd);
45651+ kfree(status);
45652-	return 0;
45652+	return ret;
45653 }
45654
45655 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45656 {
45657 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45658- struct dvbt_get_status_msg status;
45659- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45660+ struct dvbt_get_status_msg *status;
45661+ u8 *cmd;
45662 int ret;
45663
45664- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45665- sizeof(status), 0);
45666+ cmd = kmalloc(1, GFP_KERNEL);
45667+ if (cmd == NULL)
45668+ return -ENOMEM;
45669+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45670+ if (status == NULL) {
45671+ kfree(cmd);
45672+ return -ENOMEM;
45673+ }
45674+
45675+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45676+
45677+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45678+ sizeof(*status), 0);
45679 if (ret < 0) {
45680 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45681 ret);
45682- return ret;
45683+ goto out;
45684 }
45685- *unc = le32_to_cpu(status.uncorrected_block_count);
45686- return 0;
45687+ *unc = le32_to_cpu(status->uncorrected_block_count);
45688+
45689+out:
45690+ kfree(cmd);
45691+ kfree(status);
45692+ return ret;
45693 }
45694
45695 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45696 u16 *strength)
45697 {
45698 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45699- struct dvbt_get_status_msg status;
45700- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45701+ struct dvbt_get_status_msg *status;
45702+ char *cmd;
45703 int ret;
45704
45705- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45706- sizeof(status), 0);
45707+ cmd = kmalloc(1, GFP_KERNEL);
45708+ if (cmd == NULL)
45709+ return -ENOMEM;
45710+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45711+ if (status == NULL) {
45712+ kfree(cmd);
45713+ return -ENOMEM;
45714+ }
45715+
45716+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45717+
45718+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45719+ sizeof(*status), 0);
45720 if (ret < 0) {
45721 err("cinergyt2_fe_read_signal_strength() Failed!"
45722 " (Error=%d)\n", ret);
45723- return ret;
45724+ goto out;
45725 }
45726- *strength = (0xffff - le16_to_cpu(status.gain));
45727+ *strength = (0xffff - le16_to_cpu(status->gain));
45728+
45729+out:
45730+ kfree(cmd);
45731+ kfree(status);
45732-	return 0;
45732+	return ret;
45733 }
45734
45735 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45736 {
45737 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45738- struct dvbt_get_status_msg status;
45739- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45740+ struct dvbt_get_status_msg *status;
45741+ char *cmd;
45742 int ret;
45743
45744- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45745- sizeof(status), 0);
45746+ cmd = kmalloc(1, GFP_KERNEL);
45747+ if (cmd == NULL)
45748+ return -ENOMEM;
45749+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45750+ if (status == NULL) {
45751+ kfree(cmd);
45752+ return -ENOMEM;
45753+ }
45754+
45755+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45756+
45757+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45758+ sizeof(*status), 0);
45759 if (ret < 0) {
45760 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45761- return ret;
45762+ goto out;
45763 }
45764- *snr = (status.snr << 8) | status.snr;
45765- return 0;
45766+ *snr = (status->snr << 8) | status->snr;
45767+
45768+out:
45769+ kfree(cmd);
45770+ kfree(status);
45771+ return ret;
45772 }
45773
45774 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45775@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45776 {
45777 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45778 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45779- struct dvbt_set_parameters_msg param;
45780- char result[2];
45781+ struct dvbt_set_parameters_msg *param;
45782+ char *result;
45783 int err;
45784
45785- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45786- param.tps = cpu_to_le16(compute_tps(fep));
45787- param.freq = cpu_to_le32(fep->frequency / 1000);
45788- param.flags = 0;
45789+ result = kmalloc(2, GFP_KERNEL);
45790+ if (result == NULL)
45791+ return -ENOMEM;
45792+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45793+ if (param == NULL) {
45794+ kfree(result);
45795+ return -ENOMEM;
45796+ }
45797+
45798+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45799+ param->tps = cpu_to_le16(compute_tps(fep));
45800+ param->freq = cpu_to_le32(fep->frequency / 1000);
45801+ param->flags = 0;
45802
45803 switch (fep->bandwidth_hz) {
45804 default:
45805 case 8000000:
45806- param.bandwidth = 8;
45807+ param->bandwidth = 8;
45808 break;
45809 case 7000000:
45810- param.bandwidth = 7;
45811+ param->bandwidth = 7;
45812 break;
45813 case 6000000:
45814- param.bandwidth = 6;
45815+ param->bandwidth = 6;
45816 break;
45817 }
45818
45819 err = dvb_usb_generic_rw(state->d,
45820- (char *)&param, sizeof(param),
45821- result, sizeof(result), 0);
45822+ (char *)param, sizeof(*param),
45823+ result, 2, 0);
45824 if (err < 0)
45825 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45826
45827- return (err < 0) ? err : 0;
45828+ kfree(result);
45829+ kfree(param);
45830+ return err;
45831 }
45832
45833 static void cinergyt2_fe_release(struct dvb_frontend *fe)
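
Moving the status buffers of cinergyT2-fe.c onto the heap turns every early return into a potential leak, so the hunks above funnel all exits through a single out: label that frees both allocations and reports ret from every path. The skeleton of the idiom, with do_work() and consume() as hypothetical placeholders:

	u8 *cmd = kmalloc(1, GFP_KERNEL);
	int ret;

	if (cmd == NULL)
		return -ENOMEM;		/* nothing to unwind yet */

	ret = do_work(cmd);
	if (ret < 0)
		goto out;		/* error path shares the cleanup */

	ret = consume(cmd);
out:
	kfree(cmd);			/* exactly one free on every path */
	return ret;
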
45834diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45835index 733a7ff..f8b52e3 100644
45836--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45837+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45838@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45839
45840 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45841 {
45842- struct hexline hx;
45843- u8 reset;
45844+ struct hexline *hx;
45845+ u8 *reset;
45846 int ret,pos=0;
45847
45848+ reset = kmalloc(1, GFP_KERNEL);
45849+ if (reset == NULL)
45850+ return -ENOMEM;
45851+
45852+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45853+ if (hx == NULL) {
45854+ kfree(reset);
45855+ return -ENOMEM;
45856+ }
45857+
45858 /* stop the CPU */
45859- reset = 1;
45860- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45861+ reset[0] = 1;
45862+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45863 err("could not stop the USB controller CPU.");
45864
45865- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45866- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45867- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45868+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45869+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45870+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45871
45872- if (ret != hx.len) {
45873+ if (ret != hx->len) {
45874 err("error while transferring firmware "
45875 "(transferred size: %d, block size: %d)",
45876- ret,hx.len);
45877+ ret,hx->len);
45878 ret = -EINVAL;
45879 break;
45880 }
45881 }
45882 if (ret < 0) {
45883 err("firmware download failed at %d with %d",pos,ret);
45884+ kfree(reset);
45885+ kfree(hx);
45886 return ret;
45887 }
45888
45889 if (ret == 0) {
45890 /* restart the CPU */
45891- reset = 0;
45892- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45893+ reset[0] = 0;
45894+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45895 err("could not restart the USB controller CPU.");
45896 ret = -EINVAL;
45897 }
45898 } else
45899 ret = -EIO;
45900
45901+ kfree(reset);
45902+ kfree(hx);
45903+
45904 return ret;
45905 }
45906 EXPORT_SYMBOL(usb_cypress_load_firmware);
45907diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45908index 1a3df10..57997a5 100644
45909--- a/drivers/media/usb/dvb-usb/dw2102.c
45910+++ b/drivers/media/usb/dvb-usb/dw2102.c
45911@@ -118,7 +118,7 @@ struct su3000_state {
45912
45913 struct s6x0_state {
45914 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45915-};
45916+} __no_const;
45917
45918 /* debug */
45919 static int dvb_usb_dw2102_debug;
45920diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45921index 5801ae7..83f71fa 100644
45922--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45923+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45924@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45925 static int technisat_usb2_i2c_access(struct usb_device *udev,
45926 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45927 {
45928- u8 b[64];
45929- int ret, actual_length;
45930+ u8 *b = kmalloc(64, GFP_KERNEL);
45931+ int ret, actual_length, error = 0;
45932+
45933+ if (b == NULL)
45934+ return -ENOMEM;
45935
45936 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45937 debug_dump(tx, txlen, deb_i2c);
45938@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45939
45940 if (ret < 0) {
45941 err("i2c-error: out failed %02x = %d", device_addr, ret);
45942- return -ENODEV;
45943+ error = -ENODEV;
45944+ goto out;
45945 }
45946
45947 ret = usb_bulk_msg(udev,
45948@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45949 b, 64, &actual_length, 1000);
45950 if (ret < 0) {
45951 err("i2c-error: in failed %02x = %d", device_addr, ret);
45952- return -ENODEV;
45953+ error = -ENODEV;
45954+ goto out;
45955 }
45956
45957 if (b[0] != I2C_STATUS_OK) {
45958@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45959 /* handle tuner-i2c-nak */
45960 if (!(b[0] == I2C_STATUS_NAK &&
45961 device_addr == 0x60
45962- /* && device_is_technisat_usb2 */))
45963- return -ENODEV;
45964+ /* && device_is_technisat_usb2 */)) {
45965+ error = -ENODEV;
45966+ goto out;
45967+ }
45968 }
45969
45970 deb_i2c("status: %d, ", b[0]);
45971@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45972
45973 deb_i2c("\n");
45974
45975- return 0;
45976+out:
45977+ kfree(b);
45978+ return error;
45979 }
45980
45981 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45982@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45983 {
45984 int ret;
45985
45986- u8 led[8] = {
45987- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45988- 0
45989- };
45990+ u8 *led = kzalloc(8, GFP_KERNEL);
45991+
45992+ if (led == NULL)
45993+ return -ENOMEM;
45994
45995 if (disable_led_control && state != TECH_LED_OFF)
45996-		return 0;
45996+		{ kfree(led); return 0; }
45997
45998+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45999+
46000 switch (state) {
46001 case TECH_LED_ON:
46002 led[1] = 0x82;
46003@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46004 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46005 USB_TYPE_VENDOR | USB_DIR_OUT,
46006 0, 0,
46007- led, sizeof(led), 500);
46008+ led, 8, 500);
46009
46010 mutex_unlock(&d->i2c_mutex);
46011+
46012+ kfree(led);
46013+
46014 return ret;
46015 }
46016
46017 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46018 {
46019 int ret;
46020- u8 b = 0;
46021+ u8 *b = kzalloc(1, GFP_KERNEL);
46022+
46023+ if (b == NULL)
46024+ return -ENOMEM;
46025
46026 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
46027-		return -EAGAIN;
46027+		{ kfree(b); return -EAGAIN; }
46028@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
46029 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
46030 USB_TYPE_VENDOR | USB_DIR_OUT,
46031 (red << 8) | green, 0,
46032- &b, 1, 500);
46033+ b, 1, 500);
46034
46035 mutex_unlock(&d->i2c_mutex);
46036
46037+ kfree(b);
46038+
46039 return ret;
46040 }
46041
46042@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46043 struct dvb_usb_device_description **desc, int *cold)
46044 {
46045 int ret;
46046- u8 version[3];
46047+ u8 *version = kmalloc(3, GFP_KERNEL);
46048
46049 /* first select the interface */
46050 if (usb_set_interface(udev, 0, 1) != 0)
46051@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46052
46053 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
46054
46055+ if (version == NULL)
46056+ return 0;
46057+
46058 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
46059 GET_VERSION_INFO_VENDOR_REQUEST,
46060 USB_TYPE_VENDOR | USB_DIR_IN,
46061 0, 0,
46062- version, sizeof(version), 500);
46063+ version, 3, 500);
46064
46065 if (ret < 0)
46066 *cold = 1;
46067@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46068 *cold = 0;
46069 }
46070
46071+ kfree(version);
46072+
46073 return 0;
46074 }
46075
46076@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
46077
46078 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46079 {
46080- u8 buf[62], *b;
46081+ u8 *buf, *b;
46082 int ret;
46083 struct ir_raw_event ev;
46084
46085+ buf = kmalloc(62, GFP_KERNEL);
46086+
46087+ if (buf == NULL)
46088+ return -ENOMEM;
46089+
46090 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
46091 buf[1] = 0x08;
46092 buf[2] = 0x8f;
46093@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46094 GET_IR_DATA_VENDOR_REQUEST,
46095 USB_TYPE_VENDOR | USB_DIR_IN,
46096 0x8080, 0,
46097- buf, sizeof(buf), 500);
46098+ buf, 62, 500);
46099
46100 unlock:
46101 mutex_unlock(&d->i2c_mutex);
46102
46103- if (ret < 0)
46104+ if (ret < 0) {
46105+ kfree(buf);
46106 return ret;
46107+ }
46108
46109- if (ret == 1)
46110+ if (ret == 1) {
46111+ kfree(buf);
46112 return 0; /* no key pressed */
46113+ }
46114
46115 /* decoding */
46116 b = buf+1;
46117@@ -656,6 +689,8 @@ unlock:
46118
46119 ir_raw_event_handle(d->rc_dev);
46120
46121+ kfree(buf);
46122+
46123 return 1;
46124 }
46125
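
The technisat-usb2 conversion carries the same obligation: once led, b, buf and version come from the heap, any return that precedes the kfree() leaks the buffer, which is why the get_ir hunk frees buf on both the error path and the no-key path, and why the early returns in set_led and set_led_timer above now release their buffers as well. An alternative that avoids the problem entirely is to take the cheap bail-outs before allocating; a sketch of that ordering (not what the patch does, which keeps the hunks minimal):

	static int set_led_sketch(struct dvb_usb_device *d, int state)
	{
		u8 *led;

		if (disable_led_control && state != TECH_LED_OFF)
			return 0;		/* nothing allocated yet */

		led = kzalloc(8, GFP_KERNEL);
		if (led == NULL)
			return -ENOMEM;
		/* ... build and submit the vendor request ... */
		kfree(led);
		return 0;
	}
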
46126diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46127index af63543..0436f20 100644
46128--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46129+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46130@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46131 * by passing a very big num_planes value */
46132 uplane = compat_alloc_user_space(num_planes *
46133 sizeof(struct v4l2_plane));
46134- kp->m.planes = (__force struct v4l2_plane *)uplane;
46135+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
46136
46137 while (--num_planes >= 0) {
46138 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46139@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46140 if (num_planes == 0)
46141 return 0;
46142
46143- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
46144+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
46145 if (get_user(p, &up->m.planes))
46146 return -EFAULT;
46147 uplane32 = compat_ptr(p);
46148@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
46149 get_user(kp->flags, &up->flags) ||
46150 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
46151 return -EFAULT;
46152- kp->base = (__force void *)compat_ptr(tmp);
46153+ kp->base = (__force_kernel void *)compat_ptr(tmp);
46154 return 0;
46155 }
46156
46157@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46158 n * sizeof(struct v4l2_ext_control32)))
46159 return -EFAULT;
46160 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
46161- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
46162+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
46163 while (--n >= 0) {
46164 u32 id;
46165
46166@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46167 {
46168 struct v4l2_ext_control32 __user *ucontrols;
46169 struct v4l2_ext_control __user *kcontrols =
46170- (__force struct v4l2_ext_control __user *)kp->controls;
46171+ (struct v4l2_ext_control __force_user *)kp->controls;
46172 int n = kp->count;
46173 compat_caddr_t p;
46174
46175@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
46176 get_user(tmp, &up->edid) ||
46177 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
46178 return -EFAULT;
46179- kp->edid = (__force u8 *)compat_ptr(tmp);
46180+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
46181 return 0;
46182 }
46183
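
The __force_user/__force_kernel casts above are this patch's refinement of sparse's address-space checking: a plain __force silences sparse in either direction, whereas the split macros record which way a pointer is crossing the user/kernel boundary. The stock mechanism they build on looks like this (checkable with make C=1):

	#include <linux/compiler.h>

	void sketch(int *kptr)
	{
		int __user *uptr;

		/* address-space-changing cast: sparse warns without __force */
		uptr = (int __user *)kptr;

		/* __force acknowledges the crossing and silences the warning */
		uptr = (__force int __user *)kptr;
	}
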
46184diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
46185index 015f92a..59e311e 100644
46186--- a/drivers/media/v4l2-core/v4l2-device.c
46187+++ b/drivers/media/v4l2-core/v4l2-device.c
46188@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
46189 EXPORT_SYMBOL_GPL(v4l2_device_put);
46190
46191 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
46192- atomic_t *instance)
46193+ atomic_unchecked_t *instance)
46194 {
46195- int num = atomic_inc_return(instance) - 1;
46196+ int num = atomic_inc_return_unchecked(instance) - 1;
46197 int len = strlen(basename);
46198
46199 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
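
v4l2_device_set_name() is where the instance counters converted earlier in this section (radio-maxiradio, radio-shark, radio-shark2, radio-si476x) all land. These counters are pure ID generators: a wrap would at worst reuse a device name, never free an object early, so the patch switches them to atomic_unchecked_t, the PaX type that opts a counter out of the REFCOUNT overflow trap; lifetime-guarding reference counts stay atomic_t and keep the protection. With the patch's own primitives:

	static atomic_unchecked_t instance = ATOMIC_INIT(0);

	static int assign_id(void)
	{
		/* no saturation or trap on overflow: fine for IDs and stats */
		return atomic_inc_return_unchecked(&instance) - 1;
	}
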
46200diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
46201index b084072..36706d7 100644
46202--- a/drivers/media/v4l2-core/v4l2-ioctl.c
46203+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
46204@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
46205 struct file *file, void *fh, void *p);
46206 } u;
46207 void (*debug)(const void *arg, bool write_only);
46208-};
46209+} __do_const;
46210+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
46211
46212 /* This control needs a priority check */
46213 #define INFO_FL_PRIO (1 << 0)
46214@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
46215 struct video_device *vfd = video_devdata(file);
46216 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
46217 bool write_only = false;
46218- struct v4l2_ioctl_info default_info;
46219+ v4l2_ioctl_info_no_const default_info;
46220 const struct v4l2_ioctl_info *info;
46221 void *fh = file->private_data;
46222 struct v4l2_fh *vfh = NULL;
46223@@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46224 ret = -EINVAL;
46225 break;
46226 }
46227- *user_ptr = (void __user *)buf->m.planes;
46228+ *user_ptr = (void __force_user *)buf->m.planes;
46229 *kernel_ptr = (void **)&buf->m.planes;
46230 *array_size = sizeof(struct v4l2_plane) * buf->length;
46231 ret = 1;
46232@@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46233 ret = -EINVAL;
46234 break;
46235 }
46236- *user_ptr = (void __user *)edid->edid;
46237+ *user_ptr = (void __force_user *)edid->edid;
46238 *kernel_ptr = (void **)&edid->edid;
46239 *array_size = edid->blocks * 128;
46240 ret = 1;
46241@@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46242 ret = -EINVAL;
46243 break;
46244 }
46245- *user_ptr = (void __user *)ctrls->controls;
46246+ *user_ptr = (void __force_user *)ctrls->controls;
46247 *kernel_ptr = (void **)&ctrls->controls;
46248 *array_size = sizeof(struct v4l2_ext_control)
46249 * ctrls->count;
46250@@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
46251 }
46252
46253 if (has_array_args) {
46254- *kernel_ptr = (void __force *)user_ptr;
46255+ *kernel_ptr = (void __force_kernel *)user_ptr;
46256 if (copy_to_user(user_ptr, mbuf, array_size))
46257 err = -EFAULT;
46258 goto out_array_args;
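
The v4l2_ioctl_info hunk shows both halves of the constification scheme at once: __do_const tells the patch's constify GCC plugin to make every instance of the struct read-only (function-pointer tables being a favourite overwrite target), and the __no_const typedef carves out the one writable variant that __video_do_ioctl() needs for the default_info it assembles on the stack. Schematically:

	struct ops_table {
		long (*handler)(void *arg);
	} __do_const;				/* all instances end up read-only */

	typedef struct ops_table __no_const ops_table_no_const;

	static void build_default(long (*h)(void *))
	{
		ops_table_no_const tmp;		/* stack-local, legitimately written */

		tmp.handler = h;
		/* ... hand tmp to the dispatch code ... */
	}
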
46259diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
46260index 24696f5..3637780 100644
46261--- a/drivers/memory/omap-gpmc.c
46262+++ b/drivers/memory/omap-gpmc.c
46263@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
46264 };
46265
46266 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
46267-static struct irq_chip gpmc_irq_chip;
46268 static int gpmc_irq_start;
46269
46270 static struct resource gpmc_mem_root;
46271@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
46272
46273 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
46274
46275+static struct irq_chip gpmc_irq_chip = {
46276+ .name = "gpmc",
46277+ .irq_startup = gpmc_irq_noop_ret,
46278+ .irq_enable = gpmc_irq_enable,
46279+ .irq_disable = gpmc_irq_disable,
46280+ .irq_shutdown = gpmc_irq_noop,
46281+ .irq_ack = gpmc_irq_noop,
46282+ .irq_mask = gpmc_irq_noop,
46283+ .irq_unmask = gpmc_irq_noop,
46284+};
46285+
46286 static int gpmc_setup_irq(void)
46287 {
46288 int i;
46289@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
46290 return gpmc_irq_start;
46291 }
46292
46293- gpmc_irq_chip.name = "gpmc";
46294- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
46295- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
46296- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
46297- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
46298- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
46299- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46300- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46301-
46302 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46303 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46304
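
The omap-gpmc hunk constifies by restructuring rather than by annotation: filling gpmc_irq_chip field-by-field inside gpmc_setup_irq() forces the object to stay writable forever, while a static designated initializer finishes the job at build time so the chip never needs a runtime store again. The transformation in miniature (the two forms are alternatives, not both at once):

	#include <linux/irq.h>

	static void demo_noop(struct irq_data *d) { }

	/* before: object must remain writable for setup() to fill it in
	 *	static struct irq_chip chip;
	 *	chip.name = "demo"; chip.irq_ack = demo_noop;
	 */

	/* after: complete at build time, eligible for read-only placement */
	static struct irq_chip chip = {
		.name    = "demo",
		.irq_ack = demo_noop,
	};
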
46305diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46306index 187f836..679544b 100644
46307--- a/drivers/message/fusion/mptbase.c
46308+++ b/drivers/message/fusion/mptbase.c
46309@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46310 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46311 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46312
46313+#ifdef CONFIG_GRKERNSEC_HIDESYM
46314+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46315+#else
46316 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46317 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46318+#endif
46319+
46320 /*
46321 * Rounding UP to nearest 4-kB boundary here...
46322 */
46323@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46324 ioc->facts.GlobalCredits);
46325
46326 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46327+#ifdef CONFIG_GRKERNSEC_HIDESYM
46328+ NULL, NULL);
46329+#else
46330 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46331+#endif
46332 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46333 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46334 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
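
Under CONFIG_GRKERNSEC_HIDESYM the mptbase /proc output above prints NULL in place of the frame addresses, because raw %p values in world-readable procfs leak kernel heap layout to unprivileged readers. Mainline later grew a milder knob for the same problem: the %pK format specifier, which censors the pointer according to kptr_restrict. A sketch of that variant (not what this patch uses):

	/* prints the real address only when kptr_restrict allows it */
	seq_printf(m, "  RequestFrames @ %pK (Dma @ %pK)\n",
		   (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
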
46335diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46336index 5bdaae1..eced16f 100644
46337--- a/drivers/message/fusion/mptsas.c
46338+++ b/drivers/message/fusion/mptsas.c
46339@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46340 return 0;
46341 }
46342
46343+static inline void
46344+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46345+{
46346+ if (phy_info->port_details) {
46347+ phy_info->port_details->rphy = rphy;
46348+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46349+ ioc->name, rphy));
46350+ }
46351+
46352+ if (rphy) {
46353+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46354+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46355+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46356+ ioc->name, rphy, rphy->dev.release));
46357+ }
46358+}
46359+
46360 /* no mutex */
46361 static void
46362 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46363@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46364 return NULL;
46365 }
46366
46367-static inline void
46368-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46369-{
46370- if (phy_info->port_details) {
46371- phy_info->port_details->rphy = rphy;
46372- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46373- ioc->name, rphy));
46374- }
46375-
46376- if (rphy) {
46377- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46378- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46379- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46380- ioc->name, rphy, rphy->dev.release));
46381- }
46382-}
46383-
46384 static inline struct sas_port *
46385 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46386 {
46387diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46388index 9a8e185..27ff17d 100644
46389--- a/drivers/mfd/ab8500-debugfs.c
46390+++ b/drivers/mfd/ab8500-debugfs.c
46391@@ -100,7 +100,7 @@ static int irq_last;
46392 static u32 *irq_count;
46393 static int num_irqs;
46394
46395-static struct device_attribute **dev_attr;
46396+static device_attribute_no_const **dev_attr;
46397 static char **event_name;
46398
46399 static u8 avg_sample = SAMPLE_16;
46400diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
46401index 5615522..1eb6f3dc 100644
46402--- a/drivers/mfd/kempld-core.c
46403+++ b/drivers/mfd/kempld-core.c
46404@@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
46405 .remove = kempld_remove,
46406 };
46407
46408-static struct dmi_system_id kempld_dmi_table[] __initdata = {
46409+static const struct dmi_system_id kempld_dmi_table[] __initconst = {
46410 {
46411 .ident = "BHL6",
46412 .matches = {
46413diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46414index c880c89..45a7c68 100644
46415--- a/drivers/mfd/max8925-i2c.c
46416+++ b/drivers/mfd/max8925-i2c.c
46417@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46418 const struct i2c_device_id *id)
46419 {
46420 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46421- static struct max8925_chip *chip;
46422+ struct max8925_chip *chip;
46423 struct device_node *node = client->dev.of_node;
46424
46425 if (node && !pdata) {
46426diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46427index 7612d89..70549c2 100644
46428--- a/drivers/mfd/tps65910.c
46429+++ b/drivers/mfd/tps65910.c
46430@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46431 struct tps65910_platform_data *pdata)
46432 {
46433 int ret = 0;
46434- static struct regmap_irq_chip *tps6591x_irqs_chip;
46435+ struct regmap_irq_chip *tps6591x_irqs_chip;
46436
46437 if (!irq) {
46438 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46439diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46440index 1b772ef..01e77d33 100644
46441--- a/drivers/mfd/twl4030-irq.c
46442+++ b/drivers/mfd/twl4030-irq.c
46443@@ -34,6 +34,7 @@
46444 #include <linux/of.h>
46445 #include <linux/irqdomain.h>
46446 #include <linux/i2c/twl.h>
46447+#include <asm/pgtable.h>
46448
46449 #include "twl-core.h"
46450
46451@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46452 * Install an irq handler for each of the SIH modules;
46453 * clone dummy irq_chip since PIH can't *do* anything
46454 */
46455- twl4030_irq_chip = dummy_irq_chip;
46456- twl4030_irq_chip.name = "twl4030";
46457+ pax_open_kernel();
46458+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46459+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46460
46461- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46462+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46463+ pax_close_kernel();
46464
46465 for (i = irq_base; i < irq_end; i++) {
46466 irq_set_chip_and_handler(i, &twl4030_irq_chip,
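
twl4030-irq.c shows the escape hatch that pairs with all this constification: when a read-only structure still needs one legitimate runtime store, pax_open_kernel()/pax_close_kernel() (helpers provided by this patch) open a brief kernel-write window (roughly, toggling write protection such as CR0.WP on x86), and the casts through void ** or const char ** strip the enforced qualification for exactly that assignment. The same idiom recurs in the mmc host drivers further down. Its core:

	pax_open_kernel();			/* write window opens */
	*(void **)&ro_ops.irq_ack = new_ack;	/* the one sanctioned store */
	pax_close_kernel();			/* window closes immediately */
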
46467diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46468index 464419b..64bae8d 100644
46469--- a/drivers/misc/c2port/core.c
46470+++ b/drivers/misc/c2port/core.c
46471@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46472 goto error_idr_alloc;
46473 c2dev->id = ret;
46474
46475- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46476+ pax_open_kernel();
46477+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46478+ pax_close_kernel();
46479
46480 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46481 "c2port%d", c2dev->id);
46482diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46483index 8385177..2f54635 100644
46484--- a/drivers/misc/eeprom/sunxi_sid.c
46485+++ b/drivers/misc/eeprom/sunxi_sid.c
46486@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46487
46488 platform_set_drvdata(pdev, sid_data);
46489
46490- sid_bin_attr.size = sid_data->keysize;
46491+ pax_open_kernel();
46492+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46493+ pax_close_kernel();
46494 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46495 return -ENODEV;
46496
46497diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46498index 36f5d52..32311c3 100644
46499--- a/drivers/misc/kgdbts.c
46500+++ b/drivers/misc/kgdbts.c
46501@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46502 char before[BREAK_INSTR_SIZE];
46503 char after[BREAK_INSTR_SIZE];
46504
46505- probe_kernel_read(before, (char *)kgdbts_break_test,
46506+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46507 BREAK_INSTR_SIZE);
46508 init_simple_test();
46509 ts.tst = plant_and_detach_test;
46510@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46511 /* Activate test with initial breakpoint */
46512 if (!is_early)
46513 kgdb_breakpoint();
46514- probe_kernel_read(after, (char *)kgdbts_break_test,
46515+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46516 BREAK_INSTR_SIZE);
46517 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46518 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
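
kgdbts reads kernel text back to verify that a breakpoint was planted and then removed. Under PaX KERNEXEC the address a symbol evaluates to and the mapping that is actually readable can differ, so the patch routes the address through ktla_ktva(), the PaX helper that converts between the two views (a no-op when KERNEXEC is off). Roughly:

	char before[BREAK_INSTR_SIZE];

	/* translate to the readable alias of the kernel text first */
	probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
			  BREAK_INSTR_SIZE);
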
46519diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46520index 3ef4627..8d00486 100644
46521--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46522+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46523@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46524 * the lid is closed. This leads to interrupts as soon as a little move
46525 * is done.
46526 */
46527- atomic_inc(&lis3->count);
46528+ atomic_inc_unchecked(&lis3->count);
46529
46530 wake_up_interruptible(&lis3->misc_wait);
46531 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46532@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46533 if (lis3->pm_dev)
46534 pm_runtime_get_sync(lis3->pm_dev);
46535
46536- atomic_set(&lis3->count, 0);
46537+ atomic_set_unchecked(&lis3->count, 0);
46538 return 0;
46539 }
46540
46541@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46542 add_wait_queue(&lis3->misc_wait, &wait);
46543 while (true) {
46544 set_current_state(TASK_INTERRUPTIBLE);
46545- data = atomic_xchg(&lis3->count, 0);
46546+ data = atomic_xchg_unchecked(&lis3->count, 0);
46547 if (data)
46548 break;
46549
46550@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46551 struct lis3lv02d, miscdev);
46552
46553 poll_wait(file, &lis3->misc_wait, wait);
46554- if (atomic_read(&lis3->count))
46555+ if (atomic_read_unchecked(&lis3->count))
46556 return POLLIN | POLLRDNORM;
46557 return 0;
46558 }
46559diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46560index c439c82..1f20f57 100644
46561--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46562+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46563@@ -297,7 +297,7 @@ struct lis3lv02d {
46564 struct input_polled_dev *idev; /* input device */
46565 struct platform_device *pdev; /* platform device */
46566 struct regulator_bulk_data regulators[2];
46567- atomic_t count; /* interrupt count after last read */
46568+ atomic_unchecked_t count; /* interrupt count after last read */
46569 union axis_conversion ac; /* hw -> logical axis */
46570 int mapped_btns[3];
46571
46572diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46573index 2f30bad..c4c13d0 100644
46574--- a/drivers/misc/sgi-gru/gruhandles.c
46575+++ b/drivers/misc/sgi-gru/gruhandles.c
46576@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46577 unsigned long nsec;
46578
46579 nsec = CLKS2NSEC(clks);
46580- atomic_long_inc(&mcs_op_statistics[op].count);
46581- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46582+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46583+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46584 if (mcs_op_statistics[op].max < nsec)
46585 mcs_op_statistics[op].max = nsec;
46586 }
46587diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46588index 4f76359..cdfcb2e 100644
46589--- a/drivers/misc/sgi-gru/gruprocfs.c
46590+++ b/drivers/misc/sgi-gru/gruprocfs.c
46591@@ -32,9 +32,9 @@
46592
46593 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46594
46595-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46596+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46597 {
46598- unsigned long val = atomic_long_read(v);
46599+ unsigned long val = atomic_long_read_unchecked(v);
46600
46601 seq_printf(s, "%16lu %s\n", val, id);
46602 }
46603@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46604
46605 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46606 for (op = 0; op < mcsop_last; op++) {
46607- count = atomic_long_read(&mcs_op_statistics[op].count);
46608- total = atomic_long_read(&mcs_op_statistics[op].total);
46609+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46610+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46611 max = mcs_op_statistics[op].max;
46612 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46613 count ? total / count : 0, max);
46614diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46615index 5c3ce24..4915ccb 100644
46616--- a/drivers/misc/sgi-gru/grutables.h
46617+++ b/drivers/misc/sgi-gru/grutables.h
46618@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46619 * GRU statistics.
46620 */
46621 struct gru_stats_s {
46622- atomic_long_t vdata_alloc;
46623- atomic_long_t vdata_free;
46624- atomic_long_t gts_alloc;
46625- atomic_long_t gts_free;
46626- atomic_long_t gms_alloc;
46627- atomic_long_t gms_free;
46628- atomic_long_t gts_double_allocate;
46629- atomic_long_t assign_context;
46630- atomic_long_t assign_context_failed;
46631- atomic_long_t free_context;
46632- atomic_long_t load_user_context;
46633- atomic_long_t load_kernel_context;
46634- atomic_long_t lock_kernel_context;
46635- atomic_long_t unlock_kernel_context;
46636- atomic_long_t steal_user_context;
46637- atomic_long_t steal_kernel_context;
46638- atomic_long_t steal_context_failed;
46639- atomic_long_t nopfn;
46640- atomic_long_t asid_new;
46641- atomic_long_t asid_next;
46642- atomic_long_t asid_wrap;
46643- atomic_long_t asid_reuse;
46644- atomic_long_t intr;
46645- atomic_long_t intr_cbr;
46646- atomic_long_t intr_tfh;
46647- atomic_long_t intr_spurious;
46648- atomic_long_t intr_mm_lock_failed;
46649- atomic_long_t call_os;
46650- atomic_long_t call_os_wait_queue;
46651- atomic_long_t user_flush_tlb;
46652- atomic_long_t user_unload_context;
46653- atomic_long_t user_exception;
46654- atomic_long_t set_context_option;
46655- atomic_long_t check_context_retarget_intr;
46656- atomic_long_t check_context_unload;
46657- atomic_long_t tlb_dropin;
46658- atomic_long_t tlb_preload_page;
46659- atomic_long_t tlb_dropin_fail_no_asid;
46660- atomic_long_t tlb_dropin_fail_upm;
46661- atomic_long_t tlb_dropin_fail_invalid;
46662- atomic_long_t tlb_dropin_fail_range_active;
46663- atomic_long_t tlb_dropin_fail_idle;
46664- atomic_long_t tlb_dropin_fail_fmm;
46665- atomic_long_t tlb_dropin_fail_no_exception;
46666- atomic_long_t tfh_stale_on_fault;
46667- atomic_long_t mmu_invalidate_range;
46668- atomic_long_t mmu_invalidate_page;
46669- atomic_long_t flush_tlb;
46670- atomic_long_t flush_tlb_gru;
46671- atomic_long_t flush_tlb_gru_tgh;
46672- atomic_long_t flush_tlb_gru_zero_asid;
46673+ atomic_long_unchecked_t vdata_alloc;
46674+ atomic_long_unchecked_t vdata_free;
46675+ atomic_long_unchecked_t gts_alloc;
46676+ atomic_long_unchecked_t gts_free;
46677+ atomic_long_unchecked_t gms_alloc;
46678+ atomic_long_unchecked_t gms_free;
46679+ atomic_long_unchecked_t gts_double_allocate;
46680+ atomic_long_unchecked_t assign_context;
46681+ atomic_long_unchecked_t assign_context_failed;
46682+ atomic_long_unchecked_t free_context;
46683+ atomic_long_unchecked_t load_user_context;
46684+ atomic_long_unchecked_t load_kernel_context;
46685+ atomic_long_unchecked_t lock_kernel_context;
46686+ atomic_long_unchecked_t unlock_kernel_context;
46687+ atomic_long_unchecked_t steal_user_context;
46688+ atomic_long_unchecked_t steal_kernel_context;
46689+ atomic_long_unchecked_t steal_context_failed;
46690+ atomic_long_unchecked_t nopfn;
46691+ atomic_long_unchecked_t asid_new;
46692+ atomic_long_unchecked_t asid_next;
46693+ atomic_long_unchecked_t asid_wrap;
46694+ atomic_long_unchecked_t asid_reuse;
46695+ atomic_long_unchecked_t intr;
46696+ atomic_long_unchecked_t intr_cbr;
46697+ atomic_long_unchecked_t intr_tfh;
46698+ atomic_long_unchecked_t intr_spurious;
46699+ atomic_long_unchecked_t intr_mm_lock_failed;
46700+ atomic_long_unchecked_t call_os;
46701+ atomic_long_unchecked_t call_os_wait_queue;
46702+ atomic_long_unchecked_t user_flush_tlb;
46703+ atomic_long_unchecked_t user_unload_context;
46704+ atomic_long_unchecked_t user_exception;
46705+ atomic_long_unchecked_t set_context_option;
46706+ atomic_long_unchecked_t check_context_retarget_intr;
46707+ atomic_long_unchecked_t check_context_unload;
46708+ atomic_long_unchecked_t tlb_dropin;
46709+ atomic_long_unchecked_t tlb_preload_page;
46710+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46711+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46712+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46713+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46714+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46715+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46716+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46717+ atomic_long_unchecked_t tfh_stale_on_fault;
46718+ atomic_long_unchecked_t mmu_invalidate_range;
46719+ atomic_long_unchecked_t mmu_invalidate_page;
46720+ atomic_long_unchecked_t flush_tlb;
46721+ atomic_long_unchecked_t flush_tlb_gru;
46722+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46723+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46724
46725- atomic_long_t copy_gpa;
46726- atomic_long_t read_gpa;
46727+ atomic_long_unchecked_t copy_gpa;
46728+ atomic_long_unchecked_t read_gpa;
46729
46730- atomic_long_t mesq_receive;
46731- atomic_long_t mesq_receive_none;
46732- atomic_long_t mesq_send;
46733- atomic_long_t mesq_send_failed;
46734- atomic_long_t mesq_noop;
46735- atomic_long_t mesq_send_unexpected_error;
46736- atomic_long_t mesq_send_lb_overflow;
46737- atomic_long_t mesq_send_qlimit_reached;
46738- atomic_long_t mesq_send_amo_nacked;
46739- atomic_long_t mesq_send_put_nacked;
46740- atomic_long_t mesq_page_overflow;
46741- atomic_long_t mesq_qf_locked;
46742- atomic_long_t mesq_qf_noop_not_full;
46743- atomic_long_t mesq_qf_switch_head_failed;
46744- atomic_long_t mesq_qf_unexpected_error;
46745- atomic_long_t mesq_noop_unexpected_error;
46746- atomic_long_t mesq_noop_lb_overflow;
46747- atomic_long_t mesq_noop_qlimit_reached;
46748- atomic_long_t mesq_noop_amo_nacked;
46749- atomic_long_t mesq_noop_put_nacked;
46750- atomic_long_t mesq_noop_page_overflow;
46751+ atomic_long_unchecked_t mesq_receive;
46752+ atomic_long_unchecked_t mesq_receive_none;
46753+ atomic_long_unchecked_t mesq_send;
46754+ atomic_long_unchecked_t mesq_send_failed;
46755+ atomic_long_unchecked_t mesq_noop;
46756+ atomic_long_unchecked_t mesq_send_unexpected_error;
46757+ atomic_long_unchecked_t mesq_send_lb_overflow;
46758+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46759+ atomic_long_unchecked_t mesq_send_amo_nacked;
46760+ atomic_long_unchecked_t mesq_send_put_nacked;
46761+ atomic_long_unchecked_t mesq_page_overflow;
46762+ atomic_long_unchecked_t mesq_qf_locked;
46763+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46764+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46765+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46766+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46767+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46768+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46769+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46770+ atomic_long_unchecked_t mesq_noop_put_nacked;
46771+ atomic_long_unchecked_t mesq_noop_page_overflow;
46772
46773 };
46774
46775@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46776 tghop_invalidate, mcsop_last};
46777
46778 struct mcs_op_statistic {
46779- atomic_long_t count;
46780- atomic_long_t total;
46781+ atomic_long_unchecked_t count;
46782+ atomic_long_unchecked_t total;
46783 unsigned long max;
46784 };
46785
46786@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46787
46788 #define STAT(id) do { \
46789 if (gru_options & OPT_STATS) \
46790- atomic_long_inc(&gru_stats.id); \
46791+ atomic_long_inc_unchecked(&gru_stats.id); \
46792 } while (0)
46793
46794 #ifdef CONFIG_SGI_GRU_DEBUG
46795diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46796index c862cd4..0d176fe 100644
46797--- a/drivers/misc/sgi-xp/xp.h
46798+++ b/drivers/misc/sgi-xp/xp.h
46799@@ -288,7 +288,7 @@ struct xpc_interface {
46800 xpc_notify_func, void *);
46801 void (*received) (short, int, void *);
46802 enum xp_retval (*partid_to_nasids) (short, void *);
46803-};
46804+} __no_const;
46805
46806 extern struct xpc_interface xpc_interface;
46807
46808diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46809index 01be66d..e3a0c7e 100644
46810--- a/drivers/misc/sgi-xp/xp_main.c
46811+++ b/drivers/misc/sgi-xp/xp_main.c
46812@@ -78,13 +78,13 @@ xpc_notloaded(void)
46813 }
46814
46815 struct xpc_interface xpc_interface = {
46816- (void (*)(int))xpc_notloaded,
46817- (void (*)(int))xpc_notloaded,
46818- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46819- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46820+ .connect = (void (*)(int))xpc_notloaded,
46821+ .disconnect = (void (*)(int))xpc_notloaded,
46822+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46823+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46824 void *))xpc_notloaded,
46825- (void (*)(short, int, void *))xpc_notloaded,
46826- (enum xp_retval(*)(short, void *))xpc_notloaded
46827+ .received = (void (*)(short, int, void *))xpc_notloaded,
46828+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46829 };
46830 EXPORT_SYMBOL_GPL(xpc_interface);
46831
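
The xpc_interface rewrite is a mechanical robustness fix: positional initialization of a function-pointer struct silently rebinds every member if a field is ever added or reordered, while designated initializers tie each stub to its slot by name and let the compiler reject a mismatch. In general:

	struct ops {
		void (*connect)(int ch);
		void (*disconnect)(int ch);
	};

	static void stub_connect(int ch) { }
	static void stub_disconnect(int ch) { }

	/* positional: breaks silently if struct ops changes shape */
	static struct ops a = { stub_connect, stub_disconnect };

	/* designated: each pointer is bound to its field by name */
	static struct ops b = {
		.connect    = stub_connect,
		.disconnect = stub_disconnect,
	};
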
46832diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46833index b94d5f7..7f494c5 100644
46834--- a/drivers/misc/sgi-xp/xpc.h
46835+++ b/drivers/misc/sgi-xp/xpc.h
46836@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46837 void (*received_payload) (struct xpc_channel *, void *);
46838 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46839 };
46840+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46841
46842 /* struct xpc_partition act_state values (for XPC HB) */
46843
46844@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46845 /* found in xpc_main.c */
46846 extern struct device *xpc_part;
46847 extern struct device *xpc_chan;
46848-extern struct xpc_arch_operations xpc_arch_ops;
46849+extern xpc_arch_operations_no_const xpc_arch_ops;
46850 extern int xpc_disengage_timelimit;
46851 extern int xpc_disengage_timedout;
46852 extern int xpc_activate_IRQ_rcvd;
46853diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46854index 82dc574..8539ab2 100644
46855--- a/drivers/misc/sgi-xp/xpc_main.c
46856+++ b/drivers/misc/sgi-xp/xpc_main.c
46857@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46858 .notifier_call = xpc_system_die,
46859 };
46860
46861-struct xpc_arch_operations xpc_arch_ops;
46862+xpc_arch_operations_no_const xpc_arch_ops;
46863
46864 /*
46865 * Timer function to enforce the timelimit on the partition disengage.
46866@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46867
46868 if (((die_args->trapnr == X86_TRAP_MF) ||
46869 (die_args->trapnr == X86_TRAP_XF)) &&
46870- !user_mode_vm(die_args->regs))
46871+ !user_mode(die_args->regs))
46872 xpc_die_deactivate();
46873
46874 break;
46875diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46876index c69afb5..5c2d0f5 100644
46877--- a/drivers/mmc/card/block.c
46878+++ b/drivers/mmc/card/block.c
46879@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46880 if (idata->ic.postsleep_min_us)
46881 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46882
46883- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46884+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46885 err = -EFAULT;
46886 goto cmd_rel_host;
46887 }
46888diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46889index 18c4afe..43be71e 100644
46890--- a/drivers/mmc/host/dw_mmc.h
46891+++ b/drivers/mmc/host/dw_mmc.h
46892@@ -271,5 +271,5 @@ struct dw_mci_drv_data {
46893 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
46894 int (*parse_dt)(struct dw_mci *host);
46895 int (*execute_tuning)(struct dw_mci_slot *slot);
46896-};
46897+} __do_const;
46898 #endif /* _DW_MMC_H_ */
46899diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46900index 7fe1619..ae0781b 100644
46901--- a/drivers/mmc/host/mmci.c
46902+++ b/drivers/mmc/host/mmci.c
46903@@ -1630,7 +1630,9 @@ static int mmci_probe(struct amba_device *dev,
46904 mmc->caps |= MMC_CAP_CMD23;
46905
46906 if (variant->busy_detect) {
46907- mmci_ops.card_busy = mmci_card_busy;
46908+ pax_open_kernel();
46909+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46910+ pax_close_kernel();
46911 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46912 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46913 mmc->max_busy_timeout = 0;
46914diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46915index f84cfb0..aebe5d6 100644
46916--- a/drivers/mmc/host/omap_hsmmc.c
46917+++ b/drivers/mmc/host/omap_hsmmc.c
46918@@ -2054,7 +2054,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46919
46920 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46921 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46922- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46923+ pax_open_kernel();
46924+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46925+ pax_close_kernel();
46926 }
46927
46928 pm_runtime_enable(host->dev);
46929diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46930index 10ef824..88461a2 100644
46931--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46932+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46933@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46934 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46935 }
46936
46937- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46938- sdhci_esdhc_ops.platform_execute_tuning =
46939+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46940+ pax_open_kernel();
46941+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46942 esdhc_executing_tuning;
46943+ pax_close_kernel();
46944+ }
46945
46946 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46947 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46948diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46949index c6d2dd7..81b1ca3 100644
46950--- a/drivers/mmc/host/sdhci-s3c.c
46951+++ b/drivers/mmc/host/sdhci-s3c.c
46952@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46953 * we can use overriding functions instead of default.
46954 */
46955 if (sc->no_divider) {
46956- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46957- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46958- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46959+ pax_open_kernel();
46960+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46961+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46962+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46963+ pax_close_kernel();
46964 }
46965
46966 /* It supports additional host capabilities if needed */
46967diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46968index 423666b..81ff5eb 100644
46969--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46970+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46971@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46972 size_t totlen = 0, thislen;
46973 int ret = 0;
46974 size_t buflen = 0;
46975- static char *buffer;
46976+ char *buffer;
46977
46978 if (!ECCBUF_SIZE) {
46979 /* We should fall back to a general writev implementation.
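
The cfi_staa_writev() hunk above removes a static storage class: a static local pointer is a single slot shared by every concurrent caller of the function, so two writers could hand each other's buffer around or free it out from under one another. An automatic variable is private to each invocation. A hedged sketch of the hazard, not driver code:

    /* before: "static char *buffer;" gave all concurrent callers one slot */
    static int writev_path(size_t len)
    {
            char *buffer = kmalloc(len, GFP_KERNEL);  /* after: per call */

            if (!buffer)
                    return -ENOMEM;
            /* ... gather, pad to ECCBUF_SIZE, flush ... */
            kfree(buffer);
            return 0;
    }
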
46980diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46981index f44c606..aa4e804 100644
46982--- a/drivers/mtd/nand/denali.c
46983+++ b/drivers/mtd/nand/denali.c
46984@@ -24,6 +24,7 @@
46985 #include <linux/slab.h>
46986 #include <linux/mtd/mtd.h>
46987 #include <linux/module.h>
46988+#include <linux/slab.h>
46989
46990 #include "denali.h"
46991
46992diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46993index 33f3c3c..d6bbe6a 100644
46994--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46995+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46996@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46997
46998 /* first try to map the upper buffer directly */
46999 if (virt_addr_valid(this->upper_buf) &&
47000- !object_is_on_stack(this->upper_buf)) {
47001+ !object_starts_on_stack(this->upper_buf)) {
47002 sg_init_one(sgl, this->upper_buf, this->upper_len);
47003 ret = dma_map_sg(this->dev, sgl, 1, dr);
47004 if (ret == 0)
47005diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47006index a5dfbfb..8042ab4 100644
47007--- a/drivers/mtd/nftlmount.c
47008+++ b/drivers/mtd/nftlmount.c
47009@@ -24,6 +24,7 @@
47010 #include <asm/errno.h>
47011 #include <linux/delay.h>
47012 #include <linux/slab.h>
47013+#include <linux/sched.h>
47014 #include <linux/mtd/mtd.h>
47015 #include <linux/mtd/nand.h>
47016 #include <linux/mtd/nftl.h>
47017diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47018index c23184a..4115c41 100644
47019--- a/drivers/mtd/sm_ftl.c
47020+++ b/drivers/mtd/sm_ftl.c
47021@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47022 #define SM_CIS_VENDOR_OFFSET 0x59
47023 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47024 {
47025- struct attribute_group *attr_group;
47026+ attribute_group_no_const *attr_group;
47027 struct attribute **attributes;
47028 struct sm_sysfs_attribute *vendor_attribute;
47029 char *vendor;
47030diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47031index 7b11243..b3278a3 100644
47032--- a/drivers/net/bonding/bond_netlink.c
47033+++ b/drivers/net/bonding/bond_netlink.c
47034@@ -585,7 +585,7 @@ nla_put_failure:
47035 return -EMSGSIZE;
47036 }
47037
47038-struct rtnl_link_ops bond_link_ops __read_mostly = {
47039+struct rtnl_link_ops bond_link_ops = {
47040 .kind = "bond",
47041 .priv_size = sizeof(struct bonding),
47042 .setup = bond_setup,
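
The bond_netlink hunk above is the first of a long series in this patch (caif_hsi, can, vcan, dummy, ifb, macvlan, macvtap, nlmon, team and tun below) dropping __read_mostly from rtnl_link_ops and notifier blocks. The inferred rationale: under grsecurity the __read_mostly section is made genuinely read-only, and rtnl_link_register() must write into the structure when it links it onto the global list of link ops. Sketch of the offending write, based on the rtnetlink code of this era (an assumption, since that file is outside this excerpt):

    int __rtnl_link_register(struct rtnl_link_ops *ops)
    {
            /* ... */
            list_add_tail(&ops->list, &link_ops);   /* writes into *ops */
            return 0;
    }
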
47043diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
47044index b3b922a..80bba38 100644
47045--- a/drivers/net/caif/caif_hsi.c
47046+++ b/drivers/net/caif/caif_hsi.c
47047@@ -1444,7 +1444,7 @@ err:
47048 return -ENODEV;
47049 }
47050
47051-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
47052+static struct rtnl_link_ops caif_hsi_link_ops = {
47053 .kind = "cfhsi",
47054 .priv_size = sizeof(struct cfhsi),
47055 .setup = cfhsi_setup,
47056diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47057index 58808f65..0bdc7b3 100644
47058--- a/drivers/net/can/Kconfig
47059+++ b/drivers/net/can/Kconfig
47060@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47061
47062 config CAN_FLEXCAN
47063 tristate "Support for Freescale FLEXCAN based chips"
47064- depends on ARM || PPC
47065+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47066 ---help---
47067 Say Y here if you want to support for Freescale FlexCAN.
47068
47069diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
47070index b0f6924..59e9640 100644
47071--- a/drivers/net/can/dev.c
47072+++ b/drivers/net/can/dev.c
47073@@ -959,7 +959,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
47074 return -EOPNOTSUPP;
47075 }
47076
47077-static struct rtnl_link_ops can_link_ops __read_mostly = {
47078+static struct rtnl_link_ops can_link_ops = {
47079 .kind = "can",
47080 .maxtype = IFLA_CAN_MAX,
47081 .policy = can_policy,
47082diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
47083index 674f367..ec3a31f 100644
47084--- a/drivers/net/can/vcan.c
47085+++ b/drivers/net/can/vcan.c
47086@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
47087 dev->destructor = free_netdev;
47088 }
47089
47090-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
47091+static struct rtnl_link_ops vcan_link_ops = {
47092 .kind = "vcan",
47093 .setup = vcan_setup,
47094 };
47095diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
47096index 49adbf1..fff7ff8 100644
47097--- a/drivers/net/dummy.c
47098+++ b/drivers/net/dummy.c
47099@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47100 return 0;
47101 }
47102
47103-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47104+static struct rtnl_link_ops dummy_link_ops = {
47105 .kind = DRV_NAME,
47106 .setup = dummy_setup,
47107 .validate = dummy_validate,
47108diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47109index 0443654..4f0aa18 100644
47110--- a/drivers/net/ethernet/8390/ax88796.c
47111+++ b/drivers/net/ethernet/8390/ax88796.c
47112@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47113 if (ax->plat->reg_offsets)
47114 ei_local->reg_offset = ax->plat->reg_offsets;
47115 else {
47116+ resource_size_t _mem_size = mem_size;
47117+ do_div(_mem_size, 0x18);
47118 ei_local->reg_offset = ax->reg_offsets;
47119 for (ret = 0; ret < 0x18; ret++)
47120- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47121+ ax->reg_offsets[ret] = _mem_size * ret;
47122 }
47123
47124 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
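
The ax88796 hunk replaces a plain division of mem_size with do_div(). resource_size_t may be 64 bits even on 32-bit platforms (CONFIG_PHYS_ADDR_T_64BIT), where a 64-bit '/' would emit a call to a libgcc helper such as __udivdi3 that the kernel does not provide; do_div() divides the 64-bit value in place and returns the remainder. Fragment sketch mirroring the hunk (i and reg_offsets stand in for the driver's locals):

    resource_size_t sz = mem_size;  /* possibly a 64-bit type */
    u32 rem;
    int i;

    rem = do_div(sz, 0x18);         /* sz becomes mem_size / 0x18 */
    for (i = 0; i < 0x18; i++)
            reg_offsets[i] = sz * i;
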
47125diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47126index 6725dc0..163549c 100644
47127--- a/drivers/net/ethernet/altera/altera_tse_main.c
47128+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47129@@ -1216,7 +1216,7 @@ static int tse_shutdown(struct net_device *dev)
47130 return 0;
47131 }
47132
47133-static struct net_device_ops altera_tse_netdev_ops = {
47134+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47135 .ndo_open = tse_open,
47136 .ndo_stop = tse_shutdown,
47137 .ndo_start_xmit = tse_start_xmit,
47138@@ -1453,11 +1453,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47139 ndev->netdev_ops = &altera_tse_netdev_ops;
47140 altera_tse_set_ethtool_ops(ndev);
47141
47142+ pax_open_kernel();
47143 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47144
47145 if (priv->hash_filter)
47146 altera_tse_netdev_ops.ndo_set_rx_mode =
47147 tse_set_rx_mode_hashfilter;
47148+ pax_close_kernel();
47149
47150 /* Scatter/gather IO is not supported,
47151 * so it is turned off
47152diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47153index 29a0927..5a348e24 100644
47154--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47155+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47156@@ -1122,14 +1122,14 @@ do { \
47157 * operations, everything works on mask values.
47158 */
47159 #define XMDIO_READ(_pdata, _mmd, _reg) \
47160- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47161+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47162 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47163
47164 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47165 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47166
47167 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47168- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47169+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47170 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47171
47172 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47173diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47174index 8a50b01..39c1ad0 100644
47175--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47176+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47177@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47178
47179 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47180
47181- pdata->hw_if.config_dcb_tc(pdata);
47182+ pdata->hw_if->config_dcb_tc(pdata);
47183
47184 return 0;
47185 }
47186@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47187
47188 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47189
47190- pdata->hw_if.config_dcb_pfc(pdata);
47191+ pdata->hw_if->config_dcb_pfc(pdata);
47192
47193 return 0;
47194 }
47195diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47196index d81fc6b..6f8ab25 100644
47197--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47198+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47199@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47200
47201 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47202 {
47203- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47204+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47205 struct xgbe_channel *channel;
47206 struct xgbe_ring *ring;
47207 struct xgbe_ring_data *rdata;
47208@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47209
47210 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47211 {
47212- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47213+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47214 struct xgbe_channel *channel;
47215 struct xgbe_ring *ring;
47216 struct xgbe_ring_desc *rdesc;
47217@@ -620,17 +620,12 @@ err_out:
47218 return 0;
47219 }
47220
47221-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47222-{
47223- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47224-
47225- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47226- desc_if->free_ring_resources = xgbe_free_ring_resources;
47227- desc_if->map_tx_skb = xgbe_map_tx_skb;
47228- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
47229- desc_if->unmap_rdata = xgbe_unmap_rdata;
47230- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47231- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47232-
47233- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47234-}
47235+const struct xgbe_desc_if default_xgbe_desc_if = {
47236+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47237+ .free_ring_resources = xgbe_free_ring_resources,
47238+ .map_tx_skb = xgbe_map_tx_skb,
47239+ .map_rx_buffer = xgbe_map_rx_buffer,
47240+ .unmap_rdata = xgbe_unmap_rdata,
47241+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47242+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47243+};
47244diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47245index 400757b..d8c53f6 100644
47246--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47247+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47248@@ -2748,7 +2748,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47249
47250 static int xgbe_init(struct xgbe_prv_data *pdata)
47251 {
47252- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47253+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47254 int ret;
47255
47256 DBGPR("-->xgbe_init\n");
47257@@ -2813,108 +2813,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47258 return 0;
47259 }
47260
47261-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47262-{
47263- DBGPR("-->xgbe_init_function_ptrs\n");
47264-
47265- hw_if->tx_complete = xgbe_tx_complete;
47266-
47267- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47268- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47269- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47270- hw_if->set_mac_address = xgbe_set_mac_address;
47271-
47272- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47273- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47274-
47275- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47276- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47277- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47278- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47279- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47280-
47281- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47282- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47283-
47284- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47285- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47286- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47287-
47288- hw_if->enable_tx = xgbe_enable_tx;
47289- hw_if->disable_tx = xgbe_disable_tx;
47290- hw_if->enable_rx = xgbe_enable_rx;
47291- hw_if->disable_rx = xgbe_disable_rx;
47292-
47293- hw_if->powerup_tx = xgbe_powerup_tx;
47294- hw_if->powerdown_tx = xgbe_powerdown_tx;
47295- hw_if->powerup_rx = xgbe_powerup_rx;
47296- hw_if->powerdown_rx = xgbe_powerdown_rx;
47297-
47298- hw_if->dev_xmit = xgbe_dev_xmit;
47299- hw_if->dev_read = xgbe_dev_read;
47300- hw_if->enable_int = xgbe_enable_int;
47301- hw_if->disable_int = xgbe_disable_int;
47302- hw_if->init = xgbe_init;
47303- hw_if->exit = xgbe_exit;
47304+const struct xgbe_hw_if default_xgbe_hw_if = {
47305+ .tx_complete = xgbe_tx_complete,
47306+
47307+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47308+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47309+ .add_mac_addresses = xgbe_add_mac_addresses,
47310+ .set_mac_address = xgbe_set_mac_address,
47311+
47312+ .enable_rx_csum = xgbe_enable_rx_csum,
47313+ .disable_rx_csum = xgbe_disable_rx_csum,
47314+
47315+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47316+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47317+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47318+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47319+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47320+
47321+ .read_mmd_regs = xgbe_read_mmd_regs,
47322+ .write_mmd_regs = xgbe_write_mmd_regs,
47323+
47324+ .set_gmii_speed = xgbe_set_gmii_speed,
47325+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47326+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47327+
47328+ .enable_tx = xgbe_enable_tx,
47329+ .disable_tx = xgbe_disable_tx,
47330+ .enable_rx = xgbe_enable_rx,
47331+ .disable_rx = xgbe_disable_rx,
47332+
47333+ .powerup_tx = xgbe_powerup_tx,
47334+ .powerdown_tx = xgbe_powerdown_tx,
47335+ .powerup_rx = xgbe_powerup_rx,
47336+ .powerdown_rx = xgbe_powerdown_rx,
47337+
47338+ .dev_xmit = xgbe_dev_xmit,
47339+ .dev_read = xgbe_dev_read,
47340+ .enable_int = xgbe_enable_int,
47341+ .disable_int = xgbe_disable_int,
47342+ .init = xgbe_init,
47343+ .exit = xgbe_exit,
47344
47345 /* Descriptor related Sequences have to be initialized here */
47346- hw_if->tx_desc_init = xgbe_tx_desc_init;
47347- hw_if->rx_desc_init = xgbe_rx_desc_init;
47348- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47349- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47350- hw_if->is_last_desc = xgbe_is_last_desc;
47351- hw_if->is_context_desc = xgbe_is_context_desc;
47352- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47353+ .tx_desc_init = xgbe_tx_desc_init,
47354+ .rx_desc_init = xgbe_rx_desc_init,
47355+ .tx_desc_reset = xgbe_tx_desc_reset,
47356+ .rx_desc_reset = xgbe_rx_desc_reset,
47357+ .is_last_desc = xgbe_is_last_desc,
47358+ .is_context_desc = xgbe_is_context_desc,
47359+ .tx_start_xmit = xgbe_tx_start_xmit,
47360
47361 /* For FLOW ctrl */
47362- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47363- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47364+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47365+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47366
47367 /* For RX coalescing */
47368- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47369- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47370- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47371- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47372+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47373+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47374+ .usec_to_riwt = xgbe_usec_to_riwt,
47375+ .riwt_to_usec = xgbe_riwt_to_usec,
47376
47377 /* For RX and TX threshold config */
47378- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47379- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47380+ .config_rx_threshold = xgbe_config_rx_threshold,
47381+ .config_tx_threshold = xgbe_config_tx_threshold,
47382
47383 /* For RX and TX Store and Forward Mode config */
47384- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47385- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47386+ .config_rsf_mode = xgbe_config_rsf_mode,
47387+ .config_tsf_mode = xgbe_config_tsf_mode,
47388
47389 /* For TX DMA Operating on Second Frame config */
47390- hw_if->config_osp_mode = xgbe_config_osp_mode;
47391+ .config_osp_mode = xgbe_config_osp_mode,
47392
47393 /* For RX and TX PBL config */
47394- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47395- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47396- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47397- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47398- hw_if->config_pblx8 = xgbe_config_pblx8;
47399+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47400+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47401+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47402+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47403+ .config_pblx8 = xgbe_config_pblx8,
47404
47405 /* For MMC statistics support */
47406- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47407- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47408- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47409+ .tx_mmc_int = xgbe_tx_mmc_int,
47410+ .rx_mmc_int = xgbe_rx_mmc_int,
47411+ .read_mmc_stats = xgbe_read_mmc_stats,
47412
47413 /* For PTP config */
47414- hw_if->config_tstamp = xgbe_config_tstamp;
47415- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47416- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47417- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47418- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47419+ .config_tstamp = xgbe_config_tstamp,
47420+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47421+ .set_tstamp_time = xgbe_set_tstamp_time,
47422+ .get_tstamp_time = xgbe_get_tstamp_time,
47423+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47424
47425 /* For Data Center Bridging config */
47426- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47427- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47428+ .config_dcb_tc = xgbe_config_dcb_tc,
47429+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47430
47431 /* For Receive Side Scaling */
47432- hw_if->enable_rss = xgbe_enable_rss;
47433- hw_if->disable_rss = xgbe_disable_rss;
47434- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47435- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47436-
47437- DBGPR("<--xgbe_init_function_ptrs\n");
47438-}
47439+ .enable_rss = xgbe_enable_rss,
47440+ .disable_rss = xgbe_disable_rss,
47441+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47442+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47443+};
47444diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47445index 885b02b..4b31a4c 100644
47446--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47447+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47448@@ -244,7 +244,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47449 * support, tell it now
47450 */
47451 if (ring->tx.xmit_more)
47452- pdata->hw_if.tx_start_xmit(channel, ring);
47453+ pdata->hw_if->tx_start_xmit(channel, ring);
47454
47455 return NETDEV_TX_BUSY;
47456 }
47457@@ -272,7 +272,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47458
47459 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47460 {
47461- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47462+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47463 struct xgbe_channel *channel;
47464 enum xgbe_int int_id;
47465 unsigned int i;
47466@@ -294,7 +294,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47467
47468 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47469 {
47470- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47471+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47472 struct xgbe_channel *channel;
47473 enum xgbe_int int_id;
47474 unsigned int i;
47475@@ -317,7 +317,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47476 static irqreturn_t xgbe_isr(int irq, void *data)
47477 {
47478 struct xgbe_prv_data *pdata = data;
47479- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47480+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47481 struct xgbe_channel *channel;
47482 unsigned int dma_isr, dma_ch_isr;
47483 unsigned int mac_isr, mac_tssr;
47484@@ -673,7 +673,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
47485
47486 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47487 {
47488- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47489+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47490
47491 DBGPR("-->xgbe_init_tx_coalesce\n");
47492
47493@@ -687,7 +687,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47494
47495 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47496 {
47497- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47498+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47499
47500 DBGPR("-->xgbe_init_rx_coalesce\n");
47501
47502@@ -701,7 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47503
47504 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47505 {
47506- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47507+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47508 struct xgbe_channel *channel;
47509 struct xgbe_ring *ring;
47510 struct xgbe_ring_data *rdata;
47511@@ -726,7 +726,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47512
47513 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47514 {
47515- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47516+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47517 struct xgbe_channel *channel;
47518 struct xgbe_ring *ring;
47519 struct xgbe_ring_data *rdata;
47520@@ -752,7 +752,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47521 static void xgbe_adjust_link(struct net_device *netdev)
47522 {
47523 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47524- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47525+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47526 struct phy_device *phydev = pdata->phydev;
47527 int new_state = 0;
47528
47529@@ -860,7 +860,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47530 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47531 {
47532 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47533- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47534+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47535 unsigned long flags;
47536
47537 DBGPR("-->xgbe_powerdown\n");
47538@@ -898,7 +898,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47539 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47540 {
47541 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47542- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47543+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47544 unsigned long flags;
47545
47546 DBGPR("-->xgbe_powerup\n");
47547@@ -935,7 +935,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47548
47549 static int xgbe_start(struct xgbe_prv_data *pdata)
47550 {
47551- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47552+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47553 struct net_device *netdev = pdata->netdev;
47554 int ret;
47555
47556@@ -976,7 +976,7 @@ err_napi:
47557
47558 static void xgbe_stop(struct xgbe_prv_data *pdata)
47559 {
47560- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47561+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47562 struct xgbe_channel *channel;
47563 struct net_device *netdev = pdata->netdev;
47564 struct netdev_queue *txq;
47565@@ -1203,7 +1203,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47566 return -ERANGE;
47567 }
47568
47569- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47570+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47571
47572 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47573
47574@@ -1352,7 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47575 static int xgbe_open(struct net_device *netdev)
47576 {
47577 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47578- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47579+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47580 int ret;
47581
47582 DBGPR("-->xgbe_open\n");
47583@@ -1424,7 +1424,7 @@ err_phy_init:
47584 static int xgbe_close(struct net_device *netdev)
47585 {
47586 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47587- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47588+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47589
47590 DBGPR("-->xgbe_close\n");
47591
47592@@ -1452,8 +1452,8 @@ static int xgbe_close(struct net_device *netdev)
47593 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47594 {
47595 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47596- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47597- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47598+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47599+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47600 struct xgbe_channel *channel;
47601 struct xgbe_ring *ring;
47602 struct xgbe_packet_data *packet;
47603@@ -1521,7 +1521,7 @@ tx_netdev_return:
47604 static void xgbe_set_rx_mode(struct net_device *netdev)
47605 {
47606 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47607- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47608+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47609 unsigned int pr_mode, am_mode;
47610
47611 DBGPR("-->xgbe_set_rx_mode\n");
47612@@ -1540,7 +1540,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47613 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47614 {
47615 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47616- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47617+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47618 struct sockaddr *saddr = addr;
47619
47620 DBGPR("-->xgbe_set_mac_address\n");
47621@@ -1607,7 +1607,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47622
47623 DBGPR("-->%s\n", __func__);
47624
47625- pdata->hw_if.read_mmc_stats(pdata);
47626+ pdata->hw_if->read_mmc_stats(pdata);
47627
47628 s->rx_packets = pstats->rxframecount_gb;
47629 s->rx_bytes = pstats->rxoctetcount_gb;
47630@@ -1634,7 +1634,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47631 u16 vid)
47632 {
47633 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47634- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47635+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47636
47637 DBGPR("-->%s\n", __func__);
47638
47639@@ -1650,7 +1650,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47640 u16 vid)
47641 {
47642 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47643- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47644+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47645
47646 DBGPR("-->%s\n", __func__);
47647
47648@@ -1716,7 +1716,7 @@ static int xgbe_set_features(struct net_device *netdev,
47649 netdev_features_t features)
47650 {
47651 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47652- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47653+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47654 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47655 int ret = 0;
47656
47657@@ -1781,8 +1781,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47658 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47659 {
47660 struct xgbe_prv_data *pdata = channel->pdata;
47661- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47662- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47663+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47664+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47665 struct xgbe_ring *ring = channel->rx_ring;
47666 struct xgbe_ring_data *rdata;
47667
47668@@ -1835,8 +1835,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47669 static int xgbe_tx_poll(struct xgbe_channel *channel)
47670 {
47671 struct xgbe_prv_data *pdata = channel->pdata;
47672- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47673- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47674+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47675+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47676 struct xgbe_ring *ring = channel->tx_ring;
47677 struct xgbe_ring_data *rdata;
47678 struct xgbe_ring_desc *rdesc;
47679@@ -1901,7 +1901,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
47680 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47681 {
47682 struct xgbe_prv_data *pdata = channel->pdata;
47683- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47684+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47685 struct xgbe_ring *ring = channel->rx_ring;
47686 struct xgbe_ring_data *rdata;
47687 struct xgbe_packet_data *packet;
47688diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47689index ebf4893..a8f51c6 100644
47690--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47691+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47692@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47693
47694 DBGPR("-->%s\n", __func__);
47695
47696- pdata->hw_if.read_mmc_stats(pdata);
47697+ pdata->hw_if->read_mmc_stats(pdata);
47698 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47699 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47700 *data++ = *(u64 *)stat;
47701@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47702 struct ethtool_coalesce *ec)
47703 {
47704 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47705- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47706+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47707 unsigned int riwt;
47708
47709 DBGPR("-->xgbe_get_coalesce\n");
47710@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47711 struct ethtool_coalesce *ec)
47712 {
47713 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47714- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47715+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47716 unsigned int rx_frames, rx_riwt, rx_usecs;
47717 unsigned int tx_frames, tx_usecs;
47718
47719@@ -536,7 +536,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
47720 const u8 *key, const u8 hfunc)
47721 {
47722 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47723- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47724+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47725 unsigned int ret;
47726
47727 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
47728diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47729index 32dd651..225cca3 100644
47730--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47731+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47732@@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47733 DBGPR("<--xgbe_default_config\n");
47734 }
47735
47736-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47737-{
47738- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47739- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47740-}
47741-
47742 #ifdef CONFIG_ACPI
47743 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
47744 {
47745@@ -396,9 +390,8 @@ static int xgbe_probe(struct platform_device *pdev)
47746 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
47747
47748 /* Set all the function pointers */
47749- xgbe_init_all_fptrs(pdata);
47750- hw_if = &pdata->hw_if;
47751- desc_if = &pdata->desc_if;
47752+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47753+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47754
47755 /* Issue software reset to device */
47756 hw_if->exit(pdata);
47757diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47758index 59e267f..0842a88 100644
47759--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47760+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47761@@ -126,7 +126,7 @@
47762 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47763 {
47764 struct xgbe_prv_data *pdata = mii->priv;
47765- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47766+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47767 int mmd_data;
47768
47769 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47770@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47771 u16 mmd_val)
47772 {
47773 struct xgbe_prv_data *pdata = mii->priv;
47774- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47775+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47776 int mmd_data = mmd_val;
47777
47778 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47779diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47780index f326178..8bd7daf 100644
47781--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47782+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47783@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47784 tstamp_cc);
47785 u64 nsec;
47786
47787- nsec = pdata->hw_if.get_tstamp_time(pdata);
47788+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47789
47790 return nsec;
47791 }
47792@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47793
47794 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47795
47796- pdata->hw_if.update_tstamp_addend(pdata, addend);
47797+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47798
47799 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47800
47801diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47802index 13e8f95..1d8beef 100644
47803--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47804+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47805@@ -675,8 +675,8 @@ struct xgbe_prv_data {
47806 int dev_irq;
47807 unsigned int per_channel_irq;
47808
47809- struct xgbe_hw_if hw_if;
47810- struct xgbe_desc_if desc_if;
47811+ struct xgbe_hw_if *hw_if;
47812+ struct xgbe_desc_if *desc_if;
47813
47814 /* AXI DMA settings */
47815 unsigned int coherent;
47816@@ -798,6 +798,9 @@ struct xgbe_prv_data {
47817 #endif
47818 };
47819
47820+extern const struct xgbe_hw_if default_xgbe_hw_if;
47821+extern const struct xgbe_desc_if default_xgbe_desc_if;
47822+
47823 /* Function prototypes*/
47824
47825 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
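
Taken together, the xgbe hunks above convert the two per-device function-pointer tables (hw_if, desc_if) from structures embedded in xgbe_prv_data and filled in at probe time into const objects built with designated initializers, with the private data keeping only a pointer; every call site changes from '.' to '->' accordingly. A statically initialized const table can live in read-only memory, so a kernel write primitive can no longer redirect its pointers. The refactor, reduced to a sketch with hypothetical names:

    struct hw_if {
            int (*init)(void *pdata);
    };

    static int real_init(void *pdata) { return 0; }

    const struct hw_if default_hw_if = {    /* compile time, read-only */
            .init = real_init,
    };

    struct prv_data {
            const struct hw_if *hw_if;      /* was: struct hw_if hw_if; */
    };

    static void probe(struct prv_data *pdata)
    {
            pdata->hw_if = &default_hw_if;  /* was: init_function_ptrs(&pdata->hw_if) */
    }
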
47826diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47827index adcacda..fa6e0ae 100644
47828--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47829+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47830@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47831 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47832 {
47833 /* RX_MODE controlling object */
47834- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47835+ bnx2x_init_rx_mode_obj(bp);
47836
47837 /* multicast configuration controlling object */
47838 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47839diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47840index 07cdf9b..b08ecc7 100644
47841--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47842+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47843@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47844 return rc;
47845 }
47846
47847-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47848- struct bnx2x_rx_mode_obj *o)
47849+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47850 {
47851 if (CHIP_IS_E1x(bp)) {
47852- o->wait_comp = bnx2x_empty_rx_mode_wait;
47853- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47854+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47855+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47856 } else {
47857- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47858- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47859+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47860+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47861 }
47862 }
47863
47864diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47865index 86baecb..ff3bb46 100644
47866--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47867+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47868@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47869
47870 /********************* RX MODE ****************/
47871
47872-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47873- struct bnx2x_rx_mode_obj *o);
47874+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47875
47876 /**
47877 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
47878diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47879index 31c9f82..e65e986 100644
47880--- a/drivers/net/ethernet/broadcom/tg3.h
47881+++ b/drivers/net/ethernet/broadcom/tg3.h
47882@@ -150,6 +150,7 @@
47883 #define CHIPREV_ID_5750_A0 0x4000
47884 #define CHIPREV_ID_5750_A1 0x4001
47885 #define CHIPREV_ID_5750_A3 0x4003
47886+#define CHIPREV_ID_5750_C1 0x4201
47887 #define CHIPREV_ID_5750_C2 0x4202
47888 #define CHIPREV_ID_5752_A0_HW 0x5000
47889 #define CHIPREV_ID_5752_A0 0x6000
47890diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47891index 903466e..b285864 100644
47892--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47893+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47894@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47895 }
47896
47897 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47898- bna_cb_ioceth_enable,
47899- bna_cb_ioceth_disable,
47900- bna_cb_ioceth_hbfail,
47901- bna_cb_ioceth_reset
47902+ .enable_cbfn = bna_cb_ioceth_enable,
47903+ .disable_cbfn = bna_cb_ioceth_disable,
47904+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47905+ .reset_cbfn = bna_cb_ioceth_reset
47906 };
47907
47908 static void bna_attr_init(struct bna_ioceth *ioceth)
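
The bna_ioceth_cbfn hunk swaps positional initializers for designated ones. Designated initializers bind by member name, so the table stays correct if struct bfa_ioc_cbfn is ever reordered or grows a member, and the pairing becomes auditable at a glance. Hypothetical illustration:

    struct cbfn {
            void (*enable_cbfn)(void *);
            void (*disable_cbfn)(void *);
    };

    static void on_enable(void *p)  { (void)p; }
    static void on_disable(void *p) { (void)p; }

    /* positional: silently miswired if the members are ever swapped */
    static struct cbfn a = { on_enable, on_disable };

    /* designated: bound by name, order-independent */
    static struct cbfn b = { .disable_cbfn = on_disable,
                             .enable_cbfn  = on_enable };
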
47909diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47910index 8cffcdf..aadf043 100644
47911--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47912+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47913@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47914 */
47915 struct l2t_skb_cb {
47916 arp_failure_handler_func arp_failure_handler;
47917-};
47918+} __no_const;
47919
47920 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47921
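
__no_const here is the opposite annotation to the __do_const seen in the MMC hunks: it exempts a function-pointer structure from the constify plugin because its pointers genuinely must change at runtime, in this case an arp failure handler stashed per-skb in the control block (the r8169 hunk below uses it the same way for per-chip mdio/pll/jumbo/csi ops). As an assumption about the supporting definitions, which live in the patched compiler.h outside this excerpt, the pair looks roughly like:

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))    /* keep writable */
    #define __do_const __attribute__((do_const))    /* force read-only */
    #else
    #define __no_const
    #define __do_const
    #endif
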
47922diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47923index d929951..a2c23f5 100644
47924--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47925+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47926@@ -2215,7 +2215,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47927
47928 int i;
47929 struct adapter *ap = netdev2adap(dev);
47930- static const unsigned int *reg_ranges;
47931+ const unsigned int *reg_ranges;
47932 int arr_size = 0, buf_size = 0;
47933
47934 if (is_t4(ap->params.chip)) {
47935diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
47936index badff18..e15c4ec 100644
47937--- a/drivers/net/ethernet/dec/tulip/de4x5.c
47938+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
47939@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47940 for (i=0; i<ETH_ALEN; i++) {
47941 tmp.addr[i] = dev->dev_addr[i];
47942 }
47943- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47944+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
47945 break;
47946
47947 case DE4X5_SET_HWADDR: /* Set the hardware address */
47948@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
47949 spin_lock_irqsave(&lp->lock, flags);
47950 memcpy(&statbuf, &lp->pktStats, ioc->len);
47951 spin_unlock_irqrestore(&lp->lock, flags);
47952- if (copy_to_user(ioc->data, &statbuf, ioc->len))
47953+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
47954 return -EFAULT;
47955 break;
47956 }
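
Both de4x5 ioctl fixes above have the same shape: ioc->len arrives from userspace, and passing it unchecked to copy_to_user() lets a caller read kernel memory past the end of tmp.addr or statbuf, an information leak. The added guard rejects lengths larger than the source object. Generic sketch of the pattern:

    /* len is caller-controlled; never let it exceed the kernel object */
    static int bounded_copy_out(void __user *dst, const void *src,
                                size_t len, size_t objlen)
    {
            if (len > objlen)
                    return -EFAULT;
            return copy_to_user(dst, src, len) ? -EFAULT : 0;
    }
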
47957diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
47958index e6b790f..051ba2d 100644
47959--- a/drivers/net/ethernet/emulex/benet/be_main.c
47960+++ b/drivers/net/ethernet/emulex/benet/be_main.c
47961@@ -536,7 +536,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
47962
47963 if (wrapped)
47964 newacc += 65536;
47965- ACCESS_ONCE(*acc) = newacc;
47966+ ACCESS_ONCE_RW(*acc) = newacc;
47967 }
47968
47969 static void populate_erx_stats(struct be_adapter *adapter,
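
The ACCESS_ONCE to ACCESS_ONCE_RW substitutions (repeated below in the i40e, ixgbe, mlx4 and sfc hunks) follow from the hardened headers const-qualifying ACCESS_ONCE(), which turns an accidental store through it into a compile error; intentional lockless stores use the RW variant. Approximately, as an assumption about the patched compiler.h:

    /* reads only: the const qualifier rejects ACCESS_ONCE(x) = val */
    #define ACCESS_ONCE(x)          (*(const volatile typeof(x) *)&(x))
    /* explicit opt-in for stores */
    #define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))
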
47970diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
47971index 6d0c5d5..55be363 100644
47972--- a/drivers/net/ethernet/faraday/ftgmac100.c
47973+++ b/drivers/net/ethernet/faraday/ftgmac100.c
47974@@ -30,6 +30,8 @@
47975 #include <linux/netdevice.h>
47976 #include <linux/phy.h>
47977 #include <linux/platform_device.h>
47978+#include <linux/interrupt.h>
47979+#include <linux/irqreturn.h>
47980 #include <net/ip.h>
47981
47982 #include "ftgmac100.h"
47983diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
47984index dce5f7b..2433466 100644
47985--- a/drivers/net/ethernet/faraday/ftmac100.c
47986+++ b/drivers/net/ethernet/faraday/ftmac100.c
47987@@ -31,6 +31,8 @@
47988 #include <linux/module.h>
47989 #include <linux/netdevice.h>
47990 #include <linux/platform_device.h>
47991+#include <linux/interrupt.h>
47992+#include <linux/irqreturn.h>
47993
47994 #include "ftmac100.h"
47995
47996diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47997index fabcfa1..188fd22 100644
47998--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
47999+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48000@@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48001 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48002
48003 /* Update the base adjustement value. */
48004- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48005+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48006 smp_mb(); /* Force the above update. */
48007 }
48008
48009diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48010index 79c00f5..8da39f6 100644
48011--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48012+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48013@@ -785,7 +785,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48014 }
48015
48016 /* update the base incval used to calculate frequency adjustment */
48017- ACCESS_ONCE(adapter->base_incval) = incval;
48018+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48019 smp_mb();
48020
48021 /* need lock to prevent incorrect read while modifying cyclecounter */
48022diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48023index 55f9f5c..18cc64b 100644
48024--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48025+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48026@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
48027 wmb();
48028
48029 /* we want to dirty this cache line once */
48030- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
48031- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
48032+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
48033+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
48034
48035 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
48036
48037diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48038index 6223930..975033d 100644
48039--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48040+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48041@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48042 struct __vxge_hw_fifo *fifo;
48043 struct vxge_hw_fifo_config *config;
48044 u32 txdl_size, txdl_per_memblock;
48045- struct vxge_hw_mempool_cbs fifo_mp_callback;
48046+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48047+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48048+ };
48049+
48050 struct __vxge_hw_virtualpath *vpath;
48051
48052 if ((vp == NULL) || (attr == NULL)) {
48053@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48054 goto exit;
48055 }
48056
48057- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48058-
48059 fifo->mempool =
48060 __vxge_hw_mempool_create(vpath->hldev,
48061 fifo->config->memblock_size,
48062diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48063index 2bb48d5..d1a865d 100644
48064--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48065+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48066@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48067 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48068 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48069 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48070- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48071+ pax_open_kernel();
48072+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48073+ pax_close_kernel();
48074 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48075 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48076 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48077diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48078index be7d7a6..a8983f8 100644
48079--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48080+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48081@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48082 case QLCNIC_NON_PRIV_FUNC:
48083 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48084 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48085- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48086+ pax_open_kernel();
48087+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48088+ pax_close_kernel();
48089 break;
48090 case QLCNIC_PRIV_FUNC:
48091 ahw->op_mode = QLCNIC_PRIV_FUNC;
48092 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48093- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48094+ pax_open_kernel();
48095+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48096+ pax_close_kernel();
48097 break;
48098 case QLCNIC_MGMT_FUNC:
48099 ahw->op_mode = QLCNIC_MGMT_FUNC;
48100 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48101- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48102+ pax_open_kernel();
48103+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48104+ pax_close_kernel();
48105 break;
48106 default:
48107 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48108diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48109index 332bb8a..e6adcd1 100644
48110--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48111+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48112@@ -1285,7 +1285,7 @@ flash_temp:
48113 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48114 {
48115 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48116- static const struct qlcnic_dump_operations *fw_dump_ops;
48117+ const struct qlcnic_dump_operations *fw_dump_ops;
48118 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48119 u32 entry_offset, dump, no_entries, buf_offset = 0;
48120 int i, k, ops_cnt, ops_index, dump_size = 0;
48121diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48122index c70ab40..00b28e0 100644
48123--- a/drivers/net/ethernet/realtek/r8169.c
48124+++ b/drivers/net/ethernet/realtek/r8169.c
48125@@ -788,22 +788,22 @@ struct rtl8169_private {
48126 struct mdio_ops {
48127 void (*write)(struct rtl8169_private *, int, int);
48128 int (*read)(struct rtl8169_private *, int);
48129- } mdio_ops;
48130+ } __no_const mdio_ops;
48131
48132 struct pll_power_ops {
48133 void (*down)(struct rtl8169_private *);
48134 void (*up)(struct rtl8169_private *);
48135- } pll_power_ops;
48136+ } __no_const pll_power_ops;
48137
48138 struct jumbo_ops {
48139 void (*enable)(struct rtl8169_private *);
48140 void (*disable)(struct rtl8169_private *);
48141- } jumbo_ops;
48142+ } __no_const jumbo_ops;
48143
48144 struct csi_ops {
48145 void (*write)(struct rtl8169_private *, int, int);
48146 u32 (*read)(struct rtl8169_private *, int);
48147- } csi_ops;
48148+ } __no_const csi_ops;
48149
48150 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48151 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48152diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48153index 6b861e3..204ac86 100644
48154--- a/drivers/net/ethernet/sfc/ptp.c
48155+++ b/drivers/net/ethernet/sfc/ptp.c
48156@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48157 ptp->start.dma_addr);
48158
48159 /* Clear flag that signals MC ready */
48160- ACCESS_ONCE(*start) = 0;
48161+ ACCESS_ONCE_RW(*start) = 0;
48162 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48163 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48164 EFX_BUG_ON_PARANOID(rc);
48165diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
48166index 10b6173..b605dfd5 100644
48167--- a/drivers/net/ethernet/sfc/selftest.c
48168+++ b/drivers/net/ethernet/sfc/selftest.c
48169@@ -46,7 +46,7 @@ struct efx_loopback_payload {
48170 struct iphdr ip;
48171 struct udphdr udp;
48172 __be16 iteration;
48173- const char msg[64];
48174+ char msg[64];
48175 } __packed;
48176
48177 /* Loopback test source MAC address */
48178diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48179index 08c483b..2c4a553 100644
48180--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48181+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48182@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48183
48184 writel(value, ioaddr + MMC_CNTRL);
48185
48186- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48187- MMC_CNTRL, value);
48188+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48189+// MMC_CNTRL, value);
48190 }
48191
48192 /* To mask all all interrupts.*/
48193diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
48194index 17e2766..c332f1e 100644
48195--- a/drivers/net/ethernet/via/via-rhine.c
48196+++ b/drivers/net/ethernet/via/via-rhine.c
48197@@ -2514,7 +2514,7 @@ static struct platform_driver rhine_driver_platform = {
48198 }
48199 };
48200
48201-static struct dmi_system_id rhine_dmi_table[] __initdata = {
48202+static const struct dmi_system_id rhine_dmi_table[] __initconst = {
48203 {
48204 .ident = "EPIA-M",
48205 .matches = {
48206diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48207index 384ca4f..dd7d4f9 100644
48208--- a/drivers/net/hyperv/hyperv_net.h
48209+++ b/drivers/net/hyperv/hyperv_net.h
48210@@ -171,7 +171,7 @@ struct rndis_device {
48211 enum rndis_device_state state;
48212 bool link_state;
48213 bool link_change;
48214- atomic_t new_req_id;
48215+ atomic_unchecked_t new_req_id;
48216
48217 spinlock_t request_lock;
48218 struct list_head req_list;
48219diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48220index 7816d98..7890614 100644
48221--- a/drivers/net/hyperv/rndis_filter.c
48222+++ b/drivers/net/hyperv/rndis_filter.c
48223@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48224 * template
48225 */
48226 set = &rndis_msg->msg.set_req;
48227- set->req_id = atomic_inc_return(&dev->new_req_id);
48228+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48229
48230 /* Add to the request list */
48231 spin_lock_irqsave(&dev->request_lock, flags);
48232@@ -918,7 +918,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48233
48234 /* Setup the rndis set */
48235 halt = &request->request_msg.msg.halt_req;
48236- halt->req_id = atomic_inc_return(&dev->new_req_id);
48237+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48238
48239 /* Ignore return since this msg is optional. */
48240 rndis_filter_send_request(dev, request);
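
new_req_id in the two hyperv hunks above is a message-ID generator that is expected to wrap around. PaX's REFCOUNT feature traps on atomic_t overflow to catch reference-count bugs, so deliberately wrapping counters are moved to the _unchecked variants, which keep plain modular semantics. Sketch of the relationship (an assumption about the hardened atomic headers; the real versions are per-arch assembly):

    typedef struct {
            int counter;                    /* same layout as atomic_t */
    } atomic_unchecked_t;

    static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
            /* atomic increment without the REFCOUNT overflow trap */
            return __sync_add_and_fetch(&v->counter, 1);
    }
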
48241diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48242index 34f846b..4a0d5b1 100644
48243--- a/drivers/net/ifb.c
48244+++ b/drivers/net/ifb.c
48245@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48246 return 0;
48247 }
48248
48249-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48250+static struct rtnl_link_ops ifb_link_ops = {
48251 .kind = "ifb",
48252 .priv_size = sizeof(struct ifb_private),
48253 .setup = ifb_setup,
48254diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48255index 1df38bd..4bc20b0 100644
48256--- a/drivers/net/macvlan.c
48257+++ b/drivers/net/macvlan.c
48258@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48259 free_nskb:
48260 kfree_skb(nskb);
48261 err:
48262- atomic_long_inc(&skb->dev->rx_dropped);
48263+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48264 }
48265
48266 static void macvlan_flush_sources(struct macvlan_port *port,
48267@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48268 int macvlan_link_register(struct rtnl_link_ops *ops)
48269 {
48270 /* common fields */
48271- ops->priv_size = sizeof(struct macvlan_dev);
48272- ops->validate = macvlan_validate;
48273- ops->maxtype = IFLA_MACVLAN_MAX;
48274- ops->policy = macvlan_policy;
48275- ops->changelink = macvlan_changelink;
48276- ops->get_size = macvlan_get_size;
48277- ops->fill_info = macvlan_fill_info;
48278+ pax_open_kernel();
48279+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48280+ *(void **)&ops->validate = macvlan_validate;
48281+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48282+ *(const void **)&ops->policy = macvlan_policy;
48283+ *(void **)&ops->changelink = macvlan_changelink;
48284+ *(void **)&ops->get_size = macvlan_get_size;
48285+ *(void **)&ops->fill_info = macvlan_fill_info;
48286+ pax_close_kernel();
48287
48288 return rtnl_link_register(ops);
48289 };
48290@@ -1551,7 +1553,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48291 return NOTIFY_DONE;
48292 }
48293
48294-static struct notifier_block macvlan_notifier_block __read_mostly = {
48295+static struct notifier_block macvlan_notifier_block = {
48296 .notifier_call = macvlan_device_event,
48297 };
48298
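
macvlan_link_register() above patches a caller-supplied ops structure, so the runtime writes stay but get wrapped in the pax_open_kernel()/pax_close_kernel() pair, and each cast has to name the member's real type: size_t for priv_size, int for maxtype, const void * for the policy pointer, and void ** only for actual function pointers. Reduced sketch with hypothetical names:

    struct link_ops {
            size_t priv_size;
            int (*validate)(void);
    };

    static void patch_ops(struct link_ops *ops, int (*fn)(void))
    {
            pax_open_kernel();
            *(size_t *)&ops->priv_size = 64;    /* integer member: size_t cast */
            *(void **)&ops->validate   = fn;    /* function pointer: void **  */
            pax_close_kernel();
    }
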
48299diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48300index 27ecc5c..f636328 100644
48301--- a/drivers/net/macvtap.c
48302+++ b/drivers/net/macvtap.c
48303@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48304 dev->tx_queue_len = TUN_READQ_SIZE;
48305 }
48306
48307-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48308+static struct rtnl_link_ops macvtap_link_ops = {
48309 .kind = "macvtap",
48310 .setup = macvtap_setup,
48311 .newlink = macvtap_newlink,
48312@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48313
48314 ret = 0;
48315 u = q->flags;
48316- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48317+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48318 put_user(u, &ifr->ifr_flags))
48319 ret = -EFAULT;
48320 macvtap_put_vlan(vlan);
48321@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48322 return NOTIFY_DONE;
48323 }
48324
48325-static struct notifier_block macvtap_notifier_block __read_mostly = {
48326+static struct notifier_block macvtap_notifier_block = {
48327 .notifier_call = macvtap_device_event,
48328 };
48329
48330diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48331index 34924df..a747360 100644
48332--- a/drivers/net/nlmon.c
48333+++ b/drivers/net/nlmon.c
48334@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48335 return 0;
48336 }
48337
48338-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48339+static struct rtnl_link_ops nlmon_link_ops = {
48340 .kind = "nlmon",
48341 .priv_size = sizeof(struct nlmon),
48342 .setup = nlmon_setup,
48343diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48344index bdfe51f..e7845c7 100644
48345--- a/drivers/net/phy/phy_device.c
48346+++ b/drivers/net/phy/phy_device.c
48347@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48348 * zero on success.
48349 *
48350 */
48351-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48352+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48353 struct phy_c45_device_ids *c45_ids) {
48354 int phy_reg;
48355 int i, reg_addr;
48356@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48357 * its return value is in turn returned.
48358 *
48359 */
48360-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48361+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48362 bool is_c45, struct phy_c45_device_ids *c45_ids)
48363 {
48364 int phy_reg;
48365@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48366 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48367 {
48368 struct phy_c45_device_ids c45_ids = {0};
48369- u32 phy_id = 0;
48370+ int phy_id = 0;
48371 int r;
48372
48373 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48374diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48375index af034db..1611c0b2 100644
48376--- a/drivers/net/ppp/ppp_generic.c
48377+++ b/drivers/net/ppp/ppp_generic.c
48378@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48379 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48380 struct ppp_stats stats;
48381 struct ppp_comp_stats cstats;
48382- char *vers;
48383
48384 switch (cmd) {
48385 case SIOCGPPPSTATS:
48386@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48387 break;
48388
48389 case SIOCGPPPVER:
48390- vers = PPP_VERSION;
48391- if (copy_to_user(addr, vers, strlen(vers) + 1))
48392+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48393 break;
48394 err = 0;
48395 break;
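
The SIOCGPPPVER change replaces a runtime strlen() over the PPP_VERSION literal with sizeof, which for a string literal already counts the terminating NUL, letting the local vers pointer go away. A quick standalone check of the equivalence (the version string here is only an example value):

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"	/* example value only */

int main(void)
{
	/* sizeof a string literal includes the trailing NUL, so it matches
	 * strlen() + 1 with no runtime scan and no temporary pointer. */
	printf("%zu %zu\n", sizeof(PPP_VERSION), strlen(PPP_VERSION) + 1);
	return 0;
}
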
48396diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48397index 079f7ad..b2a2bfa7 100644
48398--- a/drivers/net/slip/slhc.c
48399+++ b/drivers/net/slip/slhc.c
48400@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48401 register struct tcphdr *thp;
48402 register struct iphdr *ip;
48403 register struct cstate *cs;
48404- int len, hdrlen;
48405+ long len, hdrlen;
48406 unsigned char *cp = icp;
48407
48408 /* We've got a compressed packet; read the change byte */
48409diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48410index 7d39484..d58499d 100644
48411--- a/drivers/net/team/team.c
48412+++ b/drivers/net/team/team.c
48413@@ -2099,7 +2099,7 @@ static unsigned int team_get_num_rx_queues(void)
48414 return TEAM_DEFAULT_NUM_RX_QUEUES;
48415 }
48416
48417-static struct rtnl_link_ops team_link_ops __read_mostly = {
48418+static struct rtnl_link_ops team_link_ops = {
48419 .kind = DRV_NAME,
48420 .priv_size = sizeof(struct team),
48421 .setup = team_setup,
48422@@ -2889,7 +2889,7 @@ static int team_device_event(struct notifier_block *unused,
48423 return NOTIFY_DONE;
48424 }
48425
48426-static struct notifier_block team_notifier_block __read_mostly = {
48427+static struct notifier_block team_notifier_block = {
48428 .notifier_call = team_device_event,
48429 };
48430
48431diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48432index 857dca4..642f532 100644
48433--- a/drivers/net/tun.c
48434+++ b/drivers/net/tun.c
48435@@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48436 return -EINVAL;
48437 }
48438
48439-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48440+static struct rtnl_link_ops tun_link_ops = {
48441 .kind = DRV_NAME,
48442 .priv_size = sizeof(struct tun_struct),
48443 .setup = tun_setup,
48444@@ -1830,7 +1830,7 @@ unlock:
48445 }
48446
48447 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48448- unsigned long arg, int ifreq_len)
48449+ unsigned long arg, size_t ifreq_len)
48450 {
48451 struct tun_file *tfile = file->private_data;
48452 struct tun_struct *tun;
48453@@ -1844,6 +1844,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48454 int le;
48455 int ret;
48456
48457+ if (ifreq_len > sizeof ifr)
48458+ return -EFAULT;
48459+
48460 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48461 if (copy_from_user(&ifr, argp, ifreq_len))
48462 return -EFAULT;
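
__tun_chr_ioctl() now takes ifreq_len as size_t and rejects any length larger than the on-stack ifr before copying, closing off stack over-reads and over-writes when a caller passes an oversized length. A userspace sketch of the same guard (the struct and error value are stand-ins; -14 plays the role of -EFAULT):

#include <stdio.h>
#include <string.h>

struct ifreq_like { char name[16]; int flags; };  /* stand-in for struct ifreq */

static long ioctl_sketch(size_t ifreq_len)
{
	struct ifreq_like ifr;

	/* Reject oversized lengths before touching the stack buffer. */
	if (ifreq_len > sizeof ifr)
		return -14;
	memset(&ifr, 0, ifreq_len);
	return 0;
}

int main(void)
{
	printf("%ld %ld\n", ioctl_sketch(sizeof(struct ifreq_like)),
	       ioctl_sketch(1024));
	return 0;
}
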
48463diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48464index 778e915..58c4d95 100644
48465--- a/drivers/net/usb/hso.c
48466+++ b/drivers/net/usb/hso.c
48467@@ -70,7 +70,7 @@
48468 #include <asm/byteorder.h>
48469 #include <linux/serial_core.h>
48470 #include <linux/serial.h>
48471-
48472+#include <asm/local.h>
48473
48474 #define MOD_AUTHOR "Option Wireless"
48475 #define MOD_DESCRIPTION "USB High Speed Option driver"
48476@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48477 struct urb *urb;
48478
48479 urb = serial->rx_urb[0];
48480- if (serial->port.count > 0) {
48481+ if (atomic_read(&serial->port.count) > 0) {
48482 count = put_rxbuf_data(urb, serial);
48483 if (count == -1)
48484 return;
48485@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48486 DUMP1(urb->transfer_buffer, urb->actual_length);
48487
48488 /* Anyone listening? */
48489- if (serial->port.count == 0)
48490+ if (atomic_read(&serial->port.count) == 0)
48491 return;
48492
48493 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48494@@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48495 tty_port_tty_set(&serial->port, tty);
48496
48497 /* check for port already opened, if not set the termios */
48498- serial->port.count++;
48499- if (serial->port.count == 1) {
48500+ if (atomic_inc_return(&serial->port.count) == 1) {
48501 serial->rx_state = RX_IDLE;
48502 /* Force default termio settings */
48503 _hso_serial_set_termios(tty, NULL);
48504@@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48505 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48506 if (result) {
48507 hso_stop_serial_device(serial->parent);
48508- serial->port.count--;
48509+ atomic_dec(&serial->port.count);
48510 } else {
48511 kref_get(&serial->parent->ref);
48512 }
48513@@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48514
48515 /* reset the rts and dtr */
48516 /* do the actual close */
48517- serial->port.count--;
48518+ atomic_dec(&serial->port.count);
48519
48520- if (serial->port.count <= 0) {
48521- serial->port.count = 0;
48522+ if (atomic_read(&serial->port.count) <= 0) {
48523+ atomic_set(&serial->port.count, 0);
48524 tty_port_tty_set(&serial->port, NULL);
48525 if (!usb_gone)
48526 hso_stop_serial_device(serial->parent);
48527@@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48528
48529 /* the actual setup */
48530 spin_lock_irqsave(&serial->serial_lock, flags);
48531- if (serial->port.count)
48532+ if (atomic_read(&serial->port.count))
48533 _hso_serial_set_termios(tty, old);
48534 else
48535 tty->termios = *old;
48536@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
48537 D1("Pending read interrupt on port %d\n", i);
48538 spin_lock(&serial->serial_lock);
48539 if (serial->rx_state == RX_IDLE &&
48540- serial->port.count > 0) {
48541+ atomic_read(&serial->port.count) > 0) {
48542 /* Setup and send a ctrl req read on
48543 * port i */
48544 if (!serial->rx_urb_filled[0]) {
48545@@ -3053,7 +3052,7 @@ static int hso_resume(struct usb_interface *iface)
48546 /* Start all serial ports */
48547 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48548 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48549- if (dev2ser(serial_table[i])->port.count) {
48550+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48551 result =
48552 hso_start_serial_device(serial_table[i], GFP_NOIO);
48553 hso_kick_transmit(dev2ser(serial_table[i]));
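
Across the hso serial paths, port.count moves from a plain int (racy under concurrent open/close) to atomic accessors: atomic_inc_return() lets the first opener be detected without widening any lock, and atomic_dec() pairs with it on close. A compressed sketch of that open/close protocol, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void port_open(void)
{
	/* atomic_inc_return(&port.count) == 1 in the patch: only the
	 * first opener initialises the port. */
	if (atomic_fetch_add(&port_count, 1) + 1 == 1)
		puts("first open: set termios, start device");
}

static void port_close(void)
{
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
		atomic_store(&port_count, 0);
		puts("last close: stop device");
	}
}

int main(void)
{
	port_open();
	port_open();
	port_close();
	port_close();
	return 0;
}
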
48554diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48555index 9f7c0ab..1577b4a 100644
48556--- a/drivers/net/usb/r8152.c
48557+++ b/drivers/net/usb/r8152.c
48558@@ -601,7 +601,7 @@ struct r8152 {
48559 void (*unload)(struct r8152 *);
48560 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48561 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48562- } rtl_ops;
48563+ } __no_const rtl_ops;
48564
48565 int intr_interval;
48566 u32 saved_wolopts;
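
rtl_ops is assigned per-chip callbacks at probe time, so it cannot live in read-only memory; __no_const (a grsecurity annotation, a no-op elsewhere) opts the struct out of the constify plugin. An illustrative compile-and-run sketch with the annotation stubbed:

#include <stdio.h>

#ifndef __no_const
#define __no_const	/* grsecurity annotation; expands to nothing here */
#endif

struct chip_ops {
	void (*init)(void);
	void (*unload)(void);
} __no_const;

static void chip_init(void)   { puts("init"); }
static void chip_unload(void) { puts("unload"); }

int main(void)
{
	struct chip_ops ops;

	/* Callbacks are chosen at probe/run time, which is exactly why
	 * the struct must stay writable (non-constified). */
	ops.init = chip_init;
	ops.unload = chip_unload;
	ops.init();
	ops.unload();
	return 0;
}
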
48567diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48568index a2515887..6d13233 100644
48569--- a/drivers/net/usb/sierra_net.c
48570+++ b/drivers/net/usb/sierra_net.c
48571@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48572 /* atomic counter partially included in MAC address to make sure 2 devices
48573 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48574 */
48575-static atomic_t iface_counter = ATOMIC_INIT(0);
48576+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48577
48578 /*
48579 * SYNC Timer Delay definition used to set the expiry time
48580@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48581 dev->net->netdev_ops = &sierra_net_device_ops;
48582
48583 /* change MAC addr to include, ifacenum, and to be unique */
48584- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48585+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48586 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48587
48588 /* we will have to manufacture ethernet headers, prepare template */
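
iface_counter only feeds one byte of a MAC address, so wrap-around is harmless by design; atomic_unchecked_t marks it exempt from PAX_REFCOUNT's overflow trapping. A sketch with the unchecked type stubbed as a plain (non-atomic) counter, since the kernel type is unavailable outside the tree:

#include <stdio.h>

/* Stub: the real atomic_unchecked_t is an atomic whose increments are
 * exempt from PAX_REFCOUNT overflow traps. */
typedef struct { int counter; } atomic_unchecked_t;

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return ++v->counter;	/* wrap-around tolerated by design */
}

static atomic_unchecked_t iface_counter;

int main(void)
{
	/* Only one byte of the value ever reaches the MAC address. */
	unsigned char mac_byte =
		(unsigned char)atomic_inc_return_unchecked(&iface_counter);

	printf("%02x\n", mac_byte);
	return 0;
}
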
48589diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48590index 59b0e97..a6ed579 100644
48591--- a/drivers/net/virtio_net.c
48592+++ b/drivers/net/virtio_net.c
48593@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48594 #define RECEIVE_AVG_WEIGHT 64
48595
48596 /* Minimum alignment for mergeable packet buffers. */
48597-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48598+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48599
48600 #define VIRTNET_DRIVER_VERSION "1.0.0"
48601
48602diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48603index fceb637..37c70fd 100644
48604--- a/drivers/net/vxlan.c
48605+++ b/drivers/net/vxlan.c
48606@@ -2935,7 +2935,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
48607 return vxlan->net;
48608 }
48609
48610-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48611+static struct rtnl_link_ops vxlan_link_ops = {
48612 .kind = "vxlan",
48613 .maxtype = IFLA_VXLAN_MAX,
48614 .policy = vxlan_policy,
48615@@ -2983,7 +2983,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48616 return NOTIFY_DONE;
48617 }
48618
48619-static struct notifier_block vxlan_notifier_block __read_mostly = {
48620+static struct notifier_block vxlan_notifier_block = {
48621 .notifier_call = vxlan_lowerdev_event,
48622 };
48623
48624diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48625index 5920c99..ff2e4a5 100644
48626--- a/drivers/net/wan/lmc/lmc_media.c
48627+++ b/drivers/net/wan/lmc/lmc_media.c
48628@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48629 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48630
48631 lmc_media_t lmc_ds3_media = {
48632- lmc_ds3_init, /* special media init stuff */
48633- lmc_ds3_default, /* reset to default state */
48634- lmc_ds3_set_status, /* reset status to state provided */
48635- lmc_dummy_set_1, /* set clock source */
48636- lmc_dummy_set2_1, /* set line speed */
48637- lmc_ds3_set_100ft, /* set cable length */
48638- lmc_ds3_set_scram, /* set scrambler */
48639- lmc_ds3_get_link_status, /* get link status */
48640- lmc_dummy_set_1, /* set link status */
48641- lmc_ds3_set_crc_length, /* set CRC length */
48642- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48643- lmc_ds3_watchdog
48644+ .init = lmc_ds3_init, /* special media init stuff */
48645+ .defaults = lmc_ds3_default, /* reset to default state */
48646+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48647+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48648+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48649+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48650+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48651+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48652+ .set_link_status = lmc_dummy_set_1, /* set link status */
48653+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48654+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48655+ .watchdog = lmc_ds3_watchdog
48656 };
48657
48658 lmc_media_t lmc_hssi_media = {
48659- lmc_hssi_init, /* special media init stuff */
48660- lmc_hssi_default, /* reset to default state */
48661- lmc_hssi_set_status, /* reset status to state provided */
48662- lmc_hssi_set_clock, /* set clock source */
48663- lmc_dummy_set2_1, /* set line speed */
48664- lmc_dummy_set_1, /* set cable length */
48665- lmc_dummy_set_1, /* set scrambler */
48666- lmc_hssi_get_link_status, /* get link status */
48667- lmc_hssi_set_link_status, /* set link status */
48668- lmc_hssi_set_crc_length, /* set CRC length */
48669- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48670- lmc_hssi_watchdog
48671+ .init = lmc_hssi_init, /* special media init stuff */
48672+ .defaults = lmc_hssi_default, /* reset to default state */
48673+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48674+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48675+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48676+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48677+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48678+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48679+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48680+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48681+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48682+ .watchdog = lmc_hssi_watchdog
48683 };
48684
48685-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48686- lmc_ssi_default, /* reset to default state */
48687- lmc_ssi_set_status, /* reset status to state provided */
48688- lmc_ssi_set_clock, /* set clock source */
48689- lmc_ssi_set_speed, /* set line speed */
48690- lmc_dummy_set_1, /* set cable length */
48691- lmc_dummy_set_1, /* set scrambler */
48692- lmc_ssi_get_link_status, /* get link status */
48693- lmc_ssi_set_link_status, /* set link status */
48694- lmc_ssi_set_crc_length, /* set CRC length */
48695- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48696- lmc_ssi_watchdog
48697+lmc_media_t lmc_ssi_media = {
48698+ .init = lmc_ssi_init, /* special media init stuff */
48699+ .defaults = lmc_ssi_default, /* reset to default state */
48700+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48701+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48702+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48703+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48704+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48705+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48706+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48707+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48708+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48709+ .watchdog = lmc_ssi_watchdog
48710 };
48711
48712 lmc_media_t lmc_t1_media = {
48713- lmc_t1_init, /* special media init stuff */
48714- lmc_t1_default, /* reset to default state */
48715- lmc_t1_set_status, /* reset status to state provided */
48716- lmc_t1_set_clock, /* set clock source */
48717- lmc_dummy_set2_1, /* set line speed */
48718- lmc_dummy_set_1, /* set cable length */
48719- lmc_dummy_set_1, /* set scrambler */
48720- lmc_t1_get_link_status, /* get link status */
48721- lmc_dummy_set_1, /* set link status */
48722- lmc_t1_set_crc_length, /* set CRC length */
48723- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48724- lmc_t1_watchdog
48725+ .init = lmc_t1_init, /* special media init stuff */
48726+ .defaults = lmc_t1_default, /* reset to default state */
48727+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48728+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48729+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48730+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48731+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48732+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48733+ .set_link_status = lmc_dummy_set_1, /* set link status */
48734+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48735+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48736+ .watchdog = lmc_t1_watchdog
48737 };
48738
48739 static void
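
All four lmc_media_t tables switch from positional to designated initialisers, binding each handler to its field by name; this survives field reordering (for instance under structure-layout plugins) and is the same conversion the later z85230 hunks apply. A minimal sketch of the difference:

#include <stdio.h>

struct media_ops {			/* stand-in for lmc_media_t */
	void (*init)(void);
	void (*watchdog)(void);
};

static void ds3_init(void)     { puts("init"); }
static void ds3_watchdog(void) { puts("watchdog"); }

/* Named fields keep each handler bound to the right slot even if the
 * struct layout is reordered, which a positional list cannot guarantee. */
static struct media_ops ds3_media = {
	.init     = ds3_init,
	.watchdog = ds3_watchdog,
};

int main(void)
{
	ds3_media.init();
	ds3_media.watchdog();
	return 0;
}
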
48740diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48741index feacc3b..5bac0de 100644
48742--- a/drivers/net/wan/z85230.c
48743+++ b/drivers/net/wan/z85230.c
48744@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48745
48746 struct z8530_irqhandler z8530_sync =
48747 {
48748- z8530_rx,
48749- z8530_tx,
48750- z8530_status
48751+ .rx = z8530_rx,
48752+ .tx = z8530_tx,
48753+ .status = z8530_status
48754 };
48755
48756 EXPORT_SYMBOL(z8530_sync);
48757@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48758 }
48759
48760 static struct z8530_irqhandler z8530_dma_sync = {
48761- z8530_dma_rx,
48762- z8530_dma_tx,
48763- z8530_dma_status
48764+ .rx = z8530_dma_rx,
48765+ .tx = z8530_dma_tx,
48766+ .status = z8530_dma_status
48767 };
48768
48769 static struct z8530_irqhandler z8530_txdma_sync = {
48770- z8530_rx,
48771- z8530_dma_tx,
48772- z8530_dma_status
48773+ .rx = z8530_rx,
48774+ .tx = z8530_dma_tx,
48775+ .status = z8530_dma_status
48776 };
48777
48778 /**
48779@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48780
48781 struct z8530_irqhandler z8530_nop=
48782 {
48783- z8530_rx_clear,
48784- z8530_tx_clear,
48785- z8530_status_clear
48786+ .rx = z8530_rx_clear,
48787+ .tx = z8530_tx_clear,
48788+ .status = z8530_status_clear
48789 };
48790
48791
48792diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48793index 0b60295..b8bfa5b 100644
48794--- a/drivers/net/wimax/i2400m/rx.c
48795+++ b/drivers/net/wimax/i2400m/rx.c
48796@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48797 if (i2400m->rx_roq == NULL)
48798 goto error_roq_alloc;
48799
48800- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48801+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48802 GFP_KERNEL);
48803 if (rd == NULL) {
48804 result = -ENOMEM;
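
The kcalloc() arguments are swapped so the element size appears first; kcalloc(n, size, flags) computes the same overflow-checked product either way, so behaviour is unchanged, and the reordering appears to be for the benefit of the size_overflow plugin's instrumentation. The symmetry, shown with plain calloc:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* calloc(n, size) and calloc(size, n) request the same checked
	 * product, so swapping the first two arguments preserves
	 * behaviour. */
	int *a = calloc(16, sizeof(*a));
	int *b = calloc(sizeof(*b), 16);

	printf("a=%p b=%p\n", (void *)a, (void *)b);
	free(a);
	free(b);
	return 0;
}
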
48805diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48806index e71a2ce..2268d61 100644
48807--- a/drivers/net/wireless/airo.c
48808+++ b/drivers/net/wireless/airo.c
48809@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48810 struct airo_info *ai = dev->ml_priv;
48811 int ridcode;
48812 int enabled;
48813- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48814+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48815 unsigned char *iobuf;
48816
48817 /* Only super-user can write RIDs */
48818diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48819index da92bfa..5a9001a 100644
48820--- a/drivers/net/wireless/at76c50x-usb.c
48821+++ b/drivers/net/wireless/at76c50x-usb.c
48822@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48823 }
48824
48825 /* Convert timeout from the DFU status to jiffies */
48826-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48827+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48828 {
48829 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48830 | (s->poll_timeout[1] << 8)
48831diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48832index 2fd9e18..3f55bdd 100644
48833--- a/drivers/net/wireless/ath/ath10k/htc.c
48834+++ b/drivers/net/wireless/ath/ath10k/htc.c
48835@@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48836 /* registered target arrival callback from the HIF layer */
48837 int ath10k_htc_init(struct ath10k *ar)
48838 {
48839- struct ath10k_hif_cb htc_callbacks;
48840+ static struct ath10k_hif_cb htc_callbacks = {
48841+ .rx_completion = ath10k_htc_rx_completion_handler,
48842+ .tx_completion = ath10k_htc_tx_completion_handler,
48843+ };
48844 struct ath10k_htc_ep *ep = NULL;
48845 struct ath10k_htc *htc = &ar->htc;
48846
48847@@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
48848 ath10k_htc_reset_endpoint_states(htc);
48849
48850 /* setup HIF layer callbacks */
48851- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48852- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48853 htc->ar = ar;
48854
48855 /* Get HIF default pipe for HTC message exchange */
48856diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48857index 527179c..a890150 100644
48858--- a/drivers/net/wireless/ath/ath10k/htc.h
48859+++ b/drivers/net/wireless/ath/ath10k/htc.h
48860@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48861
48862 struct ath10k_htc_ops {
48863 void (*target_send_suspend_complete)(struct ath10k *ar);
48864-};
48865+} __no_const;
48866
48867 struct ath10k_htc_ep_ops {
48868 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48869 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48870 void (*ep_tx_credits)(struct ath10k *);
48871-};
48872+} __no_const;
48873
48874 /* service connection information */
48875 struct ath10k_htc_svc_conn_req {
48876diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48877index f816909..e56cd8b 100644
48878--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48879+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48880@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48881 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48882 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48883
48884- ACCESS_ONCE(ads->ds_link) = i->link;
48885- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48886+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48887+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48888
48889 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48890 ctl6 = SM(i->keytype, AR_EncrType);
48891@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48892
48893 if ((i->is_first || i->is_last) &&
48894 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48895- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48896+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48897 | set11nTries(i->rates, 1)
48898 | set11nTries(i->rates, 2)
48899 | set11nTries(i->rates, 3)
48900 | (i->dur_update ? AR_DurUpdateEna : 0)
48901 | SM(0, AR_BurstDur);
48902
48903- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48904+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48905 | set11nRate(i->rates, 1)
48906 | set11nRate(i->rates, 2)
48907 | set11nRate(i->rates, 3);
48908 } else {
48909- ACCESS_ONCE(ads->ds_ctl2) = 0;
48910- ACCESS_ONCE(ads->ds_ctl3) = 0;
48911+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48912+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48913 }
48914
48915 if (!i->is_first) {
48916- ACCESS_ONCE(ads->ds_ctl0) = 0;
48917- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48918- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48919+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48920+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48921+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48922 return;
48923 }
48924
48925@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48926 break;
48927 }
48928
48929- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48930+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48931 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48932 | SM(i->txpower[0], AR_XmitPower0)
48933 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48934@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48935 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48936 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48937
48938- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48939- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48940+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48941+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48942
48943 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48944 return;
48945
48946- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48947+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48948 | set11nPktDurRTSCTS(i->rates, 1);
48949
48950- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48951+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48952 | set11nPktDurRTSCTS(i->rates, 3);
48953
48954- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48955+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48956 | set11nRateFlags(i->rates, 1)
48957 | set11nRateFlags(i->rates, 2)
48958 | set11nRateFlags(i->rates, 3)
48959 | SM(i->rtscts_rate, AR_RTSCTSRate);
48960
48961- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48962- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48963- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48964+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
48965+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
48966+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
48967 }
48968
48969 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
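
Under this patch set ACCESS_ONCE() is made read-only (its lvalue gains const), so every descriptor-field store in the ath9k setup paths is rewritten to ACCESS_ONCE_RW(), the writable variant; the same substitution repeats in ar9003_mac.c below. Approximate definitions, for illustration only:

#include <stdio.h>

/* Illustrative, not the kernel's exact macros: the grsecurity
 * ACCESS_ONCE() yields a const volatile lvalue (reads only), while
 * ACCESS_ONCE_RW() keeps the writable form these setup paths need. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned int ds_link = 0;

	ACCESS_ONCE_RW(ds_link) = 0xdeadbeef;	/* store still compiles */
	printf("%x\n", ACCESS_ONCE(ds_link));	/* read-only view */
	return 0;
}
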
48970diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48971index da84b70..83e4978 100644
48972--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48973+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
48974@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48975 (i->qcu << AR_TxQcuNum_S) | desc_len;
48976
48977 checksum += val;
48978- ACCESS_ONCE(ads->info) = val;
48979+ ACCESS_ONCE_RW(ads->info) = val;
48980
48981 checksum += i->link;
48982- ACCESS_ONCE(ads->link) = i->link;
48983+ ACCESS_ONCE_RW(ads->link) = i->link;
48984
48985 checksum += i->buf_addr[0];
48986- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
48987+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
48988 checksum += i->buf_addr[1];
48989- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
48990+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
48991 checksum += i->buf_addr[2];
48992- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
48993+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
48994 checksum += i->buf_addr[3];
48995- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
48996+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
48997
48998 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
48999- ACCESS_ONCE(ads->ctl3) = val;
49000+ ACCESS_ONCE_RW(ads->ctl3) = val;
49001 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49002- ACCESS_ONCE(ads->ctl5) = val;
49003+ ACCESS_ONCE_RW(ads->ctl5) = val;
49004 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49005- ACCESS_ONCE(ads->ctl7) = val;
49006+ ACCESS_ONCE_RW(ads->ctl7) = val;
49007 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49008- ACCESS_ONCE(ads->ctl9) = val;
49009+ ACCESS_ONCE_RW(ads->ctl9) = val;
49010
49011 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49012- ACCESS_ONCE(ads->ctl10) = checksum;
49013+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49014
49015 if (i->is_first || i->is_last) {
49016- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49017+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49018 | set11nTries(i->rates, 1)
49019 | set11nTries(i->rates, 2)
49020 | set11nTries(i->rates, 3)
49021 | (i->dur_update ? AR_DurUpdateEna : 0)
49022 | SM(0, AR_BurstDur);
49023
49024- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49025+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49026 | set11nRate(i->rates, 1)
49027 | set11nRate(i->rates, 2)
49028 | set11nRate(i->rates, 3);
49029 } else {
49030- ACCESS_ONCE(ads->ctl13) = 0;
49031- ACCESS_ONCE(ads->ctl14) = 0;
49032+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49033+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49034 }
49035
49036 ads->ctl20 = 0;
49037@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49038
49039 ctl17 = SM(i->keytype, AR_EncrType);
49040 if (!i->is_first) {
49041- ACCESS_ONCE(ads->ctl11) = 0;
49042- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49043- ACCESS_ONCE(ads->ctl15) = 0;
49044- ACCESS_ONCE(ads->ctl16) = 0;
49045- ACCESS_ONCE(ads->ctl17) = ctl17;
49046- ACCESS_ONCE(ads->ctl18) = 0;
49047- ACCESS_ONCE(ads->ctl19) = 0;
49048+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49049+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49050+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49051+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49052+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49053+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49054+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49055 return;
49056 }
49057
49058- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49059+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49060 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49061 | SM(i->txpower[0], AR_XmitPower0)
49062 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49063@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49064 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49065 ctl12 |= SM(val, AR_PAPRDChainMask);
49066
49067- ACCESS_ONCE(ads->ctl12) = ctl12;
49068- ACCESS_ONCE(ads->ctl17) = ctl17;
49069+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49070+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49071
49072- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49073+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49074 | set11nPktDurRTSCTS(i->rates, 1);
49075
49076- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49077+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49078 | set11nPktDurRTSCTS(i->rates, 3);
49079
49080- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49081+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49082 | set11nRateFlags(i->rates, 1)
49083 | set11nRateFlags(i->rates, 2)
49084 | set11nRateFlags(i->rates, 3)
49085 | SM(i->rtscts_rate, AR_RTSCTSRate);
49086
49087- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49088+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49089
49090- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49091- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49092- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49093+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49094+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49095+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49096 }
49097
49098 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49099diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49100index e82e570..8c3cf90 100644
49101--- a/drivers/net/wireless/ath/ath9k/hw.h
49102+++ b/drivers/net/wireless/ath/ath9k/hw.h
49103@@ -646,7 +646,7 @@ struct ath_hw_private_ops {
49104
49105 /* ANI */
49106 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49107-};
49108+} __no_const;
49109
49110 /**
49111 * struct ath_spec_scan - parameters for Atheros spectral scan
49112@@ -722,7 +722,7 @@ struct ath_hw_ops {
49113 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49114 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49115 #endif
49116-};
49117+} __no_const;
49118
49119 struct ath_nf_limits {
49120 s16 max;
49121diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49122index 9ede991..a8f08fb 100644
49123--- a/drivers/net/wireless/ath/ath9k/main.c
49124+++ b/drivers/net/wireless/ath/ath9k/main.c
49125@@ -2537,16 +2537,18 @@ void ath9k_fill_chanctx_ops(void)
49126 if (!ath9k_is_chanctx_enabled())
49127 return;
49128
49129- ath9k_ops.hw_scan = ath9k_hw_scan;
49130- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49131- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49132- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49133- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49134- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49135- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49136- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49137- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49138- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49139+ pax_open_kernel();
49140+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49141+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49142+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49143+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49144+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49145+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49146+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49147+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49148+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49149+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49150+ pax_close_kernel();
49151 }
49152
49153 #endif
49154diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49155index 058a9f2..d5cb1ba 100644
49156--- a/drivers/net/wireless/b43/phy_lp.c
49157+++ b/drivers/net/wireless/b43/phy_lp.c
49158@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49159 {
49160 struct ssb_bus *bus = dev->dev->sdev->bus;
49161
49162- static const struct b206x_channel *chandata = NULL;
49163+ const struct b206x_channel *chandata = NULL;
49164 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49165 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49166 u16 old_comm15, scale;
49167diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49168index e566580..2c218ca 100644
49169--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49170+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49171@@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49172 */
49173 if (il3945_mod_params.disable_hw_scan) {
49174 D_INFO("Disabling hw_scan\n");
49175- il3945_mac_ops.hw_scan = NULL;
49176+ pax_open_kernel();
49177+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49178+ pax_close_kernel();
49179 }
49180
49181 D_INFO("*** LOAD DRIVER ***\n");
49182diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49183index 0ffb6ff..c0b7f0e 100644
49184--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49185+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49186@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49187 {
49188 struct iwl_priv *priv = file->private_data;
49189 char buf[64];
49190- int buf_size;
49191+ size_t buf_size;
49192 u32 offset, len;
49193
49194 memset(buf, 0, sizeof(buf));
49195@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49196 struct iwl_priv *priv = file->private_data;
49197
49198 char buf[8];
49199- int buf_size;
49200+ size_t buf_size;
49201 u32 reset_flag;
49202
49203 memset(buf, 0, sizeof(buf));
49204@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49205 {
49206 struct iwl_priv *priv = file->private_data;
49207 char buf[8];
49208- int buf_size;
49209+ size_t buf_size;
49210 int ht40;
49211
49212 memset(buf, 0, sizeof(buf));
49213@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49214 {
49215 struct iwl_priv *priv = file->private_data;
49216 char buf[8];
49217- int buf_size;
49218+ size_t buf_size;
49219 int value;
49220
49221 memset(buf, 0, sizeof(buf));
49222@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49223 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49224 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49225
49226-static const char *fmt_value = " %-30s %10u\n";
49227-static const char *fmt_hex = " %-30s 0x%02X\n";
49228-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49229-static const char *fmt_header =
49230+static const char fmt_value[] = " %-30s %10u\n";
49231+static const char fmt_hex[] = " %-30s 0x%02X\n";
49232+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49233+static const char fmt_header[] =
49234 "%-32s current cumulative delta max\n";
49235
49236 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49237@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49238 {
49239 struct iwl_priv *priv = file->private_data;
49240 char buf[8];
49241- int buf_size;
49242+ size_t buf_size;
49243 int clear;
49244
49245 memset(buf, 0, sizeof(buf));
49246@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49247 {
49248 struct iwl_priv *priv = file->private_data;
49249 char buf[8];
49250- int buf_size;
49251+ size_t buf_size;
49252 int trace;
49253
49254 memset(buf, 0, sizeof(buf));
49255@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49256 {
49257 struct iwl_priv *priv = file->private_data;
49258 char buf[8];
49259- int buf_size;
49260+ size_t buf_size;
49261 int missed;
49262
49263 memset(buf, 0, sizeof(buf));
49264@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49265
49266 struct iwl_priv *priv = file->private_data;
49267 char buf[8];
49268- int buf_size;
49269+ size_t buf_size;
49270 int plcp;
49271
49272 memset(buf, 0, sizeof(buf));
49273@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49274
49275 struct iwl_priv *priv = file->private_data;
49276 char buf[8];
49277- int buf_size;
49278+ size_t buf_size;
49279 int flush;
49280
49281 memset(buf, 0, sizeof(buf));
49282@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49283
49284 struct iwl_priv *priv = file->private_data;
49285 char buf[8];
49286- int buf_size;
49287+ size_t buf_size;
49288 int rts;
49289
49290 if (!priv->cfg->ht_params)
49291@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49292 {
49293 struct iwl_priv *priv = file->private_data;
49294 char buf[8];
49295- int buf_size;
49296+ size_t buf_size;
49297
49298 memset(buf, 0, sizeof(buf));
49299 buf_size = min(count, sizeof(buf) - 1);
49300@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49301 struct iwl_priv *priv = file->private_data;
49302 u32 event_log_flag;
49303 char buf[8];
49304- int buf_size;
49305+ size_t buf_size;
49306
49307 /* check that the interface is up */
49308 if (!iwl_is_ready(priv))
49309@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49310 struct iwl_priv *priv = file->private_data;
49311 char buf[8];
49312 u32 calib_disabled;
49313- int buf_size;
49314+ size_t buf_size;
49315
49316 memset(buf, 0, sizeof(buf));
49317 buf_size = min(count, sizeof(buf) - 1);
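
Every debugfs write handler here stores min(count, sizeof(buf) - 1) into buf_size; both operands are size_t, so the patch gives the variable that type instead of narrowing through a signed int that is immediately handed back to size_t-typed copy routines. The clamp in isolation:

#include <stdio.h>
#include <string.h>

static size_t clamp_len(size_t count, size_t cap)
{
	/* Both operands are size_t; storing the result in a signed int,
	 * as before the patch, narrows it only to widen it again for the
	 * copy_from_user()-style callee. */
	return count < cap ? count : cap;
}

int main(void)
{
	char buf[8];

	memset(buf, 0, sizeof(buf));
	printf("%zu\n", clamp_len(100, sizeof(buf) - 1));
	return 0;
}
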
49318diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49319index 69935aa..c1ca128 100644
49320--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49321+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49322@@ -1836,7 +1836,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49323 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49324
49325 char buf[8];
49326- int buf_size;
49327+ size_t buf_size;
49328 u32 reset_flag;
49329
49330 memset(buf, 0, sizeof(buf));
49331@@ -1857,7 +1857,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49332 {
49333 struct iwl_trans *trans = file->private_data;
49334 char buf[8];
49335- int buf_size;
49336+ size_t buf_size;
49337 int csr;
49338
49339 memset(buf, 0, sizeof(buf));
49340diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49341index 8908be6..fe97ddd 100644
49342--- a/drivers/net/wireless/mac80211_hwsim.c
49343+++ b/drivers/net/wireless/mac80211_hwsim.c
49344@@ -3070,20 +3070,20 @@ static int __init init_mac80211_hwsim(void)
49345 if (channels < 1)
49346 return -EINVAL;
49347
49348- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49349- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49350- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49351- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49352- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49353- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49354- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49355- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49356- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49357- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49358- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49359- mac80211_hwsim_assign_vif_chanctx;
49360- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49361- mac80211_hwsim_unassign_vif_chanctx;
49362+ pax_open_kernel();
49363+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49364+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49365+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49366+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49367+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49368+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49369+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49370+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49371+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49372+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49373+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49374+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49375+ pax_close_kernel();
49376
49377 spin_lock_init(&hwsim_radio_lock);
49378 INIT_LIST_HEAD(&hwsim_radios);
49379diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49380index 60d44ce..884dd1c 100644
49381--- a/drivers/net/wireless/rndis_wlan.c
49382+++ b/drivers/net/wireless/rndis_wlan.c
49383@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49384
49385 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49386
49387- if (rts_threshold < 0 || rts_threshold > 2347)
49388+ if (rts_threshold > 2347)
49389 rts_threshold = 2347;
49390
49391 tmp = cpu_to_le32(rts_threshold);
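
rts_threshold is a u32, so the old rts_threshold < 0 arm was dead code (an unsigned value is never negative, and gcc warns about the comparison); only the upper clamp has any effect. Demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int rts_threshold = 4000;	/* u32 in the driver */

	/* 'rts_threshold < 0' can never be true for an unsigned type,
	 * so the patch drops the dead lower-bound test. */
	if (rts_threshold > 2347)
		rts_threshold = 2347;
	printf("%u\n", rts_threshold);
	return 0;
}
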
49392diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49393index 9bb398b..b0cc047 100644
49394--- a/drivers/net/wireless/rt2x00/rt2x00.h
49395+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49396@@ -375,7 +375,7 @@ struct rt2x00_intf {
49397 * for hardware which doesn't support hardware
49398 * sequence counting.
49399 */
49400- atomic_t seqno;
49401+ atomic_unchecked_t seqno;
49402 };
49403
49404 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49405diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49406index 68b620b..92ecd9e 100644
49407--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49408+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49409@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49410 * sequence counter given by mac80211.
49411 */
49412 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49413- seqno = atomic_add_return(0x10, &intf->seqno);
49414+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49415 else
49416- seqno = atomic_read(&intf->seqno);
49417+ seqno = atomic_read_unchecked(&intf->seqno);
49418
49419 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49420 hdr->seq_ctrl |= cpu_to_le16(seqno);
49421diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49422index b661f896..ddf7d2b 100644
49423--- a/drivers/net/wireless/ti/wl1251/sdio.c
49424+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49425@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49426
49427 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49428
49429- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49430- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49431+ pax_open_kernel();
49432+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49433+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49434+ pax_close_kernel();
49435
49436 wl1251_info("using dedicated interrupt line");
49437 } else {
49438- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49439- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49440+ pax_open_kernel();
49441+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49442+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49443+ pax_close_kernel();
49444
49445 wl1251_info("using SDIO interrupt");
49446 }
49447diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49448index 144d1f8..7030936 100644
49449--- a/drivers/net/wireless/ti/wl12xx/main.c
49450+++ b/drivers/net/wireless/ti/wl12xx/main.c
49451@@ -657,7 +657,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49452 sizeof(wl->conf.mem));
49453
49454 /* read data preparation is only needed by wl127x */
49455- wl->ops->prepare_read = wl127x_prepare_read;
49456+ pax_open_kernel();
49457+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49458+ pax_close_kernel();
49459
49460 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49461 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49462@@ -682,7 +684,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49463 sizeof(wl->conf.mem));
49464
49465 /* read data preparation is only needed by wl127x */
49466- wl->ops->prepare_read = wl127x_prepare_read;
49467+ pax_open_kernel();
49468+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49469+ pax_close_kernel();
49470
49471 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49472 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49473diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49474index 717c4f5..a813aeb 100644
49475--- a/drivers/net/wireless/ti/wl18xx/main.c
49476+++ b/drivers/net/wireless/ti/wl18xx/main.c
49477@@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49478 }
49479
49480 if (!checksum_param) {
49481- wl18xx_ops.set_rx_csum = NULL;
49482- wl18xx_ops.init_vif = NULL;
49483+ pax_open_kernel();
49484+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49485+ *(void **)&wl18xx_ops.init_vif = NULL;
49486+ pax_close_kernel();
49487 }
49488
49489 /* Enable 11a Band only if we have 5G antennas */
49490diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49491index a912dc0..a8225ba 100644
49492--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49493+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49494@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49495 {
49496 struct zd_usb *usb = urb->context;
49497 struct zd_usb_interrupt *intr = &usb->intr;
49498- int len;
49499+ unsigned int len;
49500 u16 int_num;
49501
49502 ZD_ASSERT(in_interrupt());
49503diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49504index ce2e2cf..f81e500 100644
49505--- a/drivers/nfc/nfcwilink.c
49506+++ b/drivers/nfc/nfcwilink.c
49507@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49508
49509 static int nfcwilink_probe(struct platform_device *pdev)
49510 {
49511- static struct nfcwilink *drv;
49512+ struct nfcwilink *drv;
49513 int rc;
49514 __u32 protocols;
49515
49516diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49517index 24d3d24..ff70d28 100644
49518--- a/drivers/nfc/st21nfca/st21nfca.c
49519+++ b/drivers/nfc/st21nfca/st21nfca.c
49520@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49521 goto exit;
49522 }
49523
49524- gate = uid_skb->data;
49525+ memcpy(gate, uid_skb->data, uid_skb->len);
49526 *len = uid_skb->len;
49527 exit:
49528 kfree_skb(uid_skb);
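
In st21nfca_get_iso14443_3_uid(), assigning uid_skb->data to the gate parameter only updated a local copy of the pointer, which died on return (and pointed into an skb about to be freed); the fix memcpy()s the UID bytes into the caller's buffer. The bug class in miniature:

#include <stdio.h>
#include <string.h>

/* Writing to 'out' itself only changes a local copy of the pointer. */
static void get_uid_broken(unsigned char *out, unsigned char *src)
{
	out = src;		/* lost when the function returns */
	(void)out;
}

/* The fix: copy the bytes into the buffer the caller supplied. */
static void get_uid_fixed(unsigned char *out, const unsigned char *src,
			  size_t len)
{
	memcpy(out, src, len);
}

int main(void)
{
	unsigned char uid[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char out[4] = { 0 };

	get_uid_broken(out, uid);
	printf("broken: %02x\n", out[0]);
	get_uid_fixed(out, uid, sizeof(uid));
	printf("fixed:  %02x\n", out[0]);
	return 0;
}
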
49529diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49530index 3a896c9..ac7b1c8 100644
49531--- a/drivers/of/fdt.c
49532+++ b/drivers/of/fdt.c
49533@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49534 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49535 return 0;
49536 }
49537- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49538+ pax_open_kernel();
49539+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49540+ pax_close_kernel();
49541 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49542 }
49543 late_initcall(of_fdt_raw_init);
49544diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49545index d93b2b6..ae50401 100644
49546--- a/drivers/oprofile/buffer_sync.c
49547+++ b/drivers/oprofile/buffer_sync.c
49548@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49549 if (cookie == NO_COOKIE)
49550 offset = pc;
49551 if (cookie == INVALID_COOKIE) {
49552- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49553+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49554 offset = pc;
49555 }
49556 if (cookie != last_cookie) {
49557@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49558 /* add userspace sample */
49559
49560 if (!mm) {
49561- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49562+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49563 return 0;
49564 }
49565
49566 cookie = lookup_dcookie(mm, s->eip, &offset);
49567
49568 if (cookie == INVALID_COOKIE) {
49569- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49570+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49571 return 0;
49572 }
49573
49574@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49575 /* ignore backtraces if failed to add a sample */
49576 if (state == sb_bt_start) {
49577 state = sb_bt_ignore;
49578- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49579+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49580 }
49581 }
49582 release_mm(mm);
49583diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49584index c0cc4e7..44d4e54 100644
49585--- a/drivers/oprofile/event_buffer.c
49586+++ b/drivers/oprofile/event_buffer.c
49587@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49588 }
49589
49590 if (buffer_pos == buffer_size) {
49591- atomic_inc(&oprofile_stats.event_lost_overflow);
49592+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49593 return;
49594 }
49595
49596diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49597index ed2c3ec..deda85a 100644
49598--- a/drivers/oprofile/oprof.c
49599+++ b/drivers/oprofile/oprof.c
49600@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49601 if (oprofile_ops.switch_events())
49602 return;
49603
49604- atomic_inc(&oprofile_stats.multiplex_counter);
49605+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49606 start_switch_worker();
49607 }
49608
49609diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49610index ee2cfce..7f8f699 100644
49611--- a/drivers/oprofile/oprofile_files.c
49612+++ b/drivers/oprofile/oprofile_files.c
49613@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49614
49615 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49616
49617-static ssize_t timeout_read(struct file *file, char __user *buf,
49618+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49619 size_t count, loff_t *offset)
49620 {
49621 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49622diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49623index 59659ce..6c860a0 100644
49624--- a/drivers/oprofile/oprofile_stats.c
49625+++ b/drivers/oprofile/oprofile_stats.c
49626@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49627 cpu_buf->sample_invalid_eip = 0;
49628 }
49629
49630- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49631- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49632- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49633- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49634- atomic_set(&oprofile_stats.multiplex_counter, 0);
49635+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49636+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49637+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49638+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49639+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49640 }
49641
49642
49643diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49644index 1fc622b..8c48fc3 100644
49645--- a/drivers/oprofile/oprofile_stats.h
49646+++ b/drivers/oprofile/oprofile_stats.h
49647@@ -13,11 +13,11 @@
49648 #include <linux/atomic.h>
49649
49650 struct oprofile_stat_struct {
49651- atomic_t sample_lost_no_mm;
49652- atomic_t sample_lost_no_mapping;
49653- atomic_t bt_lost_no_mapping;
49654- atomic_t event_lost_overflow;
49655- atomic_t multiplex_counter;
49656+ atomic_unchecked_t sample_lost_no_mm;
49657+ atomic_unchecked_t sample_lost_no_mapping;
49658+ atomic_unchecked_t bt_lost_no_mapping;
49659+ atomic_unchecked_t event_lost_overflow;
49660+ atomic_unchecked_t multiplex_counter;
49661 };
49662
49663 extern struct oprofile_stat_struct oprofile_stats;
49664diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49665index 3f49345..c750d0b 100644
49666--- a/drivers/oprofile/oprofilefs.c
49667+++ b/drivers/oprofile/oprofilefs.c
49668@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49669
49670 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49671 {
49672- atomic_t *val = file->private_data;
49673- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49674+ atomic_unchecked_t *val = file->private_data;
49675+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49676 }
49677
49678
49679@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49680
49681
49682 int oprofilefs_create_ro_atomic(struct dentry *root,
49683- char const *name, atomic_t *val)
49684+ char const *name, atomic_unchecked_t *val)
49685 {
49686 return __oprofilefs_create_file(root, name,
49687 &atomic_ro_fops, 0444, val);
49688diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49689index bdef916..88c7dee 100644
49690--- a/drivers/oprofile/timer_int.c
49691+++ b/drivers/oprofile/timer_int.c
49692@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49693 return NOTIFY_OK;
49694 }
49695
49696-static struct notifier_block __refdata oprofile_cpu_notifier = {
49697+static struct notifier_block oprofile_cpu_notifier = {
49698 .notifier_call = oprofile_cpu_notify,
49699 };
49700
49701diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49702index 3b47080..6cd05dd 100644
49703--- a/drivers/parport/procfs.c
49704+++ b/drivers/parport/procfs.c
49705@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49706
49707 *ppos += len;
49708
49709- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49710+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49711 }
49712
49713 #ifdef CONFIG_PARPORT_1284
49714@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49715
49716 *ppos += len;
49717
49718- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49719+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49720 }
49721 #endif /* IEEE1284.3 support. */
49722
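
The parport change adds a bounds check before copy_to_user() so a miscomputed len can never read past the on-stack buffer. A userspace analogue of the guard, with memcpy standing in for copy_to_user():

#include <errno.h>
#include <string.h>
#include <sys/types.h>

/* Refuse the copy outright if the computed length exceeds the source
 * buffer, instead of trusting the caller's arithmetic. */
static ssize_t copy_out(char *result, const char *buffer, size_t buflen,
			size_t len)
{
	if (len > buflen)
		return -EFAULT;
	memcpy(result, buffer, len);	/* kernel: copy_to_user() */
	return (ssize_t)len;
}
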
49723diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
49724index ba46e58..90cfc24 100644
49725--- a/drivers/pci/host/pci-host-generic.c
49726+++ b/drivers/pci/host/pci-host-generic.c
49727@@ -26,9 +26,9 @@
49728 #include <linux/platform_device.h>
49729
49730 struct gen_pci_cfg_bus_ops {
49731+ struct pci_ops ops;
49732 u32 bus_shift;
49733- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
49734-};
49735+} __do_const;
49736
49737 struct gen_pci_cfg_windows {
49738 struct resource res;
49739@@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
49740 }
49741
49742 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
49743+ .ops = {
49744+ .map_bus = gen_pci_map_cfg_bus_cam,
49745+ .read = pci_generic_config_read,
49746+ .write = pci_generic_config_write,
49747+ },
49748 .bus_shift = 16,
49749- .map_bus = gen_pci_map_cfg_bus_cam,
49750 };
49751
49752 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
49753@@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
49754 }
49755
49756 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
49757+ .ops = {
49758+ .map_bus = gen_pci_map_cfg_bus_ecam,
49759+ .read = pci_generic_config_read,
49760+ .write = pci_generic_config_write,
49761+ },
49762 .bus_shift = 20,
49763- .map_bus = gen_pci_map_cfg_bus_ecam,
49764-};
49765-
49766-static struct pci_ops gen_pci_ops = {
49767- .read = pci_generic_config_read,
49768- .write = pci_generic_config_write,
49769 };
49770
49771 static const struct of_device_id gen_pci_of_match[] = {
49772@@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
49773 .private_data = (void **)&pci,
49774 .setup = gen_pci_setup,
49775 .map_irq = of_irq_parse_and_map_pci,
49776- .ops = &gen_pci_ops,
49777 };
49778
49779 if (!pci)
49780@@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
49781
49782 of_id = of_match_node(gen_pci_of_match, np);
49783 pci->cfg.ops = of_id->data;
49784- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
49785+ hw.ops = &pci->cfg.ops->ops;
49786 pci->host.dev.parent = dev;
49787 INIT_LIST_HEAD(&pci->host.windows);
49788 INIT_LIST_HEAD(&pci->resources);
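
The pci-host-generic rework folds the per-mode map_bus pointer into a fully initialized pci_ops embedded in a __do_const structure, so probe selects a read-only variant by pointer instead of patching a writable global at runtime. A compilable sketch of the shape (names and shifts illustrative, not the kernel API):

struct ops {
	void *(*map_bus)(unsigned int devfn, int where);
	unsigned int bus_shift;
};

static void *map_cam(unsigned int devfn, int where)
{
	return (void *)(unsigned long)((devfn << 8) + where);
}

static void *map_ecam(unsigned int devfn, int where)
{
	return (void *)(unsigned long)((devfn << 12) + where);
}

/* One const, fully initialized variant per configuration mode. */
static const struct ops cam_ops  = { .map_bus = map_cam,  .bus_shift = 16 };
static const struct ops ecam_ops = { .map_bus = map_ecam, .bus_shift = 20 };

static const struct ops *select_ops(int ecam)
{
	return ecam ? &ecam_ops : &cam_ops;	/* no writable fn pointers left */
}
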
49789diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49790index 6ca2399..68d866b 100644
49791--- a/drivers/pci/hotplug/acpiphp_ibm.c
49792+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49793@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49794 goto init_cleanup;
49795 }
49796
49797- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49798+ pax_open_kernel();
49799+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49800+ pax_close_kernel();
49801 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49802
49803 return retval;
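
pax_open_kernel()/pax_close_kernel() bracket a deliberate write to data that KERNEXEC/constify otherwise keep read-only. A userspace analogue using mprotect(), purely to illustrate the bracketing; this is an assumption about the mechanism's shape, not the kernel implementation:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pg = (size_t)sysconf(_SC_PAGESIZE);
	char *attr = mmap(NULL, pg, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (attr == MAP_FAILED)
		return 1;

	strcpy(attr, "early");
	mprotect(attr, pg, PROT_READ);		/* data is now "const" */

	mprotect(attr, pg, PROT_READ | PROT_WRITE);	/* pax_open_kernel()  */
	strcpy(attr, "late");				/* one deliberate write */
	mprotect(attr, pg, PROT_READ);			/* pax_close_kernel() */

	puts(attr);
	return 0;
}
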
49804diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49805index 66b7bbe..26bee78 100644
49806--- a/drivers/pci/hotplug/cpcihp_generic.c
49807+++ b/drivers/pci/hotplug/cpcihp_generic.c
49808@@ -73,7 +73,6 @@ static u16 port;
49809 static unsigned int enum_bit;
49810 static u8 enum_mask;
49811
49812-static struct cpci_hp_controller_ops generic_hpc_ops;
49813 static struct cpci_hp_controller generic_hpc;
49814
49815 static int __init validate_parameters(void)
49816@@ -139,6 +138,10 @@ static int query_enum(void)
49817 return ((value & enum_mask) == enum_mask);
49818 }
49819
49820+static struct cpci_hp_controller_ops generic_hpc_ops = {
49821+ .query_enum = query_enum,
49822+};
49823+
49824 static int __init cpcihp_generic_init(void)
49825 {
49826 int status;
49827@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49828 pci_dev_put(dev);
49829
49830 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49831- generic_hpc_ops.query_enum = query_enum;
49832 generic_hpc.ops = &generic_hpc_ops;
49833
49834 status = cpci_hp_register_controller(&generic_hpc);
49835diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49836index 7ecf34e..effed62 100644
49837--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49838+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49839@@ -59,7 +59,6 @@
49840 /* local variables */
49841 static bool debug;
49842 static bool poll;
49843-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49844 static struct cpci_hp_controller zt5550_hpc;
49845
49846 /* Primary cPCI bus bridge device */
49847@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49848 return 0;
49849 }
49850
49851+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49852+ .query_enum = zt5550_hc_query_enum,
49853+};
49854+
49855 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49856 {
49857 int status;
49858@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49859 dbg("returned from zt5550_hc_config");
49860
49861 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49862- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49863 zt5550_hpc.ops = &zt5550_hpc_ops;
49864 if (!poll) {
49865 zt5550_hpc.irq = hc_dev->irq;
49866 zt5550_hpc.irq_flags = IRQF_SHARED;
49867 zt5550_hpc.dev_id = hc_dev;
49868
49869- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49870- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49871- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49872+ pax_open_kernel();
49873+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49874+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49875+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49876+ pax_close_kernel();
49877 } else {
49878 info("using ENUM# polling mode");
49879 }
49880diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49881index 1e08ff8c..3cd145f 100644
49882--- a/drivers/pci/hotplug/cpqphp_nvram.c
49883+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49884@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49885
49886 void compaq_nvram_init (void __iomem *rom_start)
49887 {
49888+#ifndef CONFIG_PAX_KERNEXEC
49889 if (rom_start)
49890 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49891+#endif
49892
49893 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49894
49895diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49896index 56d8486..f26113f 100644
49897--- a/drivers/pci/hotplug/pci_hotplug_core.c
49898+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49899@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49900 return -EINVAL;
49901 }
49902
49903- slot->ops->owner = owner;
49904- slot->ops->mod_name = mod_name;
49905+ pax_open_kernel();
49906+ *(struct module **)&slot->ops->owner = owner;
49907+ *(const char **)&slot->ops->mod_name = mod_name;
49908+ pax_close_kernel();
49909
49910 mutex_lock(&pci_hp_mutex);
49911 /*
49912diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49913index 07aa722..84514b4 100644
49914--- a/drivers/pci/hotplug/pciehp_core.c
49915+++ b/drivers/pci/hotplug/pciehp_core.c
49916@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49917 struct slot *slot = ctrl->slot;
49918 struct hotplug_slot *hotplug = NULL;
49919 struct hotplug_slot_info *info = NULL;
49920- struct hotplug_slot_ops *ops = NULL;
49921+ hotplug_slot_ops_no_const *ops = NULL;
49922 char name[SLOT_NAME_SIZE];
49923 int retval = -ENOMEM;
49924
49925diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49926index c3e7dfc..cbd9625 100644
49927--- a/drivers/pci/msi.c
49928+++ b/drivers/pci/msi.c
49929@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49930 {
49931 struct attribute **msi_attrs;
49932 struct attribute *msi_attr;
49933- struct device_attribute *msi_dev_attr;
49934- struct attribute_group *msi_irq_group;
49935+ device_attribute_no_const *msi_dev_attr;
49936+ attribute_group_no_const *msi_irq_group;
49937 const struct attribute_group **msi_irq_groups;
49938 struct msi_desc *entry;
49939 int ret = -ENOMEM;
49940@@ -573,7 +573,7 @@ error_attrs:
49941 count = 0;
49942 msi_attr = msi_attrs[count];
49943 while (msi_attr) {
49944- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49945+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49946 kfree(msi_attr->name);
49947 kfree(msi_dev_attr);
49948 ++count;
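
device_attribute_no_const and attribute_group_no_const exist because the constify plugin turns these structure types const wholesale; the _no_const typedef opts individual, runtime-built instances (like these kzalloc'd MSI attributes) back out. A stubbed-out userspace sketch of the typedef's role:

#include <stdlib.h>

/* The plugin attribute does the real work; outside the plugin it is a
 * no-op, which is all this sketch needs. */
#define __no_const

struct device_attribute {
	const char *name;
	unsigned short mode;
};
typedef struct device_attribute __no_const device_attribute_no_const;

static device_attribute_no_const *alloc_attr(const char *name)
{
	device_attribute_no_const *a = calloc(1, sizeof(*a));

	if (a) {
		a->name = name;	/* fine: this instance is not constified */
		a->mode = 0444;
	}
	return a;
}
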
49949diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49950index 312f23a..d21181c 100644
49951--- a/drivers/pci/pci-sysfs.c
49952+++ b/drivers/pci/pci-sysfs.c
49953@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49954 {
49955 /* allocate attribute structure, piggyback attribute name */
49956 int name_len = write_combine ? 13 : 10;
49957- struct bin_attribute *res_attr;
49958+ bin_attribute_no_const *res_attr;
49959 int retval;
49960
49961 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49962@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49963 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49964 {
49965 int retval;
49966- struct bin_attribute *attr;
49967+ bin_attribute_no_const *attr;
49968
49969 /* If the device has VPD, try to expose it in sysfs. */
49970 if (dev->vpd) {
49971@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49972 {
49973 int retval;
49974 int rom_size = 0;
49975- struct bin_attribute *attr;
49976+ bin_attribute_no_const *attr;
49977
49978 if (!sysfs_initialized)
49979 return -EACCES;
49980diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49981index 4091f82..7d98eef 100644
49982--- a/drivers/pci/pci.h
49983+++ b/drivers/pci/pci.h
49984@@ -99,7 +99,7 @@ struct pci_vpd_ops {
49985 struct pci_vpd {
49986 unsigned int len;
49987 const struct pci_vpd_ops *ops;
49988- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49989+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49990 };
49991
49992 int pci_vpd_pci22_init(struct pci_dev *dev);
49993diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49994index 820740a..8b1c673 100644
49995--- a/drivers/pci/pcie/aspm.c
49996+++ b/drivers/pci/pcie/aspm.c
49997@@ -27,9 +27,9 @@
49998 #define MODULE_PARAM_PREFIX "pcie_aspm."
49999
50000 /* Note: those are not register definitions */
50001-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
50002-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
50003-#define ASPM_STATE_L1 (4) /* L1 state */
50004+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
50005+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
50006+#define ASPM_STATE_L1 (4U) /* L1 state */
50007 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
50008 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
50009
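
The U suffixes keep the ASPM state masks unsigned end to end, so complements and combinations never promote through signed int. In miniature:

#define ASPM_STATE_L0S_UP (1U)
#define ASPM_STATE_L0S_DW (2U)
#define ASPM_STATE_L1     (4U)
#define ASPM_STATE_ALL    (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW | ASPM_STATE_L1)

/* e.g. aspm_disable(ASPM_STATE_ALL, ASPM_STATE_L1) */
static unsigned int aspm_disable(unsigned int cur, unsigned int state)
{
	return cur & ~state;	/* ~state stays unsigned: no sign extension */
}
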
50010diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
50011index be35da2..ec16cdb 100644
50012--- a/drivers/pci/pcie/portdrv_pci.c
50013+++ b/drivers/pci/pcie/portdrv_pci.c
50014@@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
50015 return 0;
50016 }
50017
50018-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
50019+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
50020 /*
50021 * Boxes that should not use MSI for PCIe PME signaling.
50022 */
50023diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50024index 8d2f400..c97cc91 100644
50025--- a/drivers/pci/probe.c
50026+++ b/drivers/pci/probe.c
50027@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50028 u16 orig_cmd;
50029 struct pci_bus_region region, inverted_region;
50030
50031- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50032+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
50033
50034 /* No printks while decoding is disabled! */
50035 if (!dev->mmio_always_on) {
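
The probe.c cast matters because PCI_ROM_ADDRESS_MASK is defined as an unsigned long mask wider than the u32 being assigned; the explicit (u32) documents the intended truncation for the size-overflow checker, while the ~0 arm already fills all 32 bits. A small demonstration of the underlying width pitfall:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t from_int      = (uint64_t)~0;	/* int -1: sign-extends to all ones */
	uint64_t from_unsigned = (uint64_t)~0U;	/* 0xffffffff: zero-extends */

	printf("%llx\n%llx\n",
	       (unsigned long long)from_int,
	       (unsigned long long)from_unsigned);
	return 0;
}
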
50036diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50037index 3f155e7..0f4b1f0 100644
50038--- a/drivers/pci/proc.c
50039+++ b/drivers/pci/proc.c
50040@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50041 static int __init pci_proc_init(void)
50042 {
50043 struct pci_dev *dev = NULL;
50044+
50045+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50046+#ifdef CONFIG_GRKERNSEC_PROC_USER
50047+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50048+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50049+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50050+#endif
50051+#else
50052 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50053+#endif
50054 proc_create("devices", 0, proc_bus_pci_dir,
50055 &proc_bus_pci_dev_operations);
50056 proc_initialized = 1;
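
Under GRKERNSEC_PROC the /proc/bus/pci directory is created with restricted modes: 0500 for PROC_USER (root only), 0550 for PROC_USERGROUP (root plus a configured group), and the stock world-readable default otherwise. The mode selection as a plain-C analogue:

#include <sys/stat.h>

static mode_t bus_pci_mode(int proc_user, int proc_usergroup)
{
	if (proc_user)
		return S_IRUSR | S_IXUSR;			/* 0500 */
	if (proc_usergroup)
		return S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP;	/* 0550 */
	return S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
	       S_IROTH | S_IXOTH;				/* 0555 */
}
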
50057diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50058index b84fdd6..b89d829 100644
50059--- a/drivers/platform/chrome/chromeos_laptop.c
50060+++ b/drivers/platform/chrome/chromeos_laptop.c
50061@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50062 .callback = chromeos_laptop_dmi_matched, \
50063 .driver_data = (void *)&board_
50064
50065-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50066+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50067 {
50068 .ident = "Samsung Series 5 550",
50069 .matches = {
50070diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
50071index 3474920..acc9581 100644
50072--- a/drivers/platform/chrome/chromeos_pstore.c
50073+++ b/drivers/platform/chrome/chromeos_pstore.c
50074@@ -13,7 +13,7 @@
50075 #include <linux/platform_device.h>
50076 #include <linux/pstore_ram.h>
50077
50078-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
50079+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
50080 {
50081 /*
50082 * Today all Chromebooks/boxes ship with Google_* as version and
50083diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50084index 1e1e594..8fe59c5 100644
50085--- a/drivers/platform/x86/alienware-wmi.c
50086+++ b/drivers/platform/x86/alienware-wmi.c
50087@@ -150,7 +150,7 @@ struct wmax_led_args {
50088 } __packed;
50089
50090 static struct platform_device *platform_device;
50091-static struct device_attribute *zone_dev_attrs;
50092+static device_attribute_no_const *zone_dev_attrs;
50093 static struct attribute **zone_attrs;
50094 static struct platform_zone *zone_data;
50095
50096@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
50097 }
50098 };
50099
50100-static struct attribute_group zone_attribute_group = {
50101+static attribute_group_no_const zone_attribute_group = {
50102 .name = "rgb_zones",
50103 };
50104
50105diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50106index 7543a56..367ca8ed 100644
50107--- a/drivers/platform/x86/asus-wmi.c
50108+++ b/drivers/platform/x86/asus-wmi.c
50109@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
50110 int err;
50111 u32 retval = -1;
50112
50113+#ifdef CONFIG_GRKERNSEC_KMEM
50114+ return -EPERM;
50115+#endif
50116+
50117 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50118
50119 if (err < 0)
50120@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
50121 int err;
50122 u32 retval = -1;
50123
50124+#ifdef CONFIG_GRKERNSEC_KMEM
50125+ return -EPERM;
50126+#endif
50127+
50128 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
50129 &retval);
50130
50131@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
50132 union acpi_object *obj;
50133 acpi_status status;
50134
50135+#ifdef CONFIG_GRKERNSEC_KMEM
50136+ return -EPERM;
50137+#endif
50138+
50139 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
50140 1, asus->debug.method_id,
50141 &input, &output);
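
The three -EPERM guards shut off asus-wmi's debugfs interface under GRKERNSEC_KMEM, since it lets userland invoke arbitrary WMI/ACPI methods. The shape of the guard, as a compile-time-gated early return:

#include <errno.h>

static int show_call(void)
{
#ifdef CONFIG_GRKERNSEC_KMEM
	return -EPERM;		/* raw firmware access disabled by policy */
#else
	/* ... evaluate the WMI method and print the result ... */
	return 0;
#endif
}
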
50142diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
50143index 15c0fab..f674006 100644
50144--- a/drivers/platform/x86/compal-laptop.c
50145+++ b/drivers/platform/x86/compal-laptop.c
50146@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
50147 return 1;
50148 }
50149
50150-static struct dmi_system_id __initdata compal_dmi_table[] = {
50151+static const struct dmi_system_id __initconst compal_dmi_table[] = {
50152 {
50153 .ident = "FL90/IFL90",
50154 .matches = {
50155diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
50156index 458e6c9..089aee7 100644
50157--- a/drivers/platform/x86/hdaps.c
50158+++ b/drivers/platform/x86/hdaps.c
50159@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
50160 "ThinkPad T42p", so the order of the entries matters.
50161 If your ThinkPad is not recognized, please update to latest
50162 BIOS. This is especially the case for some R52 ThinkPads. */
50163-static struct dmi_system_id __initdata hdaps_whitelist[] = {
50164+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
50165 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
50166 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
50167 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
50168diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
50169index 97c2be1..2ee50ce 100644
50170--- a/drivers/platform/x86/ibm_rtl.c
50171+++ b/drivers/platform/x86/ibm_rtl.c
50172@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
50173 }
50174
50175
50176-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
50177+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
50178 { \
50179 .matches = { \
50180 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
50181diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
50182index a4a4258..a58a04c 100644
50183--- a/drivers/platform/x86/intel_oaktrail.c
50184+++ b/drivers/platform/x86/intel_oaktrail.c
50185@@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
50186 return 0;
50187 }
50188
50189-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
50190+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
50191 {
50192 .ident = "OakTrail platform",
50193 .matches = {
50194diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
50195index 0859877..59d596d 100644
50196--- a/drivers/platform/x86/msi-laptop.c
50197+++ b/drivers/platform/x86/msi-laptop.c
50198@@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
50199 return 1;
50200 }
50201
50202-static struct dmi_system_id __initdata msi_dmi_table[] = {
50203+static const struct dmi_system_id __initconst msi_dmi_table[] = {
50204 {
50205 .ident = "MSI S270",
50206 .matches = {
50207@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
50208
50209 if (!quirks->ec_read_only) {
50210 /* allow userland write sysfs file */
50211- dev_attr_bluetooth.store = store_bluetooth;
50212- dev_attr_wlan.store = store_wlan;
50213- dev_attr_threeg.store = store_threeg;
50214- dev_attr_bluetooth.attr.mode |= S_IWUSR;
50215- dev_attr_wlan.attr.mode |= S_IWUSR;
50216- dev_attr_threeg.attr.mode |= S_IWUSR;
50217+ pax_open_kernel();
50218+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
50219+ *(void **)&dev_attr_wlan.store = store_wlan;
50220+ *(void **)&dev_attr_threeg.store = store_threeg;
50221+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
50222+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
50223+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
50224+ pax_close_kernel();
50225 }
50226
50227 /* disable hardware control by fn key */
50228diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
50229index 6d2bac0..ec2b029 100644
50230--- a/drivers/platform/x86/msi-wmi.c
50231+++ b/drivers/platform/x86/msi-wmi.c
50232@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
50233 static void msi_wmi_notify(u32 value, void *context)
50234 {
50235 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
50236- static struct key_entry *key;
50237+ struct key_entry *key;
50238 union acpi_object *obj;
50239 acpi_status status;
50240
50241diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
50242index 9e701b2..c68a7b5 100644
50243--- a/drivers/platform/x86/samsung-laptop.c
50244+++ b/drivers/platform/x86/samsung-laptop.c
50245@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
50246 return 0;
50247 }
50248
50249-static struct dmi_system_id __initdata samsung_dmi_table[] = {
50250+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
50251 {
50252 .matches = {
50253 DMI_MATCH(DMI_SYS_VENDOR,
50254diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
50255index e6aac72..e11ff24 100644
50256--- a/drivers/platform/x86/samsung-q10.c
50257+++ b/drivers/platform/x86/samsung-q10.c
50258@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
50259 return 1;
50260 }
50261
50262-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
50263+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
50264 {
50265 .ident = "Samsung Q10",
50266 .matches = {
50267diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
50268index e51c1e7..71bb385 100644
50269--- a/drivers/platform/x86/sony-laptop.c
50270+++ b/drivers/platform/x86/sony-laptop.c
50271@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
50272 }
50273
50274 /* High speed charging function */
50275-static struct device_attribute *hsc_handle;
50276+static device_attribute_no_const *hsc_handle;
50277
50278 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
50279 struct device_attribute *attr,
50280@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
50281 }
50282
50283 /* low battery function */
50284-static struct device_attribute *lowbatt_handle;
50285+static device_attribute_no_const *lowbatt_handle;
50286
50287 static ssize_t sony_nc_lowbatt_store(struct device *dev,
50288 struct device_attribute *attr,
50289@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
50290 }
50291
50292 /* fan speed function */
50293-static struct device_attribute *fan_handle, *hsf_handle;
50294+static device_attribute_no_const *fan_handle, *hsf_handle;
50295
50296 static ssize_t sony_nc_hsfan_store(struct device *dev,
50297 struct device_attribute *attr,
50298@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50299 }
50300
50301 /* USB charge function */
50302-static struct device_attribute *uc_handle;
50303+static device_attribute_no_const *uc_handle;
50304
50305 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50306 struct device_attribute *attr,
50307@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50308 }
50309
50310 /* Panel ID function */
50311-static struct device_attribute *panel_handle;
50312+static device_attribute_no_const *panel_handle;
50313
50314 static ssize_t sony_nc_panelid_show(struct device *dev,
50315 struct device_attribute *attr, char *buffer)
50316@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50317 }
50318
50319 /* smart connect function */
50320-static struct device_attribute *sc_handle;
50321+static device_attribute_no_const *sc_handle;
50322
50323 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50324 struct device_attribute *attr,
50325@@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
50326 .drv.pm = &sony_pic_pm,
50327 };
50328
50329-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
50330+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
50331 {
50332 .ident = "Sony Vaio",
50333 .matches = {
50334diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50335index 3b8ceee..e18652c 100644
50336--- a/drivers/platform/x86/thinkpad_acpi.c
50337+++ b/drivers/platform/x86/thinkpad_acpi.c
50338@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
50339 return 0;
50340 }
50341
50342-void static hotkey_mask_warn_incomplete_mask(void)
50343+static void hotkey_mask_warn_incomplete_mask(void)
50344 {
50345 /* log only what the user can fix... */
50346 const u32 wantedmask = hotkey_driver_mask &
50347@@ -2437,10 +2437,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50348 && !tp_features.bright_unkfw)
50349 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50350 }
50351+}
50352
50353 #undef TPACPI_COMPARE_KEY
50354 #undef TPACPI_MAY_SEND_KEY
50355-}
50356
50357 /*
50358 * Polling driver
50359diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50360index 438d4c7..ca8a2fb 100644
50361--- a/drivers/pnp/pnpbios/bioscalls.c
50362+++ b/drivers/pnp/pnpbios/bioscalls.c
50363@@ -59,7 +59,7 @@ do { \
50364 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50365 } while(0)
50366
50367-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50368+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50369 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50370
50371 /*
50372@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50373
50374 cpu = get_cpu();
50375 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50376+
50377+ pax_open_kernel();
50378 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50379+ pax_close_kernel();
50380
50381 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50382 spin_lock_irqsave(&pnp_bios_lock, flags);
50383@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50384 :"memory");
50385 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50386
50387+ pax_open_kernel();
50388 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50389+ pax_close_kernel();
50390+
50391 put_cpu();
50392
50393 /* If we get here and this is set then the PnP BIOS faulted on us. */
50394@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50395 return status;
50396 }
50397
50398-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50399+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50400 {
50401 int i;
50402
50403@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50404 pnp_bios_callpoint.offset = header->fields.pm16offset;
50405 pnp_bios_callpoint.segment = PNP_CS16;
50406
50407+ pax_open_kernel();
50408+
50409 for_each_possible_cpu(i) {
50410 struct desc_struct *gdt = get_cpu_gdt_table(i);
50411 if (!gdt)
50412@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50413 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50414 (unsigned long)__va(header->fields.pm16dseg));
50415 }
50416+
50417+ pax_close_kernel();
50418 }
50419diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
50420index facd43b..b291260 100644
50421--- a/drivers/pnp/pnpbios/core.c
50422+++ b/drivers/pnp/pnpbios/core.c
50423@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
50424 return 0;
50425 }
50426
50427-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
50428+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
50429 { /* PnPBIOS GPF on boot */
50430 .callback = exploding_pnp_bios,
50431 .ident = "Higraded P14H",
50432diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50433index 0c52e2a..3421ab7 100644
50434--- a/drivers/power/pda_power.c
50435+++ b/drivers/power/pda_power.c
50436@@ -37,7 +37,11 @@ static int polling;
50437
50438 #if IS_ENABLED(CONFIG_USB_PHY)
50439 static struct usb_phy *transceiver;
50440-static struct notifier_block otg_nb;
50441+static int otg_handle_notification(struct notifier_block *nb,
50442+ unsigned long event, void *unused);
50443+static struct notifier_block otg_nb = {
50444+ .notifier_call = otg_handle_notification
50445+};
50446 #endif
50447
50448 static struct regulator *ac_draw;
50449@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50450
50451 #if IS_ENABLED(CONFIG_USB_PHY)
50452 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50453- otg_nb.notifier_call = otg_handle_notification;
50454 ret = usb_register_notifier(transceiver, &otg_nb);
50455 if (ret) {
50456 dev_err(dev, "failure to register otg notifier\n");
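
pda_power now forward-declares the callback so otg_nb can be fully initialized at compile time and left read-only, rather than having .notifier_call filled in at probe. The pattern in isolation, with the struct redefined locally for the sketch:

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb, unsigned long event,
			     void *data);
};

static int otg_handle_notification(struct notifier_block *nb,
				   unsigned long event, void *unused);

static struct notifier_block otg_nb = {
	.notifier_call = otg_handle_notification,	/* bound at build time */
};

static int otg_handle_notification(struct notifier_block *nb,
				   unsigned long event, void *unused)
{
	(void)nb; (void)event; (void)unused;
	return 0;
}
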
50457diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50458index cc439fd..8fa30df 100644
50459--- a/drivers/power/power_supply.h
50460+++ b/drivers/power/power_supply.h
50461@@ -16,12 +16,12 @@ struct power_supply;
50462
50463 #ifdef CONFIG_SYSFS
50464
50465-extern void power_supply_init_attrs(struct device_type *dev_type);
50466+extern void power_supply_init_attrs(void);
50467 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50468
50469 #else
50470
50471-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50472+static inline void power_supply_init_attrs(void) {}
50473 #define power_supply_uevent NULL
50474
50475 #endif /* CONFIG_SYSFS */
50476diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50477index 694e8cd..9f03483 100644
50478--- a/drivers/power/power_supply_core.c
50479+++ b/drivers/power/power_supply_core.c
50480@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50481 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50482 EXPORT_SYMBOL_GPL(power_supply_notifier);
50483
50484-static struct device_type power_supply_dev_type;
50485+extern const struct attribute_group *power_supply_attr_groups[];
50486+static struct device_type power_supply_dev_type = {
50487+ .groups = power_supply_attr_groups,
50488+};
50489
50490 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50491 struct power_supply *supply)
50492@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50493 return PTR_ERR(power_supply_class);
50494
50495 power_supply_class->dev_uevent = power_supply_uevent;
50496- power_supply_init_attrs(&power_supply_dev_type);
50497+ power_supply_init_attrs();
50498
50499 return 0;
50500 }
50501diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50502index 62653f5..d0bb485 100644
50503--- a/drivers/power/power_supply_sysfs.c
50504+++ b/drivers/power/power_supply_sysfs.c
50505@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50506 .is_visible = power_supply_attr_is_visible,
50507 };
50508
50509-static const struct attribute_group *power_supply_attr_groups[] = {
50510+const struct attribute_group *power_supply_attr_groups[] = {
50511 &power_supply_attr_group,
50512 NULL,
50513 };
50514
50515-void power_supply_init_attrs(struct device_type *dev_type)
50516+void power_supply_init_attrs(void)
50517 {
50518 int i;
50519
50520- dev_type->groups = power_supply_attr_groups;
50521-
50522 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50523 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50524 }
50525diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50526index 84419af..268ede8 100644
50527--- a/drivers/powercap/powercap_sys.c
50528+++ b/drivers/powercap/powercap_sys.c
50529@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50530 struct device_attribute name_attr;
50531 };
50532
50533+static ssize_t show_constraint_name(struct device *dev,
50534+ struct device_attribute *dev_attr,
50535+ char *buf);
50536+
50537 static struct powercap_constraint_attr
50538- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50539+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50540+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50541+ .power_limit_attr = {
50542+ .attr = {
50543+ .name = NULL,
50544+ .mode = S_IWUSR | S_IRUGO
50545+ },
50546+ .show = show_constraint_power_limit_uw,
50547+ .store = store_constraint_power_limit_uw
50548+ },
50549+
50550+ .time_window_attr = {
50551+ .attr = {
50552+ .name = NULL,
50553+ .mode = S_IWUSR | S_IRUGO
50554+ },
50555+ .show = show_constraint_time_window_us,
50556+ .store = store_constraint_time_window_us
50557+ },
50558+
50559+ .max_power_attr = {
50560+ .attr = {
50561+ .name = NULL,
50562+ .mode = S_IRUGO
50563+ },
50564+ .show = show_constraint_max_power_uw,
50565+ .store = NULL
50566+ },
50567+
50568+ .min_power_attr = {
50569+ .attr = {
50570+ .name = NULL,
50571+ .mode = S_IRUGO
50572+ },
50573+ .show = show_constraint_min_power_uw,
50574+ .store = NULL
50575+ },
50576+
50577+ .max_time_window_attr = {
50578+ .attr = {
50579+ .name = NULL,
50580+ .mode = S_IRUGO
50581+ },
50582+ .show = show_constraint_max_time_window_us,
50583+ .store = NULL
50584+ },
50585+
50586+ .min_time_window_attr = {
50587+ .attr = {
50588+ .name = NULL,
50589+ .mode = S_IRUGO
50590+ },
50591+ .show = show_constraint_min_time_window_us,
50592+ .store = NULL
50593+ },
50594+
50595+ .name_attr = {
50596+ .attr = {
50597+ .name = NULL,
50598+ .mode = S_IRUGO
50599+ },
50600+ .show = show_constraint_name,
50601+ .store = NULL
50602+ }
50603+ }
50604+};
50605
50606 /* A list of powercap control_types */
50607 static LIST_HEAD(powercap_cntrl_list);
50608@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50609 }
50610
50611 static int create_constraint_attribute(int id, const char *name,
50612- int mode,
50613- struct device_attribute *dev_attr,
50614- ssize_t (*show)(struct device *,
50615- struct device_attribute *, char *),
50616- ssize_t (*store)(struct device *,
50617- struct device_attribute *,
50618- const char *, size_t)
50619- )
50620+ struct device_attribute *dev_attr)
50621 {
50622+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50623
50624- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50625- id, name);
50626- if (!dev_attr->attr.name)
50627+ if (!name)
50628 return -ENOMEM;
50629- dev_attr->attr.mode = mode;
50630- dev_attr->show = show;
50631- dev_attr->store = store;
50632+
50633+ pax_open_kernel();
50634+ *(const char **)&dev_attr->attr.name = name;
50635+ pax_close_kernel();
50636
50637 return 0;
50638 }
50639@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50640
50641 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50642 ret = create_constraint_attribute(i, "power_limit_uw",
50643- S_IWUSR | S_IRUGO,
50644- &constraint_attrs[i].power_limit_attr,
50645- show_constraint_power_limit_uw,
50646- store_constraint_power_limit_uw);
50647+ &constraint_attrs[i].power_limit_attr);
50648 if (ret)
50649 goto err_alloc;
50650 ret = create_constraint_attribute(i, "time_window_us",
50651- S_IWUSR | S_IRUGO,
50652- &constraint_attrs[i].time_window_attr,
50653- show_constraint_time_window_us,
50654- store_constraint_time_window_us);
50655+ &constraint_attrs[i].time_window_attr);
50656 if (ret)
50657 goto err_alloc;
50658- ret = create_constraint_attribute(i, "name", S_IRUGO,
50659- &constraint_attrs[i].name_attr,
50660- show_constraint_name,
50661- NULL);
50662+ ret = create_constraint_attribute(i, "name",
50663+ &constraint_attrs[i].name_attr);
50664 if (ret)
50665 goto err_alloc;
50666- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50667- &constraint_attrs[i].max_power_attr,
50668- show_constraint_max_power_uw,
50669- NULL);
50670+ ret = create_constraint_attribute(i, "max_power_uw",
50671+ &constraint_attrs[i].max_power_attr);
50672 if (ret)
50673 goto err_alloc;
50674- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50675- &constraint_attrs[i].min_power_attr,
50676- show_constraint_min_power_uw,
50677- NULL);
50678+ ret = create_constraint_attribute(i, "min_power_uw",
50679+ &constraint_attrs[i].min_power_attr);
50680 if (ret)
50681 goto err_alloc;
50682 ret = create_constraint_attribute(i, "max_time_window_us",
50683- S_IRUGO,
50684- &constraint_attrs[i].max_time_window_attr,
50685- show_constraint_max_time_window_us,
50686- NULL);
50687+ &constraint_attrs[i].max_time_window_attr);
50688 if (ret)
50689 goto err_alloc;
50690 ret = create_constraint_attribute(i, "min_time_window_us",
50691- S_IRUGO,
50692- &constraint_attrs[i].min_time_window_attr,
50693- show_constraint_min_time_window_us,
50694- NULL);
50695+ &constraint_attrs[i].min_time_window_attr);
50696 if (ret)
50697 goto err_alloc;
50698
50699@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50700 power_zone->zone_dev_attrs[count++] =
50701 &dev_attr_max_energy_range_uj.attr;
50702 if (power_zone->ops->get_energy_uj) {
50703+ pax_open_kernel();
50704 if (power_zone->ops->reset_energy_uj)
50705- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50706+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50707 else
50708- dev_attr_energy_uj.attr.mode = S_IRUGO;
50709+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50710+ pax_close_kernel();
50711 power_zone->zone_dev_attrs[count++] =
50712 &dev_attr_energy_uj.attr;
50713 }
50714diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50715index 9c5d414..c7900ce 100644
50716--- a/drivers/ptp/ptp_private.h
50717+++ b/drivers/ptp/ptp_private.h
50718@@ -51,7 +51,7 @@ struct ptp_clock {
50719 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50720 wait_queue_head_t tsev_wq;
50721 int defunct; /* tells readers to go away when clock is being removed */
50722- struct device_attribute *pin_dev_attr;
50723+ device_attribute_no_const *pin_dev_attr;
50724 struct attribute **pin_attr;
50725 struct attribute_group pin_attr_group;
50726 };
50727diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50728index 302e626..12579af 100644
50729--- a/drivers/ptp/ptp_sysfs.c
50730+++ b/drivers/ptp/ptp_sysfs.c
50731@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50732 goto no_pin_attr;
50733
50734 for (i = 0; i < n_pins; i++) {
50735- struct device_attribute *da = &ptp->pin_dev_attr[i];
50736+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50737 sysfs_attr_init(&da->attr);
50738 da->attr.name = info->pin_config[i].name;
50739 da->attr.mode = 0644;
50740diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50741index a4a8a6d..a3456f4 100644
50742--- a/drivers/regulator/core.c
50743+++ b/drivers/regulator/core.c
50744@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50745 const struct regulation_constraints *constraints = NULL;
50746 const struct regulator_init_data *init_data;
50747 struct regulator_config *config = NULL;
50748- static atomic_t regulator_no = ATOMIC_INIT(-1);
50749+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
50750 struct regulator_dev *rdev;
50751 struct device *dev;
50752 int ret, i;
50753@@ -3613,7 +3613,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50754 rdev->dev.class = &regulator_class;
50755 rdev->dev.parent = dev;
50756 dev_set_name(&rdev->dev, "regulator.%lu",
50757- (unsigned long) atomic_inc_return(&regulator_no));
50758+ (unsigned long) atomic_inc_return_unchecked(&regulator_no));
50759 ret = device_register(&rdev->dev);
50760 if (ret != 0) {
50761 put_device(&rdev->dev);
50762diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50763index 7eee2ca..4024513 100644
50764--- a/drivers/regulator/max8660.c
50765+++ b/drivers/regulator/max8660.c
50766@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50767 max8660->shadow_regs[MAX8660_OVER1] = 5;
50768 } else {
50769 /* Otherwise devices can be toggled via software */
50770- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50771- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50772+ pax_open_kernel();
50773+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50774+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50775+ pax_close_kernel();
50776 }
50777
50778 /*
50779diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50780index c3d55c2..0dddfe6 100644
50781--- a/drivers/regulator/max8973-regulator.c
50782+++ b/drivers/regulator/max8973-regulator.c
50783@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50784 if (!pdata || !pdata->enable_ext_control) {
50785 max->desc.enable_reg = MAX8973_VOUT;
50786 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50787- max->ops.enable = regulator_enable_regmap;
50788- max->ops.disable = regulator_disable_regmap;
50789- max->ops.is_enabled = regulator_is_enabled_regmap;
50790+ pax_open_kernel();
50791+ *(void **)&max->ops.enable = regulator_enable_regmap;
50792+ *(void **)&max->ops.disable = regulator_disable_regmap;
50793+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50794+ pax_close_kernel();
50795 }
50796
50797 if (pdata) {
50798diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50799index 0d17c92..a29f627 100644
50800--- a/drivers/regulator/mc13892-regulator.c
50801+++ b/drivers/regulator/mc13892-regulator.c
50802@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50803 mc13xxx_unlock(mc13892);
50804
50805 /* update mc13892_vcam ops */
50806- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50807+ pax_open_kernel();
50808+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50809 sizeof(struct regulator_ops));
50810- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50811- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50812+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50813+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50814+ pax_close_kernel();
50815 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50816
50817 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50818diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50819index 5b2e761..c8c8a4a 100644
50820--- a/drivers/rtc/rtc-cmos.c
50821+++ b/drivers/rtc/rtc-cmos.c
50822@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50823 hpet_rtc_timer_init();
50824
50825 /* export at least the first block of NVRAM */
50826- nvram.size = address_space - NVRAM_OFFSET;
50827+ pax_open_kernel();
50828+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50829+ pax_close_kernel();
50830 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50831 if (retval < 0) {
50832 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50833diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50834index 799c34b..8e9786a 100644
50835--- a/drivers/rtc/rtc-dev.c
50836+++ b/drivers/rtc/rtc-dev.c
50837@@ -16,6 +16,7 @@
50838 #include <linux/module.h>
50839 #include <linux/rtc.h>
50840 #include <linux/sched.h>
50841+#include <linux/grsecurity.h>
50842 #include "rtc-core.h"
50843
50844 static dev_t rtc_devt;
50845@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50846 if (copy_from_user(&tm, uarg, sizeof(tm)))
50847 return -EFAULT;
50848
50849+ gr_log_timechange();
50850+
50851 return rtc_set_time(rtc, &tm);
50852
50853 case RTC_PIE_ON:
50854diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50855index 4ffabb3..1f87fca 100644
50856--- a/drivers/rtc/rtc-ds1307.c
50857+++ b/drivers/rtc/rtc-ds1307.c
50858@@ -107,7 +107,7 @@ struct ds1307 {
50859 u8 offset; /* register's offset */
50860 u8 regs[11];
50861 u16 nvram_offset;
50862- struct bin_attribute *nvram;
50863+ bin_attribute_no_const *nvram;
50864 enum ds_type type;
50865 unsigned long flags;
50866 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50867diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50868index 90abb5b..e0bf6dd 100644
50869--- a/drivers/rtc/rtc-m48t59.c
50870+++ b/drivers/rtc/rtc-m48t59.c
50871@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50872 if (IS_ERR(m48t59->rtc))
50873 return PTR_ERR(m48t59->rtc);
50874
50875- m48t59_nvram_attr.size = pdata->offset;
50876+ pax_open_kernel();
50877+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50878+ pax_close_kernel();
50879
50880 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50881 if (ret)
50882diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50883index e693af6..2e525b6 100644
50884--- a/drivers/scsi/bfa/bfa_fcpim.h
50885+++ b/drivers/scsi/bfa/bfa_fcpim.h
50886@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50887
50888 struct bfa_itn_s {
50889 bfa_isr_func_t isr;
50890-};
50891+} __no_const;
50892
50893 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50894 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50895diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50896index 0f19455..ef7adb5 100644
50897--- a/drivers/scsi/bfa/bfa_fcs.c
50898+++ b/drivers/scsi/bfa/bfa_fcs.c
50899@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50900 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50901
50902 static struct bfa_fcs_mod_s fcs_modules[] = {
50903- { bfa_fcs_port_attach, NULL, NULL },
50904- { bfa_fcs_uf_attach, NULL, NULL },
50905- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50906- bfa_fcs_fabric_modexit },
50907+ {
50908+ .attach = bfa_fcs_port_attach,
50909+ .modinit = NULL,
50910+ .modexit = NULL
50911+ },
50912+ {
50913+ .attach = bfa_fcs_uf_attach,
50914+ .modinit = NULL,
50915+ .modexit = NULL
50916+ },
50917+ {
50918+ .attach = bfa_fcs_fabric_attach,
50919+ .modinit = bfa_fcs_fabric_modinit,
50920+ .modexit = bfa_fcs_fabric_modexit
50921+ },
50922 };
50923
50924 /*
50925diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50926index ff75ef8..2dfe00a 100644
50927--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50928+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50929@@ -89,15 +89,26 @@ static struct {
50930 void (*offline) (struct bfa_fcs_lport_s *port);
50931 } __port_action[] = {
50932 {
50933- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50934- bfa_fcs_lport_unknown_offline}, {
50935- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50936- bfa_fcs_lport_fab_offline}, {
50937- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50938- bfa_fcs_lport_n2n_offline}, {
50939- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50940- bfa_fcs_lport_loop_offline},
50941- };
50942+ .init = bfa_fcs_lport_unknown_init,
50943+ .online = bfa_fcs_lport_unknown_online,
50944+ .offline = bfa_fcs_lport_unknown_offline
50945+ },
50946+ {
50947+ .init = bfa_fcs_lport_fab_init,
50948+ .online = bfa_fcs_lport_fab_online,
50949+ .offline = bfa_fcs_lport_fab_offline
50950+ },
50951+ {
50952+ .init = bfa_fcs_lport_n2n_init,
50953+ .online = bfa_fcs_lport_n2n_online,
50954+ .offline = bfa_fcs_lport_n2n_offline
50955+ },
50956+ {
50957+ .init = bfa_fcs_lport_loop_init,
50958+ .online = bfa_fcs_lport_loop_online,
50959+ .offline = bfa_fcs_lport_loop_offline
50960+ },
50961+};
50962
50963 /*
50964 * fcs_port_sm FCS logical port state machine
50965diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50966index a38aafa0..fe8f03b 100644
50967--- a/drivers/scsi/bfa/bfa_ioc.h
50968+++ b/drivers/scsi/bfa/bfa_ioc.h
50969@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50970 bfa_ioc_disable_cbfn_t disable_cbfn;
50971 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50972 bfa_ioc_reset_cbfn_t reset_cbfn;
50973-};
50974+} __no_const;
50975
50976 /*
50977 * IOC event notification mechanism.
50978@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50979 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50980 enum bfi_ioc_state fwstate);
50981 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50982-};
50983+} __no_const;
50984
50985 /*
50986 * Queue element to wait for room in request queue. FIFO order is
50987diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50988index a14c784..6de6790 100644
50989--- a/drivers/scsi/bfa/bfa_modules.h
50990+++ b/drivers/scsi/bfa/bfa_modules.h
50991@@ -78,12 +78,12 @@ enum {
50992 \
50993 extern struct bfa_module_s hal_mod_ ## __mod; \
50994 struct bfa_module_s hal_mod_ ## __mod = { \
50995- bfa_ ## __mod ## _meminfo, \
50996- bfa_ ## __mod ## _attach, \
50997- bfa_ ## __mod ## _detach, \
50998- bfa_ ## __mod ## _start, \
50999- bfa_ ## __mod ## _stop, \
51000- bfa_ ## __mod ## _iocdisable, \
51001+ .meminfo = bfa_ ## __mod ## _meminfo, \
51002+ .attach = bfa_ ## __mod ## _attach, \
51003+ .detach = bfa_ ## __mod ## _detach, \
51004+ .start = bfa_ ## __mod ## _start, \
51005+ .stop = bfa_ ## __mod ## _stop, \
51006+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51007 }
51008
51009 #define BFA_CACHELINE_SZ (256)
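
The BFA module macro switches from positional to designated initializers, so the generated ops table stays correct if struct bfa_module_s is ever reordered, and each slot is bound by name for a constify-style plugin to check. The same trick reduced to essentials:

struct module_ops {
	void (*start)(void);
	void (*stop)(void);
};

#define DECLARE_MODULE_OPS(mod)			\
	struct module_ops mod##_ops = {		\
		.start = mod##_start,		\
		.stop  = mod##_stop,		\
	}

static void fcs_start(void) { }
static void fcs_stop(void)  { }

/* Expands to a fully named-field initializer for fcs_ops. */
static DECLARE_MODULE_OPS(fcs);
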
51010diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51011index 045c4e1..13de803 100644
51012--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51013+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51014@@ -33,8 +33,8 @@
51015 */
51016 #include "libfcoe.h"
51017
51018-static atomic_t ctlr_num;
51019-static atomic_t fcf_num;
51020+static atomic_unchecked_t ctlr_num;
51021+static atomic_unchecked_t fcf_num;
51022
51023 /*
51024 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51025@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51026 if (!ctlr)
51027 goto out;
51028
51029- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51030+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51031 ctlr->f = f;
51032 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51033 INIT_LIST_HEAD(&ctlr->fcfs);
51034@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51035 fcf->dev.parent = &ctlr->dev;
51036 fcf->dev.bus = &fcoe_bus_type;
51037 fcf->dev.type = &fcoe_fcf_device_type;
51038- fcf->id = atomic_inc_return(&fcf_num) - 1;
51039+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51040 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51041
51042 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51043@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51044 {
51045 int error;
51046
51047- atomic_set(&ctlr_num, 0);
51048- atomic_set(&fcf_num, 0);
51049+ atomic_set_unchecked(&ctlr_num, 0);
51050+ atomic_set_unchecked(&fcf_num, 0);
51051
51052 error = bus_register(&fcoe_bus_type);
51053 if (error)
51054diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
51055index 8bb173e..20236b4 100644
51056--- a/drivers/scsi/hosts.c
51057+++ b/drivers/scsi/hosts.c
51058@@ -42,7 +42,7 @@
51059 #include "scsi_logging.h"
51060
51061
51062-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51063+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51064
51065
51066 static void scsi_host_cls_release(struct device *dev)
51067@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
51068 * subtract one because we increment first then return, but we need to
51069 * know what the next host number was before increment
51070 */
51071- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
51072+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
51073 shost->dma_channel = 0xff;
51074
51075 /* These three are default values which can be overridden */
51076diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
51077index a1cfbd3..d7f8ebc 100644
51078--- a/drivers/scsi/hpsa.c
51079+++ b/drivers/scsi/hpsa.c
51080@@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
51081 struct reply_queue_buffer *rq = &h->reply_queue[q];
51082
51083 if (h->transMethod & CFGTBL_Trans_io_accel1)
51084- return h->access.command_completed(h, q);
51085+ return h->access->command_completed(h, q);
51086
51087 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
51088- return h->access.command_completed(h, q);
51089+ return h->access->command_completed(h, q);
51090
51091 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
51092 a = rq->head[rq->current_entry];
51093@@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
51094 break;
51095 default:
51096 set_performant_mode(h, c);
51097- h->access.submit_command(h, c);
51098+ h->access->submit_command(h, c);
51099 }
51100 }
51101
51102@@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
51103
51104 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
51105 {
51106- return h->access.command_completed(h, q);
51107+ return h->access->command_completed(h, q);
51108 }
51109
51110 static inline bool interrupt_pending(struct ctlr_info *h)
51111 {
51112- return h->access.intr_pending(h);
51113+ return h->access->intr_pending(h);
51114 }
51115
51116 static inline long interrupt_not_for_us(struct ctlr_info *h)
51117 {
51118- return (h->access.intr_pending(h) == 0) ||
51119+ return (h->access->intr_pending(h) == 0) ||
51120 (h->interrupts_enabled == 0);
51121 }
51122
51123@@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
51124 if (prod_index < 0)
51125 return prod_index;
51126 h->product_name = products[prod_index].product_name;
51127- h->access = *(products[prod_index].access);
51128+ h->access = products[prod_index].access;
51129
51130 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
51131 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
51132@@ -6649,7 +6649,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
51133 unsigned long flags;
51134 u32 lockup_detected;
51135
51136- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51137+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51138 spin_lock_irqsave(&h->lock, flags);
51139 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
51140 if (!lockup_detected) {
51141@@ -6924,7 +6924,7 @@ reinit_after_soft_reset:
51142 }
51143
51144 /* make sure the board interrupts are off */
51145- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51146+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51147
51148 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
51149 goto clean2;
51150@@ -6960,7 +6960,7 @@ reinit_after_soft_reset:
51151 * fake ones to scoop up any residual completions.
51152 */
51153 spin_lock_irqsave(&h->lock, flags);
51154- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51155+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51156 spin_unlock_irqrestore(&h->lock, flags);
51157 hpsa_free_irqs(h);
51158 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
51159@@ -6979,9 +6979,9 @@ reinit_after_soft_reset:
51160 dev_info(&h->pdev->dev, "Board READY.\n");
51161 dev_info(&h->pdev->dev,
51162 "Waiting for stale completions to drain.\n");
51163- h->access.set_intr_mask(h, HPSA_INTR_ON);
51164+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51165 msleep(10000);
51166- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51167+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51168
51169 rc = controller_reset_failed(h->cfgtable);
51170 if (rc)
51171@@ -7006,7 +7006,7 @@ reinit_after_soft_reset:
51172
51173
51174 /* Turn the interrupts on so we can service requests */
51175- h->access.set_intr_mask(h, HPSA_INTR_ON);
51176+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51177
51178 hpsa_hba_inquiry(h);
51179 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
51180@@ -7079,7 +7079,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
51181 * To write all data in the battery backed cache to disks
51182 */
51183 hpsa_flush_cache(h);
51184- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51185+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51186 hpsa_free_irqs_and_disable_msix(h);
51187 }
51188
51189@@ -7200,7 +7200,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51190 CFGTBL_Trans_enable_directed_msix |
51191 (trans_support & (CFGTBL_Trans_io_accel1 |
51192 CFGTBL_Trans_io_accel2));
51193- struct access_method access = SA5_performant_access;
51194+ struct access_method *access = &SA5_performant_access;
51195
51196 /* This is a bit complicated. There are 8 registers on
51197 * the controller which we write to to tell it 8 different
51198@@ -7242,7 +7242,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51199 * perform the superfluous readl() after each command submission.
51200 */
51201 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
51202- access = SA5_performant_access_no_read;
51203+ access = &SA5_performant_access_no_read;
51204
51205 /* Controller spec: zero out this buffer. */
51206 for (i = 0; i < h->nreply_queues; i++)
51207@@ -7272,12 +7272,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51208 * enable outbound interrupt coalescing in accelerator mode;
51209 */
51210 if (trans_support & CFGTBL_Trans_io_accel1) {
51211- access = SA5_ioaccel_mode1_access;
51212+ access = &SA5_ioaccel_mode1_access;
51213 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51214 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51215 } else {
51216 if (trans_support & CFGTBL_Trans_io_accel2) {
51217- access = SA5_ioaccel_mode2_access;
51218+ access = &SA5_ioaccel_mode2_access;
51219 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51220 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51221 }
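
Note on the hpsa hunks above: the patch replaces the per-controller embedded copy of struct access_method with a pointer to one of the shared static tables, so call sites become h->access->fn() and the function-pointer tables can be placed in read-only memory by the constify plugin. A minimal userspace sketch of the same pattern, with all names reduced/hypothetical rather than the real driver API:

#include <stdio.h>

/* Analogue of hpsa's access_method: an ops table shared by many
 * controller instances. Keeping a pointer to a const table (instead of
 * a mutable per-instance copy) lets the table live in .rodata. */
struct access_method {
	void (*set_intr_mask)(int on);
	int  (*intr_pending)(void);
};

static void sa5_set_intr_mask(int on) { printf("intr %s\n", on ? "on" : "off"); }
static int  sa5_intr_pending(void)    { return 0; }

static const struct access_method SA5_access = {
	.set_intr_mask = sa5_set_intr_mask,
	.intr_pending  = sa5_intr_pending,
};

struct ctlr_info {
	const struct access_method *access;	/* pointer, not an embedded copy */
};

int main(void)
{
	struct ctlr_info h = { .access = &SA5_access };
	h.access->set_intr_mask(1);	/* h->access->... as in the patched driver */
	return h.access->intr_pending();
}
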
51222diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
51223index 6577130..955f9a4 100644
51224--- a/drivers/scsi/hpsa.h
51225+++ b/drivers/scsi/hpsa.h
51226@@ -143,7 +143,7 @@ struct ctlr_info {
51227 unsigned int msix_vector;
51228 unsigned int msi_vector;
51229 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
51230- struct access_method access;
51231+ struct access_method *access;
51232 char hba_mode_enabled;
51233
51234 /* queue and queue Info */
51235@@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
51236 }
51237
51238 static struct access_method SA5_access = {
51239- SA5_submit_command,
51240- SA5_intr_mask,
51241- SA5_intr_pending,
51242- SA5_completed,
51243+ .submit_command = SA5_submit_command,
51244+ .set_intr_mask = SA5_intr_mask,
51245+ .intr_pending = SA5_intr_pending,
51246+ .command_completed = SA5_completed,
51247 };
51248
51249 static struct access_method SA5_ioaccel_mode1_access = {
51250- SA5_submit_command,
51251- SA5_performant_intr_mask,
51252- SA5_ioaccel_mode1_intr_pending,
51253- SA5_ioaccel_mode1_completed,
51254+ .submit_command = SA5_submit_command,
51255+ .set_intr_mask = SA5_performant_intr_mask,
51256+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
51257+ .command_completed = SA5_ioaccel_mode1_completed,
51258 };
51259
51260 static struct access_method SA5_ioaccel_mode2_access = {
51261- SA5_submit_command_ioaccel2,
51262- SA5_performant_intr_mask,
51263- SA5_performant_intr_pending,
51264- SA5_performant_completed,
51265+ .submit_command = SA5_submit_command_ioaccel2,
51266+ .set_intr_mask = SA5_performant_intr_mask,
51267+ .intr_pending = SA5_performant_intr_pending,
51268+ .command_completed = SA5_performant_completed,
51269 };
51270
51271 static struct access_method SA5_performant_access = {
51272- SA5_submit_command,
51273- SA5_performant_intr_mask,
51274- SA5_performant_intr_pending,
51275- SA5_performant_completed,
51276+ .submit_command = SA5_submit_command,
51277+ .set_intr_mask = SA5_performant_intr_mask,
51278+ .intr_pending = SA5_performant_intr_pending,
51279+ .command_completed = SA5_performant_completed,
51280 };
51281
51282 static struct access_method SA5_performant_access_no_read = {
51283- SA5_submit_command_no_read,
51284- SA5_performant_intr_mask,
51285- SA5_performant_intr_pending,
51286- SA5_performant_completed,
51287+ .submit_command = SA5_submit_command_no_read,
51288+ .set_intr_mask = SA5_performant_intr_mask,
51289+ .intr_pending = SA5_performant_intr_pending,
51290+ .command_completed = SA5_performant_completed,
51291 };
51292
51293 struct board_type {
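
The hpsa.h hunks above switch the SA5_* tables from positional to designated initializers. Positional initialization silently misbinds callbacks if a field is ever added to or reordered in struct access_method; designated initializers bind each callback by field name. A self-contained illustration, with the struct and names invented for the example:

#include <stdio.h>

struct ops {
	void (*submit)(void);
	void (*complete)(void);
};

static void my_submit(void)   { puts("submit"); }
static void my_complete(void) { puts("complete"); }

/* Positional: breaks silently if 'struct ops' gains or reorders members. */
static struct ops positional = { my_submit, my_complete };

/* Designated: order-independent, robust against struct layout changes. */
static struct ops designated = {
	.complete = my_complete,
	.submit   = my_submit,
};

int main(void)
{
	positional.submit();
	designated.complete();
	return 0;
}
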
51294diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
51295index 1b3a094..068e683 100644
51296--- a/drivers/scsi/libfc/fc_exch.c
51297+++ b/drivers/scsi/libfc/fc_exch.c
51298@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51299 u16 pool_max_index;
51300
51301 struct {
51302- atomic_t no_free_exch;
51303- atomic_t no_free_exch_xid;
51304- atomic_t xid_not_found;
51305- atomic_t xid_busy;
51306- atomic_t seq_not_found;
51307- atomic_t non_bls_resp;
51308+ atomic_unchecked_t no_free_exch;
51309+ atomic_unchecked_t no_free_exch_xid;
51310+ atomic_unchecked_t xid_not_found;
51311+ atomic_unchecked_t xid_busy;
51312+ atomic_unchecked_t seq_not_found;
51313+ atomic_unchecked_t non_bls_resp;
51314 } stats;
51315 };
51316
51317@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51318 /* allocate memory for exchange */
51319 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51320 if (!ep) {
51321- atomic_inc(&mp->stats.no_free_exch);
51322+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51323 goto out;
51324 }
51325 memset(ep, 0, sizeof(*ep));
51326@@ -874,7 +874,7 @@ out:
51327 return ep;
51328 err:
51329 spin_unlock_bh(&pool->lock);
51330- atomic_inc(&mp->stats.no_free_exch_xid);
51331+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51332 mempool_free(ep, mp->ep_pool);
51333 return NULL;
51334 }
51335@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51336 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51337 ep = fc_exch_find(mp, xid);
51338 if (!ep) {
51339- atomic_inc(&mp->stats.xid_not_found);
51340+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51341 reject = FC_RJT_OX_ID;
51342 goto out;
51343 }
51344@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51345 ep = fc_exch_find(mp, xid);
51346 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51347 if (ep) {
51348- atomic_inc(&mp->stats.xid_busy);
51349+ atomic_inc_unchecked(&mp->stats.xid_busy);
51350 reject = FC_RJT_RX_ID;
51351 goto rel;
51352 }
51353@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51354 }
51355 xid = ep->xid; /* get our XID */
51356 } else if (!ep) {
51357- atomic_inc(&mp->stats.xid_not_found);
51358+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51359 reject = FC_RJT_RX_ID; /* XID not found */
51360 goto out;
51361 }
51362@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51363 } else {
51364 sp = &ep->seq;
51365 if (sp->id != fh->fh_seq_id) {
51366- atomic_inc(&mp->stats.seq_not_found);
51367+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51368 if (f_ctl & FC_FC_END_SEQ) {
51369 /*
51370 * Update sequence_id based on incoming last
51371@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51372
51373 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51374 if (!ep) {
51375- atomic_inc(&mp->stats.xid_not_found);
51376+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51377 goto out;
51378 }
51379 if (ep->esb_stat & ESB_ST_COMPLETE) {
51380- atomic_inc(&mp->stats.xid_not_found);
51381+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51382 goto rel;
51383 }
51384 if (ep->rxid == FC_XID_UNKNOWN)
51385 ep->rxid = ntohs(fh->fh_rx_id);
51386 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51387- atomic_inc(&mp->stats.xid_not_found);
51388+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51389 goto rel;
51390 }
51391 if (ep->did != ntoh24(fh->fh_s_id) &&
51392 ep->did != FC_FID_FLOGI) {
51393- atomic_inc(&mp->stats.xid_not_found);
51394+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51395 goto rel;
51396 }
51397 sof = fr_sof(fp);
51398@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51399 sp->ssb_stat |= SSB_ST_RESP;
51400 sp->id = fh->fh_seq_id;
51401 } else if (sp->id != fh->fh_seq_id) {
51402- atomic_inc(&mp->stats.seq_not_found);
51403+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51404 goto rel;
51405 }
51406
51407@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51408 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51409
51410 if (!sp)
51411- atomic_inc(&mp->stats.xid_not_found);
51412+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51413 else
51414- atomic_inc(&mp->stats.non_bls_resp);
51415+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51416
51417 fc_frame_free(fp);
51418 }
51419@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51420
51421 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51422 mp = ema->mp;
51423- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51424+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51425 st->fc_no_free_exch_xid +=
51426- atomic_read(&mp->stats.no_free_exch_xid);
51427- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51428- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51429- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51430- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51431+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51432+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51433+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51434+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51435+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51436 }
51437 }
51438 EXPORT_SYMBOL(fc_exch_update_stats);
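
The libfc hunks above convert pure statistics counters from atomic_t to grsecurity's atomic_unchecked_t. Under PAX_REFCOUNT, atomic_t operations are instrumented to trap on overflow so that reference-count bugs cannot wrap; counters that merely feed diagnostics may legitimately wrap, so they are moved to the unchecked variant. atomic_unchecked_t exists only in the grsecurity patch; a rough userspace model of the distinction using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

/* Model: a "checked" increment that traps on wraparound (as PAX_REFCOUNT
 * does for refcounts) vs an unchecked one for wrap-tolerant statistics. */
static void checked_inc(atomic_int *v)
{
	if (atomic_fetch_add(v, 1) == INT_MAX)
		__builtin_trap();	/* overflowing a refcount is a bug */
}

static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);		/* stats may wrap; nothing breaks */
}

int main(void)
{
	atomic_int refs = 1, xid_not_found = 0;
	checked_inc(&refs);
	unchecked_inc(&xid_not_found);	/* like atomic_inc_unchecked(&mp->stats...) */
	printf("refs=%d stats=%d\n", refs, xid_not_found);
	return 0;
}
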
51439diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51440index 9c706d8..d3e3ed2 100644
51441--- a/drivers/scsi/libsas/sas_ata.c
51442+++ b/drivers/scsi/libsas/sas_ata.c
51443@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51444 .postreset = ata_std_postreset,
51445 .error_handler = ata_std_error_handler,
51446 .post_internal_cmd = sas_ata_post_internal,
51447- .qc_defer = ata_std_qc_defer,
51448+ .qc_defer = ata_std_qc_defer,
51449 .qc_prep = ata_noop_qc_prep,
51450 .qc_issue = sas_ata_qc_issue,
51451 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51452diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51453index 434e903..5a4a79b 100644
51454--- a/drivers/scsi/lpfc/lpfc.h
51455+++ b/drivers/scsi/lpfc/lpfc.h
51456@@ -430,7 +430,7 @@ struct lpfc_vport {
51457 struct dentry *debug_nodelist;
51458 struct dentry *vport_debugfs_root;
51459 struct lpfc_debugfs_trc *disc_trc;
51460- atomic_t disc_trc_cnt;
51461+ atomic_unchecked_t disc_trc_cnt;
51462 #endif
51463 uint8_t stat_data_enabled;
51464 uint8_t stat_data_blocked;
51465@@ -880,8 +880,8 @@ struct lpfc_hba {
51466 struct timer_list fabric_block_timer;
51467 unsigned long bit_flags;
51468 #define FABRIC_COMANDS_BLOCKED 0
51469- atomic_t num_rsrc_err;
51470- atomic_t num_cmd_success;
51471+ atomic_unchecked_t num_rsrc_err;
51472+ atomic_unchecked_t num_cmd_success;
51473 unsigned long last_rsrc_error_time;
51474 unsigned long last_ramp_down_time;
51475 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51476@@ -916,7 +916,7 @@ struct lpfc_hba {
51477
51478 struct dentry *debug_slow_ring_trc;
51479 struct lpfc_debugfs_trc *slow_ring_trc;
51480- atomic_t slow_ring_trc_cnt;
51481+ atomic_unchecked_t slow_ring_trc_cnt;
51482 /* iDiag debugfs sub-directory */
51483 struct dentry *idiag_root;
51484 struct dentry *idiag_pci_cfg;
51485diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51486index 5633e7d..8272114 100644
51487--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51488+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51489@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51490
51491 #include <linux/debugfs.h>
51492
51493-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51494+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51495 static unsigned long lpfc_debugfs_start_time = 0L;
51496
51497 /* iDiag */
51498@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51499 lpfc_debugfs_enable = 0;
51500
51501 len = 0;
51502- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51503+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51504 (lpfc_debugfs_max_disc_trc - 1);
51505 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51506 dtp = vport->disc_trc + i;
51507@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51508 lpfc_debugfs_enable = 0;
51509
51510 len = 0;
51511- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51512+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51513 (lpfc_debugfs_max_slow_ring_trc - 1);
51514 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51515 dtp = phba->slow_ring_trc + i;
51516@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51517 !vport || !vport->disc_trc)
51518 return;
51519
51520- index = atomic_inc_return(&vport->disc_trc_cnt) &
51521+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51522 (lpfc_debugfs_max_disc_trc - 1);
51523 dtp = vport->disc_trc + index;
51524 dtp->fmt = fmt;
51525 dtp->data1 = data1;
51526 dtp->data2 = data2;
51527 dtp->data3 = data3;
51528- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51529+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51530 dtp->jif = jiffies;
51531 #endif
51532 return;
51533@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51534 !phba || !phba->slow_ring_trc)
51535 return;
51536
51537- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51538+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51539 (lpfc_debugfs_max_slow_ring_trc - 1);
51540 dtp = phba->slow_ring_trc + index;
51541 dtp->fmt = fmt;
51542 dtp->data1 = data1;
51543 dtp->data2 = data2;
51544 dtp->data3 = data3;
51545- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51546+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51547 dtp->jif = jiffies;
51548 #endif
51549 return;
51550@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51551 "slow_ring buffer\n");
51552 goto debug_failed;
51553 }
51554- atomic_set(&phba->slow_ring_trc_cnt, 0);
51555+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51556 memset(phba->slow_ring_trc, 0,
51557 (sizeof(struct lpfc_debugfs_trc) *
51558 lpfc_debugfs_max_slow_ring_trc));
51559@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51560 "buffer\n");
51561 goto debug_failed;
51562 }
51563- atomic_set(&vport->disc_trc_cnt, 0);
51564+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51565
51566 snprintf(name, sizeof(name), "discovery_trace");
51567 vport->debug_disc_trc =
51568diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51569index 0b2c53a..aec2b45 100644
51570--- a/drivers/scsi/lpfc/lpfc_init.c
51571+++ b/drivers/scsi/lpfc/lpfc_init.c
51572@@ -11290,8 +11290,10 @@ lpfc_init(void)
51573 "misc_register returned with status %d", error);
51574
51575 if (lpfc_enable_npiv) {
51576- lpfc_transport_functions.vport_create = lpfc_vport_create;
51577- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51578+ pax_open_kernel();
51579+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51580+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51581+ pax_close_kernel();
51582 }
51583 lpfc_transport_template =
51584 fc_attach_transport(&lpfc_transport_functions);
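
The lpfc_init hunk above wraps the two writes to lpfc_transport_functions in pax_open_kernel()/pax_close_kernel(). Under PaX/KERNEXEC the constified template lives in read-only memory, so the rare legitimate update needs a temporary write window (and the *(void **)& cast to defeat the const qualifier). A userspace analogue of that write window using mprotect(), with all names hypothetical:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct template { void (*vport_create)(void); };

static void vport_create_impl(void) { puts("vport_create"); }

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	/* Page-aligned "rodata": normally read-only, like a constified table. */
	struct template *t = mmap(NULL, psz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (t == MAP_FAILED)
		return 1;
	memset(t, 0, sizeof(*t));
	mprotect(t, psz, PROT_READ);		/* table is now write-protected */

	mprotect(t, psz, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue */
	t->vport_create = vport_create_impl;	  /* the one legitimate update */
	mprotect(t, psz, PROT_READ);		  /* pax_close_kernel() analogue */

	t->vport_create();
	return 0;
}
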
51585diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51586index 4f9222e..f1850e3 100644
51587--- a/drivers/scsi/lpfc/lpfc_scsi.c
51588+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51589@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51590 unsigned long expires;
51591
51592 spin_lock_irqsave(&phba->hbalock, flags);
51593- atomic_inc(&phba->num_rsrc_err);
51594+ atomic_inc_unchecked(&phba->num_rsrc_err);
51595 phba->last_rsrc_error_time = jiffies;
51596
51597 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51598@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51599 unsigned long num_rsrc_err, num_cmd_success;
51600 int i;
51601
51602- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51603- num_cmd_success = atomic_read(&phba->num_cmd_success);
51604+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51605+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51606
51607 /*
51608 * The error and success command counters are global per
51609@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51610 }
51611 }
51612 lpfc_destroy_vport_work_array(phba, vports);
51613- atomic_set(&phba->num_rsrc_err, 0);
51614- atomic_set(&phba->num_cmd_success, 0);
51615+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51616+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51617 }
51618
51619 /**
51620diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51621index 3f26147..ee8efd1 100644
51622--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51623+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51624@@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
51625 {
51626 struct scsi_device *sdev = to_scsi_device(dev);
51627 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51628- static struct _raid_device *raid_device;
51629+ struct _raid_device *raid_device;
51630 unsigned long flags;
51631 Mpi2RaidVolPage0_t vol_pg0;
51632 Mpi2ConfigReply_t mpi_reply;
51633@@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
51634 {
51635 struct scsi_device *sdev = to_scsi_device(dev);
51636 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51637- static struct _raid_device *raid_device;
51638+ struct _raid_device *raid_device;
51639 unsigned long flags;
51640 Mpi2RaidVolPage0_t vol_pg0;
51641 Mpi2ConfigReply_t mpi_reply;
51642@@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51643 Mpi2EventDataIrOperationStatus_t *event_data =
51644 (Mpi2EventDataIrOperationStatus_t *)
51645 fw_event->event_data;
51646- static struct _raid_device *raid_device;
51647+ struct _raid_device *raid_device;
51648 unsigned long flags;
51649 u16 handle;
51650
51651@@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51652 u64 sas_address;
51653 struct _sas_device *sas_device;
51654 struct _sas_node *expander_device;
51655- static struct _raid_device *raid_device;
51656+ struct _raid_device *raid_device;
51657 u8 retry_count;
51658 unsigned long flags;
51659
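
The mpt2sas hunks above drop the static qualifier from raid_device pointers that are plain per-call temporaries. A function-local static is a single shared object across every caller, so two CPUs running _scsih_get_resync() concurrently would race on the same pointer; making it automatic gives each invocation its own slot. A small single-threaded demonstration of why a static local is shared state:

#include <stdio.h>

/* Returns a pointer to a static local: every call shares one object,
 * so the second call clobbers what the first caller still points at.
 * The same sharing becomes a data race when callers run concurrently. */
static int *lookup(int key)
{
	static int slot;	/* one instance for all callers */
	slot = key * 100;
	return &slot;
}

int main(void)
{
	int *a = lookup(1);
	int *b = lookup(2);
	/* Prints a=200 b=200 same_object=1: the first result was lost. */
	printf("a=%d b=%d same_object=%d\n", *a, *b, a == b);
	return 0;
}
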
51660diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51661index ed31d8c..ab856b3 100644
51662--- a/drivers/scsi/pmcraid.c
51663+++ b/drivers/scsi/pmcraid.c
51664@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51665 res->scsi_dev = scsi_dev;
51666 scsi_dev->hostdata = res;
51667 res->change_detected = 0;
51668- atomic_set(&res->read_failures, 0);
51669- atomic_set(&res->write_failures, 0);
51670+ atomic_set_unchecked(&res->read_failures, 0);
51671+ atomic_set_unchecked(&res->write_failures, 0);
51672 rc = 0;
51673 }
51674 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51675@@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51676
51677 /* If this was a SCSI read/write command keep count of errors */
51678 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51679- atomic_inc(&res->read_failures);
51680+ atomic_inc_unchecked(&res->read_failures);
51681 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51682- atomic_inc(&res->write_failures);
51683+ atomic_inc_unchecked(&res->write_failures);
51684
51685 if (!RES_IS_GSCSI(res->cfg_entry) &&
51686 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51687@@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
51688 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51689 * hrrq_id assigned here in queuecommand
51690 */
51691- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51692+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51693 pinstance->num_hrrq;
51694 cmd->cmd_done = pmcraid_io_done;
51695
51696@@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
51697 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51698 * hrrq_id assigned here in queuecommand
51699 */
51700- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51701+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51702 pinstance->num_hrrq;
51703
51704 if (request_size) {
51705@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51706
51707 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51708 /* add resources only after host is added into system */
51709- if (!atomic_read(&pinstance->expose_resources))
51710+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51711 return;
51712
51713 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51714@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51715 init_waitqueue_head(&pinstance->reset_wait_q);
51716
51717 atomic_set(&pinstance->outstanding_cmds, 0);
51718- atomic_set(&pinstance->last_message_id, 0);
51719- atomic_set(&pinstance->expose_resources, 0);
51720+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51721+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51722
51723 INIT_LIST_HEAD(&pinstance->free_res_q);
51724 INIT_LIST_HEAD(&pinstance->used_res_q);
51725@@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51726 /* Schedule worker thread to handle CCN and take care of adding and
51727 * removing devices to OS
51728 */
51729- atomic_set(&pinstance->expose_resources, 1);
51730+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51731 schedule_work(&pinstance->worker_q);
51732 return rc;
51733
51734diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51735index e1d150f..6c6df44 100644
51736--- a/drivers/scsi/pmcraid.h
51737+++ b/drivers/scsi/pmcraid.h
51738@@ -748,7 +748,7 @@ struct pmcraid_instance {
51739 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51740
51741 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51742- atomic_t last_message_id;
51743+ atomic_unchecked_t last_message_id;
51744
51745 /* configuration table */
51746 struct pmcraid_config_table *cfg_table;
51747@@ -777,7 +777,7 @@ struct pmcraid_instance {
51748 atomic_t outstanding_cmds;
51749
51750 /* should add/delete resources to mid-layer now ?*/
51751- atomic_t expose_resources;
51752+ atomic_unchecked_t expose_resources;
51753
51754
51755
51756@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51757 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51758 };
51759 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51760- atomic_t read_failures; /* count of failed READ commands */
51761- atomic_t write_failures; /* count of failed WRITE commands */
51762+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51763+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51764
51765 /* To indicate add/delete/modify during CCN */
51766 u8 change_detected;
51767diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51768index 82b92c4..3178171 100644
51769--- a/drivers/scsi/qla2xxx/qla_attr.c
51770+++ b/drivers/scsi/qla2xxx/qla_attr.c
51771@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51772 return 0;
51773 }
51774
51775-struct fc_function_template qla2xxx_transport_functions = {
51776+fc_function_template_no_const qla2xxx_transport_functions = {
51777
51778 .show_host_node_name = 1,
51779 .show_host_port_name = 1,
51780@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51781 .bsg_timeout = qla24xx_bsg_timeout,
51782 };
51783
51784-struct fc_function_template qla2xxx_transport_vport_functions = {
51785+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51786
51787 .show_host_node_name = 1,
51788 .show_host_port_name = 1,
51789diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51790index 7686bfe..4710893 100644
51791--- a/drivers/scsi/qla2xxx/qla_gbl.h
51792+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51793@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51794 struct device_attribute;
51795 extern struct device_attribute *qla2x00_host_attrs[];
51796 struct fc_function_template;
51797-extern struct fc_function_template qla2xxx_transport_functions;
51798-extern struct fc_function_template qla2xxx_transport_vport_functions;
51799+extern fc_function_template_no_const qla2xxx_transport_functions;
51800+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51801 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51802 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51803 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51804diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51805index cce1cbc..5b9f0fe 100644
51806--- a/drivers/scsi/qla2xxx/qla_os.c
51807+++ b/drivers/scsi/qla2xxx/qla_os.c
51808@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51809 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51810 /* Ok, a 64bit DMA mask is applicable. */
51811 ha->flags.enable_64bit_addressing = 1;
51812- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51813- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51814+ pax_open_kernel();
51815+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51816+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51817+ pax_close_kernel();
51818 return;
51819 }
51820 }
51821diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51822index 8f6d0fb..1b21097 100644
51823--- a/drivers/scsi/qla4xxx/ql4_def.h
51824+++ b/drivers/scsi/qla4xxx/ql4_def.h
51825@@ -305,7 +305,7 @@ struct ddb_entry {
51826 * (4000 only) */
51827 atomic_t relogin_timer; /* Max Time to wait for
51828 * relogin to complete */
51829- atomic_t relogin_retry_count; /* Num of times relogin has been
51830+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51831 * retried */
51832 uint32_t default_time2wait; /* Default Min time between
51833 * relogins (+aens) */
51834diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51835index 6d25879..3031a9f 100644
51836--- a/drivers/scsi/qla4xxx/ql4_os.c
51837+++ b/drivers/scsi/qla4xxx/ql4_os.c
51838@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51839 */
51840 if (!iscsi_is_session_online(cls_sess)) {
51841 /* Reset retry relogin timer */
51842- atomic_inc(&ddb_entry->relogin_retry_count);
51843+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51844 DEBUG2(ql4_printk(KERN_INFO, ha,
51845 "%s: index[%d] relogin timed out-retrying"
51846 " relogin (%d), retry (%d)\n", __func__,
51847 ddb_entry->fw_ddb_index,
51848- atomic_read(&ddb_entry->relogin_retry_count),
51849+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51850 ddb_entry->default_time2wait + 4));
51851 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51852 atomic_set(&ddb_entry->retry_relogin_timer,
51853@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51854
51855 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51856 atomic_set(&ddb_entry->relogin_timer, 0);
51857- atomic_set(&ddb_entry->relogin_retry_count, 0);
51858+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51859 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51860 ddb_entry->default_relogin_timeout =
51861 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51862diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51863index b1a2631..5bcd9c8 100644
51864--- a/drivers/scsi/scsi_lib.c
51865+++ b/drivers/scsi/scsi_lib.c
51866@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51867 shost = sdev->host;
51868 scsi_init_cmd_errh(cmd);
51869 cmd->result = DID_NO_CONNECT << 16;
51870- atomic_inc(&cmd->device->iorequest_cnt);
51871+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51872
51873 /*
51874 * SCSI request completion path will do scsi_device_unbusy(),
51875@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
51876
51877 INIT_LIST_HEAD(&cmd->eh_entry);
51878
51879- atomic_inc(&cmd->device->iodone_cnt);
51880+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51881 if (cmd->result)
51882- atomic_inc(&cmd->device->ioerr_cnt);
51883+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51884
51885 disposition = scsi_decide_disposition(cmd);
51886 if (disposition != SUCCESS &&
51887@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51888 struct Scsi_Host *host = cmd->device->host;
51889 int rtn = 0;
51890
51891- atomic_inc(&cmd->device->iorequest_cnt);
51892+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51893
51894 /* check if the device is still usable */
51895 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51896diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51897index 1ac38e7..6acc656 100644
51898--- a/drivers/scsi/scsi_sysfs.c
51899+++ b/drivers/scsi/scsi_sysfs.c
51900@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51901 char *buf) \
51902 { \
51903 struct scsi_device *sdev = to_scsi_device(dev); \
51904- unsigned long long count = atomic_read(&sdev->field); \
51905+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51906 return snprintf(buf, 20, "0x%llx\n", count); \
51907 } \
51908 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51909diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51910index 5d6f348..18778a6b 100644
51911--- a/drivers/scsi/scsi_transport_fc.c
51912+++ b/drivers/scsi/scsi_transport_fc.c
51913@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51914 * Netlink Infrastructure
51915 */
51916
51917-static atomic_t fc_event_seq;
51918+static atomic_unchecked_t fc_event_seq;
51919
51920 /**
51921 * fc_get_event_number - Obtain the next sequential FC event number
51922@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51923 u32
51924 fc_get_event_number(void)
51925 {
51926- return atomic_add_return(1, &fc_event_seq);
51927+ return atomic_add_return_unchecked(1, &fc_event_seq);
51928 }
51929 EXPORT_SYMBOL(fc_get_event_number);
51930
51931@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51932 {
51933 int error;
51934
51935- atomic_set(&fc_event_seq, 0);
51936+ atomic_set_unchecked(&fc_event_seq, 0);
51937
51938 error = transport_class_register(&fc_host_class);
51939 if (error)
51940@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51941 char *cp;
51942
51943 *val = simple_strtoul(buf, &cp, 0);
51944- if ((*cp && (*cp != '\n')) || (*val < 0))
51945+ if (*cp && (*cp != '\n'))
51946 return -EINVAL;
51947 /*
51948 * Check for overflow; dev_loss_tmo is u32
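
The scsi_transport_fc hunk above deletes the (*val < 0) test: val points at an unsigned long, so the comparison is always false and only produces -Wtype-limits noise, while the real range check against the u32 limit follows in the same function. The pitfall in isolation:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long val = strtoul("-1", NULL, 0);	/* wraps to ULONG_MAX */

	if (val < 0)			/* always false for an unsigned type;
					 * gcc -Wextra flags it as such */
		puts("never printed");
	printf("val=%lu\n", val);	/* negative input shows up as a huge value,
					 * so an explicit upper-bound check is the
					 * only meaningful validation */
	return 0;
}
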
51949diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51950index 67d43e3..8cee73c 100644
51951--- a/drivers/scsi/scsi_transport_iscsi.c
51952+++ b/drivers/scsi/scsi_transport_iscsi.c
51953@@ -79,7 +79,7 @@ struct iscsi_internal {
51954 struct transport_container session_cont;
51955 };
51956
51957-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51958+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51959 static struct workqueue_struct *iscsi_eh_timer_workq;
51960
51961 static DEFINE_IDA(iscsi_sess_ida);
51962@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51963 int err;
51964
51965 ihost = shost->shost_data;
51966- session->sid = atomic_add_return(1, &iscsi_session_nr);
51967+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51968
51969 if (target_id == ISCSI_MAX_TARGET) {
51970 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51971@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51972 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51973 ISCSI_TRANSPORT_VERSION);
51974
51975- atomic_set(&iscsi_session_nr, 0);
51976+ atomic_set_unchecked(&iscsi_session_nr, 0);
51977
51978 err = class_register(&iscsi_transport_class);
51979 if (err)
51980diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51981index ae45bd9..c32a586 100644
51982--- a/drivers/scsi/scsi_transport_srp.c
51983+++ b/drivers/scsi/scsi_transport_srp.c
51984@@ -35,7 +35,7 @@
51985 #include "scsi_priv.h"
51986
51987 struct srp_host_attrs {
51988- atomic_t next_port_id;
51989+ atomic_unchecked_t next_port_id;
51990 };
51991 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51992
51993@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51994 struct Scsi_Host *shost = dev_to_shost(dev);
51995 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51996
51997- atomic_set(&srp_host->next_port_id, 0);
51998+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51999 return 0;
52000 }
52001
52002@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52003 rport_fast_io_fail_timedout);
52004 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52005
52006- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52007+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52008 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52009
52010 transport_setup_device(&rport->dev);
52011diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52012index 6b78476..d40476f 100644
52013--- a/drivers/scsi/sd.c
52014+++ b/drivers/scsi/sd.c
52015@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
52016 sdkp->disk = gd;
52017 sdkp->index = index;
52018 atomic_set(&sdkp->openers, 0);
52019- atomic_set(&sdkp->device->ioerr_cnt, 0);
52020+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52021
52022 if (!sdp->request_queue->rq_timeout) {
52023 if (sdp->type != TYPE_MOD)
52024diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52025index 2270bd5..98408a5 100644
52026--- a/drivers/scsi/sg.c
52027+++ b/drivers/scsi/sg.c
52028@@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52029 sdp->disk->disk_name,
52030 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
52031 NULL,
52032- (char *)arg);
52033+ (char __user *)arg);
52034 case BLKTRACESTART:
52035 return blk_trace_startstop(sdp->device->request_queue, 1);
52036 case BLKTRACESTOP:
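
The sg.c hunk above casts arg to (char __user *) before handing it to the blk_trace setup path. __user is a sparse address-space annotation (empty in normal builds) marking pointers that must only be reached through copy_from_user()-style accessors; the cast corrects the annotation, not the generated code. A simplified standalone sketch of how the annotation works (the real definition lives in the kernel's compiler headers):

#include <stdio.h>

#ifdef __CHECKER__			/* defined when sparse is checking */
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user			/* plain compilers see no annotation */
#endif

/* Consumers of user memory declare it __user; sparse then flags any
 * direct dereference or any unannotated cast into that address space. */
static long fake_trace_setup(char __user *ubuf)
{
	printf("user buffer token %p (never dereferenced directly)\n",
	       (void *)ubuf);
	return 0;
}

int main(void)
{
	unsigned long arg = 0x1000;	/* pretend ioctl argument */
	return (int)fake_trace_setup((char __user *)arg);
}
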
52037diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
52038index c0d660f..24a5854 100644
52039--- a/drivers/soc/tegra/fuse/fuse-tegra.c
52040+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
52041@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
52042 return i;
52043 }
52044
52045-static struct bin_attribute fuse_bin_attr = {
52046+static bin_attribute_no_const fuse_bin_attr = {
52047 .attr = { .name = "fuse", .mode = S_IRUGO, },
52048 .read = fuse_read,
52049 };
52050diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52051index 57a1950..ae54e21 100644
52052--- a/drivers/spi/spi.c
52053+++ b/drivers/spi/spi.c
52054@@ -2307,7 +2307,7 @@ int spi_bus_unlock(struct spi_master *master)
52055 EXPORT_SYMBOL_GPL(spi_bus_unlock);
52056
52057 /* portable code must never pass more than 32 bytes */
52058-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
52059+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
52060
52061 static u8 *buf;
52062
52063diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
52064index b41429f..2de5373 100644
52065--- a/drivers/staging/android/timed_output.c
52066+++ b/drivers/staging/android/timed_output.c
52067@@ -25,7 +25,7 @@
52068 #include "timed_output.h"
52069
52070 static struct class *timed_output_class;
52071-static atomic_t device_count;
52072+static atomic_unchecked_t device_count;
52073
52074 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
52075 char *buf)
52076@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
52077 timed_output_class = class_create(THIS_MODULE, "timed_output");
52078 if (IS_ERR(timed_output_class))
52079 return PTR_ERR(timed_output_class);
52080- atomic_set(&device_count, 0);
52081+ atomic_set_unchecked(&device_count, 0);
52082 timed_output_class->dev_groups = timed_output_groups;
52083 }
52084
52085@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
52086 if (ret < 0)
52087 return ret;
52088
52089- tdev->index = atomic_inc_return(&device_count);
52090+ tdev->index = atomic_inc_return_unchecked(&device_count);
52091 tdev->dev = device_create(timed_output_class, NULL,
52092 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
52093 if (IS_ERR(tdev->dev))
52094diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
52095index 727640e..55bf61c 100644
52096--- a/drivers/staging/comedi/comedi_fops.c
52097+++ b/drivers/staging/comedi/comedi_fops.c
52098@@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
52099 }
52100 cfp->last_attached = dev->attached;
52101 cfp->last_detach_count = dev->detach_count;
52102- ACCESS_ONCE(cfp->read_subdev) = read_s;
52103- ACCESS_ONCE(cfp->write_subdev) = write_s;
52104+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
52105+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
52106 }
52107
52108 static void comedi_file_check(struct file *file)
52109@@ -1924,7 +1924,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52110 !(s_old->async->cmd.flags & CMDF_WRITE))
52111 return -EBUSY;
52112
52113- ACCESS_ONCE(cfp->read_subdev) = s_new;
52114+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
52115 return 0;
52116 }
52117
52118@@ -1966,7 +1966,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52119 (s_old->async->cmd.flags & CMDF_WRITE))
52120 return -EBUSY;
52121
52122- ACCESS_ONCE(cfp->write_subdev) = s_new;
52123+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
52124 return 0;
52125 }
52126
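
The comedi hunks above switch stores made through ACCESS_ONCE() to grsecurity's ACCESS_ONCE_RW(). The hardened ACCESS_ONCE adds const to its volatile cast so it can only appear on the read side; writes need the _RW variant. Both reduce to a volatile access that the compiler may not tear, cache, or fold. A standalone rendering of the two macros (grsec-specific, simplified, GNU typeof):

#include <stdio.h>

/* Read-only flavor: const volatile view, usable only as an rvalue. */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
/* Read-write flavor: plain volatile view, assignable. */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int *read_subdev;

int main(void)
{
	int s_new = 42;

	ACCESS_ONCE_RW(read_subdev) = &s_new;	/* stores must use _RW */
	printf("subdev=%d\n", *ACCESS_ONCE(read_subdev));
	/* ACCESS_ONCE(read_subdev) = &s_new; would not compile:
	 * assignment to a const-qualified lvalue. */
	return 0;
}
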
52127diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
52128index 37dcf7e..f3c2016 100644
52129--- a/drivers/staging/fbtft/fbtft-core.c
52130+++ b/drivers/staging/fbtft/fbtft-core.c
52131@@ -689,7 +689,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
52132 {
52133 struct fb_info *info;
52134 struct fbtft_par *par;
52135- struct fb_ops *fbops = NULL;
52136+ fb_ops_no_const *fbops = NULL;
52137 struct fb_deferred_io *fbdefio = NULL;
52138 struct fbtft_platform_data *pdata = dev->platform_data;
52139 u8 *vmem = NULL;
52140diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
52141index 0dbf3f9..fed0063 100644
52142--- a/drivers/staging/fbtft/fbtft.h
52143+++ b/drivers/staging/fbtft/fbtft.h
52144@@ -106,7 +106,7 @@ struct fbtft_ops {
52145
52146 int (*set_var)(struct fbtft_par *par);
52147 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
52148-};
52149+} __no_const;
52150
52151 /**
52152 * struct fbtft_display - Describes the display properties
52153diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
52154index 001348c..cfaac8a 100644
52155--- a/drivers/staging/gdm724x/gdm_tty.c
52156+++ b/drivers/staging/gdm724x/gdm_tty.c
52157@@ -44,7 +44,7 @@
52158 #define gdm_tty_send_control(n, r, v, d, l) (\
52159 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
52160
52161-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
52162+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
52163
52164 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
52165 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
52166diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
52167index d23c3c2..eb63c81 100644
52168--- a/drivers/staging/i2o/i2o.h
52169+++ b/drivers/staging/i2o/i2o.h
52170@@ -565,7 +565,7 @@ struct i2o_controller {
52171 struct i2o_device *exec; /* Executive */
52172 #if BITS_PER_LONG == 64
52173 spinlock_t context_list_lock; /* lock for context_list */
52174- atomic_t context_list_counter; /* needed for unique contexts */
52175+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
52176 struct list_head context_list; /* list of context id's
52177 and pointers */
52178 #endif
52179diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
52180index ad84f33..c5bdf65 100644
52181--- a/drivers/staging/i2o/i2o_proc.c
52182+++ b/drivers/staging/i2o/i2o_proc.c
52183@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
52184 "Array Controller Device"
52185 };
52186
52187-static char *chtostr(char *tmp, u8 *chars, int n)
52188-{
52189- tmp[0] = 0;
52190- return strncat(tmp, (char *)chars, n);
52191-}
52192-
52193 static int i2o_report_query_status(struct seq_file *seq, int block_status,
52194 char *group)
52195 {
52196@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
52197 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
52198 {
52199 struct i2o_controller *c = (struct i2o_controller *)seq->private;
52200- static u32 work32[5];
52201- static u8 *work8 = (u8 *) work32;
52202- static u16 *work16 = (u16 *) work32;
52203+ u32 work32[5];
52204+ u8 *work8 = (u8 *) work32;
52205+ u16 *work16 = (u16 *) work32;
52206 int token;
52207 u32 hwcap;
52208
52209@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
52210 } *result;
52211
52212 i2o_exec_execute_ddm_table ddm_table;
52213- char tmp[28 + 1];
52214
52215 result = kmalloc(sizeof(*result), GFP_KERNEL);
52216 if (!result)
52217@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
52218
52219 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
52220 seq_printf(seq, "%-#8x", ddm_table.module_id);
52221- seq_printf(seq, "%-29s",
52222- chtostr(tmp, ddm_table.module_name_version, 28));
52223+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
52224 seq_printf(seq, "%9d ", ddm_table.data_size);
52225 seq_printf(seq, "%8d", ddm_table.code_size);
52226
52227@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
52228
52229 i2o_driver_result_table *result;
52230 i2o_driver_store_table *dst;
52231- char tmp[28 + 1];
52232
52233 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
52234 if (result == NULL)
52235@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
52236
52237 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
52238 seq_printf(seq, "%-#8x", dst->module_id);
52239- seq_printf(seq, "%-29s",
52240- chtostr(tmp, dst->module_name_version, 28));
52241- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
52242+ seq_printf(seq, "%-.28s", dst->module_name_version);
52243+ seq_printf(seq, "%-.8s", dst->date);
52244 seq_printf(seq, "%8d ", dst->module_size);
52245 seq_printf(seq, "%8d ", dst->mpb_size);
52246 seq_printf(seq, "0x%04x", dst->module_flags);
52247@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
52248 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
52249 {
52250 struct i2o_device *d = (struct i2o_device *)seq->private;
52251- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
52252+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
52253 // == (allow) 512d bytes (max)
52254- static u16 *work16 = (u16 *) work32;
52255+ u16 *work16 = (u16 *) work32;
52256 int token;
52257- char tmp[16 + 1];
52258
52259 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
52260
52261@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
52262 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
52263 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
52264 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
52265- seq_printf(seq, "Vendor info : %s\n",
52266- chtostr(tmp, (u8 *) (work32 + 2), 16));
52267- seq_printf(seq, "Product info : %s\n",
52268- chtostr(tmp, (u8 *) (work32 + 6), 16));
52269- seq_printf(seq, "Description : %s\n",
52270- chtostr(tmp, (u8 *) (work32 + 10), 16));
52271- seq_printf(seq, "Product rev. : %s\n",
52272- chtostr(tmp, (u8 *) (work32 + 14), 8));
52273+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
52274+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
52275+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
52276+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
52277
52278 seq_printf(seq, "Serial number : ");
52279 print_serial_number(seq, (u8 *) (work32 + 16),
52280@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
52281 u8 pad[256]; // allow up to 256 byte (max) serial number
52282 } result;
52283
52284- char tmp[24 + 1];
52285-
52286 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
52287
52288 if (token < 0) {
52289@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
52290 }
52291
52292 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
52293- seq_printf(seq, "Module name : %s\n",
52294- chtostr(tmp, result.module_name, 24));
52295- seq_printf(seq, "Module revision : %s\n",
52296- chtostr(tmp, result.module_rev, 8));
52297+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
52298+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
52299
52300 seq_printf(seq, "Serial number : ");
52301 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
52302@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
52303 u8 instance_number[4];
52304 } result;
52305
52306- char tmp[64 + 1];
52307-
52308 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
52309
52310 if (token < 0) {
52311@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
52312 return 0;
52313 }
52314
52315- seq_printf(seq, "Device name : %s\n",
52316- chtostr(tmp, result.device_name, 64));
52317- seq_printf(seq, "Service name : %s\n",
52318- chtostr(tmp, result.service_name, 64));
52319- seq_printf(seq, "Physical name : %s\n",
52320- chtostr(tmp, result.physical_location, 64));
52321- seq_printf(seq, "Instance number : %s\n",
52322- chtostr(tmp, result.instance_number, 4));
52323+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
52324+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
52325+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
52326+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
52327
52328 return 0;
52329 }
52330@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
52331 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
52332 {
52333 struct i2o_device *d = (struct i2o_device *)seq->private;
52334- static u32 work32[12];
52335- static u16 *work16 = (u16 *) work32;
52336- static u8 *work8 = (u8 *) work32;
52337+ u32 work32[12];
52338+ u16 *work16 = (u16 *) work32;
52339+ u8 *work8 = (u8 *) work32;
52340 int token;
52341
52342 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
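
The i2o_proc hunks above delete the chtostr() helper (a strncat into a stack temporary) and print the fixed-width, possibly non-NUL-terminated firmware fields directly with a printf precision such as "%.28s". A precision on %s caps how many bytes are read, so the source array needs no terminator within that bound and the bounce buffer, along with any risk of overrunning it, disappears. In isolation:

#include <stdio.h>

int main(void)
{
	/* Fixed-width field, NOT NUL-terminated, like the raw module
	 * name/version bytes read back from the controller. */
	char module_name_version[8] = { 'i', '2', 'o', '_', 'b', 'l', 'k', '!' };

	/* "%.8s" reads at most 8 bytes, so no terminator is required and
	 * no bounce buffer (chtostr's tmp[]) is needed. */
	printf("module: %.8s\n", module_name_version);

	/* Field width plus precision reproduces the old padded layout:
	 * "%-29s" of a 28-byte copy becomes "%-.28s" of the raw field. */
	printf("[%-10.8s]\n", module_name_version);
	return 0;
}
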
52343diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
52344index 52334fc..d7f40b3 100644
52345--- a/drivers/staging/i2o/iop.c
52346+++ b/drivers/staging/i2o/iop.c
52347@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
52348
52349 spin_lock_irqsave(&c->context_list_lock, flags);
52350
52351- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
52352- atomic_inc(&c->context_list_counter);
52353+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
52354+ atomic_inc_unchecked(&c->context_list_counter);
52355
52356- entry->context = atomic_read(&c->context_list_counter);
52357+ entry->context = atomic_read_unchecked(&c->context_list_counter);
52358
52359 list_add(&entry->list, &c->context_list);
52360
52361@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
52362
52363 #if BITS_PER_LONG == 64
52364 spin_lock_init(&c->context_list_lock);
52365- atomic_set(&c->context_list_counter, 0);
52366+ atomic_set_unchecked(&c->context_list_counter, 0);
52367 INIT_LIST_HEAD(&c->context_list);
52368 #endif
52369
52370diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
52371index 463da07..e791ce9 100644
52372--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
52373+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
52374@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
52375 return 0;
52376 }
52377
52378-sfw_test_client_ops_t brw_test_client;
52379-void brw_init_test_client(void)
52380-{
52381- brw_test_client.tso_init = brw_client_init;
52382- brw_test_client.tso_fini = brw_client_fini;
52383- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
52384- brw_test_client.tso_done_rpc = brw_client_done_rpc;
52385+sfw_test_client_ops_t brw_test_client = {
52386+ .tso_init = brw_client_init,
52387+ .tso_fini = brw_client_fini,
52388+ .tso_prep_rpc = brw_client_prep_rpc,
52389+ .tso_done_rpc = brw_client_done_rpc,
52390 };
52391
52392 srpc_service_t brw_test_service;
52393diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52394index 5709148..ccd9e0d 100644
52395--- a/drivers/staging/lustre/lnet/selftest/framework.c
52396+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52397@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52398
52399 extern sfw_test_client_ops_t ping_test_client;
52400 extern srpc_service_t ping_test_service;
52401-extern void ping_init_test_client(void);
52402 extern void ping_init_test_service(void);
52403
52404 extern sfw_test_client_ops_t brw_test_client;
52405 extern srpc_service_t brw_test_service;
52406-extern void brw_init_test_client(void);
52407 extern void brw_init_test_service(void);
52408
52409
52410@@ -1675,12 +1673,10 @@ sfw_startup (void)
52411 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52412 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52413
52414- brw_init_test_client();
52415 brw_init_test_service();
52416 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52417 LASSERT (rc == 0);
52418
52419- ping_init_test_client();
52420 ping_init_test_service();
52421 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52422 LASSERT (rc == 0);
52423diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52424index d8c0df6..5041cbb 100644
52425--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52426+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52427@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52428 return 0;
52429 }
52430
52431-sfw_test_client_ops_t ping_test_client;
52432-void ping_init_test_client(void)
52433-{
52434- ping_test_client.tso_init = ping_client_init;
52435- ping_test_client.tso_fini = ping_client_fini;
52436- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52437- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52438-}
52439+sfw_test_client_ops_t ping_test_client = {
52440+ .tso_init = ping_client_init,
52441+ .tso_fini = ping_client_fini,
52442+ .tso_prep_rpc = ping_client_prep_rpc,
52443+ .tso_done_rpc = ping_client_done_rpc,
52444+};
52445
52446 srpc_service_t ping_test_service;
52447 void ping_init_test_service(void)
52448diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52449index 83bc0a9..12ba00a 100644
52450--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52451+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52452@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52453 ldlm_completion_callback lcs_completion;
52454 ldlm_blocking_callback lcs_blocking;
52455 ldlm_glimpse_callback lcs_glimpse;
52456-};
52457+} __no_const;
52458
52459 /* ldlm_lockd.c */
52460 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52461diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52462index 2a88b80..62e7e5f 100644
52463--- a/drivers/staging/lustre/lustre/include/obd.h
52464+++ b/drivers/staging/lustre/lustre/include/obd.h
52465@@ -1362,7 +1362,7 @@ struct md_ops {
52466 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52467 * wrapper function in include/linux/obd_class.h.
52468 */
52469-};
52470+} __no_const;
52471
52472 struct lsm_operations {
52473 void (*lsm_free)(struct lov_stripe_md *);
52474diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52475index a4c252f..b21acac 100644
52476--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52477+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52478@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52479 int added = (mode == LCK_NL);
52480 int overlaps = 0;
52481 int splitted = 0;
52482- const struct ldlm_callback_suite null_cbs = { NULL };
52483+ const struct ldlm_callback_suite null_cbs = { };
52484
52485 CDEBUG(D_DLMTRACE,
52486 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52487diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52488index c539e37..743b213 100644
52489--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52490+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52491@@ -237,7 +237,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
52492 loff_t *ppos)
52493 {
52494 int rc, max_delay_cs;
52495- struct ctl_table dummy = *table;
52496+ ctl_table_no_const dummy = *table;
52497 long d;
52498
52499 dummy.data = &max_delay_cs;
52500@@ -270,7 +270,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
52501 loff_t *ppos)
52502 {
52503 int rc, min_delay_cs;
52504- struct ctl_table dummy = *table;
52505+ ctl_table_no_const dummy = *table;
52506 long d;
52507
52508 dummy.data = &min_delay_cs;
52509@@ -302,7 +302,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
52510 void __user *buffer, size_t *lenp, loff_t *ppos)
52511 {
52512 int rc, backoff;
52513- struct ctl_table dummy = *table;
52514+ ctl_table_no_const dummy = *table;
52515
52516 dummy.data = &backoff;
52517 dummy.proc_handler = &proc_dointvec;
52518diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52519index 7dc77dd..289d03e 100644
52520--- a/drivers/staging/lustre/lustre/libcfs/module.c
52521+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52522@@ -313,11 +313,11 @@ out:
52523
52524
52525 struct cfs_psdev_ops libcfs_psdev_ops = {
52526- libcfs_psdev_open,
52527- libcfs_psdev_release,
52528- NULL,
52529- NULL,
52530- libcfs_ioctl
52531+ .p_open = libcfs_psdev_open,
52532+ .p_close = libcfs_psdev_release,
52533+ .p_read = NULL,
52534+ .p_write = NULL,
52535+ .p_ioctl = libcfs_ioctl
52536 };
52537
52538 extern int insert_proc(void);
52539diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52540index 22667db..8b703b6 100644
52541--- a/drivers/staging/octeon/ethernet-rx.c
52542+++ b/drivers/staging/octeon/ethernet-rx.c
52543@@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52544 /* Increment RX stats for virtual ports */
52545 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52546 #ifdef CONFIG_64BIT
52547- atomic64_add(1,
52548+ atomic64_add_unchecked(1,
52549 (atomic64_t *)&priv->stats.rx_packets);
52550- atomic64_add(skb->len,
52551+ atomic64_add_unchecked(skb->len,
52552 (atomic64_t *)&priv->stats.rx_bytes);
52553 #else
52554- atomic_add(1,
52555+ atomic_add_unchecked(1,
52556 (atomic_t *)&priv->stats.rx_packets);
52557- atomic_add(skb->len,
52558+ atomic_add_unchecked(skb->len,
52559 (atomic_t *)&priv->stats.rx_bytes);
52560 #endif
52561 }
52562@@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52563 dev->name);
52564 */
52565 #ifdef CONFIG_64BIT
52566- atomic64_add(1,
52567+ atomic64_add_unchecked(1,
52568 (atomic64_t *)&priv->stats.rx_dropped);
52569 #else
52570- atomic_add(1,
52571+ atomic_add_unchecked(1,
52572 (atomic_t *)&priv->stats.rx_dropped);
52573 #endif
52574 dev_kfree_skb_irq(skb);
52575diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52576index 460e854..f926452 100644
52577--- a/drivers/staging/octeon/ethernet.c
52578+++ b/drivers/staging/octeon/ethernet.c
52579@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52580 * since the RX tasklet also increments it.
52581 */
52582 #ifdef CONFIG_64BIT
52583- atomic64_add(rx_status.dropped_packets,
52584- (atomic64_t *)&priv->stats.rx_dropped);
52585+ atomic64_add_unchecked(rx_status.dropped_packets,
52586+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52587 #else
52588- atomic_add(rx_status.dropped_packets,
52589- (atomic_t *)&priv->stats.rx_dropped);
52590+ atomic_add_unchecked(rx_status.dropped_packets,
52591+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52592 #endif
52593 }
52594
52595diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52596index 3b476d8..f522d68 100644
52597--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52598+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52599@@ -225,7 +225,7 @@ struct hal_ops {
52600
52601 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52602 void (*hal_reset_security_engine)(struct adapter *adapter);
52603-};
52604+} __no_const;
52605
52606 enum rt_eeprom_type {
52607 EEPROM_93C46,
52608diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52609index 070cc03..6806e37 100644
52610--- a/drivers/staging/rtl8712/rtl871x_io.h
52611+++ b/drivers/staging/rtl8712/rtl871x_io.h
52612@@ -108,7 +108,7 @@ struct _io_ops {
52613 u8 *pmem);
52614 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52615 u8 *pmem);
52616-};
52617+} __no_const;
52618
52619 struct io_req {
52620 struct list_head list;
52621diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52622index 98f3ba4..c6a7fce 100644
52623--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52624+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52625@@ -171,7 +171,7 @@ struct visorchipset_busdev_notifiers {
52626 void (*device_resume)(ulong bus_no, ulong dev_no);
52627 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52628 ulong *max_size);
52629-};
52630+} __no_const;
52631
52632 /* These functions live inside visorchipset, and will be called to indicate
52633 * responses to specific events (by code outside of visorchipset).
52634@@ -186,7 +186,7 @@ struct visorchipset_busdev_responders {
52635 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52636 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52637 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52638-};
52639+} __no_const;
52640
52641 /** Register functions (in the bus driver) to get called by visorchipset
52642 * whenever a bus or device appears for which this service partition is
52643diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52644index 9512af6..045bf5a 100644
52645--- a/drivers/target/sbp/sbp_target.c
52646+++ b/drivers/target/sbp/sbp_target.c
52647@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52648
52649 #define SESSION_MAINTENANCE_INTERVAL HZ
52650
52651-static atomic_t login_id = ATOMIC_INIT(0);
52652+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52653
52654 static void session_maintenance_work(struct work_struct *);
52655 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52656@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52657 login->lun = se_lun;
52658 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52659 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52660- login->login_id = atomic_inc_return(&login_id);
52661+ login->login_id = atomic_inc_return_unchecked(&login_id);
52662
52663 login->tgt_agt = sbp_target_agent_register(login);
52664 if (IS_ERR(login->tgt_agt)) {
52665diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52666index 7faa6ae..ae6c410 100644
52667--- a/drivers/target/target_core_device.c
52668+++ b/drivers/target/target_core_device.c
52669@@ -1495,7 +1495,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52670 spin_lock_init(&dev->se_tmr_lock);
52671 spin_lock_init(&dev->qf_cmd_lock);
52672 sema_init(&dev->caw_sem, 1);
52673- atomic_set(&dev->dev_ordered_id, 0);
52674+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52675 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52676 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52677 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52678diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52679index ac3cbab..f0d1dd2 100644
52680--- a/drivers/target/target_core_transport.c
52681+++ b/drivers/target/target_core_transport.c
52682@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52683 * Used to determine when ORDERED commands should go from
52684 * Dormant to Active status.
52685 */
52686- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52687+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52688 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52689 cmd->se_ordered_id, cmd->sam_task_attr,
52690 dev->transport->name);
52691diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52692index 031018e..90981a1 100644
52693--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52694+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52695@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52696 platform_set_drvdata(pdev, priv);
52697
52698 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52699- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52700- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52701+ pax_open_kernel();
52702+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52703+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52704+ pax_close_kernel();
52705 }
52706 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52707 priv, &int3400_thermal_ops,
52708diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52709index 668fb1b..2737bbe 100644
52710--- a/drivers/thermal/of-thermal.c
52711+++ b/drivers/thermal/of-thermal.c
52712@@ -31,6 +31,7 @@
52713 #include <linux/export.h>
52714 #include <linux/string.h>
52715 #include <linux/thermal.h>
52716+#include <linux/mm.h>
52717
52718 #include "thermal_core.h"
52719
52720@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52721 tz->ops = ops;
52722 tz->sensor_data = data;
52723
52724- tzd->ops->get_temp = of_thermal_get_temp;
52725- tzd->ops->get_trend = of_thermal_get_trend;
52726- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52727+ pax_open_kernel();
52728+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52729+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52730+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52731+ pax_close_kernel();
52732 mutex_unlock(&tzd->lock);
52733
52734 return tzd;
52735@@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52736 return;
52737
52738 mutex_lock(&tzd->lock);
52739- tzd->ops->get_temp = NULL;
52740- tzd->ops->get_trend = NULL;
52741- tzd->ops->set_emul_temp = NULL;
52742+ pax_open_kernel();
52743+ *(void **)&tzd->ops->get_temp = NULL;
52744+ *(void **)&tzd->ops->get_trend = NULL;
52745+ *(void **)&tzd->ops->set_emul_temp = NULL;
52746+ pax_close_kernel();
52747
52748 tz->ops = NULL;
52749 tz->sensor_data = NULL;
52750diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
52751index 9ea3d9d..53e8792 100644
52752--- a/drivers/thermal/x86_pkg_temp_thermal.c
52753+++ b/drivers/thermal/x86_pkg_temp_thermal.c
52754@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
52755 return NOTIFY_OK;
52756 }
52757
52758-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
52759+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
52760 .notifier_call = pkg_temp_thermal_cpu_callback,
52761 };
52762
52763diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52764index fd66f57..48e6376 100644
52765--- a/drivers/tty/cyclades.c
52766+++ b/drivers/tty/cyclades.c
52767@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52768 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52769 info->port.count);
52770 #endif
52771- info->port.count++;
52772+ atomic_inc(&info->port.count);
52773 #ifdef CY_DEBUG_COUNT
52774 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52775- current->pid, info->port.count);
52776+ current->pid, atomic_read(&info->port.count));
52777 #endif
52778
52779 /*
52780@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52781 for (j = 0; j < cy_card[i].nports; j++) {
52782 info = &cy_card[i].ports[j];
52783
52784- if (info->port.count) {
52785+ if (atomic_read(&info->port.count)) {
52786 /* XXX is the ldisc num worth this? */
52787 struct tty_struct *tty;
52788 struct tty_ldisc *ld;
52789diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52790index 4fcec1d..5a036f7 100644
52791--- a/drivers/tty/hvc/hvc_console.c
52792+++ b/drivers/tty/hvc/hvc_console.c
52793@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52794
52795 spin_lock_irqsave(&hp->port.lock, flags);
52796 /* Check and then increment for fast path open. */
52797- if (hp->port.count++ > 0) {
52798+ if (atomic_inc_return(&hp->port.count) > 1) {
52799 spin_unlock_irqrestore(&hp->port.lock, flags);
52800 hvc_kick();
52801 return 0;
52802@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52803
52804 spin_lock_irqsave(&hp->port.lock, flags);
52805
52806- if (--hp->port.count == 0) {
52807+ if (atomic_dec_return(&hp->port.count) == 0) {
52808 spin_unlock_irqrestore(&hp->port.lock, flags);
52809 /* We are done with the tty pointer now. */
52810 tty_port_tty_set(&hp->port, NULL);
52811@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52812 */
52813 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52814 } else {
52815- if (hp->port.count < 0)
52816+ if (atomic_read(&hp->port.count) < 0)
52817 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52818- hp->vtermno, hp->port.count);
52819+ hp->vtermno, atomic_read(&hp->port.count));
52820 spin_unlock_irqrestore(&hp->port.lock, flags);
52821 }
52822 }
52823@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52824 * open->hangup case this can be called after the final close so prevent
52825 * that from happening for now.
52826 */
52827- if (hp->port.count <= 0) {
52828+ if (atomic_read(&hp->port.count) <= 0) {
52829 spin_unlock_irqrestore(&hp->port.lock, flags);
52830 return;
52831 }
52832
52833- hp->port.count = 0;
52834+ atomic_set(&hp->port.count, 0);
52835 spin_unlock_irqrestore(&hp->port.lock, flags);
52836 tty_port_tty_set(&hp->port, NULL);
52837
52838@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52839 return -EPIPE;
52840
52841 /* FIXME what's this (unprotected) check for? */
52842- if (hp->port.count <= 0)
52843+ if (atomic_read(&hp->port.count) <= 0)
52844 return -EIO;
52845
52846 spin_lock_irqsave(&hp->lock, flags);
52847diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52848index 81ff7e1..dfb7b71 100644
52849--- a/drivers/tty/hvc/hvcs.c
52850+++ b/drivers/tty/hvc/hvcs.c
52851@@ -83,6 +83,7 @@
52852 #include <asm/hvcserver.h>
52853 #include <asm/uaccess.h>
52854 #include <asm/vio.h>
52855+#include <asm/local.h>
52856
52857 /*
52858 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52859@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52860
52861 spin_lock_irqsave(&hvcsd->lock, flags);
52862
52863- if (hvcsd->port.count > 0) {
52864+ if (atomic_read(&hvcsd->port.count) > 0) {
52865 spin_unlock_irqrestore(&hvcsd->lock, flags);
52866 printk(KERN_INFO "HVCS: vterm state unchanged. "
52867 "The hvcs device node is still in use.\n");
52868@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52869 }
52870 }
52871
52872- hvcsd->port.count = 0;
52873+ atomic_set(&hvcsd->port.count, 0);
52874 hvcsd->port.tty = tty;
52875 tty->driver_data = hvcsd;
52876
52877@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52878 unsigned long flags;
52879
52880 spin_lock_irqsave(&hvcsd->lock, flags);
52881- hvcsd->port.count++;
52882+ atomic_inc(&hvcsd->port.count);
52883 hvcsd->todo_mask |= HVCS_SCHED_READ;
52884 spin_unlock_irqrestore(&hvcsd->lock, flags);
52885
52886@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52887 hvcsd = tty->driver_data;
52888
52889 spin_lock_irqsave(&hvcsd->lock, flags);
52890- if (--hvcsd->port.count == 0) {
52891+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52892
52893 vio_disable_interrupts(hvcsd->vdev);
52894
52895@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52896
52897 free_irq(irq, hvcsd);
52898 return;
52899- } else if (hvcsd->port.count < 0) {
52900+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52901 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52902-			" is missmanaged.\n",
52902+			" is mismanaged.\n",
52903- hvcsd->vdev->unit_address, hvcsd->port.count);
52904+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52905 }
52906
52907 spin_unlock_irqrestore(&hvcsd->lock, flags);
52908@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52909
52910 spin_lock_irqsave(&hvcsd->lock, flags);
52911 /* Preserve this so that we know how many kref refs to put */
52912- temp_open_count = hvcsd->port.count;
52913+ temp_open_count = atomic_read(&hvcsd->port.count);
52914
52915 /*
52916 * Don't kref put inside the spinlock because the destruction
52917@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52918 tty->driver_data = NULL;
52919 hvcsd->port.tty = NULL;
52920
52921- hvcsd->port.count = 0;
52922+ atomic_set(&hvcsd->port.count, 0);
52923
52924 /* This will drop any buffered data on the floor which is OK in a hangup
52925 * scenario. */
52926@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52927 * the middle of a write operation? This is a crummy place to do this
52928 * but we want to keep it all in the spinlock.
52929 */
52930- if (hvcsd->port.count <= 0) {
52931+ if (atomic_read(&hvcsd->port.count) <= 0) {
52932 spin_unlock_irqrestore(&hvcsd->lock, flags);
52933 return -ENODEV;
52934 }
52935@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52936 {
52937 struct hvcs_struct *hvcsd = tty->driver_data;
52938
52939- if (!hvcsd || hvcsd->port.count <= 0)
52940+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52941 return 0;
52942
52943 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52944diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52945index 4190199..06d5bfa 100644
52946--- a/drivers/tty/hvc/hvsi.c
52947+++ b/drivers/tty/hvc/hvsi.c
52948@@ -85,7 +85,7 @@ struct hvsi_struct {
52949 int n_outbuf;
52950 uint32_t vtermno;
52951 uint32_t virq;
52952- atomic_t seqno; /* HVSI packet sequence number */
52953+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52954 uint16_t mctrl;
52955 uint8_t state; /* HVSI protocol state */
52956 uint8_t flags;
52957@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52958
52959 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52960 packet.hdr.len = sizeof(struct hvsi_query_response);
52961- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52962+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52963 packet.verb = VSV_SEND_VERSION_NUMBER;
52964 packet.u.version = HVSI_VERSION;
52965 packet.query_seqno = query_seqno+1;
52966@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52967
52968 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52969 packet.hdr.len = sizeof(struct hvsi_query);
52970- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52971+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52972 packet.verb = verb;
52973
52974 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52975@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52976 int wrote;
52977
52978 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52979- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52980+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52981 packet.hdr.len = sizeof(struct hvsi_control);
52982 packet.verb = VSV_SET_MODEM_CTL;
52983 packet.mask = HVSI_TSDTR;
52984@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52985 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52986
52987 packet.hdr.type = VS_DATA_PACKET_HEADER;
52988- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52989+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52990 packet.hdr.len = count + sizeof(struct hvsi_header);
52991 memcpy(&packet.data, buf, count);
52992
52993@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52994 struct hvsi_control packet __ALIGNED__;
52995
52996 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52997- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52998+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52999 packet.hdr.len = 6;
53000 packet.verb = VSV_CLOSE_PROTOCOL;
53001
53002@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53003
53004 tty_port_tty_set(&hp->port, tty);
53005 spin_lock_irqsave(&hp->lock, flags);
53006- hp->port.count++;
53007+ atomic_inc(&hp->port.count);
53008-	atomic_set(&hp->seqno, 0);
53008+	atomic_set_unchecked(&hp->seqno, 0);
53009 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53010 spin_unlock_irqrestore(&hp->lock, flags);
53011@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53012
53013 spin_lock_irqsave(&hp->lock, flags);
53014
53015- if (--hp->port.count == 0) {
53016+ if (atomic_dec_return(&hp->port.count) == 0) {
53017 tty_port_tty_set(&hp->port, NULL);
53018 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53019
53020@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53021
53022 spin_lock_irqsave(&hp->lock, flags);
53023 }
53024- } else if (hp->port.count < 0)
53025+ } else if (atomic_read(&hp->port.count) < 0)
53026 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53027- hp - hvsi_ports, hp->port.count);
53028+ hp - hvsi_ports, atomic_read(&hp->port.count));
53029
53030 spin_unlock_irqrestore(&hp->lock, flags);
53031 }
53032@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53033 tty_port_tty_set(&hp->port, NULL);
53034
53035 spin_lock_irqsave(&hp->lock, flags);
53036- hp->port.count = 0;
53037+ atomic_set(&hp->port.count, 0);
53038 hp->n_outbuf = 0;
53039 spin_unlock_irqrestore(&hp->lock, flags);
53040 }
53041diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53042index a270f04..7c77b5d 100644
53043--- a/drivers/tty/hvc/hvsi_lib.c
53044+++ b/drivers/tty/hvc/hvsi_lib.c
53045@@ -8,7 +8,7 @@
53046
53047 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53048 {
53049- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53050+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53051
53052 /* Assumes that always succeeds, works in practice */
53053 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53054@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53055
53056 /* Reset state */
53057 pv->established = 0;
53058- atomic_set(&pv->seqno, 0);
53059+ atomic_set_unchecked(&pv->seqno, 0);
53060
53061 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53062
53063diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53064index 345cebb..d5a1e9e 100644
53065--- a/drivers/tty/ipwireless/tty.c
53066+++ b/drivers/tty/ipwireless/tty.c
53067@@ -28,6 +28,7 @@
53068 #include <linux/tty_driver.h>
53069 #include <linux/tty_flip.h>
53070 #include <linux/uaccess.h>
53071+#include <asm/local.h>
53072
53073 #include "tty.h"
53074 #include "network.h"
53075@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53076 return -ENODEV;
53077
53078 mutex_lock(&tty->ipw_tty_mutex);
53079- if (tty->port.count == 0)
53080+ if (atomic_read(&tty->port.count) == 0)
53081 tty->tx_bytes_queued = 0;
53082
53083- tty->port.count++;
53084+ atomic_inc(&tty->port.count);
53085
53086 tty->port.tty = linux_tty;
53087 linux_tty->driver_data = tty;
53088@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53089
53090 static void do_ipw_close(struct ipw_tty *tty)
53091 {
53092- tty->port.count--;
53093-
53094- if (tty->port.count == 0) {
53095+ if (atomic_dec_return(&tty->port.count) == 0) {
53096 struct tty_struct *linux_tty = tty->port.tty;
53097
53098 if (linux_tty != NULL) {
53099@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53100 return;
53101
53102 mutex_lock(&tty->ipw_tty_mutex);
53103- if (tty->port.count == 0) {
53104+ if (atomic_read(&tty->port.count) == 0) {
53105 mutex_unlock(&tty->ipw_tty_mutex);
53106 return;
53107 }
53108@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53109
53110 mutex_lock(&tty->ipw_tty_mutex);
53111
53112- if (!tty->port.count) {
53113+ if (!atomic_read(&tty->port.count)) {
53114 mutex_unlock(&tty->ipw_tty_mutex);
53115 return;
53116 }
53117@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53118 return -ENODEV;
53119
53120 mutex_lock(&tty->ipw_tty_mutex);
53121- if (!tty->port.count) {
53122+ if (!atomic_read(&tty->port.count)) {
53123 mutex_unlock(&tty->ipw_tty_mutex);
53124 return -EINVAL;
53125 }
53126@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
53127 if (!tty)
53128 return -ENODEV;
53129
53130- if (!tty->port.count)
53131+ if (!atomic_read(&tty->port.count))
53132 return -EINVAL;
53133
53134 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
53135@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
53136 if (!tty)
53137 return 0;
53138
53139- if (!tty->port.count)
53140+ if (!atomic_read(&tty->port.count))
53141 return 0;
53142
53143 return tty->tx_bytes_queued;
53144@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
53145 if (!tty)
53146 return -ENODEV;
53147
53148- if (!tty->port.count)
53149+ if (!atomic_read(&tty->port.count))
53150 return -EINVAL;
53151
53152 return get_control_lines(tty);
53153@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
53154 if (!tty)
53155 return -ENODEV;
53156
53157- if (!tty->port.count)
53158+ if (!atomic_read(&tty->port.count))
53159 return -EINVAL;
53160
53161 return set_control_lines(tty, set, clear);
53162@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
53163 if (!tty)
53164 return -ENODEV;
53165
53166- if (!tty->port.count)
53167+ if (!atomic_read(&tty->port.count))
53168 return -EINVAL;
53169
53170 /* FIXME: Exactly how is the tty object locked here .. */
53171@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
53172 * are gone */
53173 mutex_lock(&ttyj->ipw_tty_mutex);
53174 }
53175- while (ttyj->port.count)
53176+ while (atomic_read(&ttyj->port.count))
53177 do_ipw_close(ttyj);
53178 ipwireless_disassociate_network_ttys(network,
53179 ttyj->channel_idx);
53180diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
53181index 14c54e0..1efd4f2 100644
53182--- a/drivers/tty/moxa.c
53183+++ b/drivers/tty/moxa.c
53184@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
53185 }
53186
53187 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
53188- ch->port.count++;
53189+ atomic_inc(&ch->port.count);
53190 tty->driver_data = ch;
53191 tty_port_tty_set(&ch->port, tty);
53192 mutex_lock(&ch->port.mutex);
53193diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
53194index c434376..114ce13 100644
53195--- a/drivers/tty/n_gsm.c
53196+++ b/drivers/tty/n_gsm.c
53197@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
53198 spin_lock_init(&dlci->lock);
53199 mutex_init(&dlci->mutex);
53200 dlci->fifo = &dlci->_fifo;
53201- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
53202+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
53203 kfree(dlci);
53204 return NULL;
53205 }
53206@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
53207 struct gsm_dlci *dlci = tty->driver_data;
53208 struct tty_port *port = &dlci->port;
53209
53210- port->count++;
53211+ atomic_inc(&port->count);
53212 tty_port_tty_set(port, tty);
53213
53214 dlci->modem_rx = 0;
53215diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
53216index cf6e0f2..4283167 100644
53217--- a/drivers/tty/n_tty.c
53218+++ b/drivers/tty/n_tty.c
53219@@ -116,7 +116,7 @@ struct n_tty_data {
53220 int minimum_to_wake;
53221
53222 /* consumer-published */
53223- size_t read_tail;
53224+ size_t read_tail __intentional_overflow(-1);
53225 size_t line_start;
53226
53227 /* protected by output lock */
53228@@ -2547,6 +2547,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
53229 {
53230 *ops = tty_ldisc_N_TTY;
53231 ops->owner = NULL;
53232- ops->refcount = ops->flags = 0;
53233+ atomic_set(&ops->refcount, 0);
53234+ ops->flags = 0;
53235 }
53236 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
53237diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
53238index e72ee62..d977ad9 100644
53239--- a/drivers/tty/pty.c
53240+++ b/drivers/tty/pty.c
53241@@ -848,8 +848,10 @@ static void __init unix98_pty_init(void)
53242 panic("Couldn't register Unix98 pts driver");
53243
53244 /* Now create the /dev/ptmx special device */
53245+ pax_open_kernel();
53246 tty_default_fops(&ptmx_fops);
53247- ptmx_fops.open = ptmx_open;
53248+ *(void **)&ptmx_fops.open = ptmx_open;
53249+ pax_close_kernel();
53250
53251 cdev_init(&ptmx_cdev, &ptmx_fops);
53252 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
53253diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53254index c8dd8dc..dca6cfd 100644
53255--- a/drivers/tty/rocket.c
53256+++ b/drivers/tty/rocket.c
53257@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53258 tty->driver_data = info;
53259 tty_port_tty_set(port, tty);
53260
53261- if (port->count++ == 0) {
53262+ if (atomic_inc_return(&port->count) == 1) {
53263 atomic_inc(&rp_num_ports_open);
53264
53265 #ifdef ROCKET_DEBUG_OPEN
53266@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53267 #endif
53268 }
53269 #ifdef ROCKET_DEBUG_OPEN
53270- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53271+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
53272 #endif
53273
53274 /*
53275@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
53276 spin_unlock_irqrestore(&info->port.lock, flags);
53277 return;
53278 }
53279- if (info->port.count)
53280+ if (atomic_read(&info->port.count))
53281 atomic_dec(&rp_num_ports_open);
53282 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
53283 spin_unlock_irqrestore(&info->port.lock, flags);
53284diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
53285index aa28209..e08fb85 100644
53286--- a/drivers/tty/serial/ioc4_serial.c
53287+++ b/drivers/tty/serial/ioc4_serial.c
53288@@ -437,7 +437,7 @@ struct ioc4_soft {
53289 } is_intr_info[MAX_IOC4_INTR_ENTS];
53290
53291 /* Number of entries active in the above array */
53292- atomic_t is_num_intrs;
53293+ atomic_unchecked_t is_num_intrs;
53294 } is_intr_type[IOC4_NUM_INTR_TYPES];
53295
53296 /* is_ir_lock must be held while
53297@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
53298 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
53299 || (type == IOC4_OTHER_INTR_TYPE)));
53300
53301- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
53302+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
53303 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
53304
53305 /* Save off the lower level interrupt handler */
53306@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
53307
53308 soft = arg;
53309 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
53310- num_intrs = (int)atomic_read(
53311+ num_intrs = (int)atomic_read_unchecked(
53312 &soft->is_intr_type[intr_type].is_num_intrs);
53313
53314 this_mir = this_ir = pending_intrs(soft, intr_type);
53315diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
53316index 129dc5b..1da5bb8 100644
53317--- a/drivers/tty/serial/kgdb_nmi.c
53318+++ b/drivers/tty/serial/kgdb_nmi.c
53319@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
53320 * I/O utilities that messages sent to the console will automatically
53321 * be displayed on the dbg_io.
53322 */
53323- dbg_io_ops->is_console = true;
53324+ pax_open_kernel();
53325+ *(int *)&dbg_io_ops->is_console = true;
53326+ pax_close_kernel();
53327
53328 return 0;
53329 }
53330diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
53331index a260cde..6b2b5ce 100644
53332--- a/drivers/tty/serial/kgdboc.c
53333+++ b/drivers/tty/serial/kgdboc.c
53334@@ -24,8 +24,9 @@
53335 #define MAX_CONFIG_LEN 40
53336
53337 static struct kgdb_io kgdboc_io_ops;
53338+static struct kgdb_io kgdboc_io_ops_console;
53339
53340-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
53341+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
53342 static int configured = -1;
53343
53344 static char config[MAX_CONFIG_LEN];
53345@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
53346 kgdboc_unregister_kbd();
53347 if (configured == 1)
53348 kgdb_unregister_io_module(&kgdboc_io_ops);
53349+ else if (configured == 2)
53350+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
53351 }
53352
53353 static int configure_kgdboc(void)
53354@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
53355 int err;
53356 char *cptr = config;
53357 struct console *cons;
53358+ int is_console = 0;
53359
53360 err = kgdboc_option_setup(config);
53361 if (err || !strlen(config) || isspace(config[0]))
53362 goto noconfig;
53363
53364 err = -ENODEV;
53365- kgdboc_io_ops.is_console = 0;
53366 kgdb_tty_driver = NULL;
53367
53368 kgdboc_use_kms = 0;
53369@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
53370 int idx;
53371 if (cons->device && cons->device(cons, &idx) == p &&
53372 idx == tty_line) {
53373- kgdboc_io_ops.is_console = 1;
53374+ is_console = 1;
53375 break;
53376 }
53377 cons = cons->next;
53378@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
53379 kgdb_tty_line = tty_line;
53380
53381 do_register:
53382- err = kgdb_register_io_module(&kgdboc_io_ops);
53383+ if (is_console) {
53384+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
53385+ configured = 2;
53386+ } else {
53387+ err = kgdb_register_io_module(&kgdboc_io_ops);
53388+ configured = 1;
53389+ }
53390 if (err)
53391 goto noconfig;
53392
53393@@ -205,8 +214,6 @@ do_register:
53394 if (err)
53395 goto nmi_con_failed;
53396
53397- configured = 1;
53398-
53399 return 0;
53400
53401 nmi_con_failed:
53402@@ -223,7 +230,7 @@ noconfig:
53403 static int __init init_kgdboc(void)
53404 {
53405 /* Already configured? */
53406- if (configured == 1)
53407+ if (configured >= 1)
53408 return 0;
53409
53410 return configure_kgdboc();
53411@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53412 if (config[len - 1] == '\n')
53413 config[len - 1] = '\0';
53414
53415- if (configured == 1)
53416+ if (configured >= 1)
53417 cleanup_kgdboc();
53418
53419 /* Go and configure with the new params. */
53420@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53421 .post_exception = kgdboc_post_exp_handler,
53422 };
53423
53424+static struct kgdb_io kgdboc_io_ops_console = {
53425+ .name = "kgdboc",
53426+ .read_char = kgdboc_get_char,
53427+ .write_char = kgdboc_put_char,
53428+ .pre_exception = kgdboc_pre_exp_handler,
53429+ .post_exception = kgdboc_post_exp_handler,
53430+ .is_console = 1
53431+};
53432+
53433 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53434 /* This is only available if kgdboc is a built in for early debugging */
53435 static int __init kgdboc_early_init(char *opt)
53436diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53437index b73889c..9f74f0a 100644
53438--- a/drivers/tty/serial/msm_serial.c
53439+++ b/drivers/tty/serial/msm_serial.c
53440@@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
53441 .cons = MSM_CONSOLE,
53442 };
53443
53444-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53445+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53446
53447 static const struct of_device_id msm_uartdm_table[] = {
53448 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53449@@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53450 line = pdev->id;
53451
53452 if (line < 0)
53453- line = atomic_inc_return(&msm_uart_next_id) - 1;
53454+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53455
53456 if (unlikely(line < 0 || line >= UART_NR))
53457 return -ENXIO;
53458diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53459index cf08876..711e0bf 100644
53460--- a/drivers/tty/serial/samsung.c
53461+++ b/drivers/tty/serial/samsung.c
53462@@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53463 ourport->tx_in_progress = 0;
53464 }
53465
53466+static int s3c64xx_serial_startup(struct uart_port *port);
53467 static int s3c24xx_serial_startup(struct uart_port *port)
53468 {
53469 struct s3c24xx_uart_port *ourport = to_ourport(port);
53470 int ret;
53471
53472+ /* Startup sequence is different for s3c64xx and higher SoC's */
53473+ if (s3c24xx_serial_has_interrupt_mask(port))
53474+ return s3c64xx_serial_startup(port);
53475+
53476 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53477 port, (unsigned long long)port->mapbase, port->membase);
53478
53479@@ -1697,10 +1702,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53480 /* setup info for port */
53481 port->dev = &platdev->dev;
53482
53483- /* Startup sequence is different for s3c64xx and higher SoC's */
53484- if (s3c24xx_serial_has_interrupt_mask(port))
53485- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53486-
53487 port->uartclk = 1;
53488
53489 if (cfg->uart_flags & UPF_CONS_FLOW) {
53490diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53491index 6a1055a..5ca9ad9 100644
53492--- a/drivers/tty/serial/serial_core.c
53493+++ b/drivers/tty/serial/serial_core.c
53494@@ -1377,7 +1377,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53495 state = drv->state + tty->index;
53496 port = &state->port;
53497 spin_lock_irq(&port->lock);
53498- --port->count;
53499+ atomic_dec(&port->count);
53500 spin_unlock_irq(&port->lock);
53501 return;
53502 }
53503@@ -1387,7 +1387,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53504
53505 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53506
53507- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53508+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53509 return;
53510
53511 /*
53512@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
53513 uart_flush_buffer(tty);
53514 uart_shutdown(tty, state);
53515 spin_lock_irqsave(&port->lock, flags);
53516- port->count = 0;
53517+ atomic_set(&port->count, 0);
53518 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53519 spin_unlock_irqrestore(&port->lock, flags);
53520 tty_port_tty_set(port, NULL);
53521@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53522 pr_debug("uart_open(%d) called\n", line);
53523
53524 spin_lock_irq(&port->lock);
53525- ++port->count;
53526+ atomic_inc(&port->count);
53527 spin_unlock_irq(&port->lock);
53528
53529 /*
53530diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53531index b799170..87dafd5 100644
53532--- a/drivers/tty/synclink.c
53533+++ b/drivers/tty/synclink.c
53534@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53535
53536 if (debug_level >= DEBUG_LEVEL_INFO)
53537 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53538- __FILE__,__LINE__, info->device_name, info->port.count);
53539+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53540
53541 if (tty_port_close_start(&info->port, tty, filp) == 0)
53542 goto cleanup;
53543@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53544 cleanup:
53545 if (debug_level >= DEBUG_LEVEL_INFO)
53546 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53547- tty->driver->name, info->port.count);
53548+ tty->driver->name, atomic_read(&info->port.count));
53549
53550 } /* end of mgsl_close() */
53551
53552@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53553
53554 mgsl_flush_buffer(tty);
53555 shutdown(info);
53556-
53557- info->port.count = 0;
53558+
53559+ atomic_set(&info->port.count, 0);
53560 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53561 info->port.tty = NULL;
53562
53563@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53564
53565 if (debug_level >= DEBUG_LEVEL_INFO)
53566 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53567- __FILE__,__LINE__, tty->driver->name, port->count );
53568+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53569
53570 spin_lock_irqsave(&info->irq_spinlock, flags);
53571- port->count--;
53572+ atomic_dec(&port->count);
53573 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53574 port->blocked_open++;
53575
53576@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53577
53578 if (debug_level >= DEBUG_LEVEL_INFO)
53579 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53580- __FILE__,__LINE__, tty->driver->name, port->count );
53581+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53582
53583 tty_unlock(tty);
53584 schedule();
53585@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53586
53587 /* FIXME: Racy on hangup during close wait */
53588 if (!tty_hung_up_p(filp))
53589- port->count++;
53590+ atomic_inc(&port->count);
53591 port->blocked_open--;
53592
53593 if (debug_level >= DEBUG_LEVEL_INFO)
53594 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53595- __FILE__,__LINE__, tty->driver->name, port->count );
53596+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53597
53598 if (!retval)
53599 port->flags |= ASYNC_NORMAL_ACTIVE;
53600@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53601
53602 if (debug_level >= DEBUG_LEVEL_INFO)
53603 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53604- __FILE__,__LINE__,tty->driver->name, info->port.count);
53605+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53606
53607 /* If port is closing, signal caller to try again */
53608 if (info->port.flags & ASYNC_CLOSING){
53609@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53610 spin_unlock_irqrestore(&info->netlock, flags);
53611 goto cleanup;
53612 }
53613- info->port.count++;
53614+ atomic_inc(&info->port.count);
53615 spin_unlock_irqrestore(&info->netlock, flags);
53616
53617- if (info->port.count == 1) {
53618+ if (atomic_read(&info->port.count) == 1) {
53619 /* 1st open on this device, init hardware */
53620 retval = startup(info);
53621 if (retval < 0)
53622@@ -3442,8 +3442,8 @@ cleanup:
53623 if (retval) {
53624 if (tty->count == 1)
53625 info->port.tty = NULL; /* tty layer will release tty struct */
53626- if(info->port.count)
53627- info->port.count--;
53628+ if (atomic_read(&info->port.count))
53629+ atomic_dec(&info->port.count);
53630 }
53631
53632 return retval;
53633@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53634 unsigned short new_crctype;
53635
53636 /* return error if TTY interface open */
53637- if (info->port.count)
53638+ if (atomic_read(&info->port.count))
53639 return -EBUSY;
53640
53641 switch (encoding)
53642@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53643
53644 /* arbitrate between network and tty opens */
53645 spin_lock_irqsave(&info->netlock, flags);
53646- if (info->port.count != 0 || info->netcount != 0) {
53647+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53648 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53649 spin_unlock_irqrestore(&info->netlock, flags);
53650 return -EBUSY;
53651@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53652 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53653
53654 /* return error if TTY interface open */
53655- if (info->port.count)
53656+ if (atomic_read(&info->port.count))
53657 return -EBUSY;
53658
53659 if (cmd != SIOCWANDEV)
53660diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53661index 0e8c39b..e0cb171 100644
53662--- a/drivers/tty/synclink_gt.c
53663+++ b/drivers/tty/synclink_gt.c
53664@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53665 tty->driver_data = info;
53666 info->port.tty = tty;
53667
53668- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53669+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53670
53671 /* If port is closing, signal caller to try again */
53672 if (info->port.flags & ASYNC_CLOSING){
53673@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53674 mutex_unlock(&info->port.mutex);
53675 goto cleanup;
53676 }
53677- info->port.count++;
53678+ atomic_inc(&info->port.count);
53679 spin_unlock_irqrestore(&info->netlock, flags);
53680
53681- if (info->port.count == 1) {
53682+ if (atomic_read(&info->port.count) == 1) {
53683 /* 1st open on this device, init hardware */
53684 retval = startup(info);
53685 if (retval < 0) {
53686@@ -715,8 +715,8 @@ cleanup:
53687 if (retval) {
53688 if (tty->count == 1)
53689 info->port.tty = NULL; /* tty layer will release tty struct */
53690- if(info->port.count)
53691- info->port.count--;
53692+ if(atomic_read(&info->port.count))
53693+ atomic_dec(&info->port.count);
53694 }
53695
53696 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53697@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53698
53699 if (sanity_check(info, tty->name, "close"))
53700 return;
53701- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53702+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53703
53704 if (tty_port_close_start(&info->port, tty, filp) == 0)
53705 goto cleanup;
53706@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53707 tty_port_close_end(&info->port, tty);
53708 info->port.tty = NULL;
53709 cleanup:
53710- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53711+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53712 }
53713
53714 static void hangup(struct tty_struct *tty)
53715@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53716 shutdown(info);
53717
53718 spin_lock_irqsave(&info->port.lock, flags);
53719- info->port.count = 0;
53720+ atomic_set(&info->port.count, 0);
53721 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53722 info->port.tty = NULL;
53723 spin_unlock_irqrestore(&info->port.lock, flags);
53724@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53725 unsigned short new_crctype;
53726
53727 /* return error if TTY interface open */
53728- if (info->port.count)
53729+ if (atomic_read(&info->port.count))
53730 return -EBUSY;
53731
53732 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53733@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53734
53735 /* arbitrate between network and tty opens */
53736 spin_lock_irqsave(&info->netlock, flags);
53737- if (info->port.count != 0 || info->netcount != 0) {
53738+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53739 DBGINFO(("%s hdlc_open busy\n", dev->name));
53740 spin_unlock_irqrestore(&info->netlock, flags);
53741 return -EBUSY;
53742@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53743 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53744
53745 /* return error if TTY interface open */
53746- if (info->port.count)
53747+ if (atomic_read(&info->port.count))
53748 return -EBUSY;
53749
53750 if (cmd != SIOCWANDEV)
53751@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53752 if (port == NULL)
53753 continue;
53754 spin_lock(&port->lock);
53755- if ((port->port.count || port->netcount) &&
53756+ if ((atomic_read(&port->port.count) || port->netcount) &&
53757 port->pending_bh && !port->bh_running &&
53758 !port->bh_requested) {
53759 DBGISR(("%s bh queued\n", port->device_name));
53760@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53761 add_wait_queue(&port->open_wait, &wait);
53762
53763 spin_lock_irqsave(&info->lock, flags);
53764- port->count--;
53765+ atomic_dec(&port->count);
53766 spin_unlock_irqrestore(&info->lock, flags);
53767 port->blocked_open++;
53768
53769@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53770 remove_wait_queue(&port->open_wait, &wait);
53771
53772 if (!tty_hung_up_p(filp))
53773- port->count++;
53774+ atomic_inc(&port->count);
53775 port->blocked_open--;
53776
53777 if (!retval)
53778diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53779index c3f9091..abe4601 100644
53780--- a/drivers/tty/synclinkmp.c
53781+++ b/drivers/tty/synclinkmp.c
53782@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53783
53784 if (debug_level >= DEBUG_LEVEL_INFO)
53785 printk("%s(%d):%s open(), old ref count = %d\n",
53786- __FILE__,__LINE__,tty->driver->name, info->port.count);
53787+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53788
53789 /* If port is closing, signal caller to try again */
53790 if (info->port.flags & ASYNC_CLOSING){
53791@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53792 spin_unlock_irqrestore(&info->netlock, flags);
53793 goto cleanup;
53794 }
53795- info->port.count++;
53796+ atomic_inc(&info->port.count);
53797 spin_unlock_irqrestore(&info->netlock, flags);
53798
53799- if (info->port.count == 1) {
53800+ if (atomic_read(&info->port.count) == 1) {
53801 /* 1st open on this device, init hardware */
53802 retval = startup(info);
53803 if (retval < 0)
53804@@ -796,8 +796,8 @@ cleanup:
53805 if (retval) {
53806 if (tty->count == 1)
53807 info->port.tty = NULL; /* tty layer will release tty struct */
53808- if(info->port.count)
53809- info->port.count--;
53810+ if(atomic_read(&info->port.count))
53811+ atomic_dec(&info->port.count);
53812 }
53813
53814 return retval;
53815@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53816
53817 if (debug_level >= DEBUG_LEVEL_INFO)
53818 printk("%s(%d):%s close() entry, count=%d\n",
53819- __FILE__,__LINE__, info->device_name, info->port.count);
53820+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53821
53822 if (tty_port_close_start(&info->port, tty, filp) == 0)
53823 goto cleanup;
53824@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53825 cleanup:
53826 if (debug_level >= DEBUG_LEVEL_INFO)
53827 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53828- tty->driver->name, info->port.count);
53829+ tty->driver->name, atomic_read(&info->port.count));
53830 }
53831
53832 /* Called by tty_hangup() when a hangup is signaled.
53833@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53834 shutdown(info);
53835
53836 spin_lock_irqsave(&info->port.lock, flags);
53837- info->port.count = 0;
53838+ atomic_set(&info->port.count, 0);
53839 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53840 info->port.tty = NULL;
53841 spin_unlock_irqrestore(&info->port.lock, flags);
53842@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53843 unsigned short new_crctype;
53844
53845 /* return error if TTY interface open */
53846- if (info->port.count)
53847+ if (atomic_read(&info->port.count))
53848 return -EBUSY;
53849
53850 switch (encoding)
53851@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53852
53853 /* arbitrate between network and tty opens */
53854 spin_lock_irqsave(&info->netlock, flags);
53855- if (info->port.count != 0 || info->netcount != 0) {
53856+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53857 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53858 spin_unlock_irqrestore(&info->netlock, flags);
53859 return -EBUSY;
53860@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53861 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53862
53863 /* return error if TTY interface open */
53864- if (info->port.count)
53865+ if (atomic_read(&info->port.count))
53866 return -EBUSY;
53867
53868 if (cmd != SIOCWANDEV)
53869@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53870 * do not request bottom half processing if the
53871 * device is not open in a normal mode.
53872 */
53873- if ( port && (port->port.count || port->netcount) &&
53874+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53875 port->pending_bh && !port->bh_running &&
53876 !port->bh_requested ) {
53877 if ( debug_level >= DEBUG_LEVEL_ISR )
53878@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53879
53880 if (debug_level >= DEBUG_LEVEL_INFO)
53881 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53882- __FILE__,__LINE__, tty->driver->name, port->count );
53883+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53884
53885 spin_lock_irqsave(&info->lock, flags);
53886- port->count--;
53887+ atomic_dec(&port->count);
53888 spin_unlock_irqrestore(&info->lock, flags);
53889 port->blocked_open++;
53890
53891@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53892
53893 if (debug_level >= DEBUG_LEVEL_INFO)
53894 printk("%s(%d):%s block_til_ready() count=%d\n",
53895- __FILE__,__LINE__, tty->driver->name, port->count );
53896+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53897
53898 tty_unlock(tty);
53899 schedule();
53900@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53901 set_current_state(TASK_RUNNING);
53902 remove_wait_queue(&port->open_wait, &wait);
53903 if (!tty_hung_up_p(filp))
53904- port->count++;
53905+ atomic_inc(&port->count);
53906 port->blocked_open--;
53907
53908 if (debug_level >= DEBUG_LEVEL_INFO)
53909 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53910- __FILE__,__LINE__, tty->driver->name, port->count );
53911+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53912
53913 if (!retval)
53914 port->flags |= ASYNC_NORMAL_ACTIVE;
53915diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53916index 259a4d5..9b0c9e7 100644
53917--- a/drivers/tty/sysrq.c
53918+++ b/drivers/tty/sysrq.c
53919@@ -1085,7 +1085,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53920 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53921 size_t count, loff_t *ppos)
53922 {
53923- if (count) {
53924+ if (count && capable(CAP_SYS_ADMIN)) {
53925 char c;
53926
53927 if (get_user(c, buf))
53928diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53929index 2bb4dfc..a7f6e86 100644
53930--- a/drivers/tty/tty_io.c
53931+++ b/drivers/tty/tty_io.c
53932@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53933
53934 void tty_default_fops(struct file_operations *fops)
53935 {
53936- *fops = tty_fops;
53937+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53938 }
53939
53940 /*
53941diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53942index 3737f55..7cef448 100644
53943--- a/drivers/tty/tty_ldisc.c
53944+++ b/drivers/tty/tty_ldisc.c
53945@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53946 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53947 tty_ldiscs[disc] = new_ldisc;
53948 new_ldisc->num = disc;
53949- new_ldisc->refcount = 0;
53950+ atomic_set(&new_ldisc->refcount, 0);
53951 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53952
53953 return ret;
53954@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53955 return -EINVAL;
53956
53957 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53958- if (tty_ldiscs[disc]->refcount)
53959+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53960 ret = -EBUSY;
53961 else
53962 tty_ldiscs[disc] = NULL;
53963@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53964 if (ldops) {
53965 ret = ERR_PTR(-EAGAIN);
53966 if (try_module_get(ldops->owner)) {
53967- ldops->refcount++;
53968+ atomic_inc(&ldops->refcount);
53969 ret = ldops;
53970 }
53971 }
53972@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53973 unsigned long flags;
53974
53975 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53976- ldops->refcount--;
53977+ atomic_dec(&ldops->refcount);
53978 module_put(ldops->owner);
53979 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53980 }
53981diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53982index 40b31835..94d92ae 100644
53983--- a/drivers/tty/tty_port.c
53984+++ b/drivers/tty/tty_port.c
53985@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53986 unsigned long flags;
53987
53988 spin_lock_irqsave(&port->lock, flags);
53989- port->count = 0;
53990+ atomic_set(&port->count, 0);
53991 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53992 tty = port->tty;
53993 if (tty)
53994@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53995
53996 /* The port lock protects the port counts */
53997 spin_lock_irqsave(&port->lock, flags);
53998- port->count--;
53999+ atomic_dec(&port->count);
54000 port->blocked_open++;
54001 spin_unlock_irqrestore(&port->lock, flags);
54002
54003@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54004 we must not mess that up further */
54005 spin_lock_irqsave(&port->lock, flags);
54006 if (!tty_hung_up_p(filp))
54007- port->count++;
54008+ atomic_inc(&port->count);
54009 port->blocked_open--;
54010 if (retval == 0)
54011 port->flags |= ASYNC_NORMAL_ACTIVE;
54012@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
54013 return 0;
54014
54015 spin_lock_irqsave(&port->lock, flags);
54016- if (tty->count == 1 && port->count != 1) {
54017+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54018 printk(KERN_WARNING
54019 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54020- port->count);
54021- port->count = 1;
54022+ atomic_read(&port->count));
54023+ atomic_set(&port->count, 1);
54024 }
54025- if (--port->count < 0) {
54026+ if (atomic_dec_return(&port->count) < 0) {
54027 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54028- port->count);
54029- port->count = 0;
54030+ atomic_read(&port->count));
54031+ atomic_set(&port->count, 0);
54032 }
54033
54034- if (port->count) {
54035+ if (atomic_read(&port->count)) {
54036 spin_unlock_irqrestore(&port->lock, flags);
54037 return 0;
54038 }
54039@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54040 struct file *filp)
54041 {
54042 spin_lock_irq(&port->lock);
54043- ++port->count;
54044+ atomic_inc(&port->count);
54045 spin_unlock_irq(&port->lock);
54046 tty_port_tty_set(port, tty);
54047
54048diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54049index 8a89f6e..50b32af 100644
54050--- a/drivers/tty/vt/keyboard.c
54051+++ b/drivers/tty/vt/keyboard.c
54052@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54053 kbd->kbdmode == VC_OFF) &&
54054 value != KVAL(K_SAK))
54055 return; /* SAK is allowed even in raw mode */
54056+
54057+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54058+ {
54059+ void *func = fn_handler[value];
54060+ if (func == fn_show_state || func == fn_show_ptregs ||
54061+ func == fn_show_mem)
54062+ return;
54063+ }
54064+#endif
54065+
54066 fn_handler[value](vc);
54067 }
54068
54069@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54070 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54071 return -EFAULT;
54072
54073- if (!capable(CAP_SYS_TTY_CONFIG))
54074- perm = 0;
54075-
54076 switch (cmd) {
54077 case KDGKBENT:
54078 /* Ensure another thread doesn't free it under us */
54079@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54080 spin_unlock_irqrestore(&kbd_event_lock, flags);
54081 return put_user(val, &user_kbe->kb_value);
54082 case KDSKBENT:
54083+ if (!capable(CAP_SYS_TTY_CONFIG))
54084+ perm = 0;
54085+
54086 if (!perm)
54087 return -EPERM;
54088 if (!i && v == K_NOSUCHMAP) {
54089@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54090 int i, j, k;
54091 int ret;
54092
54093- if (!capable(CAP_SYS_TTY_CONFIG))
54094- perm = 0;
54095-
54096 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54097 if (!kbs) {
54098 ret = -ENOMEM;
54099@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54100 kfree(kbs);
54101 return ((p && *p) ? -EOVERFLOW : 0);
54102 case KDSKBSENT:
54103+ if (!capable(CAP_SYS_TTY_CONFIG))
54104+ perm = 0;
54105+
54106 if (!perm) {
54107 ret = -EPERM;
54108 goto reterr;
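
The keyboard.c hunks also relocate the CAP_SYS_TTY_CONFIG check from the top of vt_do_kdsk_ioctl()/vt_do_kdgkb_ioctl() into the KDSKBENT/KDSKBSENT branches, so the capability (and whatever auditing a capable() call triggers) is consulted only when a keymap is being modified; reading entries needs no privilege either way. A sketch of the reshaped dispatch (names abbreviated, helpers hypothetical):

switch (cmd) {
case KDGKBENT:                          /* read path: no capability check */
        return copy_entry_to_user(user_kbe);
case KDSKBENT:                          /* write path: gate here instead  */
        if (!capable(CAP_SYS_TTY_CONFIG))
                perm = 0;
        if (!perm)
                return -EPERM;
        return set_entry(user_kbe);
}
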
54109diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54110index 6276f13..84f2449 100644
54111--- a/drivers/uio/uio.c
54112+++ b/drivers/uio/uio.c
54113@@ -25,6 +25,7 @@
54114 #include <linux/kobject.h>
54115 #include <linux/cdev.h>
54116 #include <linux/uio_driver.h>
54117+#include <asm/local.h>
54118
54119 #define UIO_MAX_DEVICES (1U << MINORBITS)
54120
54121@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
54122 struct device_attribute *attr, char *buf)
54123 {
54124 struct uio_device *idev = dev_get_drvdata(dev);
54125- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
54126+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
54127 }
54128 static DEVICE_ATTR_RO(event);
54129
54130@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
54131 {
54132 struct uio_device *idev = info->uio_dev;
54133
54134- atomic_inc(&idev->event);
54135+ atomic_inc_unchecked(&idev->event);
54136 wake_up_interruptible(&idev->wait);
54137 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
54138 }
54139@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
54140 }
54141
54142 listener->dev = idev;
54143- listener->event_count = atomic_read(&idev->event);
54144+ listener->event_count = atomic_read_unchecked(&idev->event);
54145 filep->private_data = listener;
54146
54147 if (idev->info->open) {
54148@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
54149 return -EIO;
54150
54151 poll_wait(filep, &idev->wait, wait);
54152- if (listener->event_count != atomic_read(&idev->event))
54153+ if (listener->event_count != atomic_read_unchecked(&idev->event))
54154 return POLLIN | POLLRDNORM;
54155 return 0;
54156 }
54157@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
54158 do {
54159 set_current_state(TASK_INTERRUPTIBLE);
54160
54161- event_count = atomic_read(&idev->event);
54162+ event_count = atomic_read_unchecked(&idev->event);
54163 if (event_count != listener->event_count) {
54164 if (copy_to_user(buf, &event_count, count))
54165 retval = -EFAULT;
54166@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
54167 static int uio_find_mem_index(struct vm_area_struct *vma)
54168 {
54169 struct uio_device *idev = vma->vm_private_data;
54170+ unsigned long size;
54171
54172 if (vma->vm_pgoff < MAX_UIO_MAPS) {
54173- if (idev->info->mem[vma->vm_pgoff].size == 0)
54174+ size = idev->info->mem[vma->vm_pgoff].size;
54175+ if (size == 0)
54176+ return -1;
54177+ if (vma->vm_end - vma->vm_start > size)
54178 return -1;
54179 return (int)vma->vm_pgoff;
54180 }
54181@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
54182 idev->owner = owner;
54183 idev->info = info;
54184 init_waitqueue_head(&idev->wait);
54185- atomic_set(&idev->event, 0);
54186+ atomic_set_unchecked(&idev->event, 0);
54187
54188 ret = uio_get_minor(idev);
54189 if (ret)
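
Besides the _unchecked event counter, uio_find_mem_index() gains a range check: a mapping request is now rejected when the vma spans more than the backing memory region, not only when the region is empty. The pattern in isolation (region_size stands in for idev->info->mem[vma->vm_pgoff].size):

unsigned long vma_len = vma->vm_end - vma->vm_start;

if (region_size == 0)
        return -1;              /* nothing to map               */
if (vma_len > region_size)
        return -1;              /* would expose trailing memory */
return (int)vma->vm_pgoff;
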
54190diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
54191index 813d4d3..a71934f 100644
54192--- a/drivers/usb/atm/cxacru.c
54193+++ b/drivers/usb/atm/cxacru.c
54194@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
54195 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
54196 if (ret < 2)
54197 return -EINVAL;
54198- if (index < 0 || index > 0x7f)
54199+ if (index > 0x7f)
54200 return -EINVAL;
54201 pos += tmp;
54202
54203diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
54204index dada014..1d0d517 100644
54205--- a/drivers/usb/atm/usbatm.c
54206+++ b/drivers/usb/atm/usbatm.c
54207@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54208 if (printk_ratelimit())
54209 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
54210 __func__, vpi, vci);
54211- atomic_inc(&vcc->stats->rx_err);
54212+ atomic_inc_unchecked(&vcc->stats->rx_err);
54213 return;
54214 }
54215
54216@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54217 if (length > ATM_MAX_AAL5_PDU) {
54218 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
54219 __func__, length, vcc);
54220- atomic_inc(&vcc->stats->rx_err);
54221+ atomic_inc_unchecked(&vcc->stats->rx_err);
54222 goto out;
54223 }
54224
54225@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54226 if (sarb->len < pdu_length) {
54227 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
54228 __func__, pdu_length, sarb->len, vcc);
54229- atomic_inc(&vcc->stats->rx_err);
54230+ atomic_inc_unchecked(&vcc->stats->rx_err);
54231 goto out;
54232 }
54233
54234 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
54235 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
54236 __func__, vcc);
54237- atomic_inc(&vcc->stats->rx_err);
54238+ atomic_inc_unchecked(&vcc->stats->rx_err);
54239 goto out;
54240 }
54241
54242@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54243 if (printk_ratelimit())
54244 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
54245 __func__, length);
54246- atomic_inc(&vcc->stats->rx_drop);
54247+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54248 goto out;
54249 }
54250
54251@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54252
54253 vcc->push(vcc, skb);
54254
54255- atomic_inc(&vcc->stats->rx);
54256+ atomic_inc_unchecked(&vcc->stats->rx);
54257 out:
54258 skb_trim(sarb, 0);
54259 }
54260@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54261 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54262
54263 usbatm_pop(vcc, skb);
54264- atomic_inc(&vcc->stats->tx);
54265+ atomic_inc_unchecked(&vcc->stats->tx);
54266
54267 skb = skb_dequeue(&instance->sndqueue);
54268 }
54269@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
54270 if (!left--)
54271 return sprintf(page,
54272 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
54273- atomic_read(&atm_dev->stats.aal5.tx),
54274- atomic_read(&atm_dev->stats.aal5.tx_err),
54275- atomic_read(&atm_dev->stats.aal5.rx),
54276- atomic_read(&atm_dev->stats.aal5.rx_err),
54277- atomic_read(&atm_dev->stats.aal5.rx_drop));
54278+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
54279+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
54280+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
54281+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
54282+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
54283
54284 if (!left--) {
54285 if (instance->disconnected)
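
The usbatm hunks above, like many that follow, switch pure statistics counters (rx/tx and error tallies) from atomic_* to the atomic_*_unchecked variants. Under PaX's REFCOUNT protection, ordinary atomic_t increments saturate on overflow to catch reference-count wraps; counters for which wraparound is harmless are moved to the _unchecked type so they keep plain modular arithmetic and cannot trip a false positive. A sketch of the split:

atomic_t refs;                  /* protected: a wrap here is a bug      */
atomic_unchecked_t rx_err;      /* statistic: wraparound is acceptable  */

atomic_inc(&refs);              /* saturates instead of wrapping (PaX)  */
atomic_inc_unchecked(&rx_err);  /* ordinary increment, never trapped    */
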
54286diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
54287index 2a3bbdf..91d72cf 100644
54288--- a/drivers/usb/core/devices.c
54289+++ b/drivers/usb/core/devices.c
54290@@ -126,7 +126,7 @@ static const char format_endpt[] =
54291 * time it gets called.
54292 */
54293 static struct device_connect_event {
54294- atomic_t count;
54295+ atomic_unchecked_t count;
54296 wait_queue_head_t wait;
54297 } device_event = {
54298 .count = ATOMIC_INIT(1),
54299@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
54300
54301 void usbfs_conn_disc_event(void)
54302 {
54303- atomic_add(2, &device_event.count);
54304+ atomic_add_unchecked(2, &device_event.count);
54305 wake_up(&device_event.wait);
54306 }
54307
54308@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
54309
54310 poll_wait(file, &device_event.wait, wait);
54311
54312- event_count = atomic_read(&device_event.count);
54313+ event_count = atomic_read_unchecked(&device_event.count);
54314 if (file->f_version != event_count) {
54315 file->f_version = event_count;
54316 return POLLIN | POLLRDNORM;
54317diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
54318index 1163553..f292679 100644
54319--- a/drivers/usb/core/devio.c
54320+++ b/drivers/usb/core/devio.c
54321@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54322 struct usb_dev_state *ps = file->private_data;
54323 struct usb_device *dev = ps->dev;
54324 ssize_t ret = 0;
54325- unsigned len;
54326+ size_t len;
54327 loff_t pos;
54328 int i;
54329
54330@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54331 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
54332 struct usb_config_descriptor *config =
54333 (struct usb_config_descriptor *)dev->rawdescriptors[i];
54334- unsigned int length = le16_to_cpu(config->wTotalLength);
54335+ size_t length = le16_to_cpu(config->wTotalLength);
54336
54337 if (*ppos < pos + length) {
54338
54339 /* The descriptor may claim to be longer than it
54340 * really is. Here is the actual allocated length. */
54341- unsigned alloclen =
54342+ size_t alloclen =
54343 le16_to_cpu(dev->config[i].desc.wTotalLength);
54344
54345- len = length - (*ppos - pos);
54346+ len = length + pos - *ppos;
54347 if (len > nbytes)
54348 len = nbytes;
54349
54350 /* Simply don't write (skip over) unallocated parts */
54351 if (alloclen > (*ppos - pos)) {
54352- alloclen -= (*ppos - pos);
54353+ alloclen = alloclen + pos - *ppos;
54354 if (copy_to_user(buf,
54355 dev->rawdescriptors[i] + (*ppos - pos),
54356 min(len, alloclen))) {
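
In the devio.c read path above, the length bookkeeping is widened from unsigned int to size_t, and the remaining-bytes expressions are reassociated (length + pos - *ppos instead of length - (*ppos - pos)). The values are identical; the rewritten forms presumably keep intermediate results in range for grsecurity's size-overflow instrumentation, which traps on intermediate wraparound. The widening itself guards against silent truncation when a wider value lands in a 32-bit variable; a runnable illustration (the value is hypothetical):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t nbytes = 0x100000001UL;  /* > 4 GiB, hypothetical on LP64 */
        unsigned int narrow = nbytes;   /* silently truncates to 1       */
        size_t wide = nbytes;           /* preserved                     */

        printf("narrow=%u wide=%zu\n", narrow, wide);
        return 0;
}
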
54357diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
54358index 45a915c..09f9735 100644
54359--- a/drivers/usb/core/hcd.c
54360+++ b/drivers/usb/core/hcd.c
54361@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54362 */
54363 usb_get_urb(urb);
54364 atomic_inc(&urb->use_count);
54365- atomic_inc(&urb->dev->urbnum);
54366+ atomic_inc_unchecked(&urb->dev->urbnum);
54367 usbmon_urb_submit(&hcd->self, urb);
54368
54369 /* NOTE requirements on root-hub callers (usbfs and the hub
54370@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54371 urb->hcpriv = NULL;
54372 INIT_LIST_HEAD(&urb->urb_list);
54373 atomic_dec(&urb->use_count);
54374- atomic_dec(&urb->dev->urbnum);
54375+ atomic_dec_unchecked(&urb->dev->urbnum);
54376 if (atomic_read(&urb->reject))
54377 wake_up(&usb_kill_urb_queue);
54378 usb_put_urb(urb);
54379diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54380index d7c3d5a..2f87607 100644
54381--- a/drivers/usb/core/hub.c
54382+++ b/drivers/usb/core/hub.c
54383@@ -26,6 +26,7 @@
54384 #include <linux/mutex.h>
54385 #include <linux/random.h>
54386 #include <linux/pm_qos.h>
54387+#include <linux/grsecurity.h>
54388
54389 #include <asm/uaccess.h>
54390 #include <asm/byteorder.h>
54391@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54392 goto done;
54393 return;
54394 }
54395+
54396+ if (gr_handle_new_usb())
54397+ goto done;
54398+
54399 if (hub_is_superspeed(hub->hdev))
54400 unit_load = 150;
54401 else
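
The hub.c hunk wires in grsecurity's deny-new-USB feature: once the corresponding sysctl is latched, gr_handle_new_usb() returns nonzero and hub_port_connect() abandons enumeration, so devices plugged in after lock-down are never bound. The hook's contract, sketched (its real definition lives elsewhere in this patch; the flag name is illustrative):

int gr_handle_new_usb(void)
{
        if (grsec_deny_new_usb)         /* sysctl-controlled, one-way latch */
                return 1;               /* caller bails out of enumeration  */
        return 0;
}
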
54402diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54403index f368d20..0c30ac5 100644
54404--- a/drivers/usb/core/message.c
54405+++ b/drivers/usb/core/message.c
54406@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54407 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54408 * error number.
54409 */
54410-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54411+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54412 __u8 requesttype, __u16 value, __u16 index, void *data,
54413 __u16 size, int timeout)
54414 {
54415@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54416 * If successful, 0. Otherwise a negative error number. The number of actual
54417 * bytes transferred will be stored in the @actual_length parameter.
54418 */
54419-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54420+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54421 void *data, int len, int *actual_length, int timeout)
54422 {
54423 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54424@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54425 * bytes transferred will be stored in the @actual_length parameter.
54426 *
54427 */
54428-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54429+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54430 void *data, int len, int *actual_length, int timeout)
54431 {
54432 struct urb *urb;
54433diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54434index d269738..7340cd7 100644
54435--- a/drivers/usb/core/sysfs.c
54436+++ b/drivers/usb/core/sysfs.c
54437@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54438 struct usb_device *udev;
54439
54440 udev = to_usb_device(dev);
54441- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54442+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54443 }
54444 static DEVICE_ATTR_RO(urbnum);
54445
54446diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54447index b1fb9ae..4224885 100644
54448--- a/drivers/usb/core/usb.c
54449+++ b/drivers/usb/core/usb.c
54450@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54451 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54452 dev->state = USB_STATE_ATTACHED;
54453 dev->lpm_disable_count = 1;
54454- atomic_set(&dev->urbnum, 0);
54455+ atomic_set_unchecked(&dev->urbnum, 0);
54456
54457 INIT_LIST_HEAD(&dev->ep0.urb_list);
54458 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54459diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54460index 8cfc319..4868255 100644
54461--- a/drivers/usb/early/ehci-dbgp.c
54462+++ b/drivers/usb/early/ehci-dbgp.c
54463@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54464
54465 #ifdef CONFIG_KGDB
54466 static struct kgdb_io kgdbdbgp_io_ops;
54467-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54468+static struct kgdb_io kgdbdbgp_io_ops_console;
54469+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54470 #else
54471 #define dbgp_kgdb_mode (0)
54472 #endif
54473@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54474 .write_char = kgdbdbgp_write_char,
54475 };
54476
54477+static struct kgdb_io kgdbdbgp_io_ops_console = {
54478+ .name = "kgdbdbgp",
54479+ .read_char = kgdbdbgp_read_char,
54480+ .write_char = kgdbdbgp_write_char,
54481+ .is_console = 1
54482+};
54483+
54484 static int kgdbdbgp_wait_time;
54485
54486 static int __init kgdbdbgp_parse_config(char *str)
54487@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54488 ptr++;
54489 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54490 }
54491- kgdb_register_io_module(&kgdbdbgp_io_ops);
54492- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54493+ if (early_dbgp_console.index != -1)
54494+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54495+ else
54496+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54497
54498 return 0;
54499 }
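
The ehci-dbgp change appears to be constification fallout: with kgdb_io ops tables read-only, the code can no longer flip .is_console after registration, so a second, console-flavored ops table is defined up front and the appropriate one is chosen at registration time. The shape of the fix (have_console is a placeholder for the early-console test):

static struct kgdb_io io_ops         = { .name = "dbgp", /* ... */ };
static struct kgdb_io io_ops_console = { .name = "dbgp", /* ... */ .is_console = 1 };

if (have_console)
        kgdb_register_io_module(&io_ops_console);
else
        kgdb_register_io_module(&io_ops);
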
54500diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54501index 9719abf..789d5d9 100644
54502--- a/drivers/usb/gadget/function/f_uac1.c
54503+++ b/drivers/usb/gadget/function/f_uac1.c
54504@@ -14,6 +14,7 @@
54505 #include <linux/module.h>
54506 #include <linux/device.h>
54507 #include <linux/atomic.h>
54508+#include <linux/module.h>
54509
54510 #include "u_uac1.h"
54511
54512diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54513index 491082a..dfd7d17 100644
54514--- a/drivers/usb/gadget/function/u_serial.c
54515+++ b/drivers/usb/gadget/function/u_serial.c
54516@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54517 spin_lock_irq(&port->port_lock);
54518
54519 /* already open? Great. */
54520- if (port->port.count) {
54521+ if (atomic_read(&port->port.count)) {
54522 status = 0;
54523- port->port.count++;
54524+ atomic_inc(&port->port.count);
54525
54526 /* currently opening/closing? wait ... */
54527 } else if (port->openclose) {
54528@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54529 tty->driver_data = port;
54530 port->port.tty = tty;
54531
54532- port->port.count = 1;
54533+ atomic_set(&port->port.count, 1);
54534 port->openclose = false;
54535
54536 /* if connected, start the I/O stream */
54537@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54538
54539 spin_lock_irq(&port->port_lock);
54540
54541- if (port->port.count != 1) {
54542- if (port->port.count == 0)
54543+ if (atomic_read(&port->port.count) != 1) {
54544+ if (atomic_read(&port->port.count) == 0)
54545 WARN_ON(1);
54546 else
54547- --port->port.count;
54548+ atomic_dec(&port->port.count);
54549 goto exit;
54550 }
54551
54552@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54553 * and sleep if necessary
54554 */
54555 port->openclose = true;
54556- port->port.count = 0;
54557+ atomic_set(&port->port.count, 0);
54558
54559 gser = port->port_usb;
54560 if (gser && gser->disconnect)
54561@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54562 int cond;
54563
54564 spin_lock_irq(&port->port_lock);
54565- cond = (port->port.count == 0) && !port->openclose;
54566+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54567 spin_unlock_irq(&port->port_lock);
54568 return cond;
54569 }
54570@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54571 /* if it's already open, start I/O ... and notify the serial
54572 * protocol about open/close status (connect/disconnect).
54573 */
54574- if (port->port.count) {
54575+ if (atomic_read(&port->port.count)) {
54576 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54577 gs_start_io(port);
54578 if (gser->connect)
54579@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54580
54581 port->port_usb = NULL;
54582 gser->ioport = NULL;
54583- if (port->port.count > 0 || port->openclose) {
54584+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54585 wake_up_interruptible(&port->drain_wait);
54586 if (port->port.tty)
54587 tty_hangup(port->port.tty);
54588@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54589
54590 /* finally, free any unused/unusable I/O buffers */
54591 spin_lock_irqsave(&port->port_lock, flags);
54592- if (port->port.count == 0 && !port->openclose)
54593+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54594 gs_buf_free(&port->port_write_buf);
54595 gs_free_requests(gser->out, &port->read_pool, NULL);
54596 gs_free_requests(gser->out, &port->read_queue, NULL);
54597diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54598index c78c841..48fd281 100644
54599--- a/drivers/usb/gadget/function/u_uac1.c
54600+++ b/drivers/usb/gadget/function/u_uac1.c
54601@@ -17,6 +17,7 @@
54602 #include <linux/ctype.h>
54603 #include <linux/random.h>
54604 #include <linux/syscalls.h>
54605+#include <linux/module.h>
54606
54607 #include "u_uac1.h"
54608
54609diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54610index 87cf86f..3de9809 100644
54611--- a/drivers/usb/host/ehci-hub.c
54612+++ b/drivers/usb/host/ehci-hub.c
54613@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54614 urb->transfer_flags = URB_DIR_IN;
54615 usb_get_urb(urb);
54616 atomic_inc(&urb->use_count);
54617- atomic_inc(&urb->dev->urbnum);
54618+ atomic_inc_unchecked(&urb->dev->urbnum);
54619 urb->setup_dma = dma_map_single(
54620 hcd->self.controller,
54621 urb->setup_packet,
54622@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54623 urb->status = -EINPROGRESS;
54624 usb_get_urb(urb);
54625 atomic_inc(&urb->use_count);
54626- atomic_inc(&urb->dev->urbnum);
54627+ atomic_inc_unchecked(&urb->dev->urbnum);
54628 retval = submit_single_step_set_feature(hcd, urb, 0);
54629 if (!retval && !wait_for_completion_timeout(&done,
54630 msecs_to_jiffies(2000))) {
54631diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54632index 1db0626..4948782 100644
54633--- a/drivers/usb/host/hwa-hc.c
54634+++ b/drivers/usb/host/hwa-hc.c
54635@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54636 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54637 struct wahc *wa = &hwahc->wa;
54638 struct device *dev = &wa->usb_iface->dev;
54639- u8 mas_le[UWB_NUM_MAS/8];
54640+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54641+
54642+ if (mas_le == NULL)
54643+ return -ENOMEM;
54644
54645 /* Set the stream index */
54646 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54647@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54648 WUSB_REQ_SET_WUSB_MAS,
54649 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54650 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54651- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54652+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54653 if (result < 0)
54654 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54655 out:
54656+ kfree(mas_le);
54657+
54658 return result;
54659 }
54660
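
In __hwahc_op_bwa_set() the MAS bitmap moves from the stack to kmalloc(), and the transfer length becomes UWB_NUM_MAS/8 instead of a hard-coded 32. Buffers handed to usb_control_msg() may be used for DMA, and DMA to on-stack memory is unreliable (and outright invalid with vmapped stacks), hence the heap allocation with an explicit failure path. The pattern in isolation (BUF_LEN and the call arguments are placeholders):

u8 *buf = kmalloc(BUF_LEN, GFP_KERNEL); /* heap memory is DMA-safe */

if (!buf)
        return -ENOMEM;
result = usb_control_msg(udev, pipe, req, reqtype, value, index,
                         buf, BUF_LEN, timeout);  /* may DMA into buf */
kfree(buf);
return result;
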
54661diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54662index b3d245e..99549ed 100644
54663--- a/drivers/usb/misc/appledisplay.c
54664+++ b/drivers/usb/misc/appledisplay.c
54665@@ -84,7 +84,7 @@ struct appledisplay {
54666 struct mutex sysfslock; /* concurrent read and write */
54667 };
54668
54669-static atomic_t count_displays = ATOMIC_INIT(0);
54670+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54671 static struct workqueue_struct *wq;
54672
54673 static void appledisplay_complete(struct urb *urb)
54674@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54675
54676 /* Register backlight device */
54677 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54678- atomic_inc_return(&count_displays) - 1);
54679+ atomic_inc_return_unchecked(&count_displays) - 1);
54680 memset(&props, 0, sizeof(struct backlight_properties));
54681 props.type = BACKLIGHT_RAW;
54682 props.max_brightness = 0xff;
54683diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54684index 3806e70..55c508b 100644
54685--- a/drivers/usb/serial/console.c
54686+++ b/drivers/usb/serial/console.c
54687@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
54688
54689 info->port = port;
54690
54691- ++port->port.count;
54692+ atomic_inc(&port->port.count);
54693 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54694 if (serial->type->set_termios) {
54695 /*
54696@@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
54697 }
54698 /* Now that any required fake tty operations are completed restore
54699 * the tty port count */
54700- --port->port.count;
54701+ atomic_dec(&port->port.count);
54702 /* The console is special in terms of closing the device so
54703 * indicate this port is now acting as a system console. */
54704 port->port.console = 1;
54705@@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
54706 put_tty:
54707 tty_kref_put(tty);
54708 reset_open_count:
54709- port->port.count = 0;
54710+ atomic_set(&port->port.count, 0);
54711 usb_autopm_put_interface(serial->interface);
54712 error_get_interface:
54713 usb_serial_put(serial);
54714@@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
54715 static void usb_console_write(struct console *co,
54716 const char *buf, unsigned count)
54717 {
54718- static struct usbcons_info *info = &usbcons_info;
54719+ struct usbcons_info *info = &usbcons_info;
54720 struct usb_serial_port *port = info->port;
54721 struct usb_serial *serial;
54722 int retval = -ENODEV;
54723diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54724index 307e339..6aa97cb 100644
54725--- a/drivers/usb/storage/usb.h
54726+++ b/drivers/usb/storage/usb.h
54727@@ -63,7 +63,7 @@ struct us_unusual_dev {
54728 __u8 useProtocol;
54729 __u8 useTransport;
54730 int (*initFunction)(struct us_data *);
54731-};
54732+} __do_const;
54733
54734
54735 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54736diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54737index a863a98..d272795 100644
54738--- a/drivers/usb/usbip/vhci.h
54739+++ b/drivers/usb/usbip/vhci.h
54740@@ -83,7 +83,7 @@ struct vhci_hcd {
54741 unsigned resuming:1;
54742 unsigned long re_timeout;
54743
54744- atomic_t seqnum;
54745+ atomic_unchecked_t seqnum;
54746
54747 /*
54748 * NOTE:
54749diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54750index 11f6f61..1087910 100644
54751--- a/drivers/usb/usbip/vhci_hcd.c
54752+++ b/drivers/usb/usbip/vhci_hcd.c
54753@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
54754
54755 spin_lock(&vdev->priv_lock);
54756
54757- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54758+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54759 if (priv->seqnum == 0xffff)
54760 dev_info(&urb->dev->dev, "seqnum max\n");
54761
54762@@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54763 return -ENOMEM;
54764 }
54765
54766- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54767+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54768 if (unlink->seqnum == 0xffff)
54769 pr_info("seqnum max\n");
54770
54771@@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
54772 vdev->rhport = rhport;
54773 }
54774
54775- atomic_set(&vhci->seqnum, 0);
54776+ atomic_set_unchecked(&vhci->seqnum, 0);
54777 spin_lock_init(&vhci->lock);
54778
54779 hcd->power_budget = 0; /* no limit */
54780diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54781index 00e4a54..d676f85 100644
54782--- a/drivers/usb/usbip/vhci_rx.c
54783+++ b/drivers/usb/usbip/vhci_rx.c
54784@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54785 if (!urb) {
54786 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54787 pr_info("max seqnum %d\n",
54788- atomic_read(&the_controller->seqnum));
54789+ atomic_read_unchecked(&the_controller->seqnum));
54790 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54791 return;
54792 }
54793diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54794index edc7267..9f65ce2 100644
54795--- a/drivers/usb/wusbcore/wa-hc.h
54796+++ b/drivers/usb/wusbcore/wa-hc.h
54797@@ -240,7 +240,7 @@ struct wahc {
54798 spinlock_t xfer_list_lock;
54799 struct work_struct xfer_enqueue_work;
54800 struct work_struct xfer_error_work;
54801- atomic_t xfer_id_count;
54802+ atomic_unchecked_t xfer_id_count;
54803
54804 kernel_ulong_t quirks;
54805 };
54806@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54807 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54808 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54809 wa->dto_in_use = 0;
54810- atomic_set(&wa->xfer_id_count, 1);
54811+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54812 /* init the buf in URBs */
54813 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54814 usb_init_urb(&(wa->buf_in_urbs[index]));
54815diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54816index 69af4fd..da390d7 100644
54817--- a/drivers/usb/wusbcore/wa-xfer.c
54818+++ b/drivers/usb/wusbcore/wa-xfer.c
54819@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54820 */
54821 static void wa_xfer_id_init(struct wa_xfer *xfer)
54822 {
54823- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54824+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54825 }
54826
54827 /* Return the xfer's ID. */
54828diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54829index 4cde855..b23d05d 100644
54830--- a/drivers/vfio/vfio.c
54831+++ b/drivers/vfio/vfio.c
54832@@ -518,7 +518,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54833 return 0;
54834
54835 /* TODO Prevent device auto probing */
54836- WARN("Device %s added to live group %d!\n", dev_name(dev),
54837+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54838 iommu_group_id(group->iommu_group));
54839
54840 return 0;
54841diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54842index 3bb02c6..a01ff38 100644
54843--- a/drivers/vhost/vringh.c
54844+++ b/drivers/vhost/vringh.c
54845@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54846 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54847 {
54848 __virtio16 v = 0;
54849- int rc = get_user(v, (__force __virtio16 __user *)p);
54850+ int rc = get_user(v, (__force_user __virtio16 *)p);
54851 *val = vringh16_to_cpu(vrh, v);
54852 return rc;
54853 }
54854@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54855 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54856 {
54857 __virtio16 v = cpu_to_vringh16(vrh, val);
54858- return put_user(v, (__force __virtio16 __user *)p);
54859+ return put_user(v, (__force_user __virtio16 *)p);
54860 }
54861
54862 static inline int copydesc_user(void *dst, const void *src, size_t len)
54863 {
54864- return copy_from_user(dst, (__force void __user *)src, len) ?
54865+ return copy_from_user(dst, (void __force_user *)src, len) ?
54866 -EFAULT : 0;
54867 }
54868
54869@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54870 const struct vring_used_elem *src,
54871 unsigned int num)
54872 {
54873- return copy_to_user((__force void __user *)dst, src,
54874+ return copy_to_user((void __force_user *)dst, src,
54875 sizeof(*dst) * num) ? -EFAULT : 0;
54876 }
54877
54878 static inline int xfer_from_user(void *src, void *dst, size_t len)
54879 {
54880- return copy_from_user(dst, (__force void __user *)src, len) ?
54881+ return copy_from_user(dst, (void __force_user *)src, len) ?
54882 -EFAULT : 0;
54883 }
54884
54885 static inline int xfer_to_user(void *dst, void *src, size_t len)
54886 {
54887- return copy_to_user((__force void __user *)dst, src, len) ?
54888+ return copy_to_user((void __force_user *)dst, src, len) ?
54889 -EFAULT : 0;
54890 }
54891
54892@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54893 vrh->last_used_idx = 0;
54894 vrh->vring.num = num;
54895 /* vring expects kernel addresses, but only used via accessors. */
54896- vrh->vring.desc = (__force struct vring_desc *)desc;
54897- vrh->vring.avail = (__force struct vring_avail *)avail;
54898- vrh->vring.used = (__force struct vring_used *)used;
54899+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54900+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54901+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54902 return 0;
54903 }
54904 EXPORT_SYMBOL(vringh_init_user);
54905@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54906
54907 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54908 {
54909- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54910+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54911 return 0;
54912 }
54913
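
The vringh hunks replace open-coded (__force ... __user *) casts with PaX's __force_user and __force_kernel shorthands. With separated user and kernel address spaces these casts are no longer mere static-checker annotations, so the patch funnels them through dedicated macros that both force the cast and retag the pointer's address space. Roughly how such annotations look for sparse (illustrative definitions, not the patch's exact compiler.h):

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __kernel       __attribute__((address_space(0)))
# define __force        __attribute__((force))
#else
# define __user
# define __kernel
# define __force
#endif

#define __force_user    __force __user          /* cast + retag as user   */
#define __force_kernel  __force __kernel        /* cast + retag as kernel */
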
54914diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54915index 84a110a..96312c3 100644
54916--- a/drivers/video/backlight/kb3886_bl.c
54917+++ b/drivers/video/backlight/kb3886_bl.c
54918@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54919 static unsigned long kb3886bl_flags;
54920 #define KB3886BL_SUSPENDED 0x01
54921
54922-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54923+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54924 {
54925 .ident = "Sahara Touch-iT",
54926 .matches = {
54927diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54928index 1b0b233..6f34c2c 100644
54929--- a/drivers/video/fbdev/arcfb.c
54930+++ b/drivers/video/fbdev/arcfb.c
54931@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54932 return -ENOSPC;
54933
54934 err = 0;
54935- if ((count + p) > fbmemlength) {
54936+ if (count > (fbmemlength - p)) {
54937 count = fbmemlength - p;
54938 err = -ENOSPC;
54939 }
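
The arcfb bound check is rewritten from (count + p) > fbmemlength to count > fbmemlength - p: the original sum can wrap around for large count and slip past the check, while the subtraction form cannot overflow given that p <= fbmemlength on this path. A runnable demonstration of the difference:

#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned long fbmemlength = 4096, p = 16;
        unsigned long count = ULONG_MAX - 8;    /* oversized request */

        /* original form: count + p wraps to 7, so the check passes */
        printf("old rejects: %d\n", (count + p) > fbmemlength);
        /* patched form: no wrap possible since p <= fbmemlength    */
        printf("new rejects: %d\n", count > fbmemlength - p);
        return 0;
}
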
54940diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54941index aedf2fb..47c9aca 100644
54942--- a/drivers/video/fbdev/aty/aty128fb.c
54943+++ b/drivers/video/fbdev/aty/aty128fb.c
54944@@ -149,7 +149,7 @@ enum {
54945 };
54946
54947 /* Must match above enum */
54948-static char * const r128_family[] = {
54949+static const char * const r128_family[] = {
54950 "AGP",
54951 "PCI",
54952 "PRO AGP",
54953diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54954index 8789e48..698fe4c 100644
54955--- a/drivers/video/fbdev/aty/atyfb_base.c
54956+++ b/drivers/video/fbdev/aty/atyfb_base.c
54957@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54958 par->accel_flags = var->accel_flags; /* hack */
54959
54960 if (var->accel_flags) {
54961- info->fbops->fb_sync = atyfb_sync;
54962+ pax_open_kernel();
54963+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54964+ pax_close_kernel();
54965 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54966 } else {
54967- info->fbops->fb_sync = NULL;
54968+ pax_open_kernel();
54969+ *(void **)&info->fbops->fb_sync = NULL;
54970+ pax_close_kernel();
54971 info->flags |= FBINFO_HWACCEL_DISABLED;
54972 }
54973
54974diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54975index 2fa0317..4983f2a 100644
54976--- a/drivers/video/fbdev/aty/mach64_cursor.c
54977+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54978@@ -8,6 +8,7 @@
54979 #include "../core/fb_draw.h"
54980
54981 #include <asm/io.h>
54982+#include <asm/pgtable.h>
54983
54984 #ifdef __sparc__
54985 #include <asm/fbio.h>
54986@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54987 info->sprite.buf_align = 16; /* and 64 lines tall. */
54988 info->sprite.flags = FB_PIXMAP_IO;
54989
54990- info->fbops->fb_cursor = atyfb_cursor;
54991+ pax_open_kernel();
54992+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54993+ pax_close_kernel();
54994
54995 return 0;
54996 }
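
The atyfb and mach64 hunks introduce the standard PaX pattern for patching constified ops tables at runtime: fb_ops lives in read-only memory under KERNEXEC/constification, so each pointer store is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift write protection, and the *(void **)& cast strips the const qualifier for that single assignment. The idiom, reduced to its core (my_sync is a placeholder):

pax_open_kernel();                              /* temporarily writable */
*(void **)&info->fbops->fb_sync = my_sync;      /* const-stripped store */
pax_close_kernel();                             /* read-only again      */

The same bracketing recurs in the fb_defio, nvidia, mb862xx, omap, s1d13xxx, smscufx, udlfb, uvesafb and vesafb hunks below.
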
54997diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54998index d6cab1f..112f680 100644
54999--- a/drivers/video/fbdev/core/fb_defio.c
55000+++ b/drivers/video/fbdev/core/fb_defio.c
55001@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
55002
55003 BUG_ON(!fbdefio);
55004 mutex_init(&fbdefio->lock);
55005- info->fbops->fb_mmap = fb_deferred_io_mmap;
55006+ pax_open_kernel();
55007+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55008+ pax_close_kernel();
55009 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55010 INIT_LIST_HEAD(&fbdefio->pagelist);
55011 if (fbdefio->delay == 0) /* set a default of 1 s */
55012@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55013 page->mapping = NULL;
55014 }
55015
55016- info->fbops->fb_mmap = NULL;
55017+ *(void **)&info->fbops->fb_mmap = NULL;
55018 mutex_destroy(&fbdefio->lock);
55019 }
55020 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55021diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55022index 0705d88..d9429bf 100644
55023--- a/drivers/video/fbdev/core/fbmem.c
55024+++ b/drivers/video/fbdev/core/fbmem.c
55025@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55026 __u32 data;
55027 int err;
55028
55029- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55030+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55031
55032 data = (__u32) (unsigned long) fix->smem_start;
55033 err |= put_user(data, &fix32->smem_start);
55034diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55035index 4254336..282567e 100644
55036--- a/drivers/video/fbdev/hyperv_fb.c
55037+++ b/drivers/video/fbdev/hyperv_fb.c
55038@@ -240,7 +240,7 @@ static uint screen_fb_size;
55039 static inline int synthvid_send(struct hv_device *hdev,
55040 struct synthvid_msg *msg)
55041 {
55042- static atomic64_t request_id = ATOMIC64_INIT(0);
55043+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55044 int ret;
55045
55046 msg->pipe_hdr.type = PIPE_MSG_DATA;
55047@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55048
55049 ret = vmbus_sendpacket(hdev->channel, msg,
55050 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55051- atomic64_inc_return(&request_id),
55052+ atomic64_inc_return_unchecked(&request_id),
55053 VM_PKT_DATA_INBAND, 0);
55054
55055 if (ret)
55056diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55057index 7672d2e..b56437f 100644
55058--- a/drivers/video/fbdev/i810/i810_accel.c
55059+++ b/drivers/video/fbdev/i810/i810_accel.c
55060@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55061 }
55062 }
55063 printk("ringbuffer lockup!!!\n");
55064+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55065 i810_report_error(mmio);
55066 par->dev_flags |= LOCKUP;
55067 info->pixmap.scan_align = 1;
55068diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55069index a01147f..5d896f8 100644
55070--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55071+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55072@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55073
55074 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55075 struct matrox_switch matrox_mystique = {
55076- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55077+ .preinit = MGA1064_preinit,
55078+ .reset = MGA1064_reset,
55079+ .init = MGA1064_init,
55080+ .restore = MGA1064_restore,
55081 };
55082 EXPORT_SYMBOL(matrox_mystique);
55083 #endif
55084
55085 #ifdef CONFIG_FB_MATROX_G
55086 struct matrox_switch matrox_G100 = {
55087- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55088+ .preinit = MGAG100_preinit,
55089+ .reset = MGAG100_reset,
55090+ .init = MGAG100_init,
55091+ .restore = MGAG100_restore,
55092 };
55093 EXPORT_SYMBOL(matrox_G100);
55094 #endif
55095diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55096index 195ad7c..09743fc 100644
55097--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55098+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55099@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55100 }
55101
55102 struct matrox_switch matrox_millennium = {
55103- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55104+ .preinit = Ti3026_preinit,
55105+ .reset = Ti3026_reset,
55106+ .init = Ti3026_init,
55107+ .restore = Ti3026_restore
55108 };
55109 EXPORT_SYMBOL(matrox_millennium);
55110 #endif
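
The matroxfb hunks convert positional struct matrox_switch initializers to designated ones. Beyond readability, name-based initialization is also what grsecurity's structure-layout randomization needs: with field order shuffled at build time, only designated initializers still bind each function to the right slot. Minimal contrast (my_preinit/my_reset are placeholders):

struct ops { int (*preinit)(void); void (*reset)(void); };

static struct ops positional = { my_preinit, my_reset };  /* breaks if fields reorder */
static struct ops designated = {
        .preinit = my_preinit,          /* bound by name, layout-proof */
        .reset   = my_reset,
};
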
55111diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55112index fe92eed..106e085 100644
55113--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55114+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55115@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55116 struct mb862xxfb_par *par = info->par;
55117
55118 if (info->var.bits_per_pixel == 32) {
55119- info->fbops->fb_fillrect = cfb_fillrect;
55120- info->fbops->fb_copyarea = cfb_copyarea;
55121- info->fbops->fb_imageblit = cfb_imageblit;
55122+ pax_open_kernel();
55123+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55124+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55125+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55126+ pax_close_kernel();
55127 } else {
55128 outreg(disp, GC_L0EM, 3);
55129- info->fbops->fb_fillrect = mb86290fb_fillrect;
55130- info->fbops->fb_copyarea = mb86290fb_copyarea;
55131- info->fbops->fb_imageblit = mb86290fb_imageblit;
55132+ pax_open_kernel();
55133+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55134+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55135+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55136+ pax_close_kernel();
55137 }
55138 outreg(draw, GDC_REG_DRAW_BASE, 0);
55139 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55140diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
55141index def0412..fed6529 100644
55142--- a/drivers/video/fbdev/nvidia/nvidia.c
55143+++ b/drivers/video/fbdev/nvidia/nvidia.c
55144@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55145 info->fix.line_length = (info->var.xres_virtual *
55146 info->var.bits_per_pixel) >> 3;
55147 if (info->var.accel_flags) {
55148- info->fbops->fb_imageblit = nvidiafb_imageblit;
55149- info->fbops->fb_fillrect = nvidiafb_fillrect;
55150- info->fbops->fb_copyarea = nvidiafb_copyarea;
55151- info->fbops->fb_sync = nvidiafb_sync;
55152+ pax_open_kernel();
55153+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55154+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55155+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55156+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55157+ pax_close_kernel();
55158 info->pixmap.scan_align = 4;
55159 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55160 info->flags |= FBINFO_READS_FAST;
55161 NVResetGraphics(info);
55162 } else {
55163- info->fbops->fb_imageblit = cfb_imageblit;
55164- info->fbops->fb_fillrect = cfb_fillrect;
55165- info->fbops->fb_copyarea = cfb_copyarea;
55166- info->fbops->fb_sync = NULL;
55167+ pax_open_kernel();
55168+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55169+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55170+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55171+ *(void **)&info->fbops->fb_sync = NULL;
55172+ pax_close_kernel();
55173 info->pixmap.scan_align = 1;
55174 info->flags |= FBINFO_HWACCEL_DISABLED;
55175 info->flags &= ~FBINFO_READS_FAST;
55176@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55177 info->pixmap.size = 8 * 1024;
55178 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55179
55180- if (!hwcur)
55181- info->fbops->fb_cursor = NULL;
55182+ if (!hwcur) {
55183+ pax_open_kernel();
55184+ *(void **)&info->fbops->fb_cursor = NULL;
55185+ pax_close_kernel();
55186+ }
55187
55188 info->var.accel_flags = (!noaccel);
55189
55190diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
55191index 2412a0d..294215b 100644
55192--- a/drivers/video/fbdev/omap2/dss/display.c
55193+++ b/drivers/video/fbdev/omap2/dss/display.c
55194@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55195 if (dssdev->name == NULL)
55196 dssdev->name = dssdev->alias;
55197
55198+ pax_open_kernel();
55199 if (drv && drv->get_resolution == NULL)
55200- drv->get_resolution = omapdss_default_get_resolution;
55201+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55202 if (drv && drv->get_recommended_bpp == NULL)
55203- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55204+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55205 if (drv && drv->get_timings == NULL)
55206- drv->get_timings = omapdss_default_get_timings;
55207+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55208+ pax_close_kernel();
55209
55210 mutex_lock(&panel_list_mutex);
55211 list_add_tail(&dssdev->panel_list, &panel_list);
55212diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
55213index 83433cb..71e9b98 100644
55214--- a/drivers/video/fbdev/s1d13xxxfb.c
55215+++ b/drivers/video/fbdev/s1d13xxxfb.c
55216@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55217
55218 switch(prod_id) {
55219 case S1D13506_PROD_ID: /* activate acceleration */
55220- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55221- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55222+ pax_open_kernel();
55223+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55224+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55225+ pax_close_kernel();
55226 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55227 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55228 break;
55229diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55230index d3013cd..95b8285 100644
55231--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55232+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55233@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55234 }
55235
55236 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55237- lcdc_sys_write_index,
55238- lcdc_sys_write_data,
55239- lcdc_sys_read_data,
55240+ .write_index = lcdc_sys_write_index,
55241+ .write_data = lcdc_sys_write_data,
55242+ .read_data = lcdc_sys_read_data,
55243 };
55244
55245 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55246diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55247index 9279e5f..d5f5276 100644
55248--- a/drivers/video/fbdev/smscufx.c
55249+++ b/drivers/video/fbdev/smscufx.c
55250@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55251 fb_deferred_io_cleanup(info);
55252 kfree(info->fbdefio);
55253 info->fbdefio = NULL;
55254- info->fbops->fb_mmap = ufx_ops_mmap;
55255+ pax_open_kernel();
55256+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55257+ pax_close_kernel();
55258 }
55259
55260 pr_debug("released /dev/fb%d user=%d count=%d",
55261diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
55262index ff2b873..626a8d5 100644
55263--- a/drivers/video/fbdev/udlfb.c
55264+++ b/drivers/video/fbdev/udlfb.c
55265@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55266 dlfb_urb_completion(urb);
55267
55268 error:
55269- atomic_add(bytes_sent, &dev->bytes_sent);
55270- atomic_add(bytes_identical, &dev->bytes_identical);
55271- atomic_add(width*height*2, &dev->bytes_rendered);
55272+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55273+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55274+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55275 end_cycles = get_cycles();
55276- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55277+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55278 >> 10)), /* Kcycles */
55279 &dev->cpu_kcycles_used);
55280
55281@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55282 dlfb_urb_completion(urb);
55283
55284 error:
55285- atomic_add(bytes_sent, &dev->bytes_sent);
55286- atomic_add(bytes_identical, &dev->bytes_identical);
55287- atomic_add(bytes_rendered, &dev->bytes_rendered);
55288+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55289+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55290+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55291 end_cycles = get_cycles();
55292- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55293+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55294 >> 10)), /* Kcycles */
55295 &dev->cpu_kcycles_used);
55296 }
55297@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55298 fb_deferred_io_cleanup(info);
55299 kfree(info->fbdefio);
55300 info->fbdefio = NULL;
55301- info->fbops->fb_mmap = dlfb_ops_mmap;
55302+ pax_open_kernel();
55303+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55304+ pax_close_kernel();
55305 }
55306
55307 pr_warn("released /dev/fb%d user=%d count=%d\n",
55308@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55309 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55310 struct dlfb_data *dev = fb_info->par;
55311 return snprintf(buf, PAGE_SIZE, "%u\n",
55312- atomic_read(&dev->bytes_rendered));
55313+ atomic_read_unchecked(&dev->bytes_rendered));
55314 }
55315
55316 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55317@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55318 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55319 struct dlfb_data *dev = fb_info->par;
55320 return snprintf(buf, PAGE_SIZE, "%u\n",
55321- atomic_read(&dev->bytes_identical));
55322+ atomic_read_unchecked(&dev->bytes_identical));
55323 }
55324
55325 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55326@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55327 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55328 struct dlfb_data *dev = fb_info->par;
55329 return snprintf(buf, PAGE_SIZE, "%u\n",
55330- atomic_read(&dev->bytes_sent));
55331+ atomic_read_unchecked(&dev->bytes_sent));
55332 }
55333
55334 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55335@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55336 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55337 struct dlfb_data *dev = fb_info->par;
55338 return snprintf(buf, PAGE_SIZE, "%u\n",
55339- atomic_read(&dev->cpu_kcycles_used));
55340+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55341 }
55342
55343 static ssize_t edid_show(
55344@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55345 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55346 struct dlfb_data *dev = fb_info->par;
55347
55348- atomic_set(&dev->bytes_rendered, 0);
55349- atomic_set(&dev->bytes_identical, 0);
55350- atomic_set(&dev->bytes_sent, 0);
55351- atomic_set(&dev->cpu_kcycles_used, 0);
55352+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55353+ atomic_set_unchecked(&dev->bytes_identical, 0);
55354+ atomic_set_unchecked(&dev->bytes_sent, 0);
55355+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55356
55357 return count;
55358 }
55359diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55360index d32d1c4..46722e6 100644
55361--- a/drivers/video/fbdev/uvesafb.c
55362+++ b/drivers/video/fbdev/uvesafb.c
55363@@ -19,6 +19,7 @@
55364 #include <linux/io.h>
55365 #include <linux/mutex.h>
55366 #include <linux/slab.h>
55367+#include <linux/moduleloader.h>
55368 #include <video/edid.h>
55369 #include <video/uvesafb.h>
55370 #ifdef CONFIG_X86
55371@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55372 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55373 par->pmi_setpal = par->ypan = 0;
55374 } else {
55375+
55376+#ifdef CONFIG_PAX_KERNEXEC
55377+#ifdef CONFIG_MODULES
55378+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55379+#endif
55380+ if (!par->pmi_code) {
55381+ par->pmi_setpal = par->ypan = 0;
55382+ return 0;
55383+ }
55384+#endif
55385+
55386 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55387 + task->t.regs.edi);
55388+
55389+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55390+ pax_open_kernel();
55391+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55392+ pax_close_kernel();
55393+
55394+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55395+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55396+#else
55397 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55398 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55399+#endif
55400+
55401 printk(KERN_INFO "uvesafb: protected mode interface info at "
55402 "%04x:%04x\n",
55403 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55404@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55405 par->ypan = ypan;
55406
55407 if (par->pmi_setpal || par->ypan) {
55408+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55409 if (__supported_pte_mask & _PAGE_NX) {
55410 par->pmi_setpal = par->ypan = 0;
55411 printk(KERN_WARNING "uvesafb: NX protection is active, "
55412 "better not use the PMI.\n");
55413- } else {
55414+ } else
55415+#endif
55416 uvesafb_vbe_getpmi(task, par);
55417- }
55418 }
55419 #else
55420 /* The protected mode interface is not available on non-x86. */
55421@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55422 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55423
55424 /* Disable blanking if the user requested so. */
55425- if (!blank)
55426- info->fbops->fb_blank = NULL;
55427+ if (!blank) {
55428+ pax_open_kernel();
55429+ *(void **)&info->fbops->fb_blank = NULL;
55430+ pax_close_kernel();
55431+ }
55432
55433 /*
55434 * Find out how much IO memory is required for the mode with
55435@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55436 info->flags = FBINFO_FLAG_DEFAULT |
55437 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55438
55439- if (!par->ypan)
55440- info->fbops->fb_pan_display = NULL;
55441+ if (!par->ypan) {
55442+ pax_open_kernel();
55443+ *(void **)&info->fbops->fb_pan_display = NULL;
55444+ pax_close_kernel();
55445+ }
55446 }
55447
55448 static void uvesafb_init_mtrr(struct fb_info *info)
55449@@ -1786,6 +1816,11 @@ out_mode:
55450 out:
55451 kfree(par->vbe_modes);
55452
55453+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55454+ if (par->pmi_code)
55455+ module_memfree_exec(par->pmi_code);
55456+#endif
55457+
55458 framebuffer_release(info);
55459 return err;
55460 }
55461@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55462 kfree(par->vbe_state_orig);
55463 kfree(par->vbe_state_saved);
55464
55465+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55466+ if (par->pmi_code)
55467+ module_memfree_exec(par->pmi_code);
55468+#endif
55469+
55470 framebuffer_release(info);
55471 }
55472 return 0;
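
uvesafb's protected-mode interface handling gets the full KERNEXEC treatment: the BIOS-provided PMI code can no longer be executed in place once kernel pages lose RWX, so it is copied into executable module space obtained from module_alloc_exec(), a PaX helper, with the entry points translated through ktva_ktla(); module_memfree_exec() releases it on the error and removal paths. The sequence, condensed (size and the offsets are placeholders for the values read from the PMI table):

pmi_code = module_alloc_exec(size);     /* executable allocation (PaX)      */
if (!pmi_code)
        return 0;                       /* PMI disabled, driver still works */

pax_open_kernel();
memcpy(pmi_code, pmi_base, size);       /* copy the BIOS-provided code      */
pax_close_kernel();

pmi_start = ktva_ktla(pmi_code + start_off);    /* executable alias */
pmi_pal   = ktva_ktla(pmi_code + pal_off);

vesafb.c below receives the same treatment for its PMI entry points.
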
55473diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55474index d79a0ac..2d0c3d4 100644
55475--- a/drivers/video/fbdev/vesafb.c
55476+++ b/drivers/video/fbdev/vesafb.c
55477@@ -9,6 +9,7 @@
55478 */
55479
55480 #include <linux/module.h>
55481+#include <linux/moduleloader.h>
55482 #include <linux/kernel.h>
55483 #include <linux/errno.h>
55484 #include <linux/string.h>
55485@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55486 static int vram_total; /* Set total amount of memory */
55487 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55488 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55489-static void (*pmi_start)(void) __read_mostly;
55490-static void (*pmi_pal) (void) __read_mostly;
55491+static void (*pmi_start)(void) __read_only;
55492+static void (*pmi_pal) (void) __read_only;
55493 static int depth __read_mostly;
55494 static int vga_compat __read_mostly;
55495 /* --------------------------------------------------------------------- */
55496@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55497 unsigned int size_remap;
55498 unsigned int size_total;
55499 char *option = NULL;
55500+ void *pmi_code = NULL;
55501
55502 /* ignore error return of fb_get_options */
55503 fb_get_options("vesafb", &option);
55504@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55505 size_remap = size_total;
55506 vesafb_fix.smem_len = size_remap;
55507
55508-#ifndef __i386__
55509- screen_info.vesapm_seg = 0;
55510-#endif
55511-
55512 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55513 printk(KERN_WARNING
55514 "vesafb: cannot reserve video memory at 0x%lx\n",
55515@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55516 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55517 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55518
55519+#ifdef __i386__
55520+
55521+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55522+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55523+ if (!pmi_code)
55524+#elif !defined(CONFIG_PAX_KERNEXEC)
55525+ if (0)
55526+#endif
55527+
55528+#endif
55529+ screen_info.vesapm_seg = 0;
55530+
55531 if (screen_info.vesapm_seg) {
55532- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55533- screen_info.vesapm_seg,screen_info.vesapm_off);
55534+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55535+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55536 }
55537
55538 if (screen_info.vesapm_seg < 0xc000)
55539@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55540
55541 if (ypan || pmi_setpal) {
55542 unsigned short *pmi_base;
55543+
55544 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55545- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55546- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55547+
55548+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55549+ pax_open_kernel();
55550+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55551+#else
55552+ pmi_code = pmi_base;
55553+#endif
55554+
55555+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55556+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55557+
55558+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55559+ pmi_start = ktva_ktla(pmi_start);
55560+ pmi_pal = ktva_ktla(pmi_pal);
55561+ pax_close_kernel();
55562+#endif
55563+
55564 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55565 if (pmi_base[3]) {
55566 printk(KERN_INFO "vesafb: pmi: ports = ");
55567@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55568 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55569 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55570
55571- if (!ypan)
55572- info->fbops->fb_pan_display = NULL;
55573+ if (!ypan) {
55574+ pax_open_kernel();
55575+ *(void **)&info->fbops->fb_pan_display = NULL;
55576+ pax_close_kernel();
55577+ }
55578
55579 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55580 err = -ENOMEM;
55581@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55582 fb_info(info, "%s frame buffer device\n", info->fix.id);
55583 return 0;
55584 err:
55585+
55586+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55587+ module_memfree_exec(pmi_code);
55588+#endif
55589+
55590 if (info->screen_base)
55591 iounmap(info->screen_base);
55592 framebuffer_release(info);
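As in uvesafb, vesafb now clears fb_pan_display through *(void **)&info->fbops->fb_pan_display inside a pax_open_kernel()/pax_close_kernel() pair, and pmi_start/pmi_pal move from __read_mostly to __read_only so they sit on write-protected pages. A hedged sketch of that store idiom follows; the struct and function names here are hypothetical, not taken from the hunk, and it reuses the sketch helpers shown after the uvesafb diff.

/* hypothetical example: writing one member of a function-pointer
 * struct whose page the kernel otherwise keeps read-only */
struct ops {
	void (*pan_display)(void);
};

static struct ops hw_ops;	/* imagine this object lives on a read-only page */

static void disable_pan(struct ops *ops)
{
	unsigned long cr0 = sketch_open_kernel();	/* lift write protection */

	*(void **)&ops->pan_display = NULL;	/* the single permitted store */
	sketch_close_kernel(cr0);		/* re-arm write protection */
}
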
55593diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55594index 88714ae..16c2e11 100644
55595--- a/drivers/video/fbdev/via/via_clock.h
55596+++ b/drivers/video/fbdev/via/via_clock.h
55597@@ -56,7 +56,7 @@ struct via_clock {
55598
55599 void (*set_engine_pll_state)(u8 state);
55600 void (*set_engine_pll)(struct via_pll_config config);
55601-};
55602+} __no_const;
55603
55604
55605 static inline u32 get_pll_internal_frequency(u32 ref_freq,
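struct via_clock holds nothing but function pointers, so the PaX constify gcc plugin would normally force it const at compile time; its members are assigned at run time, hence the __no_const opt-out above. A sketch of how such an annotation commonly degrades when the plugin is absent — the shape is assumed, not quoted from this patch:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))	/* attribute the plugin understands */
#else
#define __no_const				/* plain gcc: expands to nothing */
#endif

/* usage: keep a run-time-filled ops struct writable */
struct hw_clock_ops {
	void (*set_engine_pll_state)(unsigned char state);
} __no_const;
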
55606diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55607index 3c14e43..2630570 100644
55608--- a/drivers/video/logo/logo_linux_clut224.ppm
55609+++ b/drivers/video/logo/logo_linux_clut224.ppm
55610@@ -2,1603 +2,1123 @@ P3
55611 # Standard 224-color Linux logo
55612 80 80
55613 255
 [pixel data elided: the remainder of this hunk deletes the old Linux logo's raw RGB triplets and substitutes the new logo's rows — per the @@ header above, the 80x80, 224-color PPM image body shrinks from 1603 lines to 1123. No code is affected.]
57046- 2 2 6 6 6 6 91 60 6 167 114 7
57047-206 145 10 230 174 11 242 186 14 246 190 14
57048-246 190 14 246 190 14 246 186 14 242 186 14
57049-239 182 13 230 174 11 224 166 10 213 154 11
57050-180 133 36 124 112 88 86 86 86 58 58 58
57051- 38 38 38 22 22 22 10 10 10 6 6 6
57052- 0 0 0 0 0 0 0 0 0 0 0 0
57053- 0 0 0 0 0 0 0 0 0 0 0 0
57054- 0 0 0 0 0 0 0 0 0 0 0 0
57055- 0 0 0 0 0 0 0 0 0 14 14 14
57056- 34 34 34 70 70 70 138 110 50 158 118 36
57057-167 114 7 180 123 7 192 133 9 197 138 11
57058-200 144 11 206 145 10 213 154 11 219 162 10
57059-224 166 10 230 174 11 239 182 13 242 186 14
57060-246 186 14 246 186 14 246 186 14 246 186 14
57061-239 182 13 216 158 10 185 133 11 152 99 6
57062-104 69 6 18 14 6 2 2 6 2 2 6
57063- 2 2 6 2 2 6 2 2 6 2 2 6
57064- 2 2 6 2 2 6 2 2 6 2 2 6
57065- 2 2 6 2 2 6 2 2 6 2 2 6
57066- 2 2 6 6 6 6 80 54 7 152 99 6
57067-192 133 9 219 162 10 236 178 12 239 182 13
57068-246 186 14 242 186 14 239 182 13 236 178 12
57069-224 166 10 206 145 10 192 133 9 154 121 60
57070- 94 94 94 62 62 62 42 42 42 22 22 22
57071- 14 14 14 6 6 6 0 0 0 0 0 0
57072- 0 0 0 0 0 0 0 0 0 0 0 0
57073- 0 0 0 0 0 0 0 0 0 0 0 0
57074- 0 0 0 0 0 0 0 0 0 0 0 0
57075- 0 0 0 0 0 0 0 0 0 6 6 6
57076- 18 18 18 34 34 34 58 58 58 78 78 78
57077-101 98 89 124 112 88 142 110 46 156 107 11
57078-163 110 8 167 114 7 175 118 6 180 123 7
57079-185 133 11 197 138 11 210 150 10 219 162 10
57080-226 170 11 236 178 12 236 178 12 234 174 13
57081-219 162 10 197 138 11 163 110 8 130 83 6
57082- 91 60 6 10 10 10 2 2 6 2 2 6
57083- 18 18 18 38 38 38 38 38 38 38 38 38
57084- 38 38 38 38 38 38 38 38 38 38 38 38
57085- 38 38 38 38 38 38 26 26 26 2 2 6
57086- 2 2 6 6 6 6 70 47 6 137 92 6
57087-175 118 6 200 144 11 219 162 10 230 174 11
57088-234 174 13 230 174 11 219 162 10 210 150 10
57089-192 133 9 163 110 8 124 112 88 82 82 82
57090- 50 50 50 30 30 30 14 14 14 6 6 6
57091- 0 0 0 0 0 0 0 0 0 0 0 0
57092- 0 0 0 0 0 0 0 0 0 0 0 0
57093- 0 0 0 0 0 0 0 0 0 0 0 0
57094- 0 0 0 0 0 0 0 0 0 0 0 0
57095- 0 0 0 0 0 0 0 0 0 0 0 0
57096- 6 6 6 14 14 14 22 22 22 34 34 34
57097- 42 42 42 58 58 58 74 74 74 86 86 86
57098-101 98 89 122 102 70 130 98 46 121 87 25
57099-137 92 6 152 99 6 163 110 8 180 123 7
57100-185 133 11 197 138 11 206 145 10 200 144 11
57101-180 123 7 156 107 11 130 83 6 104 69 6
57102- 50 34 6 54 54 54 110 110 110 101 98 89
57103- 86 86 86 82 82 82 78 78 78 78 78 78
57104- 78 78 78 78 78 78 78 78 78 78 78 78
57105- 78 78 78 82 82 82 86 86 86 94 94 94
57106-106 106 106 101 101 101 86 66 34 124 80 6
57107-156 107 11 180 123 7 192 133 9 200 144 11
57108-206 145 10 200 144 11 192 133 9 175 118 6
57109-139 102 15 109 106 95 70 70 70 42 42 42
57110- 22 22 22 10 10 10 0 0 0 0 0 0
57111- 0 0 0 0 0 0 0 0 0 0 0 0
57112- 0 0 0 0 0 0 0 0 0 0 0 0
57113- 0 0 0 0 0 0 0 0 0 0 0 0
57114- 0 0 0 0 0 0 0 0 0 0 0 0
57115- 0 0 0 0 0 0 0 0 0 0 0 0
57116- 0 0 0 0 0 0 6 6 6 10 10 10
57117- 14 14 14 22 22 22 30 30 30 38 38 38
57118- 50 50 50 62 62 62 74 74 74 90 90 90
57119-101 98 89 112 100 78 121 87 25 124 80 6
57120-137 92 6 152 99 6 152 99 6 152 99 6
57121-138 86 6 124 80 6 98 70 6 86 66 30
57122-101 98 89 82 82 82 58 58 58 46 46 46
57123- 38 38 38 34 34 34 34 34 34 34 34 34
57124- 34 34 34 34 34 34 34 34 34 34 34 34
57125- 34 34 34 34 34 34 38 38 38 42 42 42
57126- 54 54 54 82 82 82 94 86 76 91 60 6
57127-134 86 6 156 107 11 167 114 7 175 118 6
57128-175 118 6 167 114 7 152 99 6 121 87 25
57129-101 98 89 62 62 62 34 34 34 18 18 18
57130- 6 6 6 0 0 0 0 0 0 0 0 0
57131- 0 0 0 0 0 0 0 0 0 0 0 0
57132- 0 0 0 0 0 0 0 0 0 0 0 0
57133- 0 0 0 0 0 0 0 0 0 0 0 0
57134- 0 0 0 0 0 0 0 0 0 0 0 0
57135- 0 0 0 0 0 0 0 0 0 0 0 0
57136- 0 0 0 0 0 0 0 0 0 0 0 0
57137- 0 0 0 6 6 6 6 6 6 10 10 10
57138- 18 18 18 22 22 22 30 30 30 42 42 42
57139- 50 50 50 66 66 66 86 86 86 101 98 89
57140-106 86 58 98 70 6 104 69 6 104 69 6
57141-104 69 6 91 60 6 82 62 34 90 90 90
57142- 62 62 62 38 38 38 22 22 22 14 14 14
57143- 10 10 10 10 10 10 10 10 10 10 10 10
57144- 10 10 10 10 10 10 6 6 6 10 10 10
57145- 10 10 10 10 10 10 10 10 10 14 14 14
57146- 22 22 22 42 42 42 70 70 70 89 81 66
57147- 80 54 7 104 69 6 124 80 6 137 92 6
57148-134 86 6 116 81 8 100 82 52 86 86 86
57149- 58 58 58 30 30 30 14 14 14 6 6 6
57150- 0 0 0 0 0 0 0 0 0 0 0 0
57151- 0 0 0 0 0 0 0 0 0 0 0 0
57152- 0 0 0 0 0 0 0 0 0 0 0 0
57153- 0 0 0 0 0 0 0 0 0 0 0 0
57154- 0 0 0 0 0 0 0 0 0 0 0 0
57155- 0 0 0 0 0 0 0 0 0 0 0 0
57156- 0 0 0 0 0 0 0 0 0 0 0 0
57157- 0 0 0 0 0 0 0 0 0 0 0 0
57158- 0 0 0 6 6 6 10 10 10 14 14 14
57159- 18 18 18 26 26 26 38 38 38 54 54 54
57160- 70 70 70 86 86 86 94 86 76 89 81 66
57161- 89 81 66 86 86 86 74 74 74 50 50 50
57162- 30 30 30 14 14 14 6 6 6 0 0 0
57163- 0 0 0 0 0 0 0 0 0 0 0 0
57164- 0 0 0 0 0 0 0 0 0 0 0 0
57165- 0 0 0 0 0 0 0 0 0 0 0 0
57166- 6 6 6 18 18 18 34 34 34 58 58 58
57167- 82 82 82 89 81 66 89 81 66 89 81 66
57168- 94 86 66 94 86 76 74 74 74 50 50 50
57169- 26 26 26 14 14 14 6 6 6 0 0 0
57170- 0 0 0 0 0 0 0 0 0 0 0 0
57171- 0 0 0 0 0 0 0 0 0 0 0 0
57172- 0 0 0 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 0 0 0 0 0 0
57174- 0 0 0 0 0 0 0 0 0 0 0 0
57175- 0 0 0 0 0 0 0 0 0 0 0 0
57176- 0 0 0 0 0 0 0 0 0 0 0 0
57177- 0 0 0 0 0 0 0 0 0 0 0 0
57178- 0 0 0 0 0 0 0 0 0 0 0 0
57179- 6 6 6 6 6 6 14 14 14 18 18 18
57180- 30 30 30 38 38 38 46 46 46 54 54 54
57181- 50 50 50 42 42 42 30 30 30 18 18 18
57182- 10 10 10 0 0 0 0 0 0 0 0 0
57183- 0 0 0 0 0 0 0 0 0 0 0 0
57184- 0 0 0 0 0 0 0 0 0 0 0 0
57185- 0 0 0 0 0 0 0 0 0 0 0 0
57186- 0 0 0 6 6 6 14 14 14 26 26 26
57187- 38 38 38 50 50 50 58 58 58 58 58 58
57188- 54 54 54 42 42 42 30 30 30 18 18 18
57189- 10 10 10 0 0 0 0 0 0 0 0 0
57190- 0 0 0 0 0 0 0 0 0 0 0 0
57191- 0 0 0 0 0 0 0 0 0 0 0 0
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 0 0 0 0 0 0 0 0 0
57194- 0 0 0 0 0 0 0 0 0 0 0 0
57195- 0 0 0 0 0 0 0 0 0 0 0 0
57196- 0 0 0 0 0 0 0 0 0 0 0 0
57197- 0 0 0 0 0 0 0 0 0 0 0 0
57198- 0 0 0 0 0 0 0 0 0 0 0 0
57199- 0 0 0 0 0 0 0 0 0 6 6 6
57200- 6 6 6 10 10 10 14 14 14 18 18 18
57201- 18 18 18 14 14 14 10 10 10 6 6 6
57202- 0 0 0 0 0 0 0 0 0 0 0 0
57203- 0 0 0 0 0 0 0 0 0 0 0 0
57204- 0 0 0 0 0 0 0 0 0 0 0 0
57205- 0 0 0 0 0 0 0 0 0 0 0 0
57206- 0 0 0 0 0 0 0 0 0 6 6 6
57207- 14 14 14 18 18 18 22 22 22 22 22 22
57208- 18 18 18 14 14 14 10 10 10 6 6 6
57209- 0 0 0 0 0 0 0 0 0 0 0 0
57210- 0 0 0 0 0 0 0 0 0 0 0 0
57211- 0 0 0 0 0 0 0 0 0 0 0 0
57212- 0 0 0 0 0 0 0 0 0 0 0 0
57213- 0 0 0 0 0 0 0 0 0 0 0 0
+[… several hundred rows of raw RGB pixel triplets elided: the replacement image payload of this hunk, six pixels per row …]
58165+4 4 4 4 4 4
58166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58167+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
58168+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
58169+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
58170+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
58171+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
58172+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
58173+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
58174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58179+4 4 4 4 4 4
58180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58181+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58182+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
58183+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58184+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
58185+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
58186+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
58187+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58193+4 4 4 4 4 4
58194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58196+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58197+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
58198+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
58199+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
58200+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
58201+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58207+4 4 4 4 4 4
58208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58211+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58212+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
58213+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
58214+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
58215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58221+4 4 4 4 4 4
58222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58225+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58226+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58227+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
58228+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
58229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58235+4 4 4 4 4 4
58236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58239+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58240+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58241+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58242+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
58243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58249+4 4 4 4 4 4
58250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58253+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
58254+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
58255+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
58256+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
58257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58263+4 4 4 4 4 4
58264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58268+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
58269+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58270+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58277+4 4 4 4 4 4
58278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58282+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
58283+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
58284+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58291+4 4 4 4 4 4
58292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58296+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
58297+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
58298+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58305+4 4 4 4 4 4
58306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58310+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
58311+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
58312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58319+4 4 4 4 4 4
58320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58324+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58325+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
58326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58333+4 4 4 4 4 4
58334diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
58335index 70fba97..8ec7f86 100644
58336--- a/drivers/xen/events/events_base.c
58337+++ b/drivers/xen/events/events_base.c
58338@@ -1563,7 +1563,7 @@ void xen_irq_resume(void)
58339 restore_pirqs();
58340 }
58341
58342-static struct irq_chip xen_dynamic_chip __read_mostly = {
58343+static struct irq_chip xen_dynamic_chip = {
58344 .name = "xen-dyn",
58345
58346 .irq_disable = disable_dynirq,
58347@@ -1577,7 +1577,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
58348 .irq_retrigger = retrigger_dynirq,
58349 };
58350
58351-static struct irq_chip xen_pirq_chip __read_mostly = {
58352+static struct irq_chip xen_pirq_chip = {
58353 .name = "xen-pirq",
58354
58355 .irq_startup = startup_pirq,
58356@@ -1597,7 +1597,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
58357 .irq_retrigger = retrigger_dynirq,
58358 };
58359
58360-static struct irq_chip xen_percpu_chip __read_mostly = {
58361+static struct irq_chip xen_percpu_chip = {
58362 .name = "xen-percpu",
58363
58364 .irq_disable = disable_dynirq,
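
The three hunks above drop __read_mostly from the xen irq_chip definitions. Under PaX's constify plugin, ops-style structures made up of function pointers are presumably placed in read-only memory, and the __read_mostly section annotation would conflict with that placement. A minimal userspace sketch of the property being bought (the names here are illustrative, not from the patch):

#include <stdio.h>

struct ops {
	void (*handler)(void);
};

static void safe_handler(void) { puts("safe"); }

static const struct ops my_ops = {
	.handler = safe_handler,
};

int main(void)
{
	my_ops.handler();
	/* my_ops.handler = evil_handler;
	 *   -> compile error: assignment of member 'handler' in
	 *      read-only object.  The constify plugin gives kernel
	 *      ops structures the same guarantee without `const`
	 *      being spelled at every definition site. */
	return 0;
}
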
58365diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
58366index fef20db..d28b1ab 100644
58367--- a/drivers/xen/xenfs/xenstored.c
58368+++ b/drivers/xen/xenfs/xenstored.c
58369@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
58370 static int xsd_kva_open(struct inode *inode, struct file *file)
58371 {
58372 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
58373+#ifdef CONFIG_GRKERNSEC_HIDESYM
58374+ NULL);
58375+#else
58376 xen_store_interface);
58377+#endif
58378+
58379 if (!file->private_data)
58380 return -ENOMEM;
58381 return 0;
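
The xenstored hunk prints NULL instead of the real xen_store_interface address when GRKERNSEC_HIDESYM is set, keeping kernel virtual addresses out of files readable by userland. The same gate in miniature; hide_pointers is a made-up stand-in for the config option:

#include <stdio.h>

static int hide_pointers = 1;	/* stand-in for CONFIG_GRKERNSEC_HIDESYM */

static void report(const void *obj)
{
	/* Publish the address only when pointer hiding is off. */
	printf("object at %p\n", hide_pointers ? NULL : obj);
}

int main(void)
{
	int x;
	report(&x);
	return 0;
}
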
58382diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58383index eb14e05..5156de7 100644
58384--- a/fs/9p/vfs_addr.c
58385+++ b/fs/9p/vfs_addr.c
58386@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58387
58388 retval = v9fs_file_write_internal(inode,
58389 v9inode->writeback_fid,
58390- (__force const char __user *)buffer,
58391+ (const char __force_user *)buffer,
58392 len, &offset, 0);
58393 if (retval > 0)
58394 retval = 0;
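
__force_user in the 9p hunk reads as sparse's __force (permit an address-space-changing cast) combined with __user (the pointer belongs to the user address space). A sketch of how such annotations are conventionally defined for the sparse checker; the exact grsecurity definition may differ:

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

/* Stub consumer that wants a user-space pointer. */
static long copy_out(char __user *dst, const char *src, unsigned long n)
{
	(void)dst; (void)src;
	return (long)n;
}

static long demo(char *kbuf, unsigned long n)
{
	/* Without __force, sparse flags the address-space change. */
	return copy_out((char __force_user *)kbuf, kbuf, n);
}

int main(void)
{
	char buf[8] = "data";
	return demo(buf, sizeof(buf)) == sizeof(buf) ? 0 : 1;
}
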
58395diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58396index 3662f1d..90558b5 100644
58397--- a/fs/9p/vfs_inode.c
58398+++ b/fs/9p/vfs_inode.c
58399@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58400 void
58401 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58402 {
58403- char *s = nd_get_link(nd);
58404+ const char *s = nd_get_link(nd);
58405
58406 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
58407 dentry, IS_ERR(s) ? "<error>" : s);
58408diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58409index 270c481..0d8a962 100644
58410--- a/fs/Kconfig.binfmt
58411+++ b/fs/Kconfig.binfmt
58412@@ -106,7 +106,7 @@ config HAVE_AOUT
58413
58414 config BINFMT_AOUT
58415 tristate "Kernel support for a.out and ECOFF binaries"
58416- depends on HAVE_AOUT
58417+ depends on HAVE_AOUT && BROKEN
58418 ---help---
58419 A.out (Assembler.OUTput) is a set of formats for libraries and
58420 executables used in the earliest versions of UNIX. Linux used
58421diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58422index 8a1d38e..300a14e 100644
58423--- a/fs/afs/inode.c
58424+++ b/fs/afs/inode.c
58425@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58426 struct afs_vnode *vnode;
58427 struct super_block *sb;
58428 struct inode *inode;
58429- static atomic_t afs_autocell_ino;
58430+ static atomic_unchecked_t afs_autocell_ino;
58431
58432 _enter("{%x:%u},%*.*s,",
58433 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58434@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58435 data.fid.unique = 0;
58436 data.fid.vnode = 0;
58437
58438- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58439+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58440 afs_iget5_autocell_test, afs_iget5_set,
58441 &data);
58442 if (!inode) {
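
atomic_unchecked_t and atomic_inc_return_unchecked mark counters that are allowed to wrap. Under PaX's REFCOUNT hardening, overflow of a plain atomic_t presumably traps to stop reference-count overflows; a pure ID generator like afs_autocell_ino is harmless on wrap, so it opts out. The distinction in userspace C11 terms:

#include <stdatomic.h>
#include <stdio.h>

/* Unsigned so wraparound is defined; no overflow trap wanted --
 * the same contract atomic_unchecked_t expresses in the patch. */
static atomic_uint next_ino;

static unsigned int new_ino(void)
{
	return atomic_fetch_add(&next_ino, 1) + 1;
}

int main(void)
{
	unsigned int a = new_ino();
	unsigned int b = new_ino();

	printf("ino=%u ino=%u\n", a, b);
	return 0;
}
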
58443diff --git a/fs/aio.c b/fs/aio.c
58444index a793f70..46f45af 100644
58445--- a/fs/aio.c
58446+++ b/fs/aio.c
58447@@ -404,7 +404,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58448 size += sizeof(struct io_event) * nr_events;
58449
58450 nr_pages = PFN_UP(size);
58451- if (nr_pages < 0)
58452+ if (nr_pages <= 0)
58453 return -EINVAL;
58454
58455 file = aio_private_file(ctx, nr_pages);
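
Widening the aio guard from nr_pages < 0 to nr_pages <= 0 also rejects a zero page count, which (presumably the concern here) can fall out of overflow or truncation when a huge size is narrowed into an int. A standalone illustration for an LP64 target; PAGE_SHIFT and PFN_UP are re-derived locally:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_UP(x) (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long size = 1UL << 44;	  /* absurd 16 TiB request */
	int nr_pages = (int)PFN_UP(size); /* 2^32 pages: truncates to 0 */

	printf("nr_pages = %d\n", nr_pages);
	printf("caught by '< 0'?  %s\n", nr_pages <  0 ? "yes" : "no");
	printf("caught by '<= 0'? %s\n", nr_pages <= 0 ? "yes" : "no");
	return 0;
}
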
58456diff --git a/fs/attr.c b/fs/attr.c
58457index 6530ced..4a827e2 100644
58458--- a/fs/attr.c
58459+++ b/fs/attr.c
58460@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58461 unsigned long limit;
58462
58463 limit = rlimit(RLIMIT_FSIZE);
58464+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58465 if (limit != RLIM_INFINITY && offset > limit)
58466 goto out_sig;
58467 if (offset > inode->i_sb->s_maxbytes)
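
gr_learn_resource() appears to feed grsecurity's RBAC learning mode: each time a task hits a resource check, the observed requirement is recorded so a tight per-subject limit can be generated later. Roughly this shape, with the real logging transport omitted:

#include <stdio.h>
#include <sys/resource.h>

static void learn_resource(int res, unsigned long wanted)
{
	struct rlimit rl;

	if (getrlimit(res, &rl) == 0 && wanted > rl.rlim_cur)
		fprintf(stderr, "learn: resource %d needs %lu (soft limit %llu)\n",
			res, wanted, (unsigned long long)rl.rlim_cur);
}

int main(void)
{
	learn_resource(RLIMIT_FSIZE, 1UL << 30);	/* a 1 GiB write */
	return 0;
}
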
58468diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58469index 116fd38..c04182da 100644
58470--- a/fs/autofs4/waitq.c
58471+++ b/fs/autofs4/waitq.c
58472@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58473 {
58474 unsigned long sigpipe, flags;
58475 mm_segment_t fs;
58476- const char *data = (const char *)addr;
58477+ const char __user *data = (const char __force_user *)addr;
58478 ssize_t wr = 0;
58479
58480 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58481@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58482 return 1;
58483 }
58484
58485+#ifdef CONFIG_GRKERNSEC_HIDESYM
58486+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58487+#endif
58488+
58489 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58490 enum autofs_notify notify)
58491 {
58492@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58493
58494 /* If this is a direct mount request create a dummy name */
58495 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58496+#ifdef CONFIG_GRKERNSEC_HIDESYM
58497+ /* this name does get written to userland via autofs4_write() */
58498+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58499+#else
58500 qstr.len = sprintf(name, "%p", dentry);
58501+#endif
58502 else {
58503 qstr.len = autofs4_getpath(sbi, dentry, &name);
58504 if (!qstr.len) {
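
Same HIDESYM theme as the xenstored hunk: the direct-mount dummy name was sprintf(name, "%p", dentry), which leaks a kernel pointer to userland via autofs4_write(); the patch substitutes a monotonic counter. In miniature:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dummy_name_id;

static int make_dummy_name(char *buf, const void *object)
{
	(void)object;	/* the object's address is deliberately unused */
	return sprintf(buf, "%08x",
		       atomic_fetch_add(&dummy_name_id, 1) + 1);
}

int main(void)
{
	char name[16];
	int len = make_dummy_name(name, name);

	printf("dummy name: %s (len %d)\n", name, len);
	return 0;
}
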
58505diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58506index 2722387..56059b5 100644
58507--- a/fs/befs/endian.h
58508+++ b/fs/befs/endian.h
58509@@ -11,7 +11,7 @@
58510
58511 #include <asm/byteorder.h>
58512
58513-static inline u64
58514+static inline u64 __intentional_overflow(-1)
58515 fs64_to_cpu(const struct super_block *sb, fs64 n)
58516 {
58517 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58518@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58519 return (__force fs64)cpu_to_be64(n);
58520 }
58521
58522-static inline u32
58523+static inline u32 __intentional_overflow(-1)
58524 fs32_to_cpu(const struct super_block *sb, fs32 n)
58525 {
58526 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58527@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58528 return (__force fs32)cpu_to_be32(n);
58529 }
58530
58531-static inline u16
58532+static inline u16 __intentional_overflow(-1)
58533 fs16_to_cpu(const struct super_block *sb, fs16 n)
58534 {
58535 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
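
__intentional_overflow(-1) looks like an annotation for grsecurity's size_overflow GCC plugin: these byte-order helpers shuffle raw bit patterns, so arithmetic "overflow" in them is meaningless and instrumenting them would only yield false positives. The helpers' plain-C shape for reference; fs_is_le stands in for BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE, and the host is assumed little-endian:

#include <stdint.h>
#include <stdio.h>

static uint64_t bswap64(uint64_t x)
{
	x = (x >> 32) | (x << 32);
	x = ((x & 0xFFFF0000FFFF0000ULL) >> 16) |
	    ((x & 0x0000FFFF0000FFFFULL) << 16);
	x = ((x & 0xFF00FF00FF00FF00ULL) >>  8) |
	    ((x & 0x00FF00FF00FF00FFULL) <<  8);
	return x;
}

static uint64_t fs64_to_cpu(int fs_is_le, uint64_t n)
{
	return fs_is_le ? n : bswap64(n);
}

int main(void)
{
	printf("%016llx\n",
	       (unsigned long long)fs64_to_cpu(0, 0x0102030405060708ULL));
	return 0;
}
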
58536diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58537index 4c55668..eeae150 100644
58538--- a/fs/binfmt_aout.c
58539+++ b/fs/binfmt_aout.c
58540@@ -16,6 +16,7 @@
58541 #include <linux/string.h>
58542 #include <linux/fs.h>
58543 #include <linux/file.h>
58544+#include <linux/security.h>
58545 #include <linux/stat.h>
58546 #include <linux/fcntl.h>
58547 #include <linux/ptrace.h>
58548@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58549 #endif
58550 # define START_STACK(u) ((void __user *)u.start_stack)
58551
58552+ memset(&dump, 0, sizeof(dump));
58553+
58554 fs = get_fs();
58555 set_fs(KERNEL_DS);
58556 has_dumped = 1;
58557@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58558
58559 /* If the size of the dump file exceeds the rlimit, then see what would happen
58560 if we wrote the stack, but not the data area. */
58561+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58562 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58563 dump.u_dsize = 0;
58564
58565 /* Make sure we have enough room to write the stack and data areas. */
58566+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58567 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58568 dump.u_ssize = 0;
58569
58570@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58571 rlim = rlimit(RLIMIT_DATA);
58572 if (rlim >= RLIM_INFINITY)
58573 rlim = ~0;
58574+
58575+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58576 if (ex.a_data + ex.a_bss > rlim)
58577 return -ENOMEM;
58578
58579@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58580
58581 install_exec_creds(bprm);
58582
58583+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58584+ current->mm->pax_flags = 0UL;
58585+#endif
58586+
58587+#ifdef CONFIG_PAX_PAGEEXEC
58588+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58589+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58590+
58591+#ifdef CONFIG_PAX_EMUTRAMP
58592+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58593+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58594+#endif
58595+
58596+#ifdef CONFIG_PAX_MPROTECT
58597+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58598+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58599+#endif
58600+
58601+ }
58602+#endif
58603+
58604 if (N_MAGIC(ex) == OMAGIC) {
58605 unsigned long text_addr, map_size;
58606 loff_t pos;
58607@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58608 return error;
58609
58610 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58611- PROT_READ | PROT_WRITE | PROT_EXEC,
58612+ PROT_READ | PROT_WRITE,
58613 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58614 fd_offset + ex.a_text);
58615 if (error != N_DATADDR(ex))
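
The last a.out hunk drops PROT_EXEC from the data-segment mapping: under a W^X policy, writable memory must never be executable. What that buys, shown with a plain anonymous mapping:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memcpy(p, "\xc3", 1);	/* x86 'ret' byte -- data, not code */
	/* Jumping here would fault: the page lacks PROT_EXEC. */
	printf("writable, non-executable page at %p\n", p);
	munmap(p, len);
	return 0;
}
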
58616diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58617index 995986b..dcc4ef2 100644
58618--- a/fs/binfmt_elf.c
58619+++ b/fs/binfmt_elf.c
58620@@ -34,6 +34,7 @@
58621 #include <linux/utsname.h>
58622 #include <linux/coredump.h>
58623 #include <linux/sched.h>
58624+#include <linux/xattr.h>
58625 #include <asm/uaccess.h>
58626 #include <asm/param.h>
58627 #include <asm/page.h>
58628@@ -47,7 +48,7 @@
58629
58630 static int load_elf_binary(struct linux_binprm *bprm);
58631 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58632- int, int, unsigned long);
58633+ int, int, unsigned long) __intentional_overflow(-1);
58634
58635 #ifdef CONFIG_USELIB
58636 static int load_elf_library(struct file *);
58637@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58638 #define elf_core_dump NULL
58639 #endif
58640
58641+#ifdef CONFIG_PAX_MPROTECT
58642+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58643+#endif
58644+
58645+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58646+static void elf_handle_mmap(struct file *file);
58647+#endif
58648+
58649 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58650 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58651 #else
58652@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58653 .load_binary = load_elf_binary,
58654 .load_shlib = load_elf_library,
58655 .core_dump = elf_core_dump,
58656+
58657+#ifdef CONFIG_PAX_MPROTECT
58658+ .handle_mprotect= elf_handle_mprotect,
58659+#endif
58660+
58661+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58662+ .handle_mmap = elf_handle_mmap,
58663+#endif
58664+
58665 .min_coredump = ELF_EXEC_PAGESIZE,
58666 };
58667
58668@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58669
58670 static int set_brk(unsigned long start, unsigned long end)
58671 {
58672+ unsigned long e = end;
58673+
58674 start = ELF_PAGEALIGN(start);
58675 end = ELF_PAGEALIGN(end);
58676 if (end > start) {
58677@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58678 if (BAD_ADDR(addr))
58679 return addr;
58680 }
58681- current->mm->start_brk = current->mm->brk = end;
58682+ current->mm->start_brk = current->mm->brk = e;
58683 return 0;
58684 }
58685
58686@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58687 elf_addr_t __user *u_rand_bytes;
58688 const char *k_platform = ELF_PLATFORM;
58689 const char *k_base_platform = ELF_BASE_PLATFORM;
58690- unsigned char k_rand_bytes[16];
58691+ u32 k_rand_bytes[4];
58692 int items;
58693 elf_addr_t *elf_info;
58694 int ei_index = 0;
58695 const struct cred *cred = current_cred();
58696 struct vm_area_struct *vma;
58697+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58698
58699 /*
58700 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58701@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58702 * Generate 16 random bytes for userspace PRNG seeding.
58703 */
58704 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58705- u_rand_bytes = (elf_addr_t __user *)
58706- STACK_ALLOC(p, sizeof(k_rand_bytes));
58707+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58708+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58709+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58710+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58711+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58712+ u_rand_bytes = (elf_addr_t __user *) p;
58713 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58714 return -EFAULT;
58715
58716@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58717 return -EFAULT;
58718 current->mm->env_end = p;
58719
58720+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58721+
58722 /* Put the elf_info on the stack in the right place. */
58723 sp = (elf_addr_t __user *)envp + 1;
58724- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58725+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58726 return -EFAULT;
58727 return 0;
58728 }
58729@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58730 an ELF header */
58731
58732 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58733- struct file *interpreter, unsigned long *interp_map_addr,
58734+ struct file *interpreter,
58735 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58736 {
58737 struct elf_phdr *eppnt;
58738- unsigned long load_addr = 0;
58739+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58740 int load_addr_set = 0;
58741 unsigned long last_bss = 0, elf_bss = 0;
58742- unsigned long error = ~0UL;
58743+ unsigned long error = -EINVAL;
58744 unsigned long total_size;
58745 int i;
58746
58747@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58748 goto out;
58749 }
58750
58751+#ifdef CONFIG_PAX_SEGMEXEC
58752+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58753+ pax_task_size = SEGMEXEC_TASK_SIZE;
58754+#endif
58755+
58756 eppnt = interp_elf_phdata;
58757 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58758 if (eppnt->p_type == PT_LOAD) {
58759@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58760 map_addr = elf_map(interpreter, load_addr + vaddr,
58761 eppnt, elf_prot, elf_type, total_size);
58762 total_size = 0;
58763- if (!*interp_map_addr)
58764- *interp_map_addr = map_addr;
58765 error = map_addr;
58766 if (BAD_ADDR(map_addr))
58767 goto out;
58768@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58769 k = load_addr + eppnt->p_vaddr;
58770 if (BAD_ADDR(k) ||
58771 eppnt->p_filesz > eppnt->p_memsz ||
58772- eppnt->p_memsz > TASK_SIZE ||
58773- TASK_SIZE - eppnt->p_memsz < k) {
58774+ eppnt->p_memsz > pax_task_size ||
58775+ pax_task_size - eppnt->p_memsz < k) {
58776 error = -ENOMEM;
58777 goto out;
58778 }
58779@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58780 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58781
58782 /* Map the last of the bss segment */
58783- error = vm_brk(elf_bss, last_bss - elf_bss);
58784- if (BAD_ADDR(error))
58785- goto out;
58786+ if (last_bss > elf_bss) {
58787+ error = vm_brk(elf_bss, last_bss - elf_bss);
58788+ if (BAD_ADDR(error))
58789+ goto out;
58790+ }
58791 }
58792
58793 error = load_addr;
58794@@ -634,6 +666,336 @@ out:
58795 return error;
58796 }
58797
58798+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58799+#ifdef CONFIG_PAX_SOFTMODE
58800+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58801+{
58802+ unsigned long pax_flags = 0UL;
58803+
58804+#ifdef CONFIG_PAX_PAGEEXEC
58805+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58806+ pax_flags |= MF_PAX_PAGEEXEC;
58807+#endif
58808+
58809+#ifdef CONFIG_PAX_SEGMEXEC
58810+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58811+ pax_flags |= MF_PAX_SEGMEXEC;
58812+#endif
58813+
58814+#ifdef CONFIG_PAX_EMUTRAMP
58815+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58816+ pax_flags |= MF_PAX_EMUTRAMP;
58817+#endif
58818+
58819+#ifdef CONFIG_PAX_MPROTECT
58820+ if (elf_phdata->p_flags & PF_MPROTECT)
58821+ pax_flags |= MF_PAX_MPROTECT;
58822+#endif
58823+
58824+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58825+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58826+ pax_flags |= MF_PAX_RANDMMAP;
58827+#endif
58828+
58829+ return pax_flags;
58830+}
58831+#endif
58832+
58833+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58834+{
58835+ unsigned long pax_flags = 0UL;
58836+
58837+#ifdef CONFIG_PAX_PAGEEXEC
58838+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58839+ pax_flags |= MF_PAX_PAGEEXEC;
58840+#endif
58841+
58842+#ifdef CONFIG_PAX_SEGMEXEC
58843+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58844+ pax_flags |= MF_PAX_SEGMEXEC;
58845+#endif
58846+
58847+#ifdef CONFIG_PAX_EMUTRAMP
58848+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58849+ pax_flags |= MF_PAX_EMUTRAMP;
58850+#endif
58851+
58852+#ifdef CONFIG_PAX_MPROTECT
58853+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58854+ pax_flags |= MF_PAX_MPROTECT;
58855+#endif
58856+
58857+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58858+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58859+ pax_flags |= MF_PAX_RANDMMAP;
58860+#endif
58861+
58862+ return pax_flags;
58863+}
58864+#endif
58865+
58866+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58867+#ifdef CONFIG_PAX_SOFTMODE
58868+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58869+{
58870+ unsigned long pax_flags = 0UL;
58871+
58872+#ifdef CONFIG_PAX_PAGEEXEC
58873+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58874+ pax_flags |= MF_PAX_PAGEEXEC;
58875+#endif
58876+
58877+#ifdef CONFIG_PAX_SEGMEXEC
58878+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58879+ pax_flags |= MF_PAX_SEGMEXEC;
58880+#endif
58881+
58882+#ifdef CONFIG_PAX_EMUTRAMP
58883+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58884+ pax_flags |= MF_PAX_EMUTRAMP;
58885+#endif
58886+
58887+#ifdef CONFIG_PAX_MPROTECT
58888+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58889+ pax_flags |= MF_PAX_MPROTECT;
58890+#endif
58891+
58892+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58893+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58894+ pax_flags |= MF_PAX_RANDMMAP;
58895+#endif
58896+
58897+ return pax_flags;
58898+}
58899+#endif
58900+
58901+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58902+{
58903+ unsigned long pax_flags = 0UL;
58904+
58905+#ifdef CONFIG_PAX_PAGEEXEC
58906+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58907+ pax_flags |= MF_PAX_PAGEEXEC;
58908+#endif
58909+
58910+#ifdef CONFIG_PAX_SEGMEXEC
58911+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58912+ pax_flags |= MF_PAX_SEGMEXEC;
58913+#endif
58914+
58915+#ifdef CONFIG_PAX_EMUTRAMP
58916+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58917+ pax_flags |= MF_PAX_EMUTRAMP;
58918+#endif
58919+
58920+#ifdef CONFIG_PAX_MPROTECT
58921+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58922+ pax_flags |= MF_PAX_MPROTECT;
58923+#endif
58924+
58925+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58926+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58927+ pax_flags |= MF_PAX_RANDMMAP;
58928+#endif
58929+
58930+ return pax_flags;
58931+}
58932+#endif
58933+
58934+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58935+static unsigned long pax_parse_defaults(void)
58936+{
58937+ unsigned long pax_flags = 0UL;
58938+
58939+#ifdef CONFIG_PAX_SOFTMODE
58940+ if (pax_softmode)
58941+ return pax_flags;
58942+#endif
58943+
58944+#ifdef CONFIG_PAX_PAGEEXEC
58945+ pax_flags |= MF_PAX_PAGEEXEC;
58946+#endif
58947+
58948+#ifdef CONFIG_PAX_SEGMEXEC
58949+ pax_flags |= MF_PAX_SEGMEXEC;
58950+#endif
58951+
58952+#ifdef CONFIG_PAX_MPROTECT
58953+ pax_flags |= MF_PAX_MPROTECT;
58954+#endif
58955+
58956+#ifdef CONFIG_PAX_RANDMMAP
58957+ if (randomize_va_space)
58958+ pax_flags |= MF_PAX_RANDMMAP;
58959+#endif
58960+
58961+ return pax_flags;
58962+}
58963+
58964+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58965+{
58966+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58967+
58968+#ifdef CONFIG_PAX_EI_PAX
58969+
58970+#ifdef CONFIG_PAX_SOFTMODE
58971+ if (pax_softmode)
58972+ return pax_flags;
58973+#endif
58974+
58975+ pax_flags = 0UL;
58976+
58977+#ifdef CONFIG_PAX_PAGEEXEC
58978+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58979+ pax_flags |= MF_PAX_PAGEEXEC;
58980+#endif
58981+
58982+#ifdef CONFIG_PAX_SEGMEXEC
58983+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58984+ pax_flags |= MF_PAX_SEGMEXEC;
58985+#endif
58986+
58987+#ifdef CONFIG_PAX_EMUTRAMP
58988+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58989+ pax_flags |= MF_PAX_EMUTRAMP;
58990+#endif
58991+
58992+#ifdef CONFIG_PAX_MPROTECT
58993+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58994+ pax_flags |= MF_PAX_MPROTECT;
58995+#endif
58996+
58997+#ifdef CONFIG_PAX_ASLR
58998+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58999+ pax_flags |= MF_PAX_RANDMMAP;
59000+#endif
59001+
59002+#endif
59003+
59004+ return pax_flags;
59005+
59006+}
59007+
59008+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
59009+{
59010+
59011+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59012+ unsigned long i;
59013+
59014+ for (i = 0UL; i < elf_ex->e_phnum; i++)
59015+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
59016+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59017+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59018+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59019+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59020+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59021+ return PAX_PARSE_FLAGS_FALLBACK;
59022+
59023+#ifdef CONFIG_PAX_SOFTMODE
59024+ if (pax_softmode)
59025+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59026+ else
59027+#endif
59028+
59029+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59030+ break;
59031+ }
59032+#endif
59033+
59034+ return PAX_PARSE_FLAGS_FALLBACK;
59035+}
59036+
59037+static unsigned long pax_parse_xattr_pax(struct file * const file)
59038+{
59039+
59040+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59041+ ssize_t xattr_size, i;
59042+ unsigned char xattr_value[sizeof("pemrs") - 1];
59043+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59044+
59045+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59046+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59047+ return PAX_PARSE_FLAGS_FALLBACK;
59048+
59049+ for (i = 0; i < xattr_size; i++)
59050+ switch (xattr_value[i]) {
59051+ default:
59052+ return PAX_PARSE_FLAGS_FALLBACK;
59053+
59054+#define parse_flag(option1, option2, flag) \
59055+ case option1: \
59056+ if (pax_flags_hardmode & MF_PAX_##flag) \
59057+ return PAX_PARSE_FLAGS_FALLBACK;\
59058+ pax_flags_hardmode |= MF_PAX_##flag; \
59059+ break; \
59060+ case option2: \
59061+ if (pax_flags_softmode & MF_PAX_##flag) \
59062+ return PAX_PARSE_FLAGS_FALLBACK;\
59063+ pax_flags_softmode |= MF_PAX_##flag; \
59064+ break;
59065+
59066+ parse_flag('p', 'P', PAGEEXEC);
59067+ parse_flag('e', 'E', EMUTRAMP);
59068+ parse_flag('m', 'M', MPROTECT);
59069+ parse_flag('r', 'R', RANDMMAP);
59070+ parse_flag('s', 'S', SEGMEXEC);
59071+
59072+#undef parse_flag
59073+ }
59074+
59075+ if (pax_flags_hardmode & pax_flags_softmode)
59076+ return PAX_PARSE_FLAGS_FALLBACK;
59077+
59078+#ifdef CONFIG_PAX_SOFTMODE
59079+ if (pax_softmode)
59080+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59081+ else
59082+#endif
59083+
59084+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59085+#else
59086+ return PAX_PARSE_FLAGS_FALLBACK;
59087+#endif
59088+
59089+}
59090+
59091+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59092+{
59093+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59094+
59095+ pax_flags = pax_parse_defaults();
59096+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59097+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59098+ xattr_pax_flags = pax_parse_xattr_pax(file);
59099+
59100+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59101+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59102+ pt_pax_flags != xattr_pax_flags)
59103+ return -EINVAL;
59104+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59105+ pax_flags = xattr_pax_flags;
59106+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59107+ pax_flags = pt_pax_flags;
59108+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59109+ pax_flags = ei_pax_flags;
59110+
59111+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59112+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59113+ if ((__supported_pte_mask & _PAGE_NX))
59114+ pax_flags &= ~MF_PAX_SEGMEXEC;
59115+ else
59116+ pax_flags &= ~MF_PAX_PAGEEXEC;
59117+ }
59118+#endif
59119+
59120+ if (0 > pax_check_flags(&pax_flags))
59121+ return -EINVAL;
59122+
59123+ current->mm->pax_flags = pax_flags;
59124+ return 0;
59125+}
59126+#endif
59127+
59128 /*
59129 * These are the functions used to load ELF style executables and shared
59130 * libraries. There is no binary dependent code anywhere else.
59131@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
59132 {
59133 unsigned long random_variable = 0;
59134
59135+#ifdef CONFIG_PAX_RANDUSTACK
59136+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
59137+ return stack_top - current->mm->delta_stack;
59138+#endif
59139+
59140 if ((current->flags & PF_RANDOMIZE) &&
59141 !(current->personality & ADDR_NO_RANDOMIZE)) {
59142 random_variable = (unsigned long) get_random_int();
59143@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59144 unsigned long load_addr = 0, load_bias = 0;
59145 int load_addr_set = 0;
59146 char * elf_interpreter = NULL;
59147- unsigned long error;
59148+ unsigned long error = 0;
59149 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
59150 unsigned long elf_bss, elf_brk;
59151 int retval, i;
59152@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59153 struct elfhdr interp_elf_ex;
59154 } *loc;
59155 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
59156+ unsigned long pax_task_size;
59157
59158 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
59159 if (!loc) {
59160@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
59161 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
59162 may depend on the personality. */
59163 SET_PERSONALITY2(loc->elf_ex, &arch_state);
59164+
59165+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59166+ current->mm->pax_flags = 0UL;
59167+#endif
59168+
59169+#ifdef CONFIG_PAX_DLRESOLVE
59170+ current->mm->call_dl_resolve = 0UL;
59171+#endif
59172+
59173+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59174+ current->mm->call_syscall = 0UL;
59175+#endif
59176+
59177+#ifdef CONFIG_PAX_ASLR
59178+ current->mm->delta_mmap = 0UL;
59179+ current->mm->delta_stack = 0UL;
59180+#endif
59181+
59182+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59183+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
59184+ send_sig(SIGKILL, current, 0);
59185+ goto out_free_dentry;
59186+ }
59187+#endif
59188+
59189+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59190+ pax_set_initial_flags(bprm);
59191+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59192+ if (pax_set_initial_flags_func)
59193+ (pax_set_initial_flags_func)(bprm);
59194+#endif
59195+
59196+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59197+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
59198+ current->mm->context.user_cs_limit = PAGE_SIZE;
59199+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
59200+ }
59201+#endif
59202+
59203+#ifdef CONFIG_PAX_SEGMEXEC
59204+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
59205+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
59206+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
59207+ pax_task_size = SEGMEXEC_TASK_SIZE;
59208+ current->mm->def_flags |= VM_NOHUGEPAGE;
59209+ } else
59210+#endif
59211+
59212+ pax_task_size = TASK_SIZE;
59213+
59214+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
59215+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59216+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
59217+ put_cpu();
59218+ }
59219+#endif
59220+
59221+#ifdef CONFIG_PAX_ASLR
59222+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59223+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
59224+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
59225+ }
59226+#endif
59227+
59228+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59229+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59230+ executable_stack = EXSTACK_DISABLE_X;
59231+ current->personality &= ~READ_IMPLIES_EXEC;
59232+ } else
59233+#endif
59234+
59235 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
59236 current->personality |= READ_IMPLIES_EXEC;
59237
59238@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
59239 #else
59240 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
59241 #endif
59242+
59243+#ifdef CONFIG_PAX_RANDMMAP
59244+ /* PaX: randomize base address at the default exe base if requested */
59245+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
59246+#ifdef CONFIG_SPARC64
59247+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
59248+#else
59249+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
59250+#endif
59251+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
59252+ elf_flags |= MAP_FIXED;
59253+ }
59254+#endif
59255+
59256 }
59257
59258 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
59259@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
59260 * allowed task size. Note that p_filesz must always be
59261 * <= p_memsz so it is only necessary to check p_memsz.
59262 */
59263- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59264- elf_ppnt->p_memsz > TASK_SIZE ||
59265- TASK_SIZE - elf_ppnt->p_memsz < k) {
59266+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59267+ elf_ppnt->p_memsz > pax_task_size ||
59268+ pax_task_size - elf_ppnt->p_memsz < k) {
59269 /* set_brk can never work. Avoid overflows. */
59270 retval = -EINVAL;
59271 goto out_free_dentry;
59272@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
59273 if (retval)
59274 goto out_free_dentry;
59275 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59276- retval = -EFAULT; /* Nobody gets to see this, but.. */
59277- goto out_free_dentry;
59278+ /*
59279+ * This bss-zeroing can fail if the ELF
59280+ * file specifies odd protections. So
59281+ * we don't check the return value
59282+ */
59283 }
59284
59285+#ifdef CONFIG_PAX_RANDMMAP
59286+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59287+ unsigned long start, size, flags;
59288+ vm_flags_t vm_flags;
59289+
59290+ start = ELF_PAGEALIGN(elf_brk);
59291+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
59292+ flags = MAP_FIXED | MAP_PRIVATE;
59293+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
59294+
59295+ down_write(&current->mm->mmap_sem);
59296+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
59297+ retval = -ENOMEM;
59298+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
59299+// if (current->personality & ADDR_NO_RANDOMIZE)
59300+// vm_flags |= VM_READ | VM_MAYREAD;
59301+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
59302+ retval = IS_ERR_VALUE(start) ? start : 0;
59303+ }
59304+ up_write(&current->mm->mmap_sem);
59305+ if (retval == 0)
59306+ retval = set_brk(start + size, start + size + PAGE_SIZE);
59307+ if (retval < 0)
59308+ goto out_free_dentry;
59309+ }
59310+#endif
59311+
59312 if (elf_interpreter) {
59313- unsigned long interp_map_addr = 0;
59314-
59315 elf_entry = load_elf_interp(&loc->interp_elf_ex,
59316 interpreter,
59317- &interp_map_addr,
59318 load_bias, interp_elf_phdata);
59319 if (!IS_ERR((void *)elf_entry)) {
59320 /*
59321@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
59322 * Decide what to dump of a segment, part, all or none.
59323 */
59324 static unsigned long vma_dump_size(struct vm_area_struct *vma,
59325- unsigned long mm_flags)
59326+ unsigned long mm_flags, long signr)
59327 {
59328 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
59329
59330@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
59331 if (vma->vm_file == NULL)
59332 return 0;
59333
59334- if (FILTER(MAPPED_PRIVATE))
59335+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
59336 goto whole;
59337
59338 /*
59339@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
59340 {
59341 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
59342 int i = 0;
59343- do
59344+ do {
59345 i += 2;
59346- while (auxv[i - 2] != AT_NULL);
59347+ } while (auxv[i - 2] != AT_NULL);
59348 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
59349 }
59350
59351@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59352 {
59353 mm_segment_t old_fs = get_fs();
59354 set_fs(KERNEL_DS);
59355- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59356+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59357 set_fs(old_fs);
59358 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59359 }
59360@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59361 vma = next_vma(vma, gate_vma)) {
59362 unsigned long dump_size;
59363
59364- dump_size = vma_dump_size(vma, cprm->mm_flags);
59365+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59366 vma_filesz[i++] = dump_size;
59367 vma_data_size += dump_size;
59368 }
59369@@ -2314,6 +2794,167 @@ out:
59370
59371 #endif /* CONFIG_ELF_CORE */
59372
59373+#ifdef CONFIG_PAX_MPROTECT
59374+/* PaX: non-PIC ELF libraries need relocations on their executable segments
59375+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59376+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59377+ *
59378+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59379+ * basis because we want to allow the common case and not the special ones.
59380+ */
59381+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59382+{
59383+ struct elfhdr elf_h;
59384+ struct elf_phdr elf_p;
59385+ unsigned long i;
59386+ unsigned long oldflags;
59387+ bool is_textrel_rw, is_textrel_rx, is_relro;
59388+
59389+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59390+ return;
59391+
59392+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59393+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59394+
59395+#ifdef CONFIG_PAX_ELFRELOCS
59396+ /* possible TEXTREL */
59397+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59398+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59399+#else
59400+ is_textrel_rw = false;
59401+ is_textrel_rx = false;
59402+#endif
59403+
59404+ /* possible RELRO */
59405+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59406+
59407+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59408+ return;
59409+
59410+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59411+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59412+
59413+#ifdef CONFIG_PAX_ETEXECRELOCS
59414+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59415+#else
59416+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59417+#endif
59418+
59419+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59420+ !elf_check_arch(&elf_h) ||
59421+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59422+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59423+ return;
59424+
59425+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59426+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59427+ return;
59428+ switch (elf_p.p_type) {
59429+ case PT_DYNAMIC:
59430+ if (!is_textrel_rw && !is_textrel_rx)
59431+ continue;
59432+ i = 0UL;
59433+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59434+ elf_dyn dyn;
59435+
59436+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59437+ break;
59438+ if (dyn.d_tag == DT_NULL)
59439+ break;
59440+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59441+ gr_log_textrel(vma);
59442+ if (is_textrel_rw)
59443+ vma->vm_flags |= VM_MAYWRITE;
59444+ else
59445+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
59446+ vma->vm_flags &= ~VM_MAYWRITE;
59447+ break;
59448+ }
59449+ i++;
59450+ }
59451+ is_textrel_rw = false;
59452+ is_textrel_rx = false;
59453+ continue;
59454+
59455+ case PT_GNU_RELRO:
59456+ if (!is_relro)
59457+ continue;
59458+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59459+ vma->vm_flags &= ~VM_MAYWRITE;
59460+ is_relro = false;
59461+ continue;
59462+
59463+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59464+ case PT_PAX_FLAGS: {
59465+ const char *msg_mprotect = "", *msg_emutramp = "";
59466+ char *buffer_lib, *buffer_exe;
59467+
59468+ if (elf_p.p_flags & PF_NOMPROTECT)
59469+ msg_mprotect = "MPROTECT disabled";
59470+
59471+#ifdef CONFIG_PAX_EMUTRAMP
59472+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59473+ msg_emutramp = "EMUTRAMP enabled";
59474+#endif
59475+
59476+ if (!msg_mprotect[0] && !msg_emutramp[0])
59477+ continue;
59478+
59479+ if (!printk_ratelimit())
59480+ continue;
59481+
59482+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59483+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59484+ if (buffer_lib && buffer_exe) {
59485+ char *path_lib, *path_exe;
59486+
59487+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59488+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59489+
59490+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59491+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59492+
59493+ }
59494+ free_page((unsigned long)buffer_exe);
59495+ free_page((unsigned long)buffer_lib);
59496+ continue;
59497+ }
59498+#endif
59499+
59500+ }
59501+ }
59502+}
59503+#endif
59504+
59505+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59506+
59507+extern int grsec_enable_log_rwxmaps;
59508+
59509+static void elf_handle_mmap(struct file *file)
59510+{
59511+ struct elfhdr elf_h;
59512+ struct elf_phdr elf_p;
59513+ unsigned long i;
59514+
59515+ if (!grsec_enable_log_rwxmaps)
59516+ return;
59517+
59518+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59519+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59520+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59521+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59522+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59523+ return;
59524+
59525+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59526+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59527+ return;
59528+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59529+ gr_log_ptgnustack(file);
59530+ }
59531+}
59532+#endif
59533+
59534 static int __init init_elf_binfmt(void)
59535 {
59536 register_binfmt(&elf_format);
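
pax_parse_pax_flags() resolves the per-binary flags with the precedence xattr > PT_PAX_FLAGS program header > legacy EI_PAX bits, and refuses the exec (-EINVAL) when the PT header and xattr markings disagree. Under CONFIG_PAX_XATTR_PAX_FLAGS the marking is the user.pax.flags extended attribute, holding the characters handled by the parse_flag cases above (a lowercase letter records the hardmode bit, uppercase the softmode bit). A sketch of marking a binary from userspace; the "m" value, exempting it from MPROTECT, is only an example:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *flags = "m";	/* example: exempt from MPROTECT */

	if (argc < 2) {
		fprintf(stderr, "usage: %s <binary>\n", argv[0]);
		return 1;
	}
	if (setxattr(argv[1], "user.pax.flags", flags,
		     strlen(flags), 0) != 0) {
		perror("setxattr");
		return 1;
	}
	return 0;
}
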
59537diff --git a/fs/block_dev.c b/fs/block_dev.c
59538index 975266b..c3d1856 100644
59539--- a/fs/block_dev.c
59540+++ b/fs/block_dev.c
59541@@ -734,7 +734,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59542 else if (bdev->bd_contains == bdev)
59543 return true; /* is a whole device which isn't held */
59544
59545- else if (whole->bd_holder == bd_may_claim)
59546+ else if (whole->bd_holder == (void *)bd_may_claim)
59547 return true; /* is a partition of a device that is being partitioned */
59548 else if (whole->bd_holder != NULL)
59549 return false; /* is a partition of a held device */
59550diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59551index 6d67f32..8f33187 100644
59552--- a/fs/btrfs/ctree.c
59553+++ b/fs/btrfs/ctree.c
59554@@ -1181,9 +1181,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59555 free_extent_buffer(buf);
59556 add_root_to_dirty_list(root);
59557 } else {
59558- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59559- parent_start = parent->start;
59560- else
59561+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59562+ if (parent)
59563+ parent_start = parent->start;
59564+ else
59565+ parent_start = 0;
59566+ } else
59567 parent_start = 0;
59568
59569 WARN_ON(trans->transid != btrfs_header_generation(parent));
59570diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59571index 82f0c7c..dff78a8 100644
59572--- a/fs/btrfs/delayed-inode.c
59573+++ b/fs/btrfs/delayed-inode.c
59574@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59575
59576 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59577 {
59578- int seq = atomic_inc_return(&delayed_root->items_seq);
59579+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59580 if ((atomic_dec_return(&delayed_root->items) <
59581 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59582 waitqueue_active(&delayed_root->wait))
59583@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59584
59585 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59586 {
59587- int val = atomic_read(&delayed_root->items_seq);
59588+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59589
59590 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59591 return 1;
59592@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59593 int seq;
59594 int ret;
59595
59596- seq = atomic_read(&delayed_root->items_seq);
59597+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59598
59599 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59600 if (ret)
59601diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59602index f70119f..ab5894d 100644
59603--- a/fs/btrfs/delayed-inode.h
59604+++ b/fs/btrfs/delayed-inode.h
59605@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59606 */
59607 struct list_head prepare_list;
59608 atomic_t items; /* for delayed items */
59609- atomic_t items_seq; /* for delayed items */
59610+ atomic_unchecked_t items_seq; /* for delayed items */
59611 int nodes; /* for delayed nodes */
59612 wait_queue_head_t wait;
59613 };
59614@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59615 struct btrfs_delayed_root *delayed_root)
59616 {
59617 atomic_set(&delayed_root->items, 0);
59618- atomic_set(&delayed_root->items_seq, 0);
59619+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59620 delayed_root->nodes = 0;
59621 spin_lock_init(&delayed_root->lock);
59622 init_waitqueue_head(&delayed_root->wait);
59623diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59624index 05fef19..f3774b8 100644
59625--- a/fs/btrfs/super.c
59626+++ b/fs/btrfs/super.c
59627@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59628 function, line, errstr);
59629 return;
59630 }
59631- ACCESS_ONCE(trans->transaction->aborted) = errno;
59632+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59633 /* Wake up anybody who may be waiting on this transaction */
59634 wake_up(&root->fs_info->transaction_wait);
59635 wake_up(&root->fs_info->transaction_blocked_wait);
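
grsecurity constifies ACCESS_ONCE() so that it yields an lvalue you can only read; stores such as the one above must go through ACCESS_ONCE_RW() instead. A sketch of the read/write pair, modeled on the stock macro (the exact split shown here is an assumption):

/* Sketch (assumption): read-only vs. writable once-accessors. */
#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))
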
59636diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59637index 94edb0a..e94dc93 100644
59638--- a/fs/btrfs/sysfs.c
59639+++ b/fs/btrfs/sysfs.c
59640@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59641 for (set = 0; set < FEAT_MAX; set++) {
59642 int i;
59643 struct attribute *attrs[2];
59644- struct attribute_group agroup = {
59645+ attribute_group_no_const agroup = {
59646 .name = "features",
59647 .attrs = attrs,
59648 };
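
The constify gcc plugin makes ops-style structures (those consisting of function pointers) implicitly const, which would break this on-stack, legitimately mutable attribute_group. The attribute_group_no_const typedef opts a declaration out of the plugin. A plausible sketch, assuming a __no_const attribute recognized by the plugin:

/* Sketch (assumption): opt-out typedef for the constify plugin. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif
typedef struct attribute_group __no_const attribute_group_no_const;
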
59649diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59650index 2299bfd..4098e72 100644
59651--- a/fs/btrfs/tests/free-space-tests.c
59652+++ b/fs/btrfs/tests/free-space-tests.c
59653@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59654 * extent entry.
59655 */
59656 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59657- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59658+ pax_open_kernel();
59659+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59660+ pax_close_kernel();
59661
59662 /*
59663 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59664@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59665 if (ret)
59666 return ret;
59667
59668- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59669+ pax_open_kernel();
59670+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59671+ pax_close_kernel();
59672 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59673
59674 return 0;
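
With constification in force, free_space_ctl->op points at read-only data, so the test code cannot assign through the ops pointer directly: the patch strips the implicit const via *(void **)& and brackets the store with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, roughly a CR0.WP toggle). The pattern in isolation, with hypothetical names:

/* Sketch: patching one slot of a write-protected ops table.
 * my_ops/ops/swap_handler are hypothetical; treating the pax_*_kernel
 * pair as a write-protection critical section is the editor's reading. */
struct my_ops {
	int (*handler)(int arg);
};
static const struct my_ops ops;		/* constified: lives in r/o memory */

static void swap_handler(int (*fn)(int))
{
	pax_open_kernel();		/* lift write protection */
	*(void **)&ops.handler = fn;	/* cast away const, then store */
	pax_close_kernel();		/* restore protection */
}
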
59675diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59676index 154990c..d0cf699 100644
59677--- a/fs/btrfs/tree-log.h
59678+++ b/fs/btrfs/tree-log.h
59679@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59680 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59681 struct btrfs_trans_handle *trans)
59682 {
59683- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59684+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59685 }
59686
59687 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59688diff --git a/fs/buffer.c b/fs/buffer.c
59689index 20805db..2e8fc69 100644
59690--- a/fs/buffer.c
59691+++ b/fs/buffer.c
59692@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59693 bh_cachep = kmem_cache_create("buffer_head",
59694 sizeof(struct buffer_head), 0,
59695 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59696- SLAB_MEM_SPREAD),
59697+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59698 NULL);
59699
59700 /*
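
SLAB_NO_SANITIZE is one of two grsecurity slab flags recurring in this patch (the other, SLAB_USERCOPY, appears below for cifs_request and names_cache). Sanitization scrubs objects as they are freed; SLAB_NO_SANITIZE exempts hot caches whose objects hold no long-lived sensitive data, such as buffer_head, from that cost, while SLAB_USERCOPY whitelists a cache as a valid source/target for copy_{to,from}_user() under PAX_USERCOPY. A sketch of a cache creation using both; the struct and cache names are hypothetical:

/* Sketch: hypothetical cache combining both grsecurity slab flags. */
static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_cache",
			sizeof(struct example_obj), 0,
			SLAB_HWCACHE_ALIGN | SLAB_USERCOPY |
			SLAB_NO_SANITIZE,
			NULL);
	return example_cachep ? 0 : -ENOMEM;
}
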
59701diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59702index fbb08e9..0fda764 100644
59703--- a/fs/cachefiles/bind.c
59704+++ b/fs/cachefiles/bind.c
59705@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59706 args);
59707
59708 /* start by checking things over */
59709- ASSERT(cache->fstop_percent >= 0 &&
59710- cache->fstop_percent < cache->fcull_percent &&
59711+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59712 cache->fcull_percent < cache->frun_percent &&
59713 cache->frun_percent < 100);
59714
59715- ASSERT(cache->bstop_percent >= 0 &&
59716- cache->bstop_percent < cache->bcull_percent &&
59717+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59718 cache->bcull_percent < cache->brun_percent &&
59719 cache->brun_percent < 100);
59720
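
Both dropped clauses compare an unsigned field against >= 0 (the field types are visible in the internal.h hunk below), which can never be false; gcc's -Wtype-limits diagnoses exactly this. The whole class of cleanup, in one line:

/* Illustration: a check that can never fail. */
static int sane_percent(unsigned int pct)
{
	return pct >= 0;	/* always 1 for unsigned; gcc -Wtype-limits
				 * warns "comparison is always true" */
}
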
59721diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59722index f601def..b2cf704 100644
59723--- a/fs/cachefiles/daemon.c
59724+++ b/fs/cachefiles/daemon.c
59725@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59726 if (n > buflen)
59727 return -EMSGSIZE;
59728
59729- if (copy_to_user(_buffer, buffer, n) != 0)
59730+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59731 return -EFAULT;
59732
59733 return n;
59734@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59735 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59736 return -EIO;
59737
59738- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59739+ if (datalen > PAGE_SIZE - 1)
59740 return -EOPNOTSUPP;
59741
59742 /* drag the command string into the kernel so we can parse it */
59743@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59744 if (args[0] != '%' || args[1] != '\0')
59745 return -EINVAL;
59746
59747- if (fstop < 0 || fstop >= cache->fcull_percent)
59748+ if (fstop >= cache->fcull_percent)
59749 return cachefiles_daemon_range_error(cache, args);
59750
59751 cache->fstop_percent = fstop;
59752@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59753 if (args[0] != '%' || args[1] != '\0')
59754 return -EINVAL;
59755
59756- if (bstop < 0 || bstop >= cache->bcull_percent)
59757+ if (bstop >= cache->bcull_percent)
59758 return cachefiles_daemon_range_error(cache, args);
59759
59760 cache->bstop_percent = bstop;
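
cachefiles_daemon_read formats its status reply into a fixed on-stack buffer; the added n > sizeof(buffer) test guarantees the length handed to copy_to_user() can never exceed that buffer, so a formatting bug degrades into a clean -EFAULT rather than a kernel stack disclosure, complementing PAX_USERCOPY's object-size checking. The same belt-and-braces test is applied to ecryptfs' miscdev further down. The pattern reduced to essentials, with hypothetical fields:

/* Sketch: never copy out more than the stack buffer actually holds. */
static ssize_t reply_to_user(char __user *ubuf, size_t buflen)
{
	char buffer[64];	/* hypothetical reply buffer */
	int n;

	n = snprintf(buffer, sizeof(buffer), "cull=%c", '0');
	if (n > buflen)
		return -EMSGSIZE;
	if (n > sizeof(buffer) || copy_to_user(ubuf, buffer, n) != 0)
		return -EFAULT;
	return n;
}
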
59761diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59762index 8c52472..c4e3a69 100644
59763--- a/fs/cachefiles/internal.h
59764+++ b/fs/cachefiles/internal.h
59765@@ -66,7 +66,7 @@ struct cachefiles_cache {
59766 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59767 struct rb_root active_nodes; /* active nodes (can't be culled) */
59768 rwlock_t active_lock; /* lock for active_nodes */
59769- atomic_t gravecounter; /* graveyard uniquifier */
59770+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59771 unsigned frun_percent; /* when to stop culling (% files) */
59772 unsigned fcull_percent; /* when to start culling (% files) */
59773 unsigned fstop_percent; /* when to stop allocating (% files) */
59774@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59775 * proc.c
59776 */
59777 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59778-extern atomic_t cachefiles_lookup_histogram[HZ];
59779-extern atomic_t cachefiles_mkdir_histogram[HZ];
59780-extern atomic_t cachefiles_create_histogram[HZ];
59781+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59782+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59783+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59784
59785 extern int __init cachefiles_proc_init(void);
59786 extern void cachefiles_proc_cleanup(void);
59787 static inline
59788-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59789+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59790 {
59791 unsigned long jif = jiffies - start_jif;
59792 if (jif >= HZ)
59793 jif = HZ - 1;
59794- atomic_inc(&histogram[jif]);
59795+ atomic_inc_unchecked(&histogram[jif]);
59796 }
59797
59798 #else
59799diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59800index 1e51714..411eded 100644
59801--- a/fs/cachefiles/namei.c
59802+++ b/fs/cachefiles/namei.c
59803@@ -309,7 +309,7 @@ try_again:
59804 /* first step is to make up a grave dentry in the graveyard */
59805 sprintf(nbuffer, "%08x%08x",
59806 (uint32_t) get_seconds(),
59807- (uint32_t) atomic_inc_return(&cache->gravecounter));
59808+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59809
59810 /* do the multiway lock magic */
59811 trap = lock_rename(cache->graveyard, dir);
59812diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59813index eccd339..4c1d995 100644
59814--- a/fs/cachefiles/proc.c
59815+++ b/fs/cachefiles/proc.c
59816@@ -14,9 +14,9 @@
59817 #include <linux/seq_file.h>
59818 #include "internal.h"
59819
59820-atomic_t cachefiles_lookup_histogram[HZ];
59821-atomic_t cachefiles_mkdir_histogram[HZ];
59822-atomic_t cachefiles_create_histogram[HZ];
59823+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59824+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59825+atomic_unchecked_t cachefiles_create_histogram[HZ];
59826
59827 /*
59828 * display the latency histogram
59829@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59830 return 0;
59831 default:
59832 index = (unsigned long) v - 3;
59833- x = atomic_read(&cachefiles_lookup_histogram[index]);
59834- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59835- z = atomic_read(&cachefiles_create_histogram[index]);
59836+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59837+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59838+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59839 if (x == 0 && y == 0 && z == 0)
59840 return 0;
59841
59842diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59843index 83e9976..bfd1eee 100644
59844--- a/fs/ceph/dir.c
59845+++ b/fs/ceph/dir.c
59846@@ -127,6 +127,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59847 struct dentry *dentry, *last;
59848 struct ceph_dentry_info *di;
59849 int err = 0;
59850+ char d_name[DNAME_INLINE_LEN];
59851+ const unsigned char *name;
59852
59853 /* claim ref on last dentry we returned */
59854 last = fi->dentry;
59855@@ -190,7 +192,12 @@ more:
59856
59857 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59858 dentry, dentry, dentry->d_inode);
59859- if (!dir_emit(ctx, dentry->d_name.name,
59860+ name = dentry->d_name.name;
59861+ if (name == dentry->d_iname) {
59862+ memcpy(d_name, name, dentry->d_name.len);
59863+ name = d_name;
59864+ }
59865+ if (!dir_emit(ctx, name,
59866 dentry->d_name.len,
59867 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59868 dentry->d_inode->i_mode >> 12)) {
59869@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59870 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59871 struct ceph_mds_client *mdsc = fsc->mdsc;
59872 unsigned frag = fpos_frag(ctx->pos);
59873- int off = fpos_off(ctx->pos);
59874+ unsigned int off = fpos_off(ctx->pos);
59875 int err;
59876 u32 ftype;
59877 struct ceph_mds_reply_info_parsed *rinfo;
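
For names short enough to be stored inline, dentry->d_name.name points into the dentry's own d_iname array, which a concurrent rename can rewrite while dir_emit() is still copying it out; snapshotting the inline name into a stack buffer first keeps each emitted record self-consistent. (configfs gets the same treatment later in this patch.) The pattern on its own, before handing name to dir_emit() as the hunk does:

/* Sketch: emit a name that cannot change mid-copy. */
char stack_name[DNAME_INLINE_LEN];
const unsigned char *name = dentry->d_name.name;

if (name == dentry->d_iname) {		/* short, inline-stored name? */
	memcpy(stack_name, name, dentry->d_name.len);
	name = stack_name;		/* hand dir_emit the stable copy */
}
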
59878diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59879index a63997b..ddc0577 100644
59880--- a/fs/ceph/super.c
59881+++ b/fs/ceph/super.c
59882@@ -889,7 +889,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59883 /*
59884 * construct our own bdi so we can control readahead, etc.
59885 */
59886-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59887+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59888
59889 static int ceph_register_bdi(struct super_block *sb,
59890 struct ceph_fs_client *fsc)
59891@@ -906,7 +906,7 @@ static int ceph_register_bdi(struct super_block *sb,
59892 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
59893
59894 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59895- atomic_long_inc_return(&bdi_seq));
59896+ atomic_long_inc_return_unchecked(&bdi_seq));
59897 if (!err)
59898 sb->s_bdi = &fsc->backing_dev_info;
59899 return err;
59900diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59901index 7febcf2..62a5721 100644
59902--- a/fs/cifs/cifs_debug.c
59903+++ b/fs/cifs/cifs_debug.c
59904@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59905
59906 if (strtobool(&c, &bv) == 0) {
59907 #ifdef CONFIG_CIFS_STATS2
59908- atomic_set(&totBufAllocCount, 0);
59909- atomic_set(&totSmBufAllocCount, 0);
59910+ atomic_set_unchecked(&totBufAllocCount, 0);
59911+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59912 #endif /* CONFIG_CIFS_STATS2 */
59913 spin_lock(&cifs_tcp_ses_lock);
59914 list_for_each(tmp1, &cifs_tcp_ses_list) {
59915@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59916 tcon = list_entry(tmp3,
59917 struct cifs_tcon,
59918 tcon_list);
59919- atomic_set(&tcon->num_smbs_sent, 0);
59920+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59921 if (server->ops->clear_stats)
59922 server->ops->clear_stats(tcon);
59923 }
59924@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59925 smBufAllocCount.counter, cifs_min_small);
59926 #ifdef CONFIG_CIFS_STATS2
59927 seq_printf(m, "Total Large %d Small %d Allocations\n",
59928- atomic_read(&totBufAllocCount),
59929- atomic_read(&totSmBufAllocCount));
59930+ atomic_read_unchecked(&totBufAllocCount),
59931+ atomic_read_unchecked(&totSmBufAllocCount));
59932 #endif /* CONFIG_CIFS_STATS2 */
59933
59934 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59935@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59936 if (tcon->need_reconnect)
59937 seq_puts(m, "\tDISCONNECTED ");
59938 seq_printf(m, "\nSMBs: %d",
59939- atomic_read(&tcon->num_smbs_sent));
59940+ atomic_read_unchecked(&tcon->num_smbs_sent));
59941 if (server->ops->print_stats)
59942 server->ops->print_stats(m, tcon);
59943 }
59944diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59945index d72fe37..ded5511 100644
59946--- a/fs/cifs/cifsfs.c
59947+++ b/fs/cifs/cifsfs.c
59948@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59949 */
59950 cifs_req_cachep = kmem_cache_create("cifs_request",
59951 CIFSMaxBufSize + max_hdr_size, 0,
59952- SLAB_HWCACHE_ALIGN, NULL);
59953+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59954 if (cifs_req_cachep == NULL)
59955 return -ENOMEM;
59956
59957@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59958 efficient to alloc 1 per page off the slab compared to 17K (5page)
59959 alloc of large cifs buffers even when page debugging is on */
59960 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59961- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59962+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59963 NULL);
59964 if (cifs_sm_req_cachep == NULL) {
59965 mempool_destroy(cifs_req_poolp);
59966@@ -1204,8 +1204,8 @@ init_cifs(void)
59967 atomic_set(&bufAllocCount, 0);
59968 atomic_set(&smBufAllocCount, 0);
59969 #ifdef CONFIG_CIFS_STATS2
59970- atomic_set(&totBufAllocCount, 0);
59971- atomic_set(&totSmBufAllocCount, 0);
59972+ atomic_set_unchecked(&totBufAllocCount, 0);
59973+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59974 #endif /* CONFIG_CIFS_STATS2 */
59975
59976 atomic_set(&midCount, 0);
59977diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59978index 22b289a..bbbba08 100644
59979--- a/fs/cifs/cifsglob.h
59980+++ b/fs/cifs/cifsglob.h
59981@@ -823,35 +823,35 @@ struct cifs_tcon {
59982 __u16 Flags; /* optional support bits */
59983 enum statusEnum tidStatus;
59984 #ifdef CONFIG_CIFS_STATS
59985- atomic_t num_smbs_sent;
59986+ atomic_unchecked_t num_smbs_sent;
59987 union {
59988 struct {
59989- atomic_t num_writes;
59990- atomic_t num_reads;
59991- atomic_t num_flushes;
59992- atomic_t num_oplock_brks;
59993- atomic_t num_opens;
59994- atomic_t num_closes;
59995- atomic_t num_deletes;
59996- atomic_t num_mkdirs;
59997- atomic_t num_posixopens;
59998- atomic_t num_posixmkdirs;
59999- atomic_t num_rmdirs;
60000- atomic_t num_renames;
60001- atomic_t num_t2renames;
60002- atomic_t num_ffirst;
60003- atomic_t num_fnext;
60004- atomic_t num_fclose;
60005- atomic_t num_hardlinks;
60006- atomic_t num_symlinks;
60007- atomic_t num_locks;
60008- atomic_t num_acl_get;
60009- atomic_t num_acl_set;
60010+ atomic_unchecked_t num_writes;
60011+ atomic_unchecked_t num_reads;
60012+ atomic_unchecked_t num_flushes;
60013+ atomic_unchecked_t num_oplock_brks;
60014+ atomic_unchecked_t num_opens;
60015+ atomic_unchecked_t num_closes;
60016+ atomic_unchecked_t num_deletes;
60017+ atomic_unchecked_t num_mkdirs;
60018+ atomic_unchecked_t num_posixopens;
60019+ atomic_unchecked_t num_posixmkdirs;
60020+ atomic_unchecked_t num_rmdirs;
60021+ atomic_unchecked_t num_renames;
60022+ atomic_unchecked_t num_t2renames;
60023+ atomic_unchecked_t num_ffirst;
60024+ atomic_unchecked_t num_fnext;
60025+ atomic_unchecked_t num_fclose;
60026+ atomic_unchecked_t num_hardlinks;
60027+ atomic_unchecked_t num_symlinks;
60028+ atomic_unchecked_t num_locks;
60029+ atomic_unchecked_t num_acl_get;
60030+ atomic_unchecked_t num_acl_set;
60031 } cifs_stats;
60032 #ifdef CONFIG_CIFS_SMB2
60033 struct {
60034- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60035- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60036+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60037+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60038 } smb2_stats;
60039 #endif /* CONFIG_CIFS_SMB2 */
60040 } stats;
60041@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
60042 }
60043
60044 #ifdef CONFIG_CIFS_STATS
60045-#define cifs_stats_inc atomic_inc
60046+#define cifs_stats_inc atomic_inc_unchecked
60047
60048 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60049 unsigned int bytes)
60050@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60051 /* Various Debug counters */
60052 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60053 #ifdef CONFIG_CIFS_STATS2
60054-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60055-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60056+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60057+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60058 #endif
60059 GLOBAL_EXTERN atomic_t smBufAllocCount;
60060 GLOBAL_EXTERN atomic_t midCount;
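
Because every per-tcon statistics increment already funnels through the cifs_stats_inc macro, redefining that single #define migrates all increment sites to the unchecked API at once; only the direct atomic_set()/atomic_read() uses have to be edited individually, which is what the smb1ops.c and smb2ops.c hunks below do. For instance:

/* One macro swap retargets every increment site: */
#define cifs_stats_inc atomic_inc_unchecked

cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
/* now expands to atomic_inc_unchecked(&tcon->stats.cifs_stats.num_opens) */
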
60061diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60062index ca30c39..570fb94 100644
60063--- a/fs/cifs/file.c
60064+++ b/fs/cifs/file.c
60065@@ -2055,10 +2055,14 @@ static int cifs_writepages(struct address_space *mapping,
60066 index = mapping->writeback_index; /* Start from prev offset */
60067 end = -1;
60068 } else {
60069- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60070- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60071- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60072+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60073 range_whole = true;
60074+ index = 0;
60075+ end = ULONG_MAX;
60076+ } else {
60077+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60078+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60079+ }
60080 scanned = true;
60081 }
60082 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
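
A note on the writepages reordering above: the old code derived the whole-range bounds from the sentinel value itself, and setting index = 0, end = ULONG_MAX explicitly preserves the result while removing an implicit narrowing conversion; reading this as a cleanup aimed at grsecurity's size_overflow instrumentation is the editor's inference.

/* Worked example, 4 KiB pages (PAGE_CACHE_SHIFT == 12):
 *   LLONG_MAX >> 12                  == 0x0007ffffffffffff
 *   (unsigned long)(that) on 32-bit  == 0xffffffff == ULONG_MAX
 * so "index = 0, end = ULONG_MAX" keeps the behaviour while dropping
 * the implicit 64->32-bit truncation from the code. */
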
60083diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60084index 3379463..3af418a 100644
60085--- a/fs/cifs/misc.c
60086+++ b/fs/cifs/misc.c
60087@@ -170,7 +170,7 @@ cifs_buf_get(void)
60088 memset(ret_buf, 0, buf_size + 3);
60089 atomic_inc(&bufAllocCount);
60090 #ifdef CONFIG_CIFS_STATS2
60091- atomic_inc(&totBufAllocCount);
60092+ atomic_inc_unchecked(&totBufAllocCount);
60093 #endif /* CONFIG_CIFS_STATS2 */
60094 }
60095
60096@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60097 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60098 atomic_inc(&smBufAllocCount);
60099 #ifdef CONFIG_CIFS_STATS2
60100- atomic_inc(&totSmBufAllocCount);
60101+ atomic_inc_unchecked(&totSmBufAllocCount);
60102 #endif /* CONFIG_CIFS_STATS2 */
60103
60104 }
60105diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60106index d297903..1cb7516 100644
60107--- a/fs/cifs/smb1ops.c
60108+++ b/fs/cifs/smb1ops.c
60109@@ -622,27 +622,27 @@ static void
60110 cifs_clear_stats(struct cifs_tcon *tcon)
60111 {
60112 #ifdef CONFIG_CIFS_STATS
60113- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60114- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60115- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
60116- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60117- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
60118- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
60119- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60120- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
60121- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
60122- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
60123- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
60124- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
60125- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
60126- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
60127- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
60128- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
60129- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
60130- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
60131- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
60132- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
60133- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
60134+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
60135+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
60136+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
60137+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60138+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
60139+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
60140+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60141+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
60142+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
60143+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
60144+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
60145+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
60146+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
60147+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
60148+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
60149+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
60150+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
60151+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
60152+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
60153+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
60154+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
60155 #endif
60156 }
60157
60158@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60159 {
60160 #ifdef CONFIG_CIFS_STATS
60161 seq_printf(m, " Oplocks breaks: %d",
60162- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
60163+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
60164 seq_printf(m, "\nReads: %d Bytes: %llu",
60165- atomic_read(&tcon->stats.cifs_stats.num_reads),
60166+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
60167 (long long)(tcon->bytes_read));
60168 seq_printf(m, "\nWrites: %d Bytes: %llu",
60169- atomic_read(&tcon->stats.cifs_stats.num_writes),
60170+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
60171 (long long)(tcon->bytes_written));
60172 seq_printf(m, "\nFlushes: %d",
60173- atomic_read(&tcon->stats.cifs_stats.num_flushes));
60174+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
60175 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
60176- atomic_read(&tcon->stats.cifs_stats.num_locks),
60177- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
60178- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
60179+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
60180+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
60181+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
60182 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
60183- atomic_read(&tcon->stats.cifs_stats.num_opens),
60184- atomic_read(&tcon->stats.cifs_stats.num_closes),
60185- atomic_read(&tcon->stats.cifs_stats.num_deletes));
60186+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
60187+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
60188+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
60189 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
60190- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
60191- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
60192+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
60193+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
60194 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
60195- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
60196- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
60197+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
60198+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
60199 seq_printf(m, "\nRenames: %d T2 Renames %d",
60200- atomic_read(&tcon->stats.cifs_stats.num_renames),
60201- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
60202+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
60203+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
60204 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
60205- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
60206- atomic_read(&tcon->stats.cifs_stats.num_fnext),
60207- atomic_read(&tcon->stats.cifs_stats.num_fclose));
60208+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
60209+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
60210+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
60211 #endif
60212 }
60213
60214diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
60215index eab05e1..ffe5ea4 100644
60216--- a/fs/cifs/smb2ops.c
60217+++ b/fs/cifs/smb2ops.c
60218@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
60219 #ifdef CONFIG_CIFS_STATS
60220 int i;
60221 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
60222- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60223- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60224+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60225+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60226 }
60227 #endif
60228 }
60229@@ -459,65 +459,65 @@ static void
60230 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60231 {
60232 #ifdef CONFIG_CIFS_STATS
60233- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60234- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60235+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60236+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60237 seq_printf(m, "\nNegotiates: %d sent %d failed",
60238- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
60239- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
60240+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
60241+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
60242 seq_printf(m, "\nSessionSetups: %d sent %d failed",
60243- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
60244- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
60245+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
60246+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
60247 seq_printf(m, "\nLogoffs: %d sent %d failed",
60248- atomic_read(&sent[SMB2_LOGOFF_HE]),
60249- atomic_read(&failed[SMB2_LOGOFF_HE]));
60250+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
60251+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
60252 seq_printf(m, "\nTreeConnects: %d sent %d failed",
60253- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
60254- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
60255+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
60256+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
60257 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
60258- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
60259- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
60260+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
60261+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
60262 seq_printf(m, "\nCreates: %d sent %d failed",
60263- atomic_read(&sent[SMB2_CREATE_HE]),
60264- atomic_read(&failed[SMB2_CREATE_HE]));
60265+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
60266+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
60267 seq_printf(m, "\nCloses: %d sent %d failed",
60268- atomic_read(&sent[SMB2_CLOSE_HE]),
60269- atomic_read(&failed[SMB2_CLOSE_HE]));
60270+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
60271+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
60272 seq_printf(m, "\nFlushes: %d sent %d failed",
60273- atomic_read(&sent[SMB2_FLUSH_HE]),
60274- atomic_read(&failed[SMB2_FLUSH_HE]));
60275+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
60276+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
60277 seq_printf(m, "\nReads: %d sent %d failed",
60278- atomic_read(&sent[SMB2_READ_HE]),
60279- atomic_read(&failed[SMB2_READ_HE]));
60280+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
60281+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
60282 seq_printf(m, "\nWrites: %d sent %d failed",
60283- atomic_read(&sent[SMB2_WRITE_HE]),
60284- atomic_read(&failed[SMB2_WRITE_HE]));
60285+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
60286+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
60287 seq_printf(m, "\nLocks: %d sent %d failed",
60288- atomic_read(&sent[SMB2_LOCK_HE]),
60289- atomic_read(&failed[SMB2_LOCK_HE]));
60290+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
60291+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
60292 seq_printf(m, "\nIOCTLs: %d sent %d failed",
60293- atomic_read(&sent[SMB2_IOCTL_HE]),
60294- atomic_read(&failed[SMB2_IOCTL_HE]));
60295+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
60296+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
60297 seq_printf(m, "\nCancels: %d sent %d failed",
60298- atomic_read(&sent[SMB2_CANCEL_HE]),
60299- atomic_read(&failed[SMB2_CANCEL_HE]));
60300+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
60301+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
60302 seq_printf(m, "\nEchos: %d sent %d failed",
60303- atomic_read(&sent[SMB2_ECHO_HE]),
60304- atomic_read(&failed[SMB2_ECHO_HE]));
60305+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
60306+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
60307 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
60308- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
60309- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
60310+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
60311+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
60312 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
60313- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
60314- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
60315+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
60316+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
60317 seq_printf(m, "\nQueryInfos: %d sent %d failed",
60318- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
60319- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
60320+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
60321+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
60322 seq_printf(m, "\nSetInfos: %d sent %d failed",
60323- atomic_read(&sent[SMB2_SET_INFO_HE]),
60324- atomic_read(&failed[SMB2_SET_INFO_HE]));
60325+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
60326+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
60327 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
60328- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
60329- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
60330+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
60331+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
60332 #endif
60333 }
60334
60335diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
60336index 65cd7a8..3518676 100644
60337--- a/fs/cifs/smb2pdu.c
60338+++ b/fs/cifs/smb2pdu.c
60339@@ -2147,8 +2147,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60340 default:
60341 cifs_dbg(VFS, "info level %u isn't supported\n",
60342 srch_inf->info_level);
60343- rc = -EINVAL;
60344- goto qdir_exit;
60345+ return -EINVAL;
60346 }
60347
60348 req->FileIndex = cpu_to_le32(index);
60349diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60350index 46ee6f2..89a9e7f 100644
60351--- a/fs/coda/cache.c
60352+++ b/fs/coda/cache.c
60353@@ -24,7 +24,7 @@
60354 #include "coda_linux.h"
60355 #include "coda_cache.h"
60356
60357-static atomic_t permission_epoch = ATOMIC_INIT(0);
60358+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60359
60360 /* replace or extend an acl cache hit */
60361 void coda_cache_enter(struct inode *inode, int mask)
60362@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60363 struct coda_inode_info *cii = ITOC(inode);
60364
60365 spin_lock(&cii->c_lock);
60366- cii->c_cached_epoch = atomic_read(&permission_epoch);
60367+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60368 if (!uid_eq(cii->c_uid, current_fsuid())) {
60369 cii->c_uid = current_fsuid();
60370 cii->c_cached_perm = mask;
60371@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60372 {
60373 struct coda_inode_info *cii = ITOC(inode);
60374 spin_lock(&cii->c_lock);
60375- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60376+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60377 spin_unlock(&cii->c_lock);
60378 }
60379
60380 /* remove all acl caches */
60381 void coda_cache_clear_all(struct super_block *sb)
60382 {
60383- atomic_inc(&permission_epoch);
60384+ atomic_inc_unchecked(&permission_epoch);
60385 }
60386
60387
60388@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60389 spin_lock(&cii->c_lock);
60390 hit = (mask & cii->c_cached_perm) == mask &&
60391 uid_eq(cii->c_uid, current_fsuid()) &&
60392- cii->c_cached_epoch == atomic_read(&permission_epoch);
60393+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60394 spin_unlock(&cii->c_lock);
60395
60396 return hit;
60397diff --git a/fs/compat.c b/fs/compat.c
60398index 6fd272d..dd34ba2 100644
60399--- a/fs/compat.c
60400+++ b/fs/compat.c
60401@@ -54,7 +54,7 @@
60402 #include <asm/ioctls.h>
60403 #include "internal.h"
60404
60405-int compat_log = 1;
60406+int compat_log = 0;
60407
60408 int compat_printk(const char *fmt, ...)
60409 {
60410@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60411
60412 set_fs(KERNEL_DS);
60413 /* The __user pointer cast is valid because of the set_fs() */
60414- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60415+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60416 set_fs(oldfs);
60417 /* truncating is ok because it's a user address */
60418 if (!ret)
60419@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60420 goto out;
60421
60422 ret = -EINVAL;
60423- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60424+ if (nr_segs > UIO_MAXIOV)
60425 goto out;
60426 if (nr_segs > fast_segs) {
60427 ret = -ENOMEM;
60428@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60429 struct compat_readdir_callback {
60430 struct dir_context ctx;
60431 struct compat_old_linux_dirent __user *dirent;
60432+ struct file * file;
60433 int result;
60434 };
60435
60436@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60437 buf->result = -EOVERFLOW;
60438 return -EOVERFLOW;
60439 }
60440+
60441+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60442+ return 0;
60443+
60444 buf->result++;
60445 dirent = buf->dirent;
60446 if (!access_ok(VERIFY_WRITE, dirent,
60447@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60448 if (!f.file)
60449 return -EBADF;
60450
60451+ buf.file = f.file;
60452 error = iterate_dir(f.file, &buf.ctx);
60453 if (buf.result)
60454 error = buf.result;
60455@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60456 struct dir_context ctx;
60457 struct compat_linux_dirent __user *current_dir;
60458 struct compat_linux_dirent __user *previous;
60459+ struct file * file;
60460 int count;
60461 int error;
60462 };
60463@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60464 buf->error = -EOVERFLOW;
60465 return -EOVERFLOW;
60466 }
60467+
60468+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60469+ return 0;
60470+
60471 dirent = buf->previous;
60472 if (dirent) {
60473 if (__put_user(offset, &dirent->d_off))
60474@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60475 if (!f.file)
60476 return -EBADF;
60477
60478+ buf.file = f.file;
60479 error = iterate_dir(f.file, &buf.ctx);
60480 if (error >= 0)
60481 error = buf.error;
60482@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60483 struct dir_context ctx;
60484 struct linux_dirent64 __user *current_dir;
60485 struct linux_dirent64 __user *previous;
60486+ struct file * file;
60487 int count;
60488 int error;
60489 };
60490@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60491 buf->error = -EINVAL; /* only used if we fail.. */
60492 if (reclen > buf->count)
60493 return -EINVAL;
60494+
60495+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60496+ return 0;
60497+
60498 dirent = buf->previous;
60499
60500 if (dirent) {
60501@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60502 if (!f.file)
60503 return -EBADF;
60504
60505+ buf.file = f.file;
60506 error = iterate_dir(f.file, &buf.ctx);
60507 if (error >= 0)
60508 error = buf.error;
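
Each compat getdents variant gains a struct file * in its callback context so the fill routine can consult grsecurity's RBAC layer per entry; when gr_acl_handle_filldir() refuses, the callback returns 0 rather than an error, so hidden files simply vanish from the listing while iteration continues. The shape of the hook, reduced to a minimal hypothetical callback:

/* Sketch: per-entry visibility filtering inside a filldir callback.
 * gr_acl_handle_filldir() is the patch's hook; the callback and
 * context struct shown here are hypothetical. */
struct filtered_readdir_callback {
	struct dir_context ctx;
	struct file *file;	/* stashed by the syscall body */
	int result;
};

static int filtered_fillonedir(struct dir_context *ctx, const char *name,
			       int namlen, loff_t offset, u64 ino,
			       unsigned int d_type)
{
	struct filtered_readdir_callback *buf =
		container_of(ctx, struct filtered_readdir_callback, ctx);

	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
		return 0;	/* hide the entry, keep iterating */

	buf->result++;
	/* ...copy the entry to userspace as before... */
	return 0;
}
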
60509diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60510index 4d24d17..4f8c09e 100644
60511--- a/fs/compat_binfmt_elf.c
60512+++ b/fs/compat_binfmt_elf.c
60513@@ -30,11 +30,13 @@
60514 #undef elf_phdr
60515 #undef elf_shdr
60516 #undef elf_note
60517+#undef elf_dyn
60518 #undef elf_addr_t
60519 #define elfhdr elf32_hdr
60520 #define elf_phdr elf32_phdr
60521 #define elf_shdr elf32_shdr
60522 #define elf_note elf32_note
60523+#define elf_dyn Elf32_Dyn
60524 #define elf_addr_t Elf32_Addr
60525
60526 /*
60527diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60528index afec645..9c65620 100644
60529--- a/fs/compat_ioctl.c
60530+++ b/fs/compat_ioctl.c
60531@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60532 return -EFAULT;
60533 if (__get_user(udata, &ss32->iomem_base))
60534 return -EFAULT;
60535- ss.iomem_base = compat_ptr(udata);
60536+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60537 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60538 __get_user(ss.port_high, &ss32->port_high))
60539 return -EFAULT;
60540@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60541 for (i = 0; i < nmsgs; i++) {
60542 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60543 return -EFAULT;
60544- if (get_user(datap, &umsgs[i].buf) ||
60545- put_user(compat_ptr(datap), &tmsgs[i].buf))
60546+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60547+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60548 return -EFAULT;
60549 }
60550 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60551@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60552 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60553 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60554 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60555- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60556+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60557 return -EFAULT;
60558
60559 return ioctl_preallocate(file, p);
60560@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60561 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60562 {
60563 unsigned int a, b;
60564- a = *(unsigned int *)p;
60565- b = *(unsigned int *)q;
60566+ a = *(const unsigned int *)p;
60567+ b = *(const unsigned int *)q;
60568 if (a > b)
60569 return 1;
60570 if (a < b)
60571diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60572index cf0db00..c7f70e8 100644
60573--- a/fs/configfs/dir.c
60574+++ b/fs/configfs/dir.c
60575@@ -1540,7 +1540,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60576 }
60577 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60578 struct configfs_dirent *next;
60579- const char *name;
60580+ const unsigned char * name;
60581+ char d_name[sizeof(next->s_dentry->d_iname)];
60582 int len;
60583 struct inode *inode = NULL;
60584
60585@@ -1549,7 +1550,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60586 continue;
60587
60588 name = configfs_get_name(next);
60589- len = strlen(name);
60590+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60591+ len = next->s_dentry->d_name.len;
60592+ memcpy(d_name, name, len);
60593+ name = d_name;
60594+ } else
60595+ len = strlen(name);
60596
60597 /*
60598 * We'll have a dentry and an inode for
60599diff --git a/fs/coredump.c b/fs/coredump.c
60600index f319926..55f4ec2 100644
60601--- a/fs/coredump.c
60602+++ b/fs/coredump.c
60603@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60604 struct pipe_inode_info *pipe = file->private_data;
60605
60606 pipe_lock(pipe);
60607- pipe->readers++;
60608- pipe->writers--;
60609+ atomic_inc(&pipe->readers);
60610+ atomic_dec(&pipe->writers);
60611 wake_up_interruptible_sync(&pipe->wait);
60612 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60613 pipe_unlock(pipe);
60614@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60615 * We actually want wait_event_freezable() but then we need
60616 * to clear TIF_SIGPENDING and improve dump_interrupted().
60617 */
60618- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60619+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60620
60621 pipe_lock(pipe);
60622- pipe->readers--;
60623- pipe->writers++;
60624+ atomic_dec(&pipe->readers);
60625+ atomic_inc(&pipe->writers);
60626 pipe_unlock(pipe);
60627 }
60628
60629@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60630 struct files_struct *displaced;
60631 bool need_nonrelative = false;
60632 bool core_dumped = false;
60633- static atomic_t core_dump_count = ATOMIC_INIT(0);
60634+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60635+ long signr = siginfo->si_signo;
60636+ int dumpable;
60637 struct coredump_params cprm = {
60638 .siginfo = siginfo,
60639 .regs = signal_pt_regs(),
60640@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60641 .mm_flags = mm->flags,
60642 };
60643
60644- audit_core_dumps(siginfo->si_signo);
60645+ audit_core_dumps(signr);
60646+
60647+ dumpable = __get_dumpable(cprm.mm_flags);
60648+
60649+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60650+ gr_handle_brute_attach(dumpable);
60651
60652 binfmt = mm->binfmt;
60653 if (!binfmt || !binfmt->core_dump)
60654 goto fail;
60655- if (!__get_dumpable(cprm.mm_flags))
60656+ if (!dumpable)
60657 goto fail;
60658
60659 cred = prepare_creds();
60660@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60661 need_nonrelative = true;
60662 }
60663
60664- retval = coredump_wait(siginfo->si_signo, &core_state);
60665+ retval = coredump_wait(signr, &core_state);
60666 if (retval < 0)
60667 goto fail_creds;
60668
60669@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60670 }
60671 cprm.limit = RLIM_INFINITY;
60672
60673- dump_count = atomic_inc_return(&core_dump_count);
60674+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60675 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60676 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60677 task_tgid_vnr(current), current->comm);
60678@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60679 } else {
60680 struct inode *inode;
60681
60682+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60683+
60684 if (cprm.limit < binfmt->min_coredump)
60685 goto fail_unlock;
60686
60687@@ -681,7 +690,7 @@ close_fail:
60688 filp_close(cprm.file, NULL);
60689 fail_dropcount:
60690 if (ispipe)
60691- atomic_dec(&core_dump_count);
60692+ atomic_dec_unchecked(&core_dump_count);
60693 fail_unlock:
60694 kfree(cn.corename);
60695 coredump_finish(mm, core_dumped);
60696@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60697 struct file *file = cprm->file;
60698 loff_t pos = file->f_pos;
60699 ssize_t n;
60700+
60701+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60702 if (cprm->written + nr > cprm->limit)
60703 return 0;
60704 while (nr) {
60705diff --git a/fs/dcache.c b/fs/dcache.c
60706index c71e373..5c1f656 100644
60707--- a/fs/dcache.c
60708+++ b/fs/dcache.c
60709@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
60710 * dentry_iput drops the locks, at which point nobody (except
60711 * transient RCU lookups) can reach this dentry.
60712 */
60713- BUG_ON(dentry->d_lockref.count > 0);
60714+ BUG_ON(__lockref_read(&dentry->d_lockref) > 0);
60715 this_cpu_dec(nr_dentry);
60716 if (dentry->d_op && dentry->d_op->d_release)
60717 dentry->d_op->d_release(dentry);
60718@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60719 struct dentry *parent = dentry->d_parent;
60720 if (IS_ROOT(dentry))
60721 return NULL;
60722- if (unlikely(dentry->d_lockref.count < 0))
60723+ if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
60724 return NULL;
60725 if (likely(spin_trylock(&parent->d_lock)))
60726 return parent;
60727@@ -626,8 +626,8 @@ static inline bool fast_dput(struct dentry *dentry)
60728 */
60729 if (unlikely(ret < 0)) {
60730 spin_lock(&dentry->d_lock);
60731- if (dentry->d_lockref.count > 1) {
60732- dentry->d_lockref.count--;
60733+ if (__lockref_read(&dentry->d_lockref) > 1) {
60734+ __lockref_dec(&dentry->d_lockref);
60735 spin_unlock(&dentry->d_lock);
60736 return 1;
60737 }
60738@@ -682,7 +682,7 @@ static inline bool fast_dput(struct dentry *dentry)
60739 * else could have killed it and marked it dead. Either way, we
60740 * don't need to do anything else.
60741 */
60742- if (dentry->d_lockref.count) {
60743+ if (__lockref_read(&dentry->d_lockref)) {
60744 spin_unlock(&dentry->d_lock);
60745 return 1;
60746 }
60747@@ -692,7 +692,7 @@ static inline bool fast_dput(struct dentry *dentry)
60748 * lock, and we just tested that it was zero, so we can just
60749 * set it to 1.
60750 */
60751- dentry->d_lockref.count = 1;
60752+ __lockref_set(&dentry->d_lockref, 1);
60753 return 0;
60754 }
60755
60756@@ -751,7 +751,7 @@ repeat:
60757 dentry->d_flags |= DCACHE_REFERENCED;
60758 dentry_lru_add(dentry);
60759
60760- dentry->d_lockref.count--;
60761+ __lockref_dec(&dentry->d_lockref);
60762 spin_unlock(&dentry->d_lock);
60763 return;
60764
60765@@ -766,7 +766,7 @@ EXPORT_SYMBOL(dput);
60766 /* This must be called with d_lock held */
60767 static inline void __dget_dlock(struct dentry *dentry)
60768 {
60769- dentry->d_lockref.count++;
60770+ __lockref_inc(&dentry->d_lockref);
60771 }
60772
60773 static inline void __dget(struct dentry *dentry)
60774@@ -807,8 +807,8 @@ repeat:
60775 goto repeat;
60776 }
60777 rcu_read_unlock();
60778- BUG_ON(!ret->d_lockref.count);
60779- ret->d_lockref.count++;
60780+ BUG_ON(!__lockref_read(&ret->d_lockref));
60781+ __lockref_inc(&ret->d_lockref);
60782 spin_unlock(&ret->d_lock);
60783 return ret;
60784 }
60785@@ -886,9 +886,9 @@ restart:
60786 spin_lock(&inode->i_lock);
60787 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60788 spin_lock(&dentry->d_lock);
60789- if (!dentry->d_lockref.count) {
60790+ if (!__lockref_read(&dentry->d_lockref)) {
60791 struct dentry *parent = lock_parent(dentry);
60792- if (likely(!dentry->d_lockref.count)) {
60793+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60794 __dentry_kill(dentry);
60795 dput(parent);
60796 goto restart;
60797@@ -923,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
60798 * We found an inuse dentry which was not removed from
60799 * the LRU because of laziness during lookup. Do not free it.
60800 */
60801- if (dentry->d_lockref.count > 0) {
60802+ if (__lockref_read(&dentry->d_lockref) > 0) {
60803 spin_unlock(&dentry->d_lock);
60804 if (parent)
60805 spin_unlock(&parent->d_lock);
60806@@ -961,8 +961,8 @@ static void shrink_dentry_list(struct list_head *list)
60807 dentry = parent;
60808 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60809 parent = lock_parent(dentry);
60810- if (dentry->d_lockref.count != 1) {
60811- dentry->d_lockref.count--;
60812+ if (__lockref_read(&dentry->d_lockref) != 1) {
60813+ __lockref_dec(&dentry->d_lockref);
60814 spin_unlock(&dentry->d_lock);
60815 if (parent)
60816 spin_unlock(&parent->d_lock);
60817@@ -1002,7 +1002,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
60818 * counts, just remove them from the LRU. Otherwise give them
60819 * another pass through the LRU.
60820 */
60821- if (dentry->d_lockref.count) {
60822+ if (__lockref_read(&dentry->d_lockref)) {
60823 d_lru_isolate(lru, dentry);
60824 spin_unlock(&dentry->d_lock);
60825 return LRU_REMOVED;
60826@@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60827 } else {
60828 if (dentry->d_flags & DCACHE_LRU_LIST)
60829 d_lru_del(dentry);
60830- if (!dentry->d_lockref.count) {
60831+ if (!__lockref_read(&dentry->d_lockref)) {
60832 d_shrink_add(dentry, &data->dispose);
60833 data->found++;
60834 }
60835@@ -1384,7 +1384,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60836 return D_WALK_CONTINUE;
60837
60838 /* root with refcount 1 is fine */
60839- if (dentry == _data && dentry->d_lockref.count == 1)
60840+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60841 return D_WALK_CONTINUE;
60842
60843 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60844@@ -1393,7 +1393,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60845 dentry->d_inode ?
60846 dentry->d_inode->i_ino : 0UL,
60847 dentry,
60848- dentry->d_lockref.count,
60849+ __lockref_read(&dentry->d_lockref),
60850 dentry->d_sb->s_type->name,
60851 dentry->d_sb->s_id);
60852 WARN_ON(1);
60853@@ -1534,7 +1534,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60854 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60855 if (name->len > DNAME_INLINE_LEN-1) {
60856 size_t size = offsetof(struct external_name, name[1]);
60857- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60858+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60859 if (!p) {
60860 kmem_cache_free(dentry_cache, dentry);
60861 return NULL;
60862@@ -1557,7 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60863 smp_wmb();
60864 dentry->d_name.name = dname;
60865
60866- dentry->d_lockref.count = 1;
60867+ __lockref_set(&dentry->d_lockref, 1);
60868 dentry->d_flags = 0;
60869 spin_lock_init(&dentry->d_lock);
60870 seqcount_init(&dentry->d_seq);
60871@@ -1566,6 +1566,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60872 dentry->d_sb = sb;
60873 dentry->d_op = NULL;
60874 dentry->d_fsdata = NULL;
60875+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60876+ atomic_set(&dentry->chroot_refcnt, 0);
60877+#endif
60878 INIT_HLIST_BL_NODE(&dentry->d_hash);
60879 INIT_LIST_HEAD(&dentry->d_lru);
60880 INIT_LIST_HEAD(&dentry->d_subdirs);
60881@@ -2290,7 +2293,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60882 goto next;
60883 }
60884
60885- dentry->d_lockref.count++;
60886+ __lockref_inc(&dentry->d_lockref);
60887 found = dentry;
60888 spin_unlock(&dentry->d_lock);
60889 break;
60890@@ -2358,7 +2361,7 @@ again:
60891 spin_lock(&dentry->d_lock);
60892 inode = dentry->d_inode;
60893 isdir = S_ISDIR(inode->i_mode);
60894- if (dentry->d_lockref.count == 1) {
60895+ if (__lockref_read(&dentry->d_lockref) == 1) {
60896 if (!spin_trylock(&inode->i_lock)) {
60897 spin_unlock(&dentry->d_lock);
60898 cpu_relax();
60899@@ -3311,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60900
60901 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60902 dentry->d_flags |= DCACHE_GENOCIDE;
60903- dentry->d_lockref.count--;
60904+ __lockref_dec(&dentry->d_lockref);
60905 }
60906 }
60907 return D_WALK_CONTINUE;
60908@@ -3427,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
60909 mempages -= reserve;
60910
60911 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60912- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60913+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60914+ SLAB_NO_SANITIZE, NULL);
60915
60916 dcache_init();
60917 inode_init();
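
All direct d_lockref.count manipulation in dcache.c is funneled through __lockref_read/_set/_inc/_dec so the count's representation can change under PaX REFCOUNT without another tree-wide edit. A plausible minimal mapping, assuming the count becomes an atomic type (these definitions are an assumption, not quoted from this patch):

/* Sketch (assumption): accessor layer over struct lockref's count. */
#define __lockref_read(lr)	atomic_read(&(lr)->count)
#define __lockref_set(lr, v)	atomic_set(&(lr)->count, (v))
#define __lockref_inc(lr)	atomic_inc(&(lr)->count)
#define __lockref_dec(lr)	atomic_dec(&(lr)->count)
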
60918diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60919index 96400ab..906103d 100644
60920--- a/fs/debugfs/inode.c
60921+++ b/fs/debugfs/inode.c
60922@@ -386,6 +386,10 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
60923 }
60924 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
60925
60926+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60927+extern int grsec_enable_sysfs_restrict;
60928+#endif
60929+
60930 /**
60931 * debugfs_create_dir - create a directory in the debugfs filesystem
60932 * @name: a pointer to a string containing the name of the directory to
60933@@ -404,6 +408,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
60934 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
60935 * returned.
60936 */
60937+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60938+extern int grsec_enable_sysfs_restrict;
60939+#endif
60940+
60941 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60942 {
60943 struct dentry *dentry = start_creating(name, parent);
60944@@ -416,7 +424,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60945 if (unlikely(!inode))
60946 return failed_creating(dentry);
60947
60948- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60949+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60950+ if (grsec_enable_sysfs_restrict)
60951+ inode->i_mode = S_IFDIR | S_IRWXU;
60952+ else
60953+#endif
60954+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60955 inode->i_op = &simple_dir_inode_operations;
60956 inode->i_fop = &simple_dir_operations;
60957
60958diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60959index b08b518..d6acffa 100644
60960--- a/fs/ecryptfs/inode.c
60961+++ b/fs/ecryptfs/inode.c
60962@@ -663,7 +663,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60963 old_fs = get_fs();
60964 set_fs(get_ds());
60965 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60966- (char __user *)lower_buf,
60967+ (char __force_user *)lower_buf,
60968 PATH_MAX);
60969 set_fs(old_fs);
60970 if (rc < 0)
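
The readlink call above runs under set_fs(get_ds()), so a kernel buffer is deliberately passed where a __user pointer is expected; grsecurity tightens checking of such crossings, and __force_user (with its sibling __force_kernel, used in the compat_ioctl.c hunks earlier) marks the coercion as intentional. A sketch of what the annotations plausibly reduce to under sparse:

/* Sketch (assumption): explicit address-space coercions for code that
 * legitimately crosses the user/kernel annotation boundary. */
#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif
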
60971diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60972index e4141f2..d8263e8 100644
60973--- a/fs/ecryptfs/miscdev.c
60974+++ b/fs/ecryptfs/miscdev.c
60975@@ -304,7 +304,7 @@ check_list:
60976 goto out_unlock_msg_ctx;
60977 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60978 if (msg_ctx->msg) {
60979- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60980+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60981 goto out_unlock_msg_ctx;
60982 i += packet_length_size;
60983 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
60984diff --git a/fs/exec.c b/fs/exec.c
60985index 00400cf..b9dca28 100644
60986--- a/fs/exec.c
60987+++ b/fs/exec.c
60988@@ -56,8 +56,20 @@
60989 #include <linux/pipe_fs_i.h>
60990 #include <linux/oom.h>
60991 #include <linux/compat.h>
60992+#include <linux/random.h>
60993+#include <linux/seq_file.h>
60994+#include <linux/coredump.h>
60995+#include <linux/mman.h>
60996+
60997+#ifdef CONFIG_PAX_REFCOUNT
60998+#include <linux/kallsyms.h>
60999+#include <linux/kdebug.h>
61000+#endif
61001+
61002+#include <trace/events/fs.h>
61003
61004 #include <asm/uaccess.h>
61005+#include <asm/sections.h>
61006 #include <asm/mmu_context.h>
61007 #include <asm/tlb.h>
61008
61009@@ -66,19 +78,34 @@
61010
61011 #include <trace/events/sched.h>
61012
61013+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61014+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61015+{
61016+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61017+}
61018+#endif
61019+
61020+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61021+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61022+EXPORT_SYMBOL(pax_set_initial_flags_func);
61023+#endif
61024+
61025 int suid_dumpable = 0;
61026
61027 static LIST_HEAD(formats);
61028 static DEFINE_RWLOCK(binfmt_lock);
61029
61030+extern int gr_process_kernel_exec_ban(void);
61031+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61032+
61033 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61034 {
61035 BUG_ON(!fmt);
61036 if (WARN_ON(!fmt->load_binary))
61037 return;
61038 write_lock(&binfmt_lock);
61039- insert ? list_add(&fmt->lh, &formats) :
61040- list_add_tail(&fmt->lh, &formats);
61041+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61042+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61043 write_unlock(&binfmt_lock);
61044 }
61045
61046@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61047 void unregister_binfmt(struct linux_binfmt * fmt)
61048 {
61049 write_lock(&binfmt_lock);
61050- list_del(&fmt->lh);
61051+ pax_list_del((struct list_head *)&fmt->lh);
61052 write_unlock(&binfmt_lock);
61053 }
61054
61055@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61056 int write)
61057 {
61058 struct page *page;
61059- int ret;
61060
61061-#ifdef CONFIG_STACK_GROWSUP
61062- if (write) {
61063- ret = expand_downwards(bprm->vma, pos);
61064- if (ret < 0)
61065- return NULL;
61066- }
61067-#endif
61068- ret = get_user_pages(current, bprm->mm, pos,
61069- 1, write, 1, &page, NULL);
61070- if (ret <= 0)
61071+ if (0 > expand_downwards(bprm->vma, pos))
61072+ return NULL;
61073+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61074 return NULL;
61075
61076 if (write) {
61077@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61078 if (size <= ARG_MAX)
61079 return page;
61080
61081+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61082+ // only allow 512KB for argv+env on suid/sgid binaries
61083+ // to prevent easy ASLR exhaustion
61084+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61085+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61086+ (size > (512 * 1024))) {
61087+ put_page(page);
61088+ return NULL;
61089+ }
61090+#endif
61091+
61092 /*
61093 * Limit to 1/4-th the stack size for the argv+env strings.
61094 * This ensures that:
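Under GRKERNSEC_PROC_MEMMAP, the get_arg_page() hunk above caps argv+env at 512 KB, but only when the exec crosses a privilege boundary (the new euid or egid differs from the caller's); this stops an attacker from spending most of the randomized address space on enormous argument strings before a suid binary's ASLR even takes effect. A small sketch of that decision, with credentials as plain integers for illustration:

#include <stdbool.h>
#include <stddef.h>

/* Mirror the patch's test: the 512 KB argv+env cap applies only when the
 * exec changes credentials. */
static bool argv_env_too_big(unsigned int new_euid, unsigned int cur_euid,
                             unsigned int new_egid, unsigned int cur_egid,
                             size_t size)
{
        bool crosses_priv = new_euid != cur_euid || new_egid != cur_egid;

        return crosses_priv && size > 512 * 1024;
}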
61095@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61096 vma->vm_end = STACK_TOP_MAX;
61097 vma->vm_start = vma->vm_end - PAGE_SIZE;
61098 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61099+
61100+#ifdef CONFIG_PAX_SEGMEXEC
61101+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61102+#endif
61103+
61104 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61105 INIT_LIST_HEAD(&vma->anon_vma_chain);
61106
61107@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61108 arch_bprm_mm_init(mm, vma);
61109 up_write(&mm->mmap_sem);
61110 bprm->p = vma->vm_end - sizeof(void *);
61111+
61112+#ifdef CONFIG_PAX_RANDUSTACK
61113+ if (randomize_va_space)
61114+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61115+#endif
61116+
61117 return 0;
61118 err:
61119 up_write(&mm->mmap_sem);
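The PAX_RANDUSTACK hunk perturbs bprm->p, the initial stack pointer of the new image, by XORing in random bits below the page size whenever randomize_va_space is set, so the stack top lands at an unpredictable offset within its page. A sketch of the arithmetic, with rand() standing in for the kernel's prandom_u32():

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Perturb the stack top within its page; ~PAGE_MASK == PAGE_SIZE - 1, so
 * only the sub-page bits of the random value survive the mask. */
static uintptr_t randomize_stack_top(uintptr_t p)
{
        return p ^ ((uintptr_t)rand() & ~PAGE_MASK);
}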
61120@@ -396,7 +437,7 @@ struct user_arg_ptr {
61121 } ptr;
61122 };
61123
61124-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61125+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61126 {
61127 const char __user *native;
61128
61129@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61130 compat_uptr_t compat;
61131
61132 if (get_user(compat, argv.ptr.compat + nr))
61133- return ERR_PTR(-EFAULT);
61134+ return (const char __force_user *)ERR_PTR(-EFAULT);
61135
61136 return compat_ptr(compat);
61137 }
61138 #endif
61139
61140 if (get_user(native, argv.ptr.native + nr))
61141- return ERR_PTR(-EFAULT);
61142+ return (const char __force_user *)ERR_PTR(-EFAULT);
61143
61144 return native;
61145 }
61146@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
61147 if (!p)
61148 break;
61149
61150- if (IS_ERR(p))
61151+ if (IS_ERR((const char __force_kernel *)p))
61152 return -EFAULT;
61153
61154 if (i >= max)
61155@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
61156
61157 ret = -EFAULT;
61158 str = get_user_arg_ptr(argv, argc);
61159- if (IS_ERR(str))
61160+ if (IS_ERR((const char __force_kernel *)str))
61161 goto out;
61162
61163 len = strnlen_user(str, MAX_ARG_STRLEN);
61164@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
61165 int r;
61166 mm_segment_t oldfs = get_fs();
61167 struct user_arg_ptr argv = {
61168- .ptr.native = (const char __user *const __user *)__argv,
61169+ .ptr.native = (const char __user * const __force_user *)__argv,
61170 };
61171
61172 set_fs(KERNEL_DS);
61173@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61174 unsigned long new_end = old_end - shift;
61175 struct mmu_gather tlb;
61176
61177- BUG_ON(new_start > new_end);
61178+ if (new_start >= new_end || new_start < mmap_min_addr)
61179+ return -ENOMEM;
61180
61181 /*
61182 * ensure there are no vmas between where we want to go
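Where mainline asserts BUG_ON(new_start > new_end), the patched shift_arg_pages() above fails gracefully with -ENOMEM and additionally refuses to shift the stack below mmap_min_addr, turning a crafted-shift kernel crash into a clean exec failure. A sketch of the validation (the mmap_min_addr value shown is a typical default, used here for illustration):

#include <errno.h>

static unsigned long mmap_min_addr = 65536;     /* illustrative default */

/* Validate the shifted stack range up front instead of BUG()ing on it. */
static int check_stack_shift(unsigned long new_start, unsigned long new_end)
{
        if (new_start >= new_end || new_start < mmap_min_addr)
                return -ENOMEM;
        return 0;
}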
61183@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61184 if (vma != find_vma(mm, new_start))
61185 return -EFAULT;
61186
61187+#ifdef CONFIG_PAX_SEGMEXEC
61188+ BUG_ON(pax_find_mirror_vma(vma));
61189+#endif
61190+
61191 /*
61192 * cover the whole range: [new_start, old_end)
61193 */
61194@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61195 stack_top = arch_align_stack(stack_top);
61196 stack_top = PAGE_ALIGN(stack_top);
61197
61198- if (unlikely(stack_top < mmap_min_addr) ||
61199- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
61200- return -ENOMEM;
61201-
61202 stack_shift = vma->vm_end - stack_top;
61203
61204 bprm->p -= stack_shift;
61205@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
61206 bprm->exec -= stack_shift;
61207
61208 down_write(&mm->mmap_sem);
61209+
61210+ /* Move stack pages down in memory. */
61211+ if (stack_shift) {
61212+ ret = shift_arg_pages(vma, stack_shift);
61213+ if (ret)
61214+ goto out_unlock;
61215+ }
61216+
61217 vm_flags = VM_STACK_FLAGS;
61218
61219+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61220+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
61221+ vm_flags &= ~VM_EXEC;
61222+
61223+#ifdef CONFIG_PAX_MPROTECT
61224+ if (mm->pax_flags & MF_PAX_MPROTECT)
61225+ vm_flags &= ~VM_MAYEXEC;
61226+#endif
61227+
61228+ }
61229+#endif
61230+
61231 /*
61232 * Adjust stack execute permissions; explicitly enable for
61233 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
61234@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61235 goto out_unlock;
61236 BUG_ON(prev != vma);
61237
61238- /* Move stack pages down in memory. */
61239- if (stack_shift) {
61240- ret = shift_arg_pages(vma, stack_shift);
61241- if (ret)
61242- goto out_unlock;
61243- }
61244-
61245 /* mprotect_fixup is overkill to remove the temporary stack flags */
61246 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
61247
61248@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
61249 #endif
61250 current->mm->start_stack = bprm->p;
61251 ret = expand_stack(vma, stack_base);
61252+
61253+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
61254+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
61255+ unsigned long size;
61256+ vm_flags_t vm_flags;
61257+
61258+ size = STACK_TOP - vma->vm_end;
61259+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
61260+
61261+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
61262+
61263+#ifdef CONFIG_X86
61264+ if (!ret) {
61265+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
61266+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
61267+ }
61268+#endif
61269+
61270+ }
61271+#endif
61272+
61273 if (ret)
61274 ret = -EFAULT;
61275
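The RANDMMAP hunk above plugs the hole between the relocated stack and STACK_TOP with a VM_DONTEXPAND | VM_DONTDUMP mapping (plus, on x86, a second randomly sized low mapping), so no later allocation can slide into the gap the stack randomization just created. A loose userspace analogue that reserves a gap with an inaccessible fixed mapping, assuming Linux 4.17+ for MAP_FIXED_NOREPLACE:

#define _GNU_SOURCE
#include <sys/mman.h>

/* Reserve [gap_start, gap_start + gap_len) so nothing else can map
 * there; PROT_NONE loosely plays the role of the patch's guard mapping. */
static int reserve_gap(void *gap_start, size_t gap_len)
{
        void *p = mmap(gap_start, gap_len, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                       -1, 0);
        return p == MAP_FAILED ? -1 : 0;
}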
61276@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
61277 if (err)
61278 goto exit;
61279
61280- if (name->name[0] != '\0')
61281+ if (name->name[0] != '\0') {
61282 fsnotify_open(file);
61283+ trace_open_exec(name->name);
61284+ }
61285
61286 out:
61287 return file;
61288@@ -815,7 +893,7 @@ int kernel_read(struct file *file, loff_t offset,
61289 old_fs = get_fs();
61290 set_fs(get_ds());
61291 /* The cast to a user pointer is valid due to the set_fs() */
61292- result = vfs_read(file, (void __user *)addr, count, &pos);
61293+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
61294 set_fs(old_fs);
61295 return result;
61296 }
61297@@ -860,6 +938,7 @@ static int exec_mmap(struct mm_struct *mm)
61298 tsk->mm = mm;
61299 tsk->active_mm = mm;
61300 activate_mm(active_mm, mm);
61301+ populate_stack();
61302 tsk->mm->vmacache_seqnum = 0;
61303 vmacache_flush(tsk);
61304 task_unlock(tsk);
61305@@ -926,10 +1005,14 @@ static int de_thread(struct task_struct *tsk)
61306 if (!thread_group_leader(tsk)) {
61307 struct task_struct *leader = tsk->group_leader;
61308
61309- sig->notify_count = -1; /* for exit_notify() */
61310 for (;;) {
61311 threadgroup_change_begin(tsk);
61312 write_lock_irq(&tasklist_lock);
61313+ /*
61314+ * Do this under tasklist_lock to ensure that
61315+ * exit_notify() can't miss ->group_exit_task
61316+ */
61317+ sig->notify_count = -1;
61318 if (likely(leader->exit_state))
61319 break;
61320 __set_current_state(TASK_KILLABLE);
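The de_thread() hunk is an ordering fix rather than a hardening one: sig->notify_count = -1 used to be stored before tasklist_lock was taken, so exit_notify() on another CPU could act on the dying leader without ever seeing the sentinel; moving the store under the lock closes that window. The pattern, sketched with a pthread mutex standing in for tasklist_lock:

#include <pthread.h>

static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;
static int notify_count;

/* Publish the sentinel only while holding the lock the reader takes, so
 * the reader can never observe the state change without the sentinel. */
static void publish_exit_sentinel(void)
{
        pthread_mutex_lock(&tasklist_lock);
        notify_count = -1;      /* the old code stored this before locking */
        pthread_mutex_unlock(&tasklist_lock);
}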
61321@@ -1258,7 +1341,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
61322 }
61323 rcu_read_unlock();
61324
61325- if (p->fs->users > n_fs)
61326+ if (atomic_read(&p->fs->users) > n_fs)
61327 bprm->unsafe |= LSM_UNSAFE_SHARE;
61328 else
61329 p->fs->in_exec = 1;
61330@@ -1459,6 +1542,31 @@ static int exec_binprm(struct linux_binprm *bprm)
61331 return ret;
61332 }
61333
61334+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61335+static DEFINE_PER_CPU(u64, exec_counter);
61336+static int __init init_exec_counters(void)
61337+{
61338+ unsigned int cpu;
61339+
61340+ for_each_possible_cpu(cpu) {
61341+ per_cpu(exec_counter, cpu) = (u64)cpu;
61342+ }
61343+
61344+ return 0;
61345+}
61346+early_initcall(init_exec_counters);
61347+static inline void increment_exec_counter(void)
61348+{
61349+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
61350+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
61351+}
61352+#else
61353+static inline void increment_exec_counter(void) {}
61354+#endif
61355+
61356+extern void gr_handle_exec_args(struct linux_binprm *bprm,
61357+ struct user_arg_ptr argv);
61358+
61359 /*
61360 * sys_execve() executes a new program.
61361 */
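The GRKERNSEC_PROC_MEMMAP exec counter above hands every exec a unique 64-bit exec_id without any cross-CPU synchronization: each CPU's counter is seeded with its own number and advances in steps of 1 << 16, so the low 16 bits always identify the CPU and the high bits count execs; the BUILD_BUG_ON guarantees NR_CPUS fits in those 16 bits. A userspace sketch with a plain array in place of per-CPU variables:

#include <stdint.h>

#define MAX_CPUS 4              /* illustrative; the kernel uses NR_CPUS */

static uint64_t exec_counter[MAX_CPUS];

/* Seed each slot with its CPU number, as the patch's early_initcall does,
 * so every slot counts in its own residue class modulo 1 << 16. */
static void init_exec_counters(void)
{
        for (unsigned int cpu = 0; cpu < MAX_CPUS; cpu++)
                exec_counter[cpu] = cpu;
}

/* Advance by 1 << 16: the low 16 bits keep identifying the CPU while the
 * high 48 bits count execs, so IDs never collide across CPUs. */
static uint64_t next_exec_id(unsigned int cpu)
{
        return exec_counter[cpu] += (uint64_t)1 << 16;
}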
61362@@ -1467,6 +1575,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61363 struct user_arg_ptr envp,
61364 int flags)
61365 {
61366+#ifdef CONFIG_GRKERNSEC
61367+ struct file *old_exec_file;
61368+ struct acl_subject_label *old_acl;
61369+ struct rlimit old_rlim[RLIM_NLIMITS];
61370+#endif
61371 char *pathbuf = NULL;
61372 struct linux_binprm *bprm;
61373 struct file *file;
61374@@ -1476,6 +1589,8 @@ static int do_execveat_common(int fd, struct filename *filename,
61375 if (IS_ERR(filename))
61376 return PTR_ERR(filename);
61377
61378+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
61379+
61380 /*
61381 * We move the actual failure in case of RLIMIT_NPROC excess from
61382 * set*uid() to execve() because too many poorly written programs
61383@@ -1513,6 +1628,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61384 if (IS_ERR(file))
61385 goto out_unmark;
61386
61387+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
61388+ retval = -EPERM;
61389+ goto out_unmark;
61390+ }
61391+
61392 sched_exec();
61393
61394 bprm->file = file;
61395@@ -1539,6 +1659,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61396 }
61397 bprm->interp = bprm->filename;
61398
61399+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61400+ retval = -EACCES;
61401+ goto out_unmark;
61402+ }
61403+
61404 retval = bprm_mm_init(bprm);
61405 if (retval)
61406 goto out_unmark;
61407@@ -1555,24 +1680,70 @@ static int do_execveat_common(int fd, struct filename *filename,
61408 if (retval < 0)
61409 goto out;
61410
61411+#ifdef CONFIG_GRKERNSEC
61412+ old_acl = current->acl;
61413+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61414+ old_exec_file = current->exec_file;
61415+ get_file(file);
61416+ current->exec_file = file;
61417+#endif
61418+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61419+ /* limit suid stack to 8MB
61420+ * we saved the old limits above and will restore them if this exec fails
61421+ */
61422+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61423+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61424+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61425+#endif
61426+
61427+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61428+ retval = -EPERM;
61429+ goto out_fail;
61430+ }
61431+
61432+ if (!gr_tpe_allow(file)) {
61433+ retval = -EACCES;
61434+ goto out_fail;
61435+ }
61436+
61437+ if (gr_check_crash_exec(file)) {
61438+ retval = -EACCES;
61439+ goto out_fail;
61440+ }
61441+
61442+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61443+ bprm->unsafe);
61444+ if (retval < 0)
61445+ goto out_fail;
61446+
61447 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61448 if (retval < 0)
61449- goto out;
61450+ goto out_fail;
61451
61452 bprm->exec = bprm->p;
61453 retval = copy_strings(bprm->envc, envp, bprm);
61454 if (retval < 0)
61455- goto out;
61456+ goto out_fail;
61457
61458 retval = copy_strings(bprm->argc, argv, bprm);
61459 if (retval < 0)
61460- goto out;
61461+ goto out_fail;
61462+
61463+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61464+
61465+ gr_handle_exec_args(bprm, argv);
61466
61467 retval = exec_binprm(bprm);
61468 if (retval < 0)
61469- goto out;
61470+ goto out_fail;
61471+#ifdef CONFIG_GRKERNSEC
61472+ if (old_exec_file)
61473+ fput(old_exec_file);
61474+#endif
61475
61476 /* execve succeeded */
61477+
61478+ increment_exec_counter();
61479 current->fs->in_exec = 0;
61480 current->in_execve = 0;
61481 acct_update_integrals(current);
61482@@ -1584,6 +1755,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61483 put_files_struct(displaced);
61484 return retval;
61485
61486+out_fail:
61487+#ifdef CONFIG_GRKERNSEC
61488+ current->acl = old_acl;
61489+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61490+ fput(current->exec_file);
61491+ current->exec_file = old_exec_file;
61492+#endif
61493+
61494 out:
61495 if (bprm->mm) {
61496 acct_arg_size(bprm, 0);
61497@@ -1730,3 +1909,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61498 argv, envp, flags);
61499 }
61500 #endif
61501+
61502+int pax_check_flags(unsigned long *flags)
61503+{
61504+ int retval = 0;
61505+
61506+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61507+ if (*flags & MF_PAX_SEGMEXEC)
61508+ {
61509+ *flags &= ~MF_PAX_SEGMEXEC;
61510+ retval = -EINVAL;
61511+ }
61512+#endif
61513+
61514+ if ((*flags & MF_PAX_PAGEEXEC)
61515+
61516+#ifdef CONFIG_PAX_PAGEEXEC
61517+ && (*flags & MF_PAX_SEGMEXEC)
61518+#endif
61519+
61520+ )
61521+ {
61522+ *flags &= ~MF_PAX_PAGEEXEC;
61523+ retval = -EINVAL;
61524+ }
61525+
61526+ if ((*flags & MF_PAX_MPROTECT)
61527+
61528+#ifdef CONFIG_PAX_MPROTECT
61529+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61530+#endif
61531+
61532+ )
61533+ {
61534+ *flags &= ~MF_PAX_MPROTECT;
61535+ retval = -EINVAL;
61536+ }
61537+
61538+ if ((*flags & MF_PAX_EMUTRAMP)
61539+
61540+#ifdef CONFIG_PAX_EMUTRAMP
61541+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61542+#endif
61543+
61544+ )
61545+ {
61546+ *flags &= ~MF_PAX_EMUTRAMP;
61547+ retval = -EINVAL;
61548+ }
61549+
61550+ return retval;
61551+}
61552+
61553+EXPORT_SYMBOL(pax_check_flags);
61554+
61555+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61556+char *pax_get_path(const struct path *path, char *buf, int buflen)
61557+{
61558+ char *pathname = d_path(path, buf, buflen);
61559+
61560+ if (IS_ERR(pathname))
61561+ goto toolong;
61562+
61563+ pathname = mangle_path(buf, pathname, "\t\n\\");
61564+ if (!pathname)
61565+ goto toolong;
61566+
61567+ *pathname = 0;
61568+ return buf;
61569+
61570+toolong:
61571+ return "<path too long>";
61572+}
61573+EXPORT_SYMBOL(pax_get_path);
61574+
61575+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61576+{
61577+ struct task_struct *tsk = current;
61578+ struct mm_struct *mm = current->mm;
61579+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61580+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61581+ char *path_exec = NULL;
61582+ char *path_fault = NULL;
61583+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61584+ siginfo_t info = { };
61585+
61586+ if (buffer_exec && buffer_fault) {
61587+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61588+
61589+ down_read(&mm->mmap_sem);
61590+ vma = mm->mmap;
61591+ while (vma && (!vma_exec || !vma_fault)) {
61592+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61593+ vma_exec = vma;
61594+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61595+ vma_fault = vma;
61596+ vma = vma->vm_next;
61597+ }
61598+ if (vma_exec)
61599+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61600+ if (vma_fault) {
61601+ start = vma_fault->vm_start;
61602+ end = vma_fault->vm_end;
61603+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61604+ if (vma_fault->vm_file)
61605+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61606+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61607+ path_fault = "<heap>";
61608+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61609+ path_fault = "<stack>";
61610+ else
61611+ path_fault = "<anonymous mapping>";
61612+ }
61613+ up_read(&mm->mmap_sem);
61614+ }
61615+ if (tsk->signal->curr_ip)
61616+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61617+ else
61618+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61619+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61620+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61621+ free_page((unsigned long)buffer_exec);
61622+ free_page((unsigned long)buffer_fault);
61623+ pax_report_insns(regs, pc, sp);
61624+ info.si_signo = SIGKILL;
61625+ info.si_errno = 0;
61626+ info.si_code = SI_KERNEL;
61627+ info.si_pid = 0;
61628+ info.si_uid = 0;
61629+ do_coredump(&info);
61630+}
61631+#endif
61632+
61633+#ifdef CONFIG_PAX_REFCOUNT
61634+void pax_report_refcount_overflow(struct pt_regs *regs)
61635+{
61636+ if (current->signal->curr_ip)
61637+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61638+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61639+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61640+ else
61641+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61642+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61643+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61644+ preempt_disable();
61645+ show_regs(regs);
61646+ preempt_enable();
61647+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61648+}
61649+#endif
61650+
61651+#ifdef CONFIG_PAX_USERCOPY
61652+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61653+static noinline int check_stack_object(const void *obj, unsigned long len)
61654+{
61655+ const void * const stack = task_stack_page(current);
61656+ const void * const stackend = stack + THREAD_SIZE;
61657+
61658+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61659+ const void *frame = NULL;
61660+ const void *oldframe;
61661+#endif
61662+
61663+ if (obj + len < obj)
61664+ return -1;
61665+
61666+ if (obj + len <= stack || stackend <= obj)
61667+ return 0;
61668+
61669+ if (obj < stack || stackend < obj + len)
61670+ return -1;
61671+
61672+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61673+ oldframe = __builtin_frame_address(1);
61674+ if (oldframe)
61675+ frame = __builtin_frame_address(2);
61676+ /*
61677+ low ----------------------------------------------> high
61678+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61679+ ^----------------^
61680+ allow copies only within here
61681+ */
61682+ while (stack <= frame && frame < stackend) {
61683+ /* if obj + len extends past the last frame, this
61684+ check won't pass and the next frame will be 0,
61685+ causing us to bail out and correctly report
61686+ the copy as invalid
61687+ */
61688+ if (obj + len <= frame)
61689+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61690+ oldframe = frame;
61691+ frame = *(const void * const *)frame;
61692+ }
61693+ return -1;
61694+#else
61695+ return 1;
61696+#endif
61697+}
61698+
61699+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61700+{
61701+ if (current->signal->curr_ip)
61702+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61703+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61704+ else
61705+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61706+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61707+ dump_stack();
61708+ gr_handle_kernel_exploit();
61709+ do_group_exit(SIGKILL);
61710+}
61711+#endif
61712+
61713+#ifdef CONFIG_PAX_USERCOPY
61714+
61715+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61716+{
61717+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61718+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61719+#ifdef CONFIG_MODULES
61720+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61721+#else
61722+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61723+#endif
61724+
61725+#else
61726+ unsigned long textlow = (unsigned long)_stext;
61727+ unsigned long texthigh = (unsigned long)_etext;
61728+
61729+#ifdef CONFIG_X86_64
61730+ /* check against linear mapping as well */
61731+ if (high > (unsigned long)__va(__pa(textlow)) &&
61732+ low < (unsigned long)__va(__pa(texthigh)))
61733+ return true;
61734+#endif
61735+
61736+#endif
61737+
61738+ if (high <= textlow || low >= texthigh)
61739+ return false;
61740+ else
61741+ return true;
61742+}
61743+#endif
61744+
61745+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61746+{
61747+#ifdef CONFIG_PAX_USERCOPY
61748+ const char *type;
61749+#endif
61750+
61751+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61752+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61753+ unsigned long currentsp = (unsigned long)&stackstart;
61754+ if (unlikely((currentsp < stackstart + 512 ||
61755+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61756+ BUG();
61757+#endif
61758+
61759+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61760+ if (const_size)
61761+ return;
61762+#endif
61763+
61764+#ifdef CONFIG_PAX_USERCOPY
61765+ if (!n)
61766+ return;
61767+
61768+ type = check_heap_object(ptr, n);
61769+ if (!type) {
61770+ int ret = check_stack_object(ptr, n);
61771+ if (ret == 1 || ret == 2)
61772+ return;
61773+ if (ret == 0) {
61774+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61775+ type = "<kernel text>";
61776+ else
61777+ return;
61778+ } else
61779+ type = "<process stack>";
61780+ }
61781+
61782+ pax_report_usercopy(ptr, n, to_user, type);
61783+#endif
61784+
61785+}
61786+EXPORT_SYMBOL(__check_object_size);
61787+
61788+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61789+void pax_track_stack(void)
61790+{
61791+ unsigned long sp = (unsigned long)&sp;
61792+ if (sp < current_thread_info()->lowest_stack &&
61793+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61794+ current_thread_info()->lowest_stack = sp;
61795+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61796+ BUG();
61797+}
61798+EXPORT_SYMBOL(pax_track_stack);
61799+#endif
61800+
61801+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61802+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61803+{
61804+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61805+ dump_stack();
61806+ do_group_exit(SIGKILL);
61807+}
61808+EXPORT_SYMBOL(report_size_overflow);
61809+#endif
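The block appended to fs/exec.c implements the PAX_USERCOPY runtime checks: __check_object_size() first consults the heap checker, then check_stack_object() classifies the region against the current kernel stack (0 = disjoint, 1 = fully inside, 2 = within the live frame window when x86 frame pointers are available, -1 = partial overlap or wrap-around), and anything suspicious is routed to pax_report_usercopy(), which logs the event and kills the process group. A simplified, userspace-testable version of the interval classification, with the frame-pointer walk omitted:

/* Classify [obj, obj + len) against the stack [stack, stack + size):
 * 0 = entirely outside, 1 = fully inside, -1 = wrap-around or partial
 * overlap (the case PAX_USERCOPY treats as an attack). */
static int check_stack_object(const char *stack, unsigned long size,
                              const char *obj, unsigned long len)
{
        const char *stackend = stack + size;

        if (obj + len < obj)
                return -1;              /* length wraps the address */
        if (obj + len <= stack || stackend <= obj)
                return 0;               /* no overlap with this stack */
        if (obj < stack || stackend < obj + len)
                return -1;              /* straddles a stack boundary */
        return 1;                       /* fully contained */
}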
61810diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61811index 9f9992b..8b59411 100644
61812--- a/fs/ext2/balloc.c
61813+++ b/fs/ext2/balloc.c
61814@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61815
61816 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61817 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61818- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61819+ if (free_blocks < root_blocks + 1 &&
61820 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61821 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61822- !in_group_p (sbi->s_resgid))) {
61823+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61824 return 0;
61825 }
61826 return 1;
61827diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61828index d0e746e..82e06f0 100644
61829--- a/fs/ext2/super.c
61830+++ b/fs/ext2/super.c
61831@@ -267,10 +267,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61832 #ifdef CONFIG_EXT2_FS_XATTR
61833 if (test_opt(sb, XATTR_USER))
61834 seq_puts(seq, ",user_xattr");
61835- if (!test_opt(sb, XATTR_USER) &&
61836- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61837+ if (!test_opt(sb, XATTR_USER))
61838 seq_puts(seq, ",nouser_xattr");
61839- }
61840 #endif
61841
61842 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61843@@ -856,8 +854,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61844 if (def_mount_opts & EXT2_DEFM_UID16)
61845 set_opt(sbi->s_mount_opt, NO_UID32);
61846 #ifdef CONFIG_EXT2_FS_XATTR
61847- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61848- set_opt(sbi->s_mount_opt, XATTR_USER);
61849+ /* always enable user xattrs */
61850+ set_opt(sbi->s_mount_opt, XATTR_USER);
61851 #endif
61852 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61853 if (def_mount_opts & EXT2_DEFM_ACL)
61854diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61855index 9142614..97484fa 100644
61856--- a/fs/ext2/xattr.c
61857+++ b/fs/ext2/xattr.c
61858@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61859 struct buffer_head *bh = NULL;
61860 struct ext2_xattr_entry *entry;
61861 char *end;
61862- size_t rest = buffer_size;
61863+ size_t rest = buffer_size, total_size = 0;
61864 int error;
61865
61866 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61867@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61868 buffer += size;
61869 }
61870 rest -= size;
61871+ total_size += size;
61872 }
61873 }
61874- error = buffer_size - rest; /* total size */
61875+ error = total_size;
61876
61877 cleanup:
61878 brelse(bh);
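The ext2 xattr change, repeated for ext3 and ext4 below, stops deriving the listed-names length as buffer_size - rest and instead accumulates total_size directly; with a corrupted entry, rest could be driven past zero and the unsigned subtraction would hand the caller a bogus, enormous length. The accumulation pattern in isolation:

#include <stddef.h>

/* Sum the entry sizes directly instead of inferring the total from what
 * remains of the output buffer; an underflowed remainder can no longer
 * inflate the result. */
static size_t list_xattr_names(const size_t *entry_sizes, int n)
{
        size_t total_size = 0;

        for (int i = 0; i < n; i++)
                total_size += entry_sizes[i];   /* was: buffer_size - rest */
        return total_size;
}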
61879diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61880index 158b5d4..2432610 100644
61881--- a/fs/ext3/balloc.c
61882+++ b/fs/ext3/balloc.c
61883@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61884
61885 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61886 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61887- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61888+ if (free_blocks < root_blocks + 1 &&
61889 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61890 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61891- !in_group_p (sbi->s_resgid))) {
61892+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61893 return 0;
61894 }
61895 return 1;
61896diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61897index d4dbf3c..906a6fb 100644
61898--- a/fs/ext3/super.c
61899+++ b/fs/ext3/super.c
61900@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61901 #ifdef CONFIG_EXT3_FS_XATTR
61902 if (test_opt(sb, XATTR_USER))
61903 seq_puts(seq, ",user_xattr");
61904- if (!test_opt(sb, XATTR_USER) &&
61905- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61906+ if (!test_opt(sb, XATTR_USER))
61907 seq_puts(seq, ",nouser_xattr");
61908- }
61909 #endif
61910 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61911 if (test_opt(sb, POSIX_ACL))
61912@@ -1760,8 +1758,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61913 if (def_mount_opts & EXT3_DEFM_UID16)
61914 set_opt(sbi->s_mount_opt, NO_UID32);
61915 #ifdef CONFIG_EXT3_FS_XATTR
61916- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61917- set_opt(sbi->s_mount_opt, XATTR_USER);
61918+ /* always enable user xattrs */
61919+ set_opt(sbi->s_mount_opt, XATTR_USER);
61920 #endif
61921 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61922 if (def_mount_opts & EXT3_DEFM_ACL)
61923diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61924index c6874be..f8a6ae8 100644
61925--- a/fs/ext3/xattr.c
61926+++ b/fs/ext3/xattr.c
61927@@ -330,7 +330,7 @@ static int
61928 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61929 char *buffer, size_t buffer_size)
61930 {
61931- size_t rest = buffer_size;
61932+ size_t rest = buffer_size, total_size = 0;
61933
61934 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61935 const struct xattr_handler *handler =
61936@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61937 buffer += size;
61938 }
61939 rest -= size;
61940+ total_size += size;
61941 }
61942 }
61943- return buffer_size - rest;
61944+ return total_size;
61945 }
61946
61947 static int
61948diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61949index 83a6f49..d4e4d03 100644
61950--- a/fs/ext4/balloc.c
61951+++ b/fs/ext4/balloc.c
61952@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61953 /* Hm, nope. Are (enough) root reserved clusters available? */
61954 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61955 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61956- capable(CAP_SYS_RESOURCE) ||
61957- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61958+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61959+ capable_nolog(CAP_SYS_RESOURCE)) {
61960
61961 if (free_clusters >= (nclusters + dirty_clusters +
61962 resv_clusters))
61963diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61964index f63c3d5..3c1a033 100644
61965--- a/fs/ext4/ext4.h
61966+++ b/fs/ext4/ext4.h
61967@@ -1287,19 +1287,19 @@ struct ext4_sb_info {
61968 unsigned long s_mb_last_start;
61969
61970 /* stats for buddy allocator */
61971- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61972- atomic_t s_bal_success; /* we found long enough chunks */
61973- atomic_t s_bal_allocated; /* in blocks */
61974- atomic_t s_bal_ex_scanned; /* total extents scanned */
61975- atomic_t s_bal_goals; /* goal hits */
61976- atomic_t s_bal_breaks; /* too long searches */
61977- atomic_t s_bal_2orders; /* 2^order hits */
61978+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61979+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61980+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61981+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61982+ atomic_unchecked_t s_bal_goals; /* goal hits */
61983+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61984+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61985 spinlock_t s_bal_lock;
61986 unsigned long s_mb_buddies_generated;
61987 unsigned long long s_mb_generation_time;
61988- atomic_t s_mb_lost_chunks;
61989- atomic_t s_mb_preallocated;
61990- atomic_t s_mb_discarded;
61991+ atomic_unchecked_t s_mb_lost_chunks;
61992+ atomic_unchecked_t s_mb_preallocated;
61993+ atomic_unchecked_t s_mb_discarded;
61994 atomic_t s_lock_busy;
61995
61996 /* locality groups */
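The s_bal_* and s_mb_* conversions in ext4.h (and the fscache statistics further below) are part of PAX_REFCOUNT: ordinary atomic_t gains overflow detection, so counters that are pure statistics, where wrap-around is harmless, move to atomic_unchecked_t and the matching *_unchecked accessors to stay out of the detector's way. A rough userspace approximation of the split, assuming GCC's __atomic builtins; the real implementation detects overflow in arch-specific asm rather than by pre-checking:

#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* Checked increment: refuse to wrap, roughly what PAX_REFCOUNT enforces. */
static void atomic_inc(atomic_t *v)
{
        if (__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED) == INT_MAX)
                __builtin_trap();       /* overflow: kill the offender */
}

/* Unchecked increment for statistics, where wrap-around is acceptable. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}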
61997diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61998index 8d1e602..abf497b 100644
61999--- a/fs/ext4/mballoc.c
62000+++ b/fs/ext4/mballoc.c
62001@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
62002 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
62003
62004 if (EXT4_SB(sb)->s_mb_stats)
62005- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
62006+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
62007
62008 break;
62009 }
62010@@ -2211,7 +2211,7 @@ repeat:
62011 ac->ac_status = AC_STATUS_CONTINUE;
62012 ac->ac_flags |= EXT4_MB_HINT_FIRST;
62013 cr = 3;
62014- atomic_inc(&sbi->s_mb_lost_chunks);
62015+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
62016 goto repeat;
62017 }
62018 }
62019@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
62020 if (sbi->s_mb_stats) {
62021 ext4_msg(sb, KERN_INFO,
62022 "mballoc: %u blocks %u reqs (%u success)",
62023- atomic_read(&sbi->s_bal_allocated),
62024- atomic_read(&sbi->s_bal_reqs),
62025- atomic_read(&sbi->s_bal_success));
62026+ atomic_read_unchecked(&sbi->s_bal_allocated),
62027+ atomic_read_unchecked(&sbi->s_bal_reqs),
62028+ atomic_read_unchecked(&sbi->s_bal_success));
62029 ext4_msg(sb, KERN_INFO,
62030 "mballoc: %u extents scanned, %u goal hits, "
62031 "%u 2^N hits, %u breaks, %u lost",
62032- atomic_read(&sbi->s_bal_ex_scanned),
62033- atomic_read(&sbi->s_bal_goals),
62034- atomic_read(&sbi->s_bal_2orders),
62035- atomic_read(&sbi->s_bal_breaks),
62036- atomic_read(&sbi->s_mb_lost_chunks));
62037+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
62038+ atomic_read_unchecked(&sbi->s_bal_goals),
62039+ atomic_read_unchecked(&sbi->s_bal_2orders),
62040+ atomic_read_unchecked(&sbi->s_bal_breaks),
62041+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
62042 ext4_msg(sb, KERN_INFO,
62043 "mballoc: %lu generated and it took %Lu",
62044 sbi->s_mb_buddies_generated,
62045 sbi->s_mb_generation_time);
62046 ext4_msg(sb, KERN_INFO,
62047 "mballoc: %u preallocated, %u discarded",
62048- atomic_read(&sbi->s_mb_preallocated),
62049- atomic_read(&sbi->s_mb_discarded));
62050+ atomic_read_unchecked(&sbi->s_mb_preallocated),
62051+ atomic_read_unchecked(&sbi->s_mb_discarded));
62052 }
62053
62054 free_percpu(sbi->s_locality_groups);
62055@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
62056 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
62057
62058 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
62059- atomic_inc(&sbi->s_bal_reqs);
62060- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62061+ atomic_inc_unchecked(&sbi->s_bal_reqs);
62062+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62063 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
62064- atomic_inc(&sbi->s_bal_success);
62065- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
62066+ atomic_inc_unchecked(&sbi->s_bal_success);
62067+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
62068 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
62069 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
62070- atomic_inc(&sbi->s_bal_goals);
62071+ atomic_inc_unchecked(&sbi->s_bal_goals);
62072 if (ac->ac_found > sbi->s_mb_max_to_scan)
62073- atomic_inc(&sbi->s_bal_breaks);
62074+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62075 }
62076
62077 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62078@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62079 trace_ext4_mb_new_inode_pa(ac, pa);
62080
62081 ext4_mb_use_inode_pa(ac, pa);
62082- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62083+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62084
62085 ei = EXT4_I(ac->ac_inode);
62086 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62087@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62088 trace_ext4_mb_new_group_pa(ac, pa);
62089
62090 ext4_mb_use_group_pa(ac, pa);
62091- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62092+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62093
62094 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62095 lg = ac->ac_lg;
62096@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62097 * from the bitmap and continue.
62098 */
62099 }
62100- atomic_add(free, &sbi->s_mb_discarded);
62101+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62102
62103 return err;
62104 }
62105@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62106 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62107 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62108 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62109- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62110+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62111 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62112
62113 return 0;
62114diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62115index 8313ca3..8a37d08 100644
62116--- a/fs/ext4/mmp.c
62117+++ b/fs/ext4/mmp.c
62118@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62119 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
62120 const char *function, unsigned int line, const char *msg)
62121 {
62122- __ext4_warning(sb, function, line, msg);
62123+ __ext4_warning(sb, function, line, "%s", msg);
62124 __ext4_warning(sb, function, line,
62125 "MMP failure info: last update time: %llu, last update "
62126 "node: %s, last update device: %s\n",
62127diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
62128index 8a8ec62..1b02de5 100644
62129--- a/fs/ext4/resize.c
62130+++ b/fs/ext4/resize.c
62131@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
62132
62133 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
62134 for (count2 = count; count > 0; count -= count2, block += count2) {
62135- ext4_fsblk_t start;
62136+ ext4_fsblk_t start, diff;
62137 struct buffer_head *bh;
62138 ext4_group_t group;
62139 int err;
62140@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
62141 start = ext4_group_first_block_no(sb, group);
62142 group -= flex_gd->groups[0].group;
62143
62144- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
62145- if (count2 > count)
62146- count2 = count;
62147-
62148 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
62149 BUG_ON(flex_gd->count > 1);
62150 continue;
62151@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
62152 err = ext4_journal_get_write_access(handle, bh);
62153 if (err)
62154 return err;
62155+
62156+ diff = block - start;
62157+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
62158+ if (count2 > count)
62159+ count2 = count;
62160+
62161 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
62162- block - start, count2);
62163- ext4_set_bits(bh->b_data, block - start, count2);
62164+ diff, count2);
62165+ ext4_set_bits(bh->b_data, diff, count2);
62166
62167 err = ext4_handle_dirty_metadata(handle, NULL, bh);
62168 if (unlikely(err))
62169diff --git a/fs/ext4/super.c b/fs/ext4/super.c
62170index e061e66..87bc092 100644
62171--- a/fs/ext4/super.c
62172+++ b/fs/ext4/super.c
62173@@ -1243,7 +1243,7 @@ static ext4_fsblk_t get_sb_block(void **data)
62174 }
62175
62176 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
62177-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62178+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62179 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
62180
62181 #ifdef CONFIG_QUOTA
62182@@ -2443,7 +2443,7 @@ struct ext4_attr {
62183 int offset;
62184 int deprecated_val;
62185 } u;
62186-};
62187+} __do_const;
62188
62189 static int parse_strtoull(const char *buf,
62190 unsigned long long max, unsigned long long *value)
62191diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
62192index 1e09fc7..0400dd4 100644
62193--- a/fs/ext4/xattr.c
62194+++ b/fs/ext4/xattr.c
62195@@ -399,7 +399,7 @@ static int
62196 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62197 char *buffer, size_t buffer_size)
62198 {
62199- size_t rest = buffer_size;
62200+ size_t rest = buffer_size, total_size = 0;
62201
62202 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62203 const struct xattr_handler *handler =
62204@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62205 buffer += size;
62206 }
62207 rest -= size;
62208+ total_size += size;
62209 }
62210 }
62211- return buffer_size - rest;
62212+ return total_size;
62213 }
62214
62215 static int
62216diff --git a/fs/fcntl.c b/fs/fcntl.c
62217index ee85cd4..9dd0d20 100644
62218--- a/fs/fcntl.c
62219+++ b/fs/fcntl.c
62220@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62221 int force)
62222 {
62223 security_file_set_fowner(filp);
62224+ if (gr_handle_chroot_fowner(pid, type))
62225+ return;
62226+ if (gr_check_protected_task_fowner(pid, type))
62227+ return;
62228 f_modown(filp, pid, type, force);
62229 }
62230 EXPORT_SYMBOL(__f_setown);
62231diff --git a/fs/fhandle.c b/fs/fhandle.c
62232index 999ff5c..2281df9 100644
62233--- a/fs/fhandle.c
62234+++ b/fs/fhandle.c
62235@@ -8,6 +8,7 @@
62236 #include <linux/fs_struct.h>
62237 #include <linux/fsnotify.h>
62238 #include <linux/personality.h>
62239+#include <linux/grsecurity.h>
62240 #include <asm/uaccess.h>
62241 #include "internal.h"
62242 #include "mount.h"
62243@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62244 } else
62245 retval = 0;
62246 /* copy the mount id */
62247- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62248- sizeof(*mnt_id)) ||
62249+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62250 copy_to_user(ufh, handle,
62251 sizeof(struct file_handle) + handle_bytes))
62252 retval = -EFAULT;
62253@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62254 * the directory. Ideally we would like CAP_DAC_SEARCH.
62255 * But we don't have that
62256 */
62257- if (!capable(CAP_DAC_READ_SEARCH)) {
62258+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62259 retval = -EPERM;
62260 goto out_err;
62261 }
62262@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62263 goto out_err;
62264 }
62265 /* copy the full handle */
62266- if (copy_from_user(handle, ufh,
62267- sizeof(struct file_handle) +
62268+ *handle = f_handle;
62269+ if (copy_from_user(&handle->f_handle,
62270+ &ufh->f_handle,
62271 f_handle.handle_bytes)) {
62272 retval = -EFAULT;
62273 goto out_handle;
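The handle_to_path() change is a double-fetch fix: the header (handle_bytes, handle_type) was already copied and validated once, and re-reading the whole struct file_handle from userspace let a racing thread inflate handle_bytes between the check and the second copy; the patch writes the validated header into the kernel copy and fetches only the payload, bounded by the checked length. A sketch, with memcpy standing in for copy_from_user() and checked_bytes assumed to have been verified against MAX_HANDLE_SZ beforehand:

#include <string.h>

#define MAX_HANDLE_SZ 128

struct fh {
        unsigned int handle_bytes;
        int handle_type;
        unsigned char f_handle[MAX_HANDLE_SZ];
};

/* Reuse the header validated on the first fetch and copy only the
 * payload on the second, so a racing writer cannot grow the size. */
static void fetch_handle(struct fh *dst, const struct fh *ufh,
                         unsigned int checked_bytes, int checked_type)
{
        dst->handle_bytes = checked_bytes;
        dst->handle_type = checked_type;
        memcpy(dst->f_handle, ufh->f_handle, checked_bytes);
}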
62274diff --git a/fs/file.c b/fs/file.c
62275index ee738ea..f6c15629 100644
62276--- a/fs/file.c
62277+++ b/fs/file.c
62278@@ -16,6 +16,7 @@
62279 #include <linux/slab.h>
62280 #include <linux/vmalloc.h>
62281 #include <linux/file.h>
62282+#include <linux/security.h>
62283 #include <linux/fdtable.h>
62284 #include <linux/bitops.h>
62285 #include <linux/interrupt.h>
62286@@ -139,7 +140,7 @@ out:
62287 * Return <0 error code on error; 1 on successful completion.
62288 * The files->file_lock should be held on entry, and will be held on exit.
62289 */
62290-static int expand_fdtable(struct files_struct *files, int nr)
62291+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62292 __releases(files->file_lock)
62293 __acquires(files->file_lock)
62294 {
62295@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62296 * expanded and execution may have blocked.
62297 * The files->file_lock should be held on entry, and will be held on exit.
62298 */
62299-static int expand_files(struct files_struct *files, int nr)
62300+static int expand_files(struct files_struct *files, unsigned int nr)
62301 {
62302 struct fdtable *fdt;
62303
62304@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62305 if (!file)
62306 return __close_fd(files, fd);
62307
62308+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62309 if (fd >= rlimit(RLIMIT_NOFILE))
62310 return -EBADF;
62311
62312@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62313 if (unlikely(oldfd == newfd))
62314 return -EINVAL;
62315
62316+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62317 if (newfd >= rlimit(RLIMIT_NOFILE))
62318 return -EBADF;
62319
62320@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62321 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62322 {
62323 int err;
62324+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62325 if (from >= rlimit(RLIMIT_NOFILE))
62326 return -EINVAL;
62327 err = alloc_fd(from, flags);
62328diff --git a/fs/filesystems.c b/fs/filesystems.c
62329index 5797d45..7d7d79a 100644
62330--- a/fs/filesystems.c
62331+++ b/fs/filesystems.c
62332@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62333 int len = dot ? dot - name : strlen(name);
62334
62335 fs = __get_fs_type(name, len);
62336+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62337+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62338+#else
62339 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62340+#endif
62341 fs = __get_fs_type(name, len);
62342
62343 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62344diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62345index 7dca743..1ff87ae 100644
62346--- a/fs/fs_struct.c
62347+++ b/fs/fs_struct.c
62348@@ -4,6 +4,7 @@
62349 #include <linux/path.h>
62350 #include <linux/slab.h>
62351 #include <linux/fs_struct.h>
62352+#include <linux/grsecurity.h>
62353 #include "internal.h"
62354
62355 /*
62356@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62357 struct path old_root;
62358
62359 path_get(path);
62360+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
62361 spin_lock(&fs->lock);
62362 write_seqcount_begin(&fs->seq);
62363 old_root = fs->root;
62364 fs->root = *path;
62365+ gr_set_chroot_entries(current, path);
62366 write_seqcount_end(&fs->seq);
62367 spin_unlock(&fs->lock);
62368- if (old_root.dentry)
62369+ if (old_root.dentry) {
62370+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
62371 path_put(&old_root);
62372+ }
62373 }
62374
62375 /*
62376@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62377 int hits = 0;
62378 spin_lock(&fs->lock);
62379 write_seqcount_begin(&fs->seq);
62380+ /* this root replacement is only done by pivot_root,
62381+ leave grsec's chroot tagging alone for this task
62382+ so that a pivoted root isn't treated as a chroot
62383+ */
62384 hits += replace_path(&fs->root, old_root, new_root);
62385 hits += replace_path(&fs->pwd, old_root, new_root);
62386 write_seqcount_end(&fs->seq);
62387@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62388
62389 void free_fs_struct(struct fs_struct *fs)
62390 {
62391+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
62392 path_put(&fs->root);
62393 path_put(&fs->pwd);
62394 kmem_cache_free(fs_cachep, fs);
62395@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
62396 task_lock(tsk);
62397 spin_lock(&fs->lock);
62398 tsk->fs = NULL;
62399- kill = !--fs->users;
62400+ gr_clear_chroot_entries(tsk);
62401+ kill = !atomic_dec_return(&fs->users);
62402 spin_unlock(&fs->lock);
62403 task_unlock(tsk);
62404 if (kill)
62405@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62406 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62407 /* We don't need to lock fs - think why ;-) */
62408 if (fs) {
62409- fs->users = 1;
62410+ atomic_set(&fs->users, 1);
62411 fs->in_exec = 0;
62412 spin_lock_init(&fs->lock);
62413 seqcount_init(&fs->seq);
62414@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62415 spin_lock(&old->lock);
62416 fs->root = old->root;
62417 path_get(&fs->root);
62418+ /* instead of calling gr_set_chroot_entries here,
62419+ we call it from every caller of this function
62420+ */
62421 fs->pwd = old->pwd;
62422 path_get(&fs->pwd);
62423 spin_unlock(&old->lock);
62424+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
62425 }
62426 return fs;
62427 }
62428@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
62429
62430 task_lock(current);
62431 spin_lock(&fs->lock);
62432- kill = !--fs->users;
62433+ kill = !atomic_dec_return(&fs->users);
62434 current->fs = new_fs;
62435+ gr_set_chroot_entries(current, &new_fs->root);
62436 spin_unlock(&fs->lock);
62437 task_unlock(current);
62438
62439@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62440
62441 int current_umask(void)
62442 {
62443- return current->fs->umask;
62444+ return current->fs->umask | gr_acl_umask();
62445 }
62446 EXPORT_SYMBOL(current_umask);
62447
62448 /* to be mentioned only in INIT_TASK */
62449 struct fs_struct init_fs = {
62450- .users = 1,
62451+ .users = ATOMIC_INIT(1),
62452 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62453 .seq = SEQCNT_ZERO(init_fs.seq),
62454 .umask = 0022,
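The fs_struct conversion turns the users count from a plain int manipulated under fs->lock into an atomic_t, since grsecurity now also reads it locklessly (see the atomic_read() in check_unsafe_exec() earlier in this patch); the kill = !--fs->users idiom becomes !atomic_dec_return(&fs->users). A C11 sketch of the drop-last-reference test:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int fs_users = 1;         /* was: plain int under fs->lock */

/* Drop one reference and report whether it was the last; fetch_sub
 * returns the pre-decrement count, so old == 1 means the new count is
 * zero, matching kill = !atomic_dec_return(...). */
static bool put_fs_struct(void)
{
        return atomic_fetch_sub(&fs_users, 1) == 1;
}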
62455diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62456index 89acec7..a575262 100644
62457--- a/fs/fscache/cookie.c
62458+++ b/fs/fscache/cookie.c
62459@@ -19,7 +19,7 @@
62460
62461 struct kmem_cache *fscache_cookie_jar;
62462
62463-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62464+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62465
62466 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62467 static int fscache_alloc_object(struct fscache_cache *cache,
62468@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62469 parent ? (char *) parent->def->name : "<no-parent>",
62470 def->name, netfs_data, enable);
62471
62472- fscache_stat(&fscache_n_acquires);
62473+ fscache_stat_unchecked(&fscache_n_acquires);
62474
62475 /* if there's no parent cookie, then we don't create one here either */
62476 if (!parent) {
62477- fscache_stat(&fscache_n_acquires_null);
62478+ fscache_stat_unchecked(&fscache_n_acquires_null);
62479 _leave(" [no parent]");
62480 return NULL;
62481 }
62482@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62483 /* allocate and initialise a cookie */
62484 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62485 if (!cookie) {
62486- fscache_stat(&fscache_n_acquires_oom);
62487+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62488 _leave(" [ENOMEM]");
62489 return NULL;
62490 }
62491@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62492
62493 switch (cookie->def->type) {
62494 case FSCACHE_COOKIE_TYPE_INDEX:
62495- fscache_stat(&fscache_n_cookie_index);
62496+ fscache_stat_unchecked(&fscache_n_cookie_index);
62497 break;
62498 case FSCACHE_COOKIE_TYPE_DATAFILE:
62499- fscache_stat(&fscache_n_cookie_data);
62500+ fscache_stat_unchecked(&fscache_n_cookie_data);
62501 break;
62502 default:
62503- fscache_stat(&fscache_n_cookie_special);
62504+ fscache_stat_unchecked(&fscache_n_cookie_special);
62505 break;
62506 }
62507
62508@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62509 } else {
62510 atomic_dec(&parent->n_children);
62511 __fscache_cookie_put(cookie);
62512- fscache_stat(&fscache_n_acquires_nobufs);
62513+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62514 _leave(" = NULL");
62515 return NULL;
62516 }
62517@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62518 }
62519 }
62520
62521- fscache_stat(&fscache_n_acquires_ok);
62522+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62523 _leave(" = %p", cookie);
62524 return cookie;
62525 }
62526@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62527 cache = fscache_select_cache_for_object(cookie->parent);
62528 if (!cache) {
62529 up_read(&fscache_addremove_sem);
62530- fscache_stat(&fscache_n_acquires_no_cache);
62531+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62532 _leave(" = -ENOMEDIUM [no cache]");
62533 return -ENOMEDIUM;
62534 }
62535@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62536 object = cache->ops->alloc_object(cache, cookie);
62537 fscache_stat_d(&fscache_n_cop_alloc_object);
62538 if (IS_ERR(object)) {
62539- fscache_stat(&fscache_n_object_no_alloc);
62540+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62541 ret = PTR_ERR(object);
62542 goto error;
62543 }
62544
62545- fscache_stat(&fscache_n_object_alloc);
62546+ fscache_stat_unchecked(&fscache_n_object_alloc);
62547
62548- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62549+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62550
62551 _debug("ALLOC OBJ%x: %s {%lx}",
62552 object->debug_id, cookie->def->name, object->events);
62553@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62554
62555 _enter("{%s}", cookie->def->name);
62556
62557- fscache_stat(&fscache_n_invalidates);
62558+ fscache_stat_unchecked(&fscache_n_invalidates);
62559
62560 /* Only permit invalidation of data files. Invalidating an index will
62561 * require the caller to release all its attachments to the tree rooted
62562@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62563 {
62564 struct fscache_object *object;
62565
62566- fscache_stat(&fscache_n_updates);
62567+ fscache_stat_unchecked(&fscache_n_updates);
62568
62569 if (!cookie) {
62570- fscache_stat(&fscache_n_updates_null);
62571+ fscache_stat_unchecked(&fscache_n_updates_null);
62572 _leave(" [no cookie]");
62573 return;
62574 }
62575@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62576 */
62577 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62578 {
62579- fscache_stat(&fscache_n_relinquishes);
62580+ fscache_stat_unchecked(&fscache_n_relinquishes);
62581 if (retire)
62582- fscache_stat(&fscache_n_relinquishes_retire);
62583+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62584
62585 if (!cookie) {
62586- fscache_stat(&fscache_n_relinquishes_null);
62587+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62588 _leave(" [no cookie]");
62589 return;
62590 }
62591@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62592 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62593 goto inconsistent;
62594
62595- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62596+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62597
62598 __fscache_use_cookie(cookie);
62599 if (fscache_submit_op(object, op) < 0)
62600diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62601index 7872a62..d91b19f 100644
62602--- a/fs/fscache/internal.h
62603+++ b/fs/fscache/internal.h
62604@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62605 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62606 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62607 struct fscache_operation *,
62608- atomic_t *,
62609- atomic_t *,
62610+ atomic_unchecked_t *,
62611+ atomic_unchecked_t *,
62612 void (*)(struct fscache_operation *));
62613 extern void fscache_invalidate_writes(struct fscache_cookie *);
62614
62615@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62616 * stats.c
62617 */
62618 #ifdef CONFIG_FSCACHE_STATS
62619-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62620-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62621+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62622+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62623
62624-extern atomic_t fscache_n_op_pend;
62625-extern atomic_t fscache_n_op_run;
62626-extern atomic_t fscache_n_op_enqueue;
62627-extern atomic_t fscache_n_op_deferred_release;
62628-extern atomic_t fscache_n_op_release;
62629-extern atomic_t fscache_n_op_gc;
62630-extern atomic_t fscache_n_op_cancelled;
62631-extern atomic_t fscache_n_op_rejected;
62632+extern atomic_unchecked_t fscache_n_op_pend;
62633+extern atomic_unchecked_t fscache_n_op_run;
62634+extern atomic_unchecked_t fscache_n_op_enqueue;
62635+extern atomic_unchecked_t fscache_n_op_deferred_release;
62636+extern atomic_unchecked_t fscache_n_op_release;
62637+extern atomic_unchecked_t fscache_n_op_gc;
62638+extern atomic_unchecked_t fscache_n_op_cancelled;
62639+extern atomic_unchecked_t fscache_n_op_rejected;
62640
62641-extern atomic_t fscache_n_attr_changed;
62642-extern atomic_t fscache_n_attr_changed_ok;
62643-extern atomic_t fscache_n_attr_changed_nobufs;
62644-extern atomic_t fscache_n_attr_changed_nomem;
62645-extern atomic_t fscache_n_attr_changed_calls;
62646+extern atomic_unchecked_t fscache_n_attr_changed;
62647+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62648+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62649+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62650+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62651
62652-extern atomic_t fscache_n_allocs;
62653-extern atomic_t fscache_n_allocs_ok;
62654-extern atomic_t fscache_n_allocs_wait;
62655-extern atomic_t fscache_n_allocs_nobufs;
62656-extern atomic_t fscache_n_allocs_intr;
62657-extern atomic_t fscache_n_allocs_object_dead;
62658-extern atomic_t fscache_n_alloc_ops;
62659-extern atomic_t fscache_n_alloc_op_waits;
62660+extern atomic_unchecked_t fscache_n_allocs;
62661+extern atomic_unchecked_t fscache_n_allocs_ok;
62662+extern atomic_unchecked_t fscache_n_allocs_wait;
62663+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62664+extern atomic_unchecked_t fscache_n_allocs_intr;
62665+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62666+extern atomic_unchecked_t fscache_n_alloc_ops;
62667+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62668
62669-extern atomic_t fscache_n_retrievals;
62670-extern atomic_t fscache_n_retrievals_ok;
62671-extern atomic_t fscache_n_retrievals_wait;
62672-extern atomic_t fscache_n_retrievals_nodata;
62673-extern atomic_t fscache_n_retrievals_nobufs;
62674-extern atomic_t fscache_n_retrievals_intr;
62675-extern atomic_t fscache_n_retrievals_nomem;
62676-extern atomic_t fscache_n_retrievals_object_dead;
62677-extern atomic_t fscache_n_retrieval_ops;
62678-extern atomic_t fscache_n_retrieval_op_waits;
62679+extern atomic_unchecked_t fscache_n_retrievals;
62680+extern atomic_unchecked_t fscache_n_retrievals_ok;
62681+extern atomic_unchecked_t fscache_n_retrievals_wait;
62682+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62683+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62684+extern atomic_unchecked_t fscache_n_retrievals_intr;
62685+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62686+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62687+extern atomic_unchecked_t fscache_n_retrieval_ops;
62688+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62689
62690-extern atomic_t fscache_n_stores;
62691-extern atomic_t fscache_n_stores_ok;
62692-extern atomic_t fscache_n_stores_again;
62693-extern atomic_t fscache_n_stores_nobufs;
62694-extern atomic_t fscache_n_stores_oom;
62695-extern atomic_t fscache_n_store_ops;
62696-extern atomic_t fscache_n_store_calls;
62697-extern atomic_t fscache_n_store_pages;
62698-extern atomic_t fscache_n_store_radix_deletes;
62699-extern atomic_t fscache_n_store_pages_over_limit;
62700+extern atomic_unchecked_t fscache_n_stores;
62701+extern atomic_unchecked_t fscache_n_stores_ok;
62702+extern atomic_unchecked_t fscache_n_stores_again;
62703+extern atomic_unchecked_t fscache_n_stores_nobufs;
62704+extern atomic_unchecked_t fscache_n_stores_oom;
62705+extern atomic_unchecked_t fscache_n_store_ops;
62706+extern atomic_unchecked_t fscache_n_store_calls;
62707+extern atomic_unchecked_t fscache_n_store_pages;
62708+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62709+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62710
62711-extern atomic_t fscache_n_store_vmscan_not_storing;
62712-extern atomic_t fscache_n_store_vmscan_gone;
62713-extern atomic_t fscache_n_store_vmscan_busy;
62714-extern atomic_t fscache_n_store_vmscan_cancelled;
62715-extern atomic_t fscache_n_store_vmscan_wait;
62716+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62717+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62718+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62719+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62720+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62721
62722-extern atomic_t fscache_n_marks;
62723-extern atomic_t fscache_n_uncaches;
62724+extern atomic_unchecked_t fscache_n_marks;
62725+extern atomic_unchecked_t fscache_n_uncaches;
62726
62727-extern atomic_t fscache_n_acquires;
62728-extern atomic_t fscache_n_acquires_null;
62729-extern atomic_t fscache_n_acquires_no_cache;
62730-extern atomic_t fscache_n_acquires_ok;
62731-extern atomic_t fscache_n_acquires_nobufs;
62732-extern atomic_t fscache_n_acquires_oom;
62733+extern atomic_unchecked_t fscache_n_acquires;
62734+extern atomic_unchecked_t fscache_n_acquires_null;
62735+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62736+extern atomic_unchecked_t fscache_n_acquires_ok;
62737+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62738+extern atomic_unchecked_t fscache_n_acquires_oom;
62739
62740-extern atomic_t fscache_n_invalidates;
62741-extern atomic_t fscache_n_invalidates_run;
62742+extern atomic_unchecked_t fscache_n_invalidates;
62743+extern atomic_unchecked_t fscache_n_invalidates_run;
62744
62745-extern atomic_t fscache_n_updates;
62746-extern atomic_t fscache_n_updates_null;
62747-extern atomic_t fscache_n_updates_run;
62748+extern atomic_unchecked_t fscache_n_updates;
62749+extern atomic_unchecked_t fscache_n_updates_null;
62750+extern atomic_unchecked_t fscache_n_updates_run;
62751
62752-extern atomic_t fscache_n_relinquishes;
62753-extern atomic_t fscache_n_relinquishes_null;
62754-extern atomic_t fscache_n_relinquishes_waitcrt;
62755-extern atomic_t fscache_n_relinquishes_retire;
62756+extern atomic_unchecked_t fscache_n_relinquishes;
62757+extern atomic_unchecked_t fscache_n_relinquishes_null;
62758+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62759+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62760
62761-extern atomic_t fscache_n_cookie_index;
62762-extern atomic_t fscache_n_cookie_data;
62763-extern atomic_t fscache_n_cookie_special;
62764+extern atomic_unchecked_t fscache_n_cookie_index;
62765+extern atomic_unchecked_t fscache_n_cookie_data;
62766+extern atomic_unchecked_t fscache_n_cookie_special;
62767
62768-extern atomic_t fscache_n_object_alloc;
62769-extern atomic_t fscache_n_object_no_alloc;
62770-extern atomic_t fscache_n_object_lookups;
62771-extern atomic_t fscache_n_object_lookups_negative;
62772-extern atomic_t fscache_n_object_lookups_positive;
62773-extern atomic_t fscache_n_object_lookups_timed_out;
62774-extern atomic_t fscache_n_object_created;
62775-extern atomic_t fscache_n_object_avail;
62776-extern atomic_t fscache_n_object_dead;
62777+extern atomic_unchecked_t fscache_n_object_alloc;
62778+extern atomic_unchecked_t fscache_n_object_no_alloc;
62779+extern atomic_unchecked_t fscache_n_object_lookups;
62780+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62781+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62782+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62783+extern atomic_unchecked_t fscache_n_object_created;
62784+extern atomic_unchecked_t fscache_n_object_avail;
62785+extern atomic_unchecked_t fscache_n_object_dead;
62786
62787-extern atomic_t fscache_n_checkaux_none;
62788-extern atomic_t fscache_n_checkaux_okay;
62789-extern atomic_t fscache_n_checkaux_update;
62790-extern atomic_t fscache_n_checkaux_obsolete;
62791+extern atomic_unchecked_t fscache_n_checkaux_none;
62792+extern atomic_unchecked_t fscache_n_checkaux_okay;
62793+extern atomic_unchecked_t fscache_n_checkaux_update;
62794+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62795
62796 extern atomic_t fscache_n_cop_alloc_object;
62797 extern atomic_t fscache_n_cop_lookup_object;
62798@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62799 atomic_inc(stat);
62800 }
62801
62802+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62803+{
62804+ atomic_inc_unchecked(stat);
62805+}
62806+
62807 static inline void fscache_stat_d(atomic_t *stat)
62808 {
62809 atomic_dec(stat);
62810@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62811
62812 #define __fscache_stat(stat) (NULL)
62813 #define fscache_stat(stat) do {} while (0)
62814+#define fscache_stat_unchecked(stat) do {} while (0)
62815 #define fscache_stat_d(stat) do {} while (0)
62816 #endif
62817
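
The header change keeps both build flavours consistent: with CONFIG_FSCACHE_STATS set, fscache_stat_unchecked() is a real inline; without it, the call must compile away to nothing exactly as fscache_stat() does, or every converted call site would break the !STATS build. The do {} while (0) stub is the standard way to make a no-op macro behave as a single statement. A compilable sketch of the pattern (names shortened for illustration):

#include <stdio.h>

#ifdef CONFIG_STATS
static int n_events;
static inline void stat_inc(void) { n_events++; }
static inline void stat_report(void) { printf("events=%d\n", n_events); }
#else
/* Must still parse as one statement (e.g. in an un-braced if), yet emit no code. */
#define stat_inc()    do {} while (0)
#define stat_report() do {} while (0)
#endif

int main(void)
{
        stat_inc();
        stat_report();
        return 0;
}
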
62818diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62819index da032da..0076ce7 100644
62820--- a/fs/fscache/object.c
62821+++ b/fs/fscache/object.c
62822@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62823 _debug("LOOKUP \"%s\" in \"%s\"",
62824 cookie->def->name, object->cache->tag->name);
62825
62826- fscache_stat(&fscache_n_object_lookups);
62827+ fscache_stat_unchecked(&fscache_n_object_lookups);
62828 fscache_stat(&fscache_n_cop_lookup_object);
62829 ret = object->cache->ops->lookup_object(object);
62830 fscache_stat_d(&fscache_n_cop_lookup_object);
62831@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62832 if (ret == -ETIMEDOUT) {
62833 /* probably stuck behind another object, so move this one to
62834 * the back of the queue */
62835- fscache_stat(&fscache_n_object_lookups_timed_out);
62836+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62837 _leave(" [timeout]");
62838 return NO_TRANSIT;
62839 }
62840@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62841 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62842
62843 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62844- fscache_stat(&fscache_n_object_lookups_negative);
62845+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62846
62847 /* Allow write requests to begin stacking up and read requests to begin
62848 * returning ENODATA.
62849@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62850 /* if we were still looking up, then we must have a positive lookup
62851 * result, in which case there may be data available */
62852 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62853- fscache_stat(&fscache_n_object_lookups_positive);
62854+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62855
62856 /* We do (presumably) have data */
62857 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62858@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62859 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62860 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62861 } else {
62862- fscache_stat(&fscache_n_object_created);
62863+ fscache_stat_unchecked(&fscache_n_object_created);
62864 }
62865
62866 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62867@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62868 fscache_stat_d(&fscache_n_cop_lookup_complete);
62869
62870 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62871- fscache_stat(&fscache_n_object_avail);
62872+ fscache_stat_unchecked(&fscache_n_object_avail);
62873
62874 _leave("");
62875 return transit_to(JUMPSTART_DEPS);
62876@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62877
62878 /* this just shifts the object release to the work processor */
62879 fscache_put_object(object);
62880- fscache_stat(&fscache_n_object_dead);
62881+ fscache_stat_unchecked(&fscache_n_object_dead);
62882
62883 _leave("");
62884 return transit_to(OBJECT_DEAD);
62885@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62886 enum fscache_checkaux result;
62887
62888 if (!object->cookie->def->check_aux) {
62889- fscache_stat(&fscache_n_checkaux_none);
62890+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62891 return FSCACHE_CHECKAUX_OKAY;
62892 }
62893
62894@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62895 switch (result) {
62896 /* entry okay as is */
62897 case FSCACHE_CHECKAUX_OKAY:
62898- fscache_stat(&fscache_n_checkaux_okay);
62899+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62900 break;
62901
62902 /* entry requires update */
62903 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62904- fscache_stat(&fscache_n_checkaux_update);
62905+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62906 break;
62907
62908 /* entry requires deletion */
62909 case FSCACHE_CHECKAUX_OBSOLETE:
62910- fscache_stat(&fscache_n_checkaux_obsolete);
62911+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62912 break;
62913
62914 default:
62915@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62916 {
62917 const struct fscache_state *s;
62918
62919- fscache_stat(&fscache_n_invalidates_run);
62920+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62921 fscache_stat(&fscache_n_cop_invalidate_object);
62922 s = _fscache_invalidate_object(object, event);
62923 fscache_stat_d(&fscache_n_cop_invalidate_object);
62924@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62925 {
62926 _enter("{OBJ%x},%d", object->debug_id, event);
62927
62928- fscache_stat(&fscache_n_updates_run);
62929+ fscache_stat_unchecked(&fscache_n_updates_run);
62930 fscache_stat(&fscache_n_cop_update_object);
62931 object->cache->ops->update_object(object);
62932 fscache_stat_d(&fscache_n_cop_update_object);
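
Note how selective object.c is: the event counters move to fscache_stat_unchecked(), but the fscache_n_cop_* counters around each backend call keep plain fscache_stat()/fscache_stat_d(). Those counters are incremented and decremented in balanced pairs to track in-flight cache-op calls, so they behave like reference counts and presumably stay under the overflow checking on purpose. A toy model of the two kinds side by side (names are mine, reflecting that reading of the patch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int n_lookups;            /* event counter: only ever incremented */
static atomic_int n_cop_in_flight;      /* accounting counter: balanced inc/dec */

static void do_lookup(void)
{
        atomic_fetch_add(&n_lookups, 1);        /* "unchecked" style is fine here */

        atomic_fetch_add(&n_cop_in_flight, 1);  /* refcount-like: worth checking  */
        /* ... call into the cache backend ... */
        atomic_fetch_sub(&n_cop_in_flight, 1);
}

int main(void)
{
        do_lookup();
        printf("lookups=%d in-flight=%d\n",
               atomic_load(&n_lookups), atomic_load(&n_cop_in_flight));
        return 0;
}
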
62933diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62934index e7b87a0..a85d47a 100644
62935--- a/fs/fscache/operation.c
62936+++ b/fs/fscache/operation.c
62937@@ -17,7 +17,7 @@
62938 #include <linux/slab.h>
62939 #include "internal.h"
62940
62941-atomic_t fscache_op_debug_id;
62942+atomic_unchecked_t fscache_op_debug_id;
62943 EXPORT_SYMBOL(fscache_op_debug_id);
62944
62945 /**
62946@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62947 ASSERTCMP(atomic_read(&op->usage), >, 0);
62948 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62949
62950- fscache_stat(&fscache_n_op_enqueue);
62951+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62952 switch (op->flags & FSCACHE_OP_TYPE) {
62953 case FSCACHE_OP_ASYNC:
62954 _debug("queue async");
62955@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62956 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62957 if (op->processor)
62958 fscache_enqueue_operation(op);
62959- fscache_stat(&fscache_n_op_run);
62960+ fscache_stat_unchecked(&fscache_n_op_run);
62961 }
62962
62963 /*
62964@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62965 if (object->n_in_progress > 0) {
62966 atomic_inc(&op->usage);
62967 list_add_tail(&op->pend_link, &object->pending_ops);
62968- fscache_stat(&fscache_n_op_pend);
62969+ fscache_stat_unchecked(&fscache_n_op_pend);
62970 } else if (!list_empty(&object->pending_ops)) {
62971 atomic_inc(&op->usage);
62972 list_add_tail(&op->pend_link, &object->pending_ops);
62973- fscache_stat(&fscache_n_op_pend);
62974+ fscache_stat_unchecked(&fscache_n_op_pend);
62975 fscache_start_operations(object);
62976 } else {
62977 ASSERTCMP(object->n_in_progress, ==, 0);
62978@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62979 object->n_exclusive++; /* reads and writes must wait */
62980 atomic_inc(&op->usage);
62981 list_add_tail(&op->pend_link, &object->pending_ops);
62982- fscache_stat(&fscache_n_op_pend);
62983+ fscache_stat_unchecked(&fscache_n_op_pend);
62984 ret = 0;
62985 } else {
62986 /* If we're in any other state, there must have been an I/O
62987@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62988 if (object->n_exclusive > 0) {
62989 atomic_inc(&op->usage);
62990 list_add_tail(&op->pend_link, &object->pending_ops);
62991- fscache_stat(&fscache_n_op_pend);
62992+ fscache_stat_unchecked(&fscache_n_op_pend);
62993 } else if (!list_empty(&object->pending_ops)) {
62994 atomic_inc(&op->usage);
62995 list_add_tail(&op->pend_link, &object->pending_ops);
62996- fscache_stat(&fscache_n_op_pend);
62997+ fscache_stat_unchecked(&fscache_n_op_pend);
62998 fscache_start_operations(object);
62999 } else {
63000 ASSERTCMP(object->n_exclusive, ==, 0);
63001@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
63002 object->n_ops++;
63003 atomic_inc(&op->usage);
63004 list_add_tail(&op->pend_link, &object->pending_ops);
63005- fscache_stat(&fscache_n_op_pend);
63006+ fscache_stat_unchecked(&fscache_n_op_pend);
63007 ret = 0;
63008 } else if (fscache_object_is_dying(object)) {
63009- fscache_stat(&fscache_n_op_rejected);
63010+ fscache_stat_unchecked(&fscache_n_op_rejected);
63011 op->state = FSCACHE_OP_ST_CANCELLED;
63012 ret = -ENOBUFS;
63013 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
63014@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
63015 ret = -EBUSY;
63016 if (op->state == FSCACHE_OP_ST_PENDING) {
63017 ASSERT(!list_empty(&op->pend_link));
63018- fscache_stat(&fscache_n_op_cancelled);
63019+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63020 list_del_init(&op->pend_link);
63021 if (do_cancel)
63022 do_cancel(op);
63023@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
63024 while (!list_empty(&object->pending_ops)) {
63025 op = list_entry(object->pending_ops.next,
63026 struct fscache_operation, pend_link);
63027- fscache_stat(&fscache_n_op_cancelled);
63028+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63029 list_del_init(&op->pend_link);
63030
63031 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
63032@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
63033 op->state, ==, FSCACHE_OP_ST_CANCELLED);
63034 op->state = FSCACHE_OP_ST_DEAD;
63035
63036- fscache_stat(&fscache_n_op_release);
63037+ fscache_stat_unchecked(&fscache_n_op_release);
63038
63039 if (op->release) {
63040 op->release(op);
63041@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
63042 * lock, and defer it otherwise */
63043 if (!spin_trylock(&object->lock)) {
63044 _debug("defer put");
63045- fscache_stat(&fscache_n_op_deferred_release);
63046+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
63047
63048 cache = object->cache;
63049 spin_lock(&cache->op_gc_list_lock);
63050@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
63051
63052 _debug("GC DEFERRED REL OBJ%x OP%x",
63053 object->debug_id, op->debug_id);
63054- fscache_stat(&fscache_n_op_gc);
63055+ fscache_stat_unchecked(&fscache_n_op_gc);
63056
63057 ASSERTCMP(atomic_read(&op->usage), ==, 0);
63058 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
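
operation.c also retypes fscache_op_debug_id itself so that atomic_inc_return_unchecked() can hand out operation IDs without overflow instrumentation: an ID that wraps after 2^32 operations only needs to be distinct enough for log correlation. A minimal user-space sketch of such an allocator:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint next_debug_id;

/* Hand out a wrapping ID for debug output; uniqueness is best-effort. */
static uint32_t alloc_debug_id(void)
{
        return atomic_fetch_add_explicit(&next_debug_id, 1,
                                         memory_order_relaxed) + 1;
}

int main(void)
{
        printf("OP%x OP%x\n", alloc_debug_id(), alloc_debug_id());
        return 0;
}
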
63059diff --git a/fs/fscache/page.c b/fs/fscache/page.c
63060index de33b3f..8be4d29 100644
63061--- a/fs/fscache/page.c
63062+++ b/fs/fscache/page.c
63063@@ -74,7 +74,7 @@ try_again:
63064 val = radix_tree_lookup(&cookie->stores, page->index);
63065 if (!val) {
63066 rcu_read_unlock();
63067- fscache_stat(&fscache_n_store_vmscan_not_storing);
63068+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
63069 __fscache_uncache_page(cookie, page);
63070 return true;
63071 }
63072@@ -104,11 +104,11 @@ try_again:
63073 spin_unlock(&cookie->stores_lock);
63074
63075 if (xpage) {
63076- fscache_stat(&fscache_n_store_vmscan_cancelled);
63077- fscache_stat(&fscache_n_store_radix_deletes);
63078+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
63079+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63080 ASSERTCMP(xpage, ==, page);
63081 } else {
63082- fscache_stat(&fscache_n_store_vmscan_gone);
63083+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63084 }
63085
63086 wake_up_bit(&cookie->flags, 0);
63087@@ -123,11 +123,11 @@ page_busy:
63088 * sleeping on memory allocation, so we may need to impose a timeout
63089 * too. */
63090 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63091- fscache_stat(&fscache_n_store_vmscan_busy);
63092+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63093 return false;
63094 }
63095
63096- fscache_stat(&fscache_n_store_vmscan_wait);
63097+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63098 if (!release_page_wait_timeout(cookie, page))
63099 _debug("fscache writeout timeout page: %p{%lx}",
63100 page, page->index);
63101@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63102 FSCACHE_COOKIE_STORING_TAG);
63103 if (!radix_tree_tag_get(&cookie->stores, page->index,
63104 FSCACHE_COOKIE_PENDING_TAG)) {
63105- fscache_stat(&fscache_n_store_radix_deletes);
63106+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63107 xpage = radix_tree_delete(&cookie->stores, page->index);
63108 }
63109 spin_unlock(&cookie->stores_lock);
63110@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63111
63112 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63113
63114- fscache_stat(&fscache_n_attr_changed_calls);
63115+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63116
63117 if (fscache_object_is_active(object)) {
63118 fscache_stat(&fscache_n_cop_attr_changed);
63119@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63120
63121 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63122
63123- fscache_stat(&fscache_n_attr_changed);
63124+ fscache_stat_unchecked(&fscache_n_attr_changed);
63125
63126 op = kzalloc(sizeof(*op), GFP_KERNEL);
63127 if (!op) {
63128- fscache_stat(&fscache_n_attr_changed_nomem);
63129+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
63130 _leave(" = -ENOMEM");
63131 return -ENOMEM;
63132 }
63133@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63134 if (fscache_submit_exclusive_op(object, op) < 0)
63135 goto nobufs_dec;
63136 spin_unlock(&cookie->lock);
63137- fscache_stat(&fscache_n_attr_changed_ok);
63138+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
63139 fscache_put_operation(op);
63140 _leave(" = 0");
63141 return 0;
63142@@ -242,7 +242,7 @@ nobufs:
63143 kfree(op);
63144 if (wake_cookie)
63145 __fscache_wake_unused_cookie(cookie);
63146- fscache_stat(&fscache_n_attr_changed_nobufs);
63147+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
63148 _leave(" = %d", -ENOBUFS);
63149 return -ENOBUFS;
63150 }
63151@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
63152 /* allocate a retrieval operation and attempt to submit it */
63153 op = kzalloc(sizeof(*op), GFP_NOIO);
63154 if (!op) {
63155- fscache_stat(&fscache_n_retrievals_nomem);
63156+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63157 return NULL;
63158 }
63159
63160@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
63161 return 0;
63162 }
63163
63164- fscache_stat(&fscache_n_retrievals_wait);
63165+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
63166
63167 jif = jiffies;
63168 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
63169 TASK_INTERRUPTIBLE) != 0) {
63170- fscache_stat(&fscache_n_retrievals_intr);
63171+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63172 _leave(" = -ERESTARTSYS");
63173 return -ERESTARTSYS;
63174 }
63175@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
63176 */
63177 int fscache_wait_for_operation_activation(struct fscache_object *object,
63178 struct fscache_operation *op,
63179- atomic_t *stat_op_waits,
63180- atomic_t *stat_object_dead,
63181+ atomic_unchecked_t *stat_op_waits,
63182+ atomic_unchecked_t *stat_object_dead,
63183 void (*do_cancel)(struct fscache_operation *))
63184 {
63185 int ret;
63186@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63187
63188 _debug(">>> WT");
63189 if (stat_op_waits)
63190- fscache_stat(stat_op_waits);
63191+ fscache_stat_unchecked(stat_op_waits);
63192 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
63193 TASK_INTERRUPTIBLE) != 0) {
63194 ret = fscache_cancel_op(op, do_cancel);
63195@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63196 check_if_dead:
63197 if (op->state == FSCACHE_OP_ST_CANCELLED) {
63198 if (stat_object_dead)
63199- fscache_stat(stat_object_dead);
63200+ fscache_stat_unchecked(stat_object_dead);
63201 _leave(" = -ENOBUFS [cancelled]");
63202 return -ENOBUFS;
63203 }
63204@@ -381,7 +381,7 @@ check_if_dead:
63205 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63206 fscache_cancel_op(op, do_cancel);
63207 if (stat_object_dead)
63208- fscache_stat(stat_object_dead);
63209+ fscache_stat_unchecked(stat_object_dead);
63210 return -ENOBUFS;
63211 }
63212 return 0;
63213@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63214
63215 _enter("%p,%p,,,", cookie, page);
63216
63217- fscache_stat(&fscache_n_retrievals);
63218+ fscache_stat_unchecked(&fscache_n_retrievals);
63219
63220 if (hlist_empty(&cookie->backing_objects))
63221 goto nobufs;
63222@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63223 goto nobufs_unlock_dec;
63224 spin_unlock(&cookie->lock);
63225
63226- fscache_stat(&fscache_n_retrieval_ops);
63227+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63228
63229 /* pin the netfs read context in case we need to do the actual netfs
63230 * read because we've encountered a cache read failure */
63231@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63232
63233 error:
63234 if (ret == -ENOMEM)
63235- fscache_stat(&fscache_n_retrievals_nomem);
63236+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63237 else if (ret == -ERESTARTSYS)
63238- fscache_stat(&fscache_n_retrievals_intr);
63239+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63240 else if (ret == -ENODATA)
63241- fscache_stat(&fscache_n_retrievals_nodata);
63242+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63243 else if (ret < 0)
63244- fscache_stat(&fscache_n_retrievals_nobufs);
63245+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63246 else
63247- fscache_stat(&fscache_n_retrievals_ok);
63248+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63249
63250 fscache_put_retrieval(op);
63251 _leave(" = %d", ret);
63252@@ -505,7 +505,7 @@ nobufs_unlock:
63253 __fscache_wake_unused_cookie(cookie);
63254 kfree(op);
63255 nobufs:
63256- fscache_stat(&fscache_n_retrievals_nobufs);
63257+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63258 _leave(" = -ENOBUFS");
63259 return -ENOBUFS;
63260 }
63261@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63262
63263 _enter("%p,,%d,,,", cookie, *nr_pages);
63264
63265- fscache_stat(&fscache_n_retrievals);
63266+ fscache_stat_unchecked(&fscache_n_retrievals);
63267
63268 if (hlist_empty(&cookie->backing_objects))
63269 goto nobufs;
63270@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63271 goto nobufs_unlock_dec;
63272 spin_unlock(&cookie->lock);
63273
63274- fscache_stat(&fscache_n_retrieval_ops);
63275+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63276
63277 /* pin the netfs read context in case we need to do the actual netfs
63278 * read because we've encountered a cache read failure */
63279@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63280
63281 error:
63282 if (ret == -ENOMEM)
63283- fscache_stat(&fscache_n_retrievals_nomem);
63284+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63285 else if (ret == -ERESTARTSYS)
63286- fscache_stat(&fscache_n_retrievals_intr);
63287+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63288 else if (ret == -ENODATA)
63289- fscache_stat(&fscache_n_retrievals_nodata);
63290+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63291 else if (ret < 0)
63292- fscache_stat(&fscache_n_retrievals_nobufs);
63293+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63294 else
63295- fscache_stat(&fscache_n_retrievals_ok);
63296+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63297
63298 fscache_put_retrieval(op);
63299 _leave(" = %d", ret);
63300@@ -636,7 +636,7 @@ nobufs_unlock:
63301 if (wake_cookie)
63302 __fscache_wake_unused_cookie(cookie);
63303 nobufs:
63304- fscache_stat(&fscache_n_retrievals_nobufs);
63305+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63306 _leave(" = -ENOBUFS");
63307 return -ENOBUFS;
63308 }
63309@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63310
63311 _enter("%p,%p,,,", cookie, page);
63312
63313- fscache_stat(&fscache_n_allocs);
63314+ fscache_stat_unchecked(&fscache_n_allocs);
63315
63316 if (hlist_empty(&cookie->backing_objects))
63317 goto nobufs;
63318@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63319 goto nobufs_unlock_dec;
63320 spin_unlock(&cookie->lock);
63321
63322- fscache_stat(&fscache_n_alloc_ops);
63323+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63324
63325 ret = fscache_wait_for_operation_activation(
63326 object, &op->op,
63327@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63328
63329 error:
63330 if (ret == -ERESTARTSYS)
63331- fscache_stat(&fscache_n_allocs_intr);
63332+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63333 else if (ret < 0)
63334- fscache_stat(&fscache_n_allocs_nobufs);
63335+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63336 else
63337- fscache_stat(&fscache_n_allocs_ok);
63338+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63339
63340 fscache_put_retrieval(op);
63341 _leave(" = %d", ret);
63342@@ -730,7 +730,7 @@ nobufs_unlock:
63343 if (wake_cookie)
63344 __fscache_wake_unused_cookie(cookie);
63345 nobufs:
63346- fscache_stat(&fscache_n_allocs_nobufs);
63347+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63348 _leave(" = -ENOBUFS");
63349 return -ENOBUFS;
63350 }
63351@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63352
63353 spin_lock(&cookie->stores_lock);
63354
63355- fscache_stat(&fscache_n_store_calls);
63356+ fscache_stat_unchecked(&fscache_n_store_calls);
63357
63358 /* find a page to store */
63359 page = NULL;
63360@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63361 page = results[0];
63362 _debug("gang %d [%lx]", n, page->index);
63363 if (page->index > op->store_limit) {
63364- fscache_stat(&fscache_n_store_pages_over_limit);
63365+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63366 goto superseded;
63367 }
63368
63369@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63370 spin_unlock(&cookie->stores_lock);
63371 spin_unlock(&object->lock);
63372
63373- fscache_stat(&fscache_n_store_pages);
63374+ fscache_stat_unchecked(&fscache_n_store_pages);
63375 fscache_stat(&fscache_n_cop_write_page);
63376 ret = object->cache->ops->write_page(op, page);
63377 fscache_stat_d(&fscache_n_cop_write_page);
63378@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63379 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63380 ASSERT(PageFsCache(page));
63381
63382- fscache_stat(&fscache_n_stores);
63383+ fscache_stat_unchecked(&fscache_n_stores);
63384
63385 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63386 _leave(" = -ENOBUFS [invalidating]");
63387@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63388 spin_unlock(&cookie->stores_lock);
63389 spin_unlock(&object->lock);
63390
63391- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63392+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63393 op->store_limit = object->store_limit;
63394
63395 __fscache_use_cookie(cookie);
63396@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63397
63398 spin_unlock(&cookie->lock);
63399 radix_tree_preload_end();
63400- fscache_stat(&fscache_n_store_ops);
63401- fscache_stat(&fscache_n_stores_ok);
63402+ fscache_stat_unchecked(&fscache_n_store_ops);
63403+ fscache_stat_unchecked(&fscache_n_stores_ok);
63404
63405 /* the work queue now carries its own ref on the object */
63406 fscache_put_operation(&op->op);
63407@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63408 return 0;
63409
63410 already_queued:
63411- fscache_stat(&fscache_n_stores_again);
63412+ fscache_stat_unchecked(&fscache_n_stores_again);
63413 already_pending:
63414 spin_unlock(&cookie->stores_lock);
63415 spin_unlock(&object->lock);
63416 spin_unlock(&cookie->lock);
63417 radix_tree_preload_end();
63418 kfree(op);
63419- fscache_stat(&fscache_n_stores_ok);
63420+ fscache_stat_unchecked(&fscache_n_stores_ok);
63421 _leave(" = 0");
63422 return 0;
63423
63424@@ -1039,14 +1039,14 @@ nobufs:
63425 kfree(op);
63426 if (wake_cookie)
63427 __fscache_wake_unused_cookie(cookie);
63428- fscache_stat(&fscache_n_stores_nobufs);
63429+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63430 _leave(" = -ENOBUFS");
63431 return -ENOBUFS;
63432
63433 nomem_free:
63434 kfree(op);
63435 nomem:
63436- fscache_stat(&fscache_n_stores_oom);
63437+ fscache_stat_unchecked(&fscache_n_stores_oom);
63438 _leave(" = -ENOMEM");
63439 return -ENOMEM;
63440 }
63441@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63442 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63443 ASSERTCMP(page, !=, NULL);
63444
63445- fscache_stat(&fscache_n_uncaches);
63446+ fscache_stat_unchecked(&fscache_n_uncaches);
63447
63448 /* cache withdrawal may beat us to it */
63449 if (!PageFsCache(page))
63450@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63451 struct fscache_cookie *cookie = op->op.object->cookie;
63452
63453 #ifdef CONFIG_FSCACHE_STATS
63454- atomic_inc(&fscache_n_marks);
63455+ atomic_inc_unchecked(&fscache_n_marks);
63456 #endif
63457
63458 _debug("- mark %p{%lx}", page, page->index);
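
Because fscache_wait_for_operation_activation() takes pointers to the (optional, NULL-able) stat counters, its parameter types in page.c must change in lock-step with the prototype in internal.h, otherwise neither file would compile. A sketch of the NULL-guarded optional-stat idiom, with a stand-in type for atomic_unchecked_t:

#include <stdatomic.h>
#include <stddef.h>

typedef atomic_int counter_t;   /* stand-in for atomic_unchecked_t */

/* Callers that do not want stats simply pass NULL. */
static int wait_for_activation(counter_t *stat_op_waits,
                               counter_t *stat_object_dead)
{
        if (stat_op_waits)
                atomic_fetch_add(stat_op_waits, 1);
        /* ... block until the operation is granted or cancelled ... */
        (void)stat_object_dead;
        return 0;
}

int main(void)
{
        counter_t waits = 0;
        return wait_for_activation(&waits, NULL);
}
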
63459diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63460index 40d13c7..ddf52b9 100644
63461--- a/fs/fscache/stats.c
63462+++ b/fs/fscache/stats.c
63463@@ -18,99 +18,99 @@
63464 /*
63465 * operation counters
63466 */
63467-atomic_t fscache_n_op_pend;
63468-atomic_t fscache_n_op_run;
63469-atomic_t fscache_n_op_enqueue;
63470-atomic_t fscache_n_op_requeue;
63471-atomic_t fscache_n_op_deferred_release;
63472-atomic_t fscache_n_op_release;
63473-atomic_t fscache_n_op_gc;
63474-atomic_t fscache_n_op_cancelled;
63475-atomic_t fscache_n_op_rejected;
63476+atomic_unchecked_t fscache_n_op_pend;
63477+atomic_unchecked_t fscache_n_op_run;
63478+atomic_unchecked_t fscache_n_op_enqueue;
63479+atomic_unchecked_t fscache_n_op_requeue;
63480+atomic_unchecked_t fscache_n_op_deferred_release;
63481+atomic_unchecked_t fscache_n_op_release;
63482+atomic_unchecked_t fscache_n_op_gc;
63483+atomic_unchecked_t fscache_n_op_cancelled;
63484+atomic_unchecked_t fscache_n_op_rejected;
63485
63486-atomic_t fscache_n_attr_changed;
63487-atomic_t fscache_n_attr_changed_ok;
63488-atomic_t fscache_n_attr_changed_nobufs;
63489-atomic_t fscache_n_attr_changed_nomem;
63490-atomic_t fscache_n_attr_changed_calls;
63491+atomic_unchecked_t fscache_n_attr_changed;
63492+atomic_unchecked_t fscache_n_attr_changed_ok;
63493+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63494+atomic_unchecked_t fscache_n_attr_changed_nomem;
63495+atomic_unchecked_t fscache_n_attr_changed_calls;
63496
63497-atomic_t fscache_n_allocs;
63498-atomic_t fscache_n_allocs_ok;
63499-atomic_t fscache_n_allocs_wait;
63500-atomic_t fscache_n_allocs_nobufs;
63501-atomic_t fscache_n_allocs_intr;
63502-atomic_t fscache_n_allocs_object_dead;
63503-atomic_t fscache_n_alloc_ops;
63504-atomic_t fscache_n_alloc_op_waits;
63505+atomic_unchecked_t fscache_n_allocs;
63506+atomic_unchecked_t fscache_n_allocs_ok;
63507+atomic_unchecked_t fscache_n_allocs_wait;
63508+atomic_unchecked_t fscache_n_allocs_nobufs;
63509+atomic_unchecked_t fscache_n_allocs_intr;
63510+atomic_unchecked_t fscache_n_allocs_object_dead;
63511+atomic_unchecked_t fscache_n_alloc_ops;
63512+atomic_unchecked_t fscache_n_alloc_op_waits;
63513
63514-atomic_t fscache_n_retrievals;
63515-atomic_t fscache_n_retrievals_ok;
63516-atomic_t fscache_n_retrievals_wait;
63517-atomic_t fscache_n_retrievals_nodata;
63518-atomic_t fscache_n_retrievals_nobufs;
63519-atomic_t fscache_n_retrievals_intr;
63520-atomic_t fscache_n_retrievals_nomem;
63521-atomic_t fscache_n_retrievals_object_dead;
63522-atomic_t fscache_n_retrieval_ops;
63523-atomic_t fscache_n_retrieval_op_waits;
63524+atomic_unchecked_t fscache_n_retrievals;
63525+atomic_unchecked_t fscache_n_retrievals_ok;
63526+atomic_unchecked_t fscache_n_retrievals_wait;
63527+atomic_unchecked_t fscache_n_retrievals_nodata;
63528+atomic_unchecked_t fscache_n_retrievals_nobufs;
63529+atomic_unchecked_t fscache_n_retrievals_intr;
63530+atomic_unchecked_t fscache_n_retrievals_nomem;
63531+atomic_unchecked_t fscache_n_retrievals_object_dead;
63532+atomic_unchecked_t fscache_n_retrieval_ops;
63533+atomic_unchecked_t fscache_n_retrieval_op_waits;
63534
63535-atomic_t fscache_n_stores;
63536-atomic_t fscache_n_stores_ok;
63537-atomic_t fscache_n_stores_again;
63538-atomic_t fscache_n_stores_nobufs;
63539-atomic_t fscache_n_stores_oom;
63540-atomic_t fscache_n_store_ops;
63541-atomic_t fscache_n_store_calls;
63542-atomic_t fscache_n_store_pages;
63543-atomic_t fscache_n_store_radix_deletes;
63544-atomic_t fscache_n_store_pages_over_limit;
63545+atomic_unchecked_t fscache_n_stores;
63546+atomic_unchecked_t fscache_n_stores_ok;
63547+atomic_unchecked_t fscache_n_stores_again;
63548+atomic_unchecked_t fscache_n_stores_nobufs;
63549+atomic_unchecked_t fscache_n_stores_oom;
63550+atomic_unchecked_t fscache_n_store_ops;
63551+atomic_unchecked_t fscache_n_store_calls;
63552+atomic_unchecked_t fscache_n_store_pages;
63553+atomic_unchecked_t fscache_n_store_radix_deletes;
63554+atomic_unchecked_t fscache_n_store_pages_over_limit;
63555
63556-atomic_t fscache_n_store_vmscan_not_storing;
63557-atomic_t fscache_n_store_vmscan_gone;
63558-atomic_t fscache_n_store_vmscan_busy;
63559-atomic_t fscache_n_store_vmscan_cancelled;
63560-atomic_t fscache_n_store_vmscan_wait;
63561+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63562+atomic_unchecked_t fscache_n_store_vmscan_gone;
63563+atomic_unchecked_t fscache_n_store_vmscan_busy;
63564+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63565+atomic_unchecked_t fscache_n_store_vmscan_wait;
63566
63567-atomic_t fscache_n_marks;
63568-atomic_t fscache_n_uncaches;
63569+atomic_unchecked_t fscache_n_marks;
63570+atomic_unchecked_t fscache_n_uncaches;
63571
63572-atomic_t fscache_n_acquires;
63573-atomic_t fscache_n_acquires_null;
63574-atomic_t fscache_n_acquires_no_cache;
63575-atomic_t fscache_n_acquires_ok;
63576-atomic_t fscache_n_acquires_nobufs;
63577-atomic_t fscache_n_acquires_oom;
63578+atomic_unchecked_t fscache_n_acquires;
63579+atomic_unchecked_t fscache_n_acquires_null;
63580+atomic_unchecked_t fscache_n_acquires_no_cache;
63581+atomic_unchecked_t fscache_n_acquires_ok;
63582+atomic_unchecked_t fscache_n_acquires_nobufs;
63583+atomic_unchecked_t fscache_n_acquires_oom;
63584
63585-atomic_t fscache_n_invalidates;
63586-atomic_t fscache_n_invalidates_run;
63587+atomic_unchecked_t fscache_n_invalidates;
63588+atomic_unchecked_t fscache_n_invalidates_run;
63589
63590-atomic_t fscache_n_updates;
63591-atomic_t fscache_n_updates_null;
63592-atomic_t fscache_n_updates_run;
63593+atomic_unchecked_t fscache_n_updates;
63594+atomic_unchecked_t fscache_n_updates_null;
63595+atomic_unchecked_t fscache_n_updates_run;
63596
63597-atomic_t fscache_n_relinquishes;
63598-atomic_t fscache_n_relinquishes_null;
63599-atomic_t fscache_n_relinquishes_waitcrt;
63600-atomic_t fscache_n_relinquishes_retire;
63601+atomic_unchecked_t fscache_n_relinquishes;
63602+atomic_unchecked_t fscache_n_relinquishes_null;
63603+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63604+atomic_unchecked_t fscache_n_relinquishes_retire;
63605
63606-atomic_t fscache_n_cookie_index;
63607-atomic_t fscache_n_cookie_data;
63608-atomic_t fscache_n_cookie_special;
63609+atomic_unchecked_t fscache_n_cookie_index;
63610+atomic_unchecked_t fscache_n_cookie_data;
63611+atomic_unchecked_t fscache_n_cookie_special;
63612
63613-atomic_t fscache_n_object_alloc;
63614-atomic_t fscache_n_object_no_alloc;
63615-atomic_t fscache_n_object_lookups;
63616-atomic_t fscache_n_object_lookups_negative;
63617-atomic_t fscache_n_object_lookups_positive;
63618-atomic_t fscache_n_object_lookups_timed_out;
63619-atomic_t fscache_n_object_created;
63620-atomic_t fscache_n_object_avail;
63621-atomic_t fscache_n_object_dead;
63622+atomic_unchecked_t fscache_n_object_alloc;
63623+atomic_unchecked_t fscache_n_object_no_alloc;
63624+atomic_unchecked_t fscache_n_object_lookups;
63625+atomic_unchecked_t fscache_n_object_lookups_negative;
63626+atomic_unchecked_t fscache_n_object_lookups_positive;
63627+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63628+atomic_unchecked_t fscache_n_object_created;
63629+atomic_unchecked_t fscache_n_object_avail;
63630+atomic_unchecked_t fscache_n_object_dead;
63631
63632-atomic_t fscache_n_checkaux_none;
63633-atomic_t fscache_n_checkaux_okay;
63634-atomic_t fscache_n_checkaux_update;
63635-atomic_t fscache_n_checkaux_obsolete;
63636+atomic_unchecked_t fscache_n_checkaux_none;
63637+atomic_unchecked_t fscache_n_checkaux_okay;
63638+atomic_unchecked_t fscache_n_checkaux_update;
63639+atomic_unchecked_t fscache_n_checkaux_obsolete;
63640
63641 atomic_t fscache_n_cop_alloc_object;
63642 atomic_t fscache_n_cop_lookup_object;
63643@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63644 seq_puts(m, "FS-Cache statistics\n");
63645
63646 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63647- atomic_read(&fscache_n_cookie_index),
63648- atomic_read(&fscache_n_cookie_data),
63649- atomic_read(&fscache_n_cookie_special));
63650+ atomic_read_unchecked(&fscache_n_cookie_index),
63651+ atomic_read_unchecked(&fscache_n_cookie_data),
63652+ atomic_read_unchecked(&fscache_n_cookie_special));
63653
63654 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63655- atomic_read(&fscache_n_object_alloc),
63656- atomic_read(&fscache_n_object_no_alloc),
63657- atomic_read(&fscache_n_object_avail),
63658- atomic_read(&fscache_n_object_dead));
63659+ atomic_read_unchecked(&fscache_n_object_alloc),
63660+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63661+ atomic_read_unchecked(&fscache_n_object_avail),
63662+ atomic_read_unchecked(&fscache_n_object_dead));
63663 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63664- atomic_read(&fscache_n_checkaux_none),
63665- atomic_read(&fscache_n_checkaux_okay),
63666- atomic_read(&fscache_n_checkaux_update),
63667- atomic_read(&fscache_n_checkaux_obsolete));
63668+ atomic_read_unchecked(&fscache_n_checkaux_none),
63669+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63670+ atomic_read_unchecked(&fscache_n_checkaux_update),
63671+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63672
63673 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63674- atomic_read(&fscache_n_marks),
63675- atomic_read(&fscache_n_uncaches));
63676+ atomic_read_unchecked(&fscache_n_marks),
63677+ atomic_read_unchecked(&fscache_n_uncaches));
63678
63679 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63680 " oom=%u\n",
63681- atomic_read(&fscache_n_acquires),
63682- atomic_read(&fscache_n_acquires_null),
63683- atomic_read(&fscache_n_acquires_no_cache),
63684- atomic_read(&fscache_n_acquires_ok),
63685- atomic_read(&fscache_n_acquires_nobufs),
63686- atomic_read(&fscache_n_acquires_oom));
63687+ atomic_read_unchecked(&fscache_n_acquires),
63688+ atomic_read_unchecked(&fscache_n_acquires_null),
63689+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63690+ atomic_read_unchecked(&fscache_n_acquires_ok),
63691+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63692+ atomic_read_unchecked(&fscache_n_acquires_oom));
63693
63694 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63695- atomic_read(&fscache_n_object_lookups),
63696- atomic_read(&fscache_n_object_lookups_negative),
63697- atomic_read(&fscache_n_object_lookups_positive),
63698- atomic_read(&fscache_n_object_created),
63699- atomic_read(&fscache_n_object_lookups_timed_out));
63700+ atomic_read_unchecked(&fscache_n_object_lookups),
63701+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63702+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63703+ atomic_read_unchecked(&fscache_n_object_created),
63704+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63705
63706 seq_printf(m, "Invals : n=%u run=%u\n",
63707- atomic_read(&fscache_n_invalidates),
63708- atomic_read(&fscache_n_invalidates_run));
63709+ atomic_read_unchecked(&fscache_n_invalidates),
63710+ atomic_read_unchecked(&fscache_n_invalidates_run));
63711
63712 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63713- atomic_read(&fscache_n_updates),
63714- atomic_read(&fscache_n_updates_null),
63715- atomic_read(&fscache_n_updates_run));
63716+ atomic_read_unchecked(&fscache_n_updates),
63717+ atomic_read_unchecked(&fscache_n_updates_null),
63718+ atomic_read_unchecked(&fscache_n_updates_run));
63719
63720 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63721- atomic_read(&fscache_n_relinquishes),
63722- atomic_read(&fscache_n_relinquishes_null),
63723- atomic_read(&fscache_n_relinquishes_waitcrt),
63724- atomic_read(&fscache_n_relinquishes_retire));
63725+ atomic_read_unchecked(&fscache_n_relinquishes),
63726+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63727+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63728+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63729
63730 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63731- atomic_read(&fscache_n_attr_changed),
63732- atomic_read(&fscache_n_attr_changed_ok),
63733- atomic_read(&fscache_n_attr_changed_nobufs),
63734- atomic_read(&fscache_n_attr_changed_nomem),
63735- atomic_read(&fscache_n_attr_changed_calls));
63736+ atomic_read_unchecked(&fscache_n_attr_changed),
63737+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63738+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63739+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63740+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63741
63742 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63743- atomic_read(&fscache_n_allocs),
63744- atomic_read(&fscache_n_allocs_ok),
63745- atomic_read(&fscache_n_allocs_wait),
63746- atomic_read(&fscache_n_allocs_nobufs),
63747- atomic_read(&fscache_n_allocs_intr));
63748+ atomic_read_unchecked(&fscache_n_allocs),
63749+ atomic_read_unchecked(&fscache_n_allocs_ok),
63750+ atomic_read_unchecked(&fscache_n_allocs_wait),
63751+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63752+ atomic_read_unchecked(&fscache_n_allocs_intr));
63753 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63754- atomic_read(&fscache_n_alloc_ops),
63755- atomic_read(&fscache_n_alloc_op_waits),
63756- atomic_read(&fscache_n_allocs_object_dead));
63757+ atomic_read_unchecked(&fscache_n_alloc_ops),
63758+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63759+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63760
63761 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63762 " int=%u oom=%u\n",
63763- atomic_read(&fscache_n_retrievals),
63764- atomic_read(&fscache_n_retrievals_ok),
63765- atomic_read(&fscache_n_retrievals_wait),
63766- atomic_read(&fscache_n_retrievals_nodata),
63767- atomic_read(&fscache_n_retrievals_nobufs),
63768- atomic_read(&fscache_n_retrievals_intr),
63769- atomic_read(&fscache_n_retrievals_nomem));
63770+ atomic_read_unchecked(&fscache_n_retrievals),
63771+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63772+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63773+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63774+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63775+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63776+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63777 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63778- atomic_read(&fscache_n_retrieval_ops),
63779- atomic_read(&fscache_n_retrieval_op_waits),
63780- atomic_read(&fscache_n_retrievals_object_dead));
63781+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63782+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63783+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63784
63785 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63786- atomic_read(&fscache_n_stores),
63787- atomic_read(&fscache_n_stores_ok),
63788- atomic_read(&fscache_n_stores_again),
63789- atomic_read(&fscache_n_stores_nobufs),
63790- atomic_read(&fscache_n_stores_oom));
63791+ atomic_read_unchecked(&fscache_n_stores),
63792+ atomic_read_unchecked(&fscache_n_stores_ok),
63793+ atomic_read_unchecked(&fscache_n_stores_again),
63794+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63795+ atomic_read_unchecked(&fscache_n_stores_oom));
63796 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63797- atomic_read(&fscache_n_store_ops),
63798- atomic_read(&fscache_n_store_calls),
63799- atomic_read(&fscache_n_store_pages),
63800- atomic_read(&fscache_n_store_radix_deletes),
63801- atomic_read(&fscache_n_store_pages_over_limit));
63802+ atomic_read_unchecked(&fscache_n_store_ops),
63803+ atomic_read_unchecked(&fscache_n_store_calls),
63804+ atomic_read_unchecked(&fscache_n_store_pages),
63805+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63806+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63807
63808 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63809- atomic_read(&fscache_n_store_vmscan_not_storing),
63810- atomic_read(&fscache_n_store_vmscan_gone),
63811- atomic_read(&fscache_n_store_vmscan_busy),
63812- atomic_read(&fscache_n_store_vmscan_cancelled),
63813- atomic_read(&fscache_n_store_vmscan_wait));
63814+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63815+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63816+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63817+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63818+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63819
63820 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63821- atomic_read(&fscache_n_op_pend),
63822- atomic_read(&fscache_n_op_run),
63823- atomic_read(&fscache_n_op_enqueue),
63824- atomic_read(&fscache_n_op_cancelled),
63825- atomic_read(&fscache_n_op_rejected));
63826+ atomic_read_unchecked(&fscache_n_op_pend),
63827+ atomic_read_unchecked(&fscache_n_op_run),
63828+ atomic_read_unchecked(&fscache_n_op_enqueue),
63829+ atomic_read_unchecked(&fscache_n_op_cancelled),
63830+ atomic_read_unchecked(&fscache_n_op_rejected));
63831 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63832- atomic_read(&fscache_n_op_deferred_release),
63833- atomic_read(&fscache_n_op_release),
63834- atomic_read(&fscache_n_op_gc));
63835+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63836+ atomic_read_unchecked(&fscache_n_op_release),
63837+ atomic_read_unchecked(&fscache_n_op_gc));
63838
63839 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63840 atomic_read(&fscache_n_cop_alloc_object),
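
On the read side, stats.c swaps atomic_read() for atomic_read_unchecked() wherever the counter type changed; the values are only a snapshot for seq_file output, so neither ordering nor cross-counter consistency matters. An illustrative dump loop in the same spirit (counter names invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int n_stores, n_stores_ok;

static void stats_show(FILE *m)
{
        /* Relaxed snapshot reads: the two values may be mutually
         * inconsistent, which is acceptable for a statistics dump. */
        fprintf(m, "Stores : n=%u ok=%u\n",
                (unsigned)atomic_load_explicit(&n_stores, memory_order_relaxed),
                (unsigned)atomic_load_explicit(&n_stores_ok, memory_order_relaxed));
}

int main(void)
{
        atomic_fetch_add(&n_stores, 1);
        stats_show(stdout);
        return 0;
}
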
63841diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63842index 28d0c7a..04816b7 100644
63843--- a/fs/fuse/cuse.c
63844+++ b/fs/fuse/cuse.c
63845@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63846 INIT_LIST_HEAD(&cuse_conntbl[i]);
63847
63848 /* inherit and extend fuse_dev_operations */
63849- cuse_channel_fops = fuse_dev_operations;
63850- cuse_channel_fops.owner = THIS_MODULE;
63851- cuse_channel_fops.open = cuse_channel_open;
63852- cuse_channel_fops.release = cuse_channel_release;
63853+ pax_open_kernel();
63854+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63855+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63856+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63857+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63858+ pax_close_kernel();
63859
63860 cuse_class = class_create(THIS_MODULE, "cuse");
63861 if (IS_ERR(cuse_class))
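
The cuse.c hunk belongs to a different grsecurity feature: with structure constification, cuse_channel_fops lives in read-only memory, so the plain member assignments become a memcpy plus pointer writes bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection. A rough user-space analogue of that open/patch/close pattern using mprotect (the kernel mechanism is different; this only models the shape):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops { int (*open)(void); int (*release)(void); };

static int my_open(void)    { return 0; }
static int my_release(void) { return 0; }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        struct file_ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;

        mprotect(ops, pagesz, PROT_READ);               /* normally write-protected */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()"  */
        ops->open = my_open;                            /* patch the pointers   */
        ops->release = my_release;
        mprotect(ops, pagesz, PROT_READ);               /* "pax_close_kernel()" */

        printf("open=%d\n", ops->open());
        return 0;
}
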
63862diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63863index 39706c5..a803c71 100644
63864--- a/fs/fuse/dev.c
63865+++ b/fs/fuse/dev.c
63866@@ -1405,7 +1405,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63867 ret = 0;
63868 pipe_lock(pipe);
63869
63870- if (!pipe->readers) {
63871+ if (!atomic_read(&pipe->readers)) {
63872 send_sig(SIGPIPE, current, 0);
63873 if (!ret)
63874 ret = -EPIPE;
63875@@ -1434,7 +1434,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63876 page_nr++;
63877 ret += buf->len;
63878
63879- if (pipe->files)
63880+ if (atomic_read(&pipe->files))
63881 do_wakeup = 1;
63882 }
63883
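
The dev.c hunk follows a patch-wide type change: struct pipe_inode_info's readers and files fields become atomic_t elsewhere in this patch, so fuse's splice path must read them through atomic_read(). The check-then-signal logic itself is unchanged; a condensed user-space model:

#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pipe_readers;

/* Model of the splice path: writing to a pipe with no readers raises SIGPIPE. */
static int splice_to_pipe_model(void)
{
        if (!atomic_load(&pipe_readers)) {
                raise(SIGPIPE);         /* kernel: send_sig(SIGPIPE, current, 0) */
                return -1;              /* stands in for -EPIPE */
        }
        return 0;
}

int main(void)
{
        signal(SIGPIPE, SIG_IGN);       /* let the demo survive the no-reader case */
        atomic_store(&pipe_readers, 1);
        printf("with reader: %d\n", splice_to_pipe_model());
        atomic_store(&pipe_readers, 0);
        printf("no reader:   %d\n", splice_to_pipe_model());
        return 0;
}
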
63884diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63885index 1545b71..7fabe47 100644
63886--- a/fs/fuse/dir.c
63887+++ b/fs/fuse/dir.c
63888@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63889 return link;
63890 }
63891
63892-static void free_link(char *link)
63893+static void free_link(const char *link)
63894 {
63895 if (!IS_ERR(link))
63896 free_page((unsigned long) link);
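
The dir.c change is plain const-correctness: the symlink body is never modified after read_link(), so free_link() can take const char * and confine the cast to the one place that hands the pointer back to the allocator. The same idiom in standard C:

#include <stdlib.h>
#include <string.h>

/* Accept the pointer as const; only the free itself needs the cast. */
static void free_link(const char *link)
{
        free((void *)link);
}

int main(void)
{
        const char *link = strdup("/some/target");
        /* ... readers treat the body as immutable ... */
        free_link(link);
        return 0;
}
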
63897diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
63898index f42dffb..4a4c435 100644
63899--- a/fs/gfs2/glock.c
63900+++ b/fs/gfs2/glock.c
63901@@ -385,9 +385,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
63902 if (held1 != held2) {
63903 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
63904 if (held2)
63905- gl->gl_lockref.count++;
63906+ __lockref_inc(&gl->gl_lockref);
63907 else
63908- gl->gl_lockref.count--;
63909+ __lockref_dec(&gl->gl_lockref);
63910 }
63911 if (held1 && held2 && list_empty(&gl->gl_holders))
63912 clear_bit(GLF_QUEUED, &gl->gl_flags);
63913@@ -614,9 +614,9 @@ out:
63914 out_sched:
63915 clear_bit(GLF_LOCK, &gl->gl_flags);
63916 smp_mb__after_atomic();
63917- gl->gl_lockref.count++;
63918+ __lockref_inc(&gl->gl_lockref);
63919 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
63920- gl->gl_lockref.count--;
63921+ __lockref_dec(&gl->gl_lockref);
63922 return;
63923
63924 out_unlock:
63925@@ -742,7 +742,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
63926 gl->gl_sbd = sdp;
63927 gl->gl_flags = 0;
63928 gl->gl_name = name;
63929- gl->gl_lockref.count = 1;
63930+ __lockref_set(&gl->gl_lockref, 1);
63931 gl->gl_state = LM_ST_UNLOCKED;
63932 gl->gl_target = LM_ST_UNLOCKED;
63933 gl->gl_demote_state = LM_ST_EXCLUSIVE;
63934@@ -1020,9 +1020,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
63935 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
63936 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
63937 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
63938- gl->gl_lockref.count++;
63939+ __lockref_inc(&gl->gl_lockref);
63940 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
63941- gl->gl_lockref.count--;
63942+ __lockref_dec(&gl->gl_lockref);
63943 }
63944 run_queue(gl, 1);
63945 spin_unlock(&gl->gl_spin);
63946@@ -1325,7 +1325,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
63947 }
63948 }
63949
63950- gl->gl_lockref.count++;
63951+ __lockref_inc(&gl->gl_lockref);
63952 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
63953 spin_unlock(&gl->gl_spin);
63954
63955@@ -1384,12 +1384,12 @@ add_back_to_lru:
63956 goto add_back_to_lru;
63957 }
63958 clear_bit(GLF_LRU, &gl->gl_flags);
63959- gl->gl_lockref.count++;
63960+ __lockref_inc(&gl->gl_lockref);
63961 if (demote_ok(gl))
63962 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
63963 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
63964 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
63965- gl->gl_lockref.count--;
63966+ __lockref_dec(&gl->gl_lockref);
63967 spin_unlock(&gl->gl_spin);
63968 cond_resched_lock(&lru_lock);
63969 }
63970@@ -1719,7 +1719,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
63971 state2str(gl->gl_demote_state), dtime,
63972 atomic_read(&gl->gl_ail_count),
63973 atomic_read(&gl->gl_revokes),
63974- (int)gl->gl_lockref.count, gl->gl_hold_time);
63975+ __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
63976
63977 list_for_each_entry(gh, &gl->gl_holders, gh_list)
63978 dump_holder(seq, gh);
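
The gfs2 hunks replace every direct touch of gl_lockref.count with __lockref_inc/__lockref_dec/__lockref_set/__lockref_read helpers. Routing all access through accessors lets the patch change the field's representation (to an instrumented atomic) in one place instead of auditing each filesystem again. A simplified sketch of such an accessor layer (the real lockref also packs a spinlock next to the count for cmpxchg-based fast paths):

#include <stdatomic.h>
#include <stdio.h>

struct lockref {
        atomic_int count;       /* spinlock omitted in this model */
};

static inline void __lockref_inc(struct lockref *l)        { atomic_fetch_add(&l->count, 1); }
static inline void __lockref_dec(struct lockref *l)        { atomic_fetch_sub(&l->count, 1); }
static inline void __lockref_set(struct lockref *l, int v) { atomic_store(&l->count, v); }
static inline int  __lockref_read(struct lockref *l)       { return atomic_load(&l->count); }

int main(void)
{
        struct lockref gl_lockref;

        __lockref_set(&gl_lockref, 1);  /* as in gfs2_glock_get()   */
        __lockref_inc(&gl_lockref);     /* queued work takes a ref  */
        __lockref_dec(&gl_lockref);     /* queueing failed, drop it */
        printf("count=%d\n", __lockref_read(&gl_lockref));
        return 0;
}
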
63979diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
63980index fe91951..ce38a6e 100644
63981--- a/fs/gfs2/glops.c
63982+++ b/fs/gfs2/glops.c
63983@@ -544,9 +544,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
63984
63985 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
63986 gl->gl_state == LM_ST_SHARED && ip) {
63987- gl->gl_lockref.count++;
63988+ __lockref_inc(&gl->gl_lockref);
63989 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
63990- gl->gl_lockref.count--;
63991+ __lockref_dec(&gl->gl_lockref);
63992 }
63993 }
63994
63995diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
63996index 3aa17d4..b338075 100644
63997--- a/fs/gfs2/quota.c
63998+++ b/fs/gfs2/quota.c
63999@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
64000 if (!spin_trylock(&qd->qd_lockref.lock))
64001 return LRU_SKIP;
64002
64003- if (qd->qd_lockref.count == 0) {
64004+ if (__lockref_read(&qd->qd_lockref) == 0) {
64005 lockref_mark_dead(&qd->qd_lockref);
64006 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
64007 }
64008@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
64009 return NULL;
64010
64011 qd->qd_sbd = sdp;
64012- qd->qd_lockref.count = 1;
64013+ __lockref_set(&qd->qd_lockref, 1);
64014 spin_lock_init(&qd->qd_lockref.lock);
64015 qd->qd_id = qid;
64016 qd->qd_slot = -1;
64017@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
64018 if (lockref_put_or_lock(&qd->qd_lockref))
64019 return;
64020
64021- qd->qd_lockref.count = 0;
64022+ __lockref_set(&qd->qd_lockref, 0);
64023 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
64024 spin_unlock(&qd->qd_lockref.lock);
64025
64026diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64027index fd62cae..3494dfa 100644
64028--- a/fs/hostfs/hostfs_kern.c
64029+++ b/fs/hostfs/hostfs_kern.c
64030@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64031
64032 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64033 {
64034- char *s = nd_get_link(nd);
64035+ const char *s = nd_get_link(nd);
64036 if (!IS_ERR(s))
64037 __putname(s);
64038 }
64039diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64040index c274aca..772fa5e 100644
64041--- a/fs/hugetlbfs/inode.c
64042+++ b/fs/hugetlbfs/inode.c
64043@@ -148,6 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64044 struct mm_struct *mm = current->mm;
64045 struct vm_area_struct *vma;
64046 struct hstate *h = hstate_file(file);
64047+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64048 struct vm_unmapped_area_info info;
64049
64050 if (len & ~huge_page_mask(h))
64051@@ -161,17 +162,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64052 return addr;
64053 }
64054
64055+#ifdef CONFIG_PAX_RANDMMAP
64056+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64057+#endif
64058+
64059 if (addr) {
64060 addr = ALIGN(addr, huge_page_size(h));
64061 vma = find_vma(mm, addr);
64062- if (TASK_SIZE - len >= addr &&
64063- (!vma || addr + len <= vma->vm_start))
64064+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64065 return addr;
64066 }
64067
64068 info.flags = 0;
64069 info.length = len;
64070 info.low_limit = TASK_UNMAPPED_BASE;
64071+
64072+#ifdef CONFIG_PAX_RANDMMAP
64073+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64074+ info.low_limit += mm->delta_mmap;
64075+#endif
64076+
64077 info.high_limit = TASK_SIZE;
64078 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64079 info.align_offset = 0;
64080@@ -912,7 +922,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64081 };
64082 MODULE_ALIAS_FS("hugetlbfs");
64083
64084-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64085+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64086
64087 static int can_do_hugetlb_shm(void)
64088 {
64089diff --git a/fs/inode.c b/fs/inode.c
64090index f00b16f..b653fea 100644
64091--- a/fs/inode.c
64092+++ b/fs/inode.c
64093@@ -830,16 +830,20 @@ unsigned int get_next_ino(void)
64094 unsigned int *p = &get_cpu_var(last_ino);
64095 unsigned int res = *p;
64096
64097+start:
64098+
64099 #ifdef CONFIG_SMP
64100 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64101- static atomic_t shared_last_ino;
64102- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64103+ static atomic_unchecked_t shared_last_ino;
64104+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
64105
64106 res = next - LAST_INO_BATCH;
64107 }
64108 #endif
64109
64110- *p = ++res;
64111+ if (unlikely(!++res))
64112+ goto start; /* never zero */
64113+ *p = res;
64114 put_cpu_var(last_ino);
64115 return res;
64116 }
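
The get_next_ino() rework keeps the shared batch handout in a wrap-tolerant counter and skips the value 0, which userspace treats as "no inode". A self-contained model of the batching plus never-zero logic; the function and variable names below are illustrative, only the control flow mirrors the patch:

#include <stdatomic.h>
#include <stdio.h>

#define LAST_INO_BATCH 1024

static atomic_uint shared_last_ino;	/* wraps silently, like atomic_unchecked_t */

static unsigned int next_ino(unsigned int *last)	/* *last models the per-CPU slot */
{
	unsigned int res = *last;
start:
	if ((res & (LAST_INO_BATCH - 1)) == 0)
		/* fetch_add returns the old value, i.e. next - LAST_INO_BATCH */
		res = atomic_fetch_add(&shared_last_ino, LAST_INO_BATCH);
	if (++res == 0)		/* never hand out ino 0, even after a wrap */
		goto start;
	*last = res;
	return res;
}

int main(void)
{
	unsigned int last = 0;
	for (int i = 0; i < 3; i++)
		printf("%u\n", next_ino(&last));	/* prints 1 2 3 */
	return 0;
}
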
64117diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
64118index 4a6cf28..d3a29d3 100644
64119--- a/fs/jffs2/erase.c
64120+++ b/fs/jffs2/erase.c
64121@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
64122 struct jffs2_unknown_node marker = {
64123 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
64124 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64125- .totlen = cpu_to_je32(c->cleanmarker_size)
64126+ .totlen = cpu_to_je32(c->cleanmarker_size),
64127+ .hdr_crc = cpu_to_je32(0)
64128 };
64129
64130 jffs2_prealloc_raw_node_refs(c, jeb, 1);
64131diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
64132index 09ed551..45684f8 100644
64133--- a/fs/jffs2/wbuf.c
64134+++ b/fs/jffs2/wbuf.c
64135@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
64136 {
64137 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
64138 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64139- .totlen = constant_cpu_to_je32(8)
64140+ .totlen = constant_cpu_to_je32(8),
64141+ .hdr_crc = constant_cpu_to_je32(0)
64142 };
64143
64144 /*
64145diff --git a/fs/jfs/super.c b/fs/jfs/super.c
64146index 5d30c56..8c45372 100644
64147--- a/fs/jfs/super.c
64148+++ b/fs/jfs/super.c
64149@@ -901,7 +901,7 @@ static int __init init_jfs_fs(void)
64150
64151 jfs_inode_cachep =
64152 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
64153- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
64154+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
64155 init_once);
64156 if (jfs_inode_cachep == NULL)
64157 return -ENOMEM;
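
Adding SLAB_USERCOPY to the jfs_ip cache whitelists it for user copies: under PaX USERCOPY, copy_to_user()/copy_from_user() refuse slab-backed objects unless their cache carries the flag. A toy model of that allowlist decision — the flag name comes from the patch, but its value and everything else here are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define SLAB_USERCOPY 0x1	/* illustrative value, not the kernel's */

struct cache_model { const char *name; unsigned int flags; };

static bool usercopy_allowed(const struct cache_model *c)
{
	return c->flags & SLAB_USERCOPY;	/* only flagged caches may cross */
}

int main(void)
{
	struct cache_model jfs_ip = { "jfs_ip", SLAB_USERCOPY };
	struct cache_model cred   = { "cred",   0 };
	printf("%s: %d\n", jfs_ip.name, usercopy_allowed(&jfs_ip));	/* 1 */
	printf("%s: %d\n", cred.name,   usercopy_allowed(&cred));	/* 0 */
	return 0;
}
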
64158diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
64159index 6acc964..eca491f 100644
64160--- a/fs/kernfs/dir.c
64161+++ b/fs/kernfs/dir.c
64162@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
64163 *
64164 * Returns 31 bit hash of ns + name (so it fits in an off_t )
64165 */
64166-static unsigned int kernfs_name_hash(const char *name, const void *ns)
64167+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
64168 {
64169 unsigned long hash = init_name_hash();
64170 unsigned int len = strlen(name);
64171@@ -831,6 +831,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
64172 ret = scops->mkdir(parent, dentry->d_name.name, mode);
64173
64174 kernfs_put_active(parent);
64175+
64176+ if (!ret) {
64177+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
64178+ ret = PTR_ERR_OR_ZERO(dentry_ret);
64179+ }
64180+
64181 return ret;
64182 }
64183
64184diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
64185index 2bacb99..f745182 100644
64186--- a/fs/kernfs/file.c
64187+++ b/fs/kernfs/file.c
64188@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
64189
64190 struct kernfs_open_node {
64191 atomic_t refcnt;
64192- atomic_t event;
64193+ atomic_unchecked_t event;
64194 wait_queue_head_t poll;
64195 struct list_head files; /* goes through kernfs_open_file.list */
64196 };
64197@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
64198 {
64199 struct kernfs_open_file *of = sf->private;
64200
64201- of->event = atomic_read(&of->kn->attr.open->event);
64202+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
64203
64204 return of->kn->attr.ops->seq_show(sf, v);
64205 }
64206@@ -207,7 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
64207 goto out_free;
64208 }
64209
64210- of->event = atomic_read(&of->kn->attr.open->event);
64211+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
64212 ops = kernfs_ops(of->kn);
64213 if (ops->read)
64214 len = ops->read(of, buf, len, *ppos);
64215@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
64216 {
64217 struct kernfs_open_file *of = kernfs_of(file);
64218 const struct kernfs_ops *ops;
64219- size_t len;
64220+ ssize_t len;
64221 char *buf;
64222
64223 if (of->atomic_write_len) {
64224@@ -385,12 +385,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
64225 return ret;
64226 }
64227
64228-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64229- void *buf, int len, int write)
64230+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64231+ void *buf, size_t len, int write)
64232 {
64233 struct file *file = vma->vm_file;
64234 struct kernfs_open_file *of = kernfs_of(file);
64235- int ret;
64236+ ssize_t ret;
64237
64238 if (!of->vm_ops)
64239 return -EINVAL;
64240@@ -569,7 +569,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
64241 return -ENOMEM;
64242
64243 atomic_set(&new_on->refcnt, 0);
64244- atomic_set(&new_on->event, 1);
64245+ atomic_set_unchecked(&new_on->event, 1);
64246 init_waitqueue_head(&new_on->poll);
64247 INIT_LIST_HEAD(&new_on->files);
64248 goto retry;
64249@@ -793,7 +793,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
64250
64251 kernfs_put_active(kn);
64252
64253- if (of->event != atomic_read(&on->event))
64254+ if (of->event != atomic_read_unchecked(&on->event))
64255 goto trigger;
64256
64257 return DEFAULT_POLLMASK;
64258@@ -824,7 +824,7 @@ repeat:
64259
64260 on = kn->attr.open;
64261 if (on) {
64262- atomic_inc(&on->event);
64263+ atomic_inc_unchecked(&on->event);
64264 wake_up_interruptible(&on->poll);
64265 }
64266
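
The kernfs conversions turn the open-node event counter into atomic_unchecked_t. Under PaX REFCOUNT every plain atomic_t is treated as a reference count and checked for overflow, so counters that are meant to wrap — like this event sequence, where only equality against a snapshot matters — must opt out. A plain-C approximation of the distinction (atomic_unchecked_t itself is a grsecurity primitive):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint event_seq;	/* wrap-tolerant: only equality matters */

static unsigned int event_snapshot(void) { return atomic_load(&event_seq); }
static void event_bump(void)             { atomic_fetch_add(&event_seq, 1); }

int main(void)
{
	unsigned int seen = event_snapshot();
	event_bump();
	/* poll-style check: "did anything happen since my snapshot?" */
	printf("changed=%d\n", seen != event_snapshot());	/* changed=1 */
	return 0;
}
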
64267diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
64268index 8a19889..4c3069a 100644
64269--- a/fs/kernfs/symlink.c
64270+++ b/fs/kernfs/symlink.c
64271@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
64272 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
64273 void *cookie)
64274 {
64275- char *page = nd_get_link(nd);
64276+ const char *page = nd_get_link(nd);
64277 if (!IS_ERR(page))
64278 free_page((unsigned long)page);
64279 }
64280diff --git a/fs/libfs.c b/fs/libfs.c
64281index 0ab6512..cd9982d 100644
64282--- a/fs/libfs.c
64283+++ b/fs/libfs.c
64284@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64285
64286 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
64287 struct dentry *next = list_entry(p, struct dentry, d_child);
64288+ char d_name[sizeof(next->d_iname)];
64289+ const unsigned char *name;
64290+
64291 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
64292 if (!simple_positive(next)) {
64293 spin_unlock(&next->d_lock);
64294@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64295
64296 spin_unlock(&next->d_lock);
64297 spin_unlock(&dentry->d_lock);
64298- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
64299+ name = next->d_name.name;
64300+ if (name == next->d_iname) {
64301+ memcpy(d_name, name, next->d_name.len);
64302+ name = d_name;
64303+ }
64304+ if (!dir_emit(ctx, name, next->d_name.len,
64305 next->d_inode->i_ino, dt_type(next->d_inode)))
64306 return 0;
64307 spin_lock(&dentry->d_lock);
64308@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
64309 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
64310 void *cookie)
64311 {
64312- char *s = nd_get_link(nd);
64313+ const char *s = nd_get_link(nd);
64314 if (!IS_ERR(s))
64315 kfree(s);
64316 }
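
The dcache_readdir() change above snapshots names stored inline in the dentry (d_iname) into a stack buffer before the locks are dropped, since a concurrent rename can rewrite that inline storage while dir_emit() runs. A stripped-down model of the copy-under-lock pattern; the struct and its sizes are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define INLINE_LEN 32

struct mini_dentry {
	pthread_mutex_t lock;
	char iname[INLINE_LEN];	/* models d_iname */
	const char *name;	/* models d_name.name; may point at iname */
	size_t len;
};

static void emit_name(struct mini_dentry *d)
{
	char snap[INLINE_LEN];
	const char *name;
	size_t len;

	pthread_mutex_lock(&d->lock);
	name = d->name;
	len = d->len;
	if (name == d->iname) {		/* short name lives inside the dentry */
		memcpy(snap, name, len);
		name = snap;		/* safe to use after unlock */
	}
	pthread_mutex_unlock(&d->lock);
	printf("%.*s\n", (int)len, name);
}

int main(void)
{
	struct mini_dentry d = { .lock = PTHREAD_MUTEX_INITIALIZER };
	strcpy(d.iname, "shortname");
	d.name = d.iname;
	d.len = strlen(d.iname);
	emit_name(&d);
	return 0;
}
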
64317diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
64318index acd3947..1f896e2 100644
64319--- a/fs/lockd/clntproc.c
64320+++ b/fs/lockd/clntproc.c
64321@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
64322 /*
64323 * Cookie counter for NLM requests
64324 */
64325-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
64326+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
64327
64328 void nlmclnt_next_cookie(struct nlm_cookie *c)
64329 {
64330- u32 cookie = atomic_inc_return(&nlm_cookie);
64331+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
64332
64333 memcpy(c->data, &cookie, 4);
64334 c->len=4;
64335diff --git a/fs/mount.h b/fs/mount.h
64336index 6a61c2b..bd79179 100644
64337--- a/fs/mount.h
64338+++ b/fs/mount.h
64339@@ -13,7 +13,7 @@ struct mnt_namespace {
64340 u64 seq; /* Sequence number to prevent loops */
64341 wait_queue_head_t poll;
64342 u64 event;
64343-};
64344+} __randomize_layout;
64345
64346 struct mnt_pcp {
64347 int mnt_count;
64348@@ -65,7 +65,7 @@ struct mount {
64349 struct hlist_head mnt_pins;
64350 struct fs_pin mnt_umount;
64351 struct dentry *mnt_ex_mountpoint;
64352-};
64353+} __randomize_layout;
64354
64355 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64356
64357diff --git a/fs/namei.c b/fs/namei.c
64358index c83145a..a78aa13 100644
64359--- a/fs/namei.c
64360+++ b/fs/namei.c
64361@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
64362 if (ret != -EACCES)
64363 return ret;
64364
64365+#ifdef CONFIG_GRKERNSEC
64366+ /* we'll block if we have to log due to a denied capability use */
64367+ if (mask & MAY_NOT_BLOCK)
64368+ return -ECHILD;
64369+#endif
64370+
64371 if (S_ISDIR(inode->i_mode)) {
64372 /* DACs are overridable for directories */
64373- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64374- return 0;
64375 if (!(mask & MAY_WRITE))
64376- if (capable_wrt_inode_uidgid(inode,
64377- CAP_DAC_READ_SEARCH))
64378+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64379+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64380 return 0;
64381+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64382+ return 0;
64383 return -EACCES;
64384 }
64385 /*
64386+ * Searching includes executable on directories, else just read.
64387+ */
64388+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64389+ if (mask == MAY_READ)
64390+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64391+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64392+ return 0;
64393+
64394+ /*
64395 * Read/write DACs are always overridable.
64396 * Executable DACs are overridable when there is
64397 * at least one exec bit set.
64398@@ -356,14 +371,6 @@ int generic_permission(struct inode *inode, int mask)
64399 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64400 return 0;
64401
64402- /*
64403- * Searching includes executable on directories, else just read.
64404- */
64405- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64406- if (mask == MAY_READ)
64407- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64408- return 0;
64409-
64410 return -EACCES;
64411 }
64412 EXPORT_SYMBOL(generic_permission);
64413@@ -503,7 +510,7 @@ struct nameidata {
64414 int last_type;
64415 unsigned depth;
64416 struct file *base;
64417- char *saved_names[MAX_NESTED_LINKS + 1];
64418+ const char *saved_names[MAX_NESTED_LINKS + 1];
64419 };
64420
64421 /*
64422@@ -714,13 +721,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
64423 nd->flags |= LOOKUP_JUMPED;
64424 }
64425
64426-void nd_set_link(struct nameidata *nd, char *path)
64427+void nd_set_link(struct nameidata *nd, const char *path)
64428 {
64429 nd->saved_names[nd->depth] = path;
64430 }
64431 EXPORT_SYMBOL(nd_set_link);
64432
64433-char *nd_get_link(struct nameidata *nd)
64434+const char *nd_get_link(const struct nameidata *nd)
64435 {
64436 return nd->saved_names[nd->depth];
64437 }
64438@@ -855,7 +862,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64439 {
64440 struct dentry *dentry = link->dentry;
64441 int error;
64442- char *s;
64443+ const char *s;
64444
64445 BUG_ON(nd->flags & LOOKUP_RCU);
64446
64447@@ -876,6 +883,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64448 if (error)
64449 goto out_put_nd_path;
64450
64451+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64452+ dentry->d_inode, dentry, nd->path.mnt)) {
64453+ error = -EACCES;
64454+ goto out_put_nd_path;
64455+ }
64456+
64457 nd->last_type = LAST_BIND;
64458 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64459 error = PTR_ERR(*p);
64460@@ -1639,6 +1652,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64461 if (res)
64462 break;
64463 res = walk_component(nd, path, LOOKUP_FOLLOW);
64464+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64465+ res = -EACCES;
64466 put_link(nd, &link, cookie);
64467 } while (res > 0);
64468
64469@@ -1711,7 +1726,7 @@ EXPORT_SYMBOL(full_name_hash);
64470 static inline u64 hash_name(const char *name)
64471 {
64472 unsigned long a, b, adata, bdata, mask, hash, len;
64473- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64474+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64475
64476 hash = a = 0;
64477 len = -sizeof(unsigned long);
64478@@ -2006,6 +2021,8 @@ static int path_lookupat(int dfd, const char *name,
64479 if (err)
64480 break;
64481 err = lookup_last(nd, &path);
64482+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64483+ err = -EACCES;
64484 put_link(nd, &link, cookie);
64485 }
64486 }
64487@@ -2013,6 +2030,13 @@ static int path_lookupat(int dfd, const char *name,
64488 if (!err)
64489 err = complete_walk(nd);
64490
64491+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64492+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64493+ path_put(&nd->path);
64494+ err = -ENOENT;
64495+ }
64496+ }
64497+
64498 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64499 if (!d_can_lookup(nd->path.dentry)) {
64500 path_put(&nd->path);
64501@@ -2034,8 +2058,15 @@ static int filename_lookup(int dfd, struct filename *name,
64502 retval = path_lookupat(dfd, name->name,
64503 flags | LOOKUP_REVAL, nd);
64504
64505- if (likely(!retval))
64506+ if (likely(!retval)) {
64507 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64508+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64509+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64510+ path_put(&nd->path);
64511+ return -ENOENT;
64512+ }
64513+ }
64514+ }
64515 return retval;
64516 }
64517
64518@@ -2614,6 +2645,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64519 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64520 return -EPERM;
64521
64522+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64523+ return -EPERM;
64524+ if (gr_handle_rawio(inode))
64525+ return -EPERM;
64526+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64527+ return -EACCES;
64528+
64529 return 0;
64530 }
64531
64532@@ -2845,7 +2883,7 @@ looked_up:
64533 * cleared otherwise prior to returning.
64534 */
64535 static int lookup_open(struct nameidata *nd, struct path *path,
64536- struct file *file,
64537+ struct path *link, struct file *file,
64538 const struct open_flags *op,
64539 bool got_write, int *opened)
64540 {
64541@@ -2880,6 +2918,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64542 /* Negative dentry, just create the file */
64543 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64544 umode_t mode = op->mode;
64545+
64546+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64547+ error = -EACCES;
64548+ goto out_dput;
64549+ }
64550+
64551+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64552+ error = -EACCES;
64553+ goto out_dput;
64554+ }
64555+
64556 if (!IS_POSIXACL(dir->d_inode))
64557 mode &= ~current_umask();
64558 /*
64559@@ -2901,6 +2950,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64560 nd->flags & LOOKUP_EXCL);
64561 if (error)
64562 goto out_dput;
64563+ else
64564+ gr_handle_create(dentry, nd->path.mnt);
64565 }
64566 out_no_open:
64567 path->dentry = dentry;
64568@@ -2915,7 +2966,7 @@ out_dput:
64569 /*
64570 * Handle the last step of open()
64571 */
64572-static int do_last(struct nameidata *nd, struct path *path,
64573+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64574 struct file *file, const struct open_flags *op,
64575 int *opened, struct filename *name)
64576 {
64577@@ -2965,6 +3016,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64578 if (error)
64579 return error;
64580
64581+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64582+ error = -ENOENT;
64583+ goto out;
64584+ }
64585+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64586+ error = -EACCES;
64587+ goto out;
64588+ }
64589+
64590 audit_inode(name, dir, LOOKUP_PARENT);
64591 error = -EISDIR;
64592 /* trailing slashes? */
64593@@ -2984,7 +3044,7 @@ retry_lookup:
64594 */
64595 }
64596 mutex_lock(&dir->d_inode->i_mutex);
64597- error = lookup_open(nd, path, file, op, got_write, opened);
64598+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64599 mutex_unlock(&dir->d_inode->i_mutex);
64600
64601 if (error <= 0) {
64602@@ -3008,11 +3068,28 @@ retry_lookup:
64603 goto finish_open_created;
64604 }
64605
64606+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64607+ error = -ENOENT;
64608+ goto exit_dput;
64609+ }
64610+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64611+ error = -EACCES;
64612+ goto exit_dput;
64613+ }
64614+
64615 /*
64616 * create/update audit record if it already exists.
64617 */
64618- if (d_is_positive(path->dentry))
64619+ if (d_is_positive(path->dentry)) {
64620+ /* only check if O_CREAT is specified; all other checks need to go
64621+ * into may_open */
64622+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64623+ error = -EACCES;
64624+ goto exit_dput;
64625+ }
64626+
64627 audit_inode(name, path->dentry, 0);
64628+ }
64629
64630 /*
64631 * If atomic_open() acquired write access it is dropped now due to
64632@@ -3053,6 +3130,11 @@ finish_lookup:
64633 }
64634 }
64635 BUG_ON(inode != path->dentry->d_inode);
64636+ /* if we're resolving a symlink to another symlink */
64637+ if (link && gr_handle_symlink_owner(link, inode)) {
64638+ error = -EACCES;
64639+ goto out;
64640+ }
64641 return 1;
64642 }
64643
64644@@ -3072,7 +3154,18 @@ finish_open:
64645 path_put(&save_parent);
64646 return error;
64647 }
64648+
64649+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64650+ error = -ENOENT;
64651+ goto out;
64652+ }
64653+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64654+ error = -EACCES;
64655+ goto out;
64656+ }
64657+
64658 audit_inode(name, nd->path.dentry, 0);
64659+
64660 error = -EISDIR;
64661 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64662 goto out;
64663@@ -3233,7 +3326,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64664 if (unlikely(error))
64665 goto out;
64666
64667- error = do_last(nd, &path, file, op, &opened, pathname);
64668+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64669 while (unlikely(error > 0)) { /* trailing symlink */
64670 struct path link = path;
64671 void *cookie;
64672@@ -3251,7 +3344,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64673 error = follow_link(&link, nd, &cookie);
64674 if (unlikely(error))
64675 break;
64676- error = do_last(nd, &path, file, op, &opened, pathname);
64677+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64678 put_link(nd, &link, cookie);
64679 }
64680 out:
64681@@ -3353,9 +3446,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
64682 goto unlock;
64683
64684 error = -EEXIST;
64685- if (d_is_positive(dentry))
64686+ if (d_is_positive(dentry)) {
64687+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64688+ error = -ENOENT;
64689 goto fail;
64690-
64691+ }
64692 /*
64693 * Special case - lookup gave negative, but... we had foo/bar/
64694 * From the vfs_mknod() POV we just have a negative dentry -
64695@@ -3420,6 +3515,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64696 }
64697 EXPORT_SYMBOL(user_path_create);
64698
64699+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64700+{
64701+ struct filename *tmp = getname(pathname);
64702+ struct dentry *res;
64703+ if (IS_ERR(tmp))
64704+ return ERR_CAST(tmp);
64705+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64706+ if (IS_ERR(res))
64707+ putname(tmp);
64708+ else
64709+ *to = tmp;
64710+ return res;
64711+}
64712+
64713 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64714 {
64715 int error = may_create(dir, dentry);
64716@@ -3483,6 +3592,17 @@ retry:
64717
64718 if (!IS_POSIXACL(path.dentry->d_inode))
64719 mode &= ~current_umask();
64720+
64721+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64722+ error = -EPERM;
64723+ goto out;
64724+ }
64725+
64726+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64727+ error = -EACCES;
64728+ goto out;
64729+ }
64730+
64731 error = security_path_mknod(&path, dentry, mode, dev);
64732 if (error)
64733 goto out;
64734@@ -3498,6 +3618,8 @@ retry:
64735 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64736 break;
64737 }
64738+ if (!error)
64739+ gr_handle_create(dentry, path.mnt);
64740 out:
64741 done_path_create(&path, dentry);
64742 if (retry_estale(error, lookup_flags)) {
64743@@ -3552,9 +3674,16 @@ retry:
64744
64745 if (!IS_POSIXACL(path.dentry->d_inode))
64746 mode &= ~current_umask();
64747+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64748+ error = -EACCES;
64749+ goto out;
64750+ }
64751 error = security_path_mkdir(&path, dentry, mode);
64752 if (!error)
64753 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64754+ if (!error)
64755+ gr_handle_create(dentry, path.mnt);
64756+out:
64757 done_path_create(&path, dentry);
64758 if (retry_estale(error, lookup_flags)) {
64759 lookup_flags |= LOOKUP_REVAL;
64760@@ -3587,7 +3716,7 @@ void dentry_unhash(struct dentry *dentry)
64761 {
64762 shrink_dcache_parent(dentry);
64763 spin_lock(&dentry->d_lock);
64764- if (dentry->d_lockref.count == 1)
64765+ if (__lockref_read(&dentry->d_lockref) == 1)
64766 __d_drop(dentry);
64767 spin_unlock(&dentry->d_lock);
64768 }
64769@@ -3638,6 +3767,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64770 struct filename *name;
64771 struct dentry *dentry;
64772 struct nameidata nd;
64773+ u64 saved_ino = 0;
64774+ dev_t saved_dev = 0;
64775 unsigned int lookup_flags = 0;
64776 retry:
64777 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64778@@ -3670,10 +3801,21 @@ retry:
64779 error = -ENOENT;
64780 goto exit3;
64781 }
64782+
64783+ saved_ino = gr_get_ino_from_dentry(dentry);
64784+ saved_dev = gr_get_dev_from_dentry(dentry);
64785+
64786+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64787+ error = -EACCES;
64788+ goto exit3;
64789+ }
64790+
64791 error = security_path_rmdir(&nd.path, dentry);
64792 if (error)
64793 goto exit3;
64794 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64795+ if (!error && (saved_dev || saved_ino))
64796+ gr_handle_delete(saved_ino, saved_dev);
64797 exit3:
64798 dput(dentry);
64799 exit2:
64800@@ -3766,6 +3908,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64801 struct nameidata nd;
64802 struct inode *inode = NULL;
64803 struct inode *delegated_inode = NULL;
64804+ u64 saved_ino = 0;
64805+ dev_t saved_dev = 0;
64806 unsigned int lookup_flags = 0;
64807 retry:
64808 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64809@@ -3792,10 +3936,22 @@ retry_deleg:
64810 if (d_is_negative(dentry))
64811 goto slashes;
64812 ihold(inode);
64813+
64814+ if (inode->i_nlink <= 1) {
64815+ saved_ino = gr_get_ino_from_dentry(dentry);
64816+ saved_dev = gr_get_dev_from_dentry(dentry);
64817+ }
64818+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64819+ error = -EACCES;
64820+ goto exit2;
64821+ }
64822+
64823 error = security_path_unlink(&nd.path, dentry);
64824 if (error)
64825 goto exit2;
64826 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64827+ if (!error && (saved_ino || saved_dev))
64828+ gr_handle_delete(saved_ino, saved_dev);
64829 exit2:
64830 dput(dentry);
64831 }
64832@@ -3884,9 +4040,17 @@ retry:
64833 if (IS_ERR(dentry))
64834 goto out_putname;
64835
64836+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64837+ error = -EACCES;
64838+ goto out;
64839+ }
64840+
64841 error = security_path_symlink(&path, dentry, from->name);
64842 if (!error)
64843 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64844+ if (!error)
64845+ gr_handle_create(dentry, path.mnt);
64846+out:
64847 done_path_create(&path, dentry);
64848 if (retry_estale(error, lookup_flags)) {
64849 lookup_flags |= LOOKUP_REVAL;
64850@@ -3990,6 +4154,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64851 struct dentry *new_dentry;
64852 struct path old_path, new_path;
64853 struct inode *delegated_inode = NULL;
64854+ struct filename *to = NULL;
64855 int how = 0;
64856 int error;
64857
64858@@ -4013,7 +4178,7 @@ retry:
64859 if (error)
64860 return error;
64861
64862- new_dentry = user_path_create(newdfd, newname, &new_path,
64863+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64864 (how & LOOKUP_REVAL));
64865 error = PTR_ERR(new_dentry);
64866 if (IS_ERR(new_dentry))
64867@@ -4025,11 +4190,28 @@ retry:
64868 error = may_linkat(&old_path);
64869 if (unlikely(error))
64870 goto out_dput;
64871+
64872+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64873+ old_path.dentry->d_inode,
64874+ old_path.dentry->d_inode->i_mode, to)) {
64875+ error = -EACCES;
64876+ goto out_dput;
64877+ }
64878+
64879+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64880+ old_path.dentry, old_path.mnt, to)) {
64881+ error = -EACCES;
64882+ goto out_dput;
64883+ }
64884+
64885 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64886 if (error)
64887 goto out_dput;
64888 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64889+ if (!error)
64890+ gr_handle_create(new_dentry, new_path.mnt);
64891 out_dput:
64892+ putname(to);
64893 done_path_create(&new_path, new_dentry);
64894 if (delegated_inode) {
64895 error = break_deleg_wait(&delegated_inode);
64896@@ -4345,6 +4527,20 @@ retry_deleg:
64897 if (new_dentry == trap)
64898 goto exit5;
64899
64900+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64901+ /* use EXDEV error to cause 'mv' to switch to an alternative
64902+ * method for usability
64903+ */
64904+ error = -EXDEV;
64905+ goto exit5;
64906+ }
64907+
64908+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64909+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64910+ to, flags);
64911+ if (error)
64912+ goto exit5;
64913+
64914 error = security_path_rename(&oldnd.path, old_dentry,
64915 &newnd.path, new_dentry, flags);
64916 if (error)
64917@@ -4352,6 +4548,9 @@ retry_deleg:
64918 error = vfs_rename(old_dir->d_inode, old_dentry,
64919 new_dir->d_inode, new_dentry,
64920 &delegated_inode, flags);
64921+ if (!error)
64922+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64923+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64924 exit5:
64925 dput(new_dentry);
64926 exit4:
64927@@ -4408,14 +4607,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64928
64929 int readlink_copy(char __user *buffer, int buflen, const char *link)
64930 {
64931+ char tmpbuf[64];
64932+ const char *newlink;
64933 int len = PTR_ERR(link);
64934+
64935 if (IS_ERR(link))
64936 goto out;
64937
64938 len = strlen(link);
64939 if (len > (unsigned) buflen)
64940 len = buflen;
64941- if (copy_to_user(buffer, link, len))
64942+
64943+ if (len < sizeof(tmpbuf)) {
64944+ memcpy(tmpbuf, link, len);
64945+ newlink = tmpbuf;
64946+ } else
64947+ newlink = link;
64948+
64949+ if (copy_to_user(buffer, newlink, len))
64950 len = -EFAULT;
64951 out:
64952 return len;
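
readlink_copy() now bounces link strings shorter than 64 bytes through a stack buffer before copy_to_user(). A plausible reading — an assumption, not stated in the patch — is that PaX USERCOPY vets the source object of every user copy, and symlink bodies can live in objects the checker would reject, so small ones are staged on the stack. A userspace model of the staging itself:

#include <stdio.h>
#include <string.h>

static long bounded_link_copy(char *dst, size_t buflen, const char *link)
{
	char tmpbuf[64];
	const char *src = link;
	size_t len = strlen(link);

	if (len > buflen)
		len = buflen;		/* clamp to the caller's buffer */
	if (len < sizeof(tmpbuf)) {
		memcpy(tmpbuf, link, len);
		src = tmpbuf;		/* the copy proceeds from the stack */
	}
	memcpy(dst, src, len);		/* stands in for copy_to_user() */
	return (long)len;
}

int main(void)
{
	char out[16];
	long n = bounded_link_copy(out, sizeof(out), "/tmp/target");
	printf("%.*s (%ld)\n", (int)n, out, n);
	return 0;
}
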
64953diff --git a/fs/namespace.c b/fs/namespace.c
64954index 82ef140..5335e75 100644
64955--- a/fs/namespace.c
64956+++ b/fs/namespace.c
64957@@ -1438,6 +1438,9 @@ static int do_umount(struct mount *mnt, int flags)
64958 if (!(sb->s_flags & MS_RDONLY))
64959 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64960 up_write(&sb->s_umount);
64961+
64962+ gr_log_remount(mnt->mnt_devname, retval);
64963+
64964 return retval;
64965 }
64966
64967@@ -1460,6 +1463,9 @@ static int do_umount(struct mount *mnt, int flags)
64968 }
64969 unlock_mount_hash();
64970 namespace_unlock();
64971+
64972+ gr_log_unmount(mnt->mnt_devname, retval);
64973+
64974 return retval;
64975 }
64976
64977@@ -1510,7 +1516,7 @@ static inline bool may_mount(void)
64978 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64979 */
64980
64981-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64982+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64983 {
64984 struct path path;
64985 struct mount *mnt;
64986@@ -1555,7 +1561,7 @@ out:
64987 /*
64988 * The 2.0 compatible umount. No flags.
64989 */
64990-SYSCALL_DEFINE1(oldumount, char __user *, name)
64991+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64992 {
64993 return sys_umount(name, 0);
64994 }
64995@@ -2621,6 +2627,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64996 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64997 MS_STRICTATIME);
64998
64999+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
65000+ retval = -EPERM;
65001+ goto dput_out;
65002+ }
65003+
65004+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
65005+ retval = -EPERM;
65006+ goto dput_out;
65007+ }
65008+
65009 if (flags & MS_REMOUNT)
65010 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
65011 data_page);
65012@@ -2634,7 +2650,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
65013 retval = do_new_mount(&path, type_page, flags, mnt_flags,
65014 dev_name, data_page);
65015 dput_out:
65016+ gr_log_mount(dev_name, &path, retval);
65017+
65018 path_put(&path);
65019+
65020 return retval;
65021 }
65022
65023@@ -2652,7 +2671,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65024 * number incrementing at 10Ghz will take 12,427 years to wrap which
65025 * is effectively never, so we can ignore the possibility.
65026 */
65027-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65028+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65029
65030 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65031 {
65032@@ -2668,7 +2687,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65033 return ERR_PTR(ret);
65034 }
65035 new_ns->ns.ops = &mntns_operations;
65036- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65037+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
65038 atomic_set(&new_ns->count, 1);
65039 new_ns->root = NULL;
65040 INIT_LIST_HEAD(&new_ns->list);
65041@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65042 return new_ns;
65043 }
65044
65045-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65046+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65047 struct user_namespace *user_ns, struct fs_struct *new_fs)
65048 {
65049 struct mnt_namespace *new_ns;
65050@@ -2799,8 +2818,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65051 }
65052 EXPORT_SYMBOL(mount_subtree);
65053
65054-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65055- char __user *, type, unsigned long, flags, void __user *, data)
65056+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65057+ const char __user *, type, unsigned long, flags, void __user *, data)
65058 {
65059 int ret;
65060 char *kernel_type;
65061@@ -2906,6 +2925,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65062 if (error)
65063 goto out2;
65064
65065+ if (gr_handle_chroot_pivot()) {
65066+ error = -EPERM;
65067+ goto out2;
65068+ }
65069+
65070 get_fs_root(current->fs, &root);
65071 old_mp = lock_mount(&old);
65072 error = PTR_ERR(old_mp);
65073@@ -3180,7 +3204,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
65074 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65075 return -EPERM;
65076
65077- if (fs->users != 1)
65078+ if (atomic_read(&fs->users) != 1)
65079 return -EINVAL;
65080
65081 get_mnt_ns(mnt_ns);
65082diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65083index 19ca95c..b28702c 100644
65084--- a/fs/nfs/callback_xdr.c
65085+++ b/fs/nfs/callback_xdr.c
65086@@ -51,7 +51,7 @@ struct callback_op {
65087 callback_decode_arg_t decode_args;
65088 callback_encode_res_t encode_res;
65089 long res_maxsize;
65090-};
65091+} __do_const;
65092
65093 static struct callback_op callback_ops[];
65094
65095diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65096index d42dff6..ecbdf42 100644
65097--- a/fs/nfs/inode.c
65098+++ b/fs/nfs/inode.c
65099@@ -1270,16 +1270,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
65100 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
65101 }
65102
65103-static atomic_long_t nfs_attr_generation_counter;
65104+static atomic_long_unchecked_t nfs_attr_generation_counter;
65105
65106 static unsigned long nfs_read_attr_generation_counter(void)
65107 {
65108- return atomic_long_read(&nfs_attr_generation_counter);
65109+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65110 }
65111
65112 unsigned long nfs_inc_attr_generation_counter(void)
65113 {
65114- return atomic_long_inc_return(&nfs_attr_generation_counter);
65115+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
65116 }
65117 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
65118
65119diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
65120index 92b9d97..045e58c 100644
65121--- a/fs/nfsd/nfs4proc.c
65122+++ b/fs/nfsd/nfs4proc.c
65123@@ -1492,7 +1492,7 @@ struct nfsd4_operation {
65124 nfsd4op_rsize op_rsize_bop;
65125 stateid_getter op_get_currentstateid;
65126 stateid_setter op_set_currentstateid;
65127-};
65128+} __do_const;
65129
65130 static struct nfsd4_operation nfsd4_ops[];
65131
65132diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
65133index 5fb7e78..cc8a22e 100644
65134--- a/fs/nfsd/nfs4xdr.c
65135+++ b/fs/nfsd/nfs4xdr.c
65136@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
65137
65138 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
65139
65140-static nfsd4_dec nfsd4_dec_ops[] = {
65141+static const nfsd4_dec nfsd4_dec_ops[] = {
65142 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
65143 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
65144 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
65145diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
65146index 46ec934..f384e41 100644
65147--- a/fs/nfsd/nfscache.c
65148+++ b/fs/nfsd/nfscache.c
65149@@ -541,7 +541,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65150 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
65151 u32 hash;
65152 struct nfsd_drc_bucket *b;
65153- int len;
65154+ long len;
65155 size_t bufsize = 0;
65156
65157 if (!rp)
65158@@ -550,11 +550,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65159 hash = nfsd_cache_hash(rp->c_xid);
65160 b = &drc_hashtbl[hash];
65161
65162- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
65163- len >>= 2;
65164+ if (statp) {
65165+ len = (char*)statp - (char*)resv->iov_base;
65166+ len = resv->iov_len - len;
65167+ len >>= 2;
65168+ }
65169
65170 /* Don't cache excessive amounts of data and XDR failures */
65171- if (!statp || len > (256 >> 2)) {
65172+ if (!statp || len > (256 >> 2) || len < 0) {
65173 nfsd_reply_cache_free(b, rp);
65174 return;
65175 }
65176@@ -562,7 +565,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65177 switch (cachetype) {
65178 case RC_REPLSTAT:
65179 if (len != 1)
65180- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
65181+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
65182 rp->c_replstat = *statp;
65183 break;
65184 case RC_REPLBUFF:
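
The nfsd_cache_update() fix derives the reply length only when statp is non-NULL, keeps it in a wider signed type, and rejects negative results (statp pointing outside the iovec) along with oversized replies. A self-contained model of the check; kvec_model is illustrative while the 256-byte cap mirrors the patch:

#include <stdio.h>

struct kvec_model { void *iov_base; size_t iov_len; };

/* returns the payload length in 32-bit words, or -1 if uncacheable */
static long cacheable_len(const struct kvec_model *resv, const void *statp)
{
	long len;

	if (!statp)
		return -1;
	len = (const char *)statp - (const char *)resv->iov_base;
	len = (long)resv->iov_len - len;
	if (len < 0)			/* statp lies outside the iovec */
		return -1;
	len >>= 2;			/* bytes -> 32-bit XDR words */
	if (len > (256 >> 2))		/* don't cache excessive data */
		return -1;
	return len;
}

int main(void)
{
	char buf[128];
	struct kvec_model v = { .iov_base = buf, .iov_len = sizeof(buf) };
	printf("%ld\n", cacheable_len(&v, buf + 4));	/* 31 words */
	printf("%ld\n", cacheable_len(&v, buf + 200));	/* -1: past the iovec */
	return 0;
}
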
65185diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
65186index 3685265..e77261e 100644
65187--- a/fs/nfsd/vfs.c
65188+++ b/fs/nfsd/vfs.c
65189@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
65190
65191 oldfs = get_fs();
65192 set_fs(KERNEL_DS);
65193- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
65194+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
65195 set_fs(oldfs);
65196 return nfsd_finish_read(file, count, host_err);
65197 }
65198@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
65199
65200 /* Write the data. */
65201 oldfs = get_fs(); set_fs(KERNEL_DS);
65202- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
65203+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
65204 set_fs(oldfs);
65205 if (host_err < 0)
65206 goto out_nfserr;
65207@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
65208 */
65209
65210 oldfs = get_fs(); set_fs(KERNEL_DS);
65211- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
65212+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
65213 set_fs(oldfs);
65214
65215 if (host_err < 0)
65216diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
65217index 52ccd34..7a6b202 100644
65218--- a/fs/nls/nls_base.c
65219+++ b/fs/nls/nls_base.c
65220@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
65221
65222 int __register_nls(struct nls_table *nls, struct module *owner)
65223 {
65224- struct nls_table ** tmp = &tables;
65225+ struct nls_table *tmp = tables;
65226
65227 if (nls->next)
65228 return -EBUSY;
65229
65230- nls->owner = owner;
65231+ pax_open_kernel();
65232+ *(void **)&nls->owner = owner;
65233+ pax_close_kernel();
65234 spin_lock(&nls_lock);
65235- while (*tmp) {
65236- if (nls == *tmp) {
65237+ while (tmp) {
65238+ if (nls == tmp) {
65239 spin_unlock(&nls_lock);
65240 return -EBUSY;
65241 }
65242- tmp = &(*tmp)->next;
65243+ tmp = tmp->next;
65244 }
65245- nls->next = tables;
65246+ pax_open_kernel();
65247+ *(struct nls_table **)&nls->next = tables;
65248+ pax_close_kernel();
65249 tables = nls;
65250 spin_unlock(&nls_lock);
65251 return 0;
65252@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
65253
65254 int unregister_nls(struct nls_table * nls)
65255 {
65256- struct nls_table ** tmp = &tables;
65257+ struct nls_table * const * tmp = &tables;
65258
65259 spin_lock(&nls_lock);
65260 while (*tmp) {
65261 if (nls == *tmp) {
65262- *tmp = nls->next;
65263+ pax_open_kernel();
65264+ *(struct nls_table **)tmp = nls->next;
65265+ pax_close_kernel();
65266 spin_unlock(&nls_lock);
65267 return 0;
65268 }
65269@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
65270 return -EINVAL;
65271 }
65272
65273-static struct nls_table *find_nls(char *charset)
65274+static struct nls_table *find_nls(const char *charset)
65275 {
65276 struct nls_table *nls;
65277 spin_lock(&nls_lock);
65278@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
65279 return nls;
65280 }
65281
65282-struct nls_table *load_nls(char *charset)
65283+struct nls_table *load_nls(const char *charset)
65284 {
65285 return try_then_request_module(find_nls(charset), "nls_%s", charset);
65286 }
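
The nls changes write through casts like *(void **)&nls->owner between pax_open_kernel() and pax_close_kernel() because constification places such structures in read-only memory. A loose userspace analogy using mprotect(): flip a page writable, perform the one sanctioned write, flip it back. pax_open_kernel() itself is a grsecurity primitive; everything below is an illustrative stand-in, not the kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "old");
	mprotect(p, pg, PROT_READ);		/* "constified": writes now fault */
	mprotect(p, pg, PROT_READ | PROT_WRITE);/* pax_open_kernel() analogue */
	strcpy(p, "new");			/* the one sanctioned write */
	mprotect(p, pg, PROT_READ);		/* pax_close_kernel() analogue */
	printf("%s\n", p);			/* prints new */
	munmap(p, pg);
	return 0;
}
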
65287diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
65288index 162b3f1..6076a7c 100644
65289--- a/fs/nls/nls_euc-jp.c
65290+++ b/fs/nls/nls_euc-jp.c
65291@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
65292 p_nls = load_nls("cp932");
65293
65294 if (p_nls) {
65295- table.charset2upper = p_nls->charset2upper;
65296- table.charset2lower = p_nls->charset2lower;
65297+ pax_open_kernel();
65298+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65299+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65300+ pax_close_kernel();
65301 return register_nls(&table);
65302 }
65303
65304diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
65305index a80a741..7b96e1b 100644
65306--- a/fs/nls/nls_koi8-ru.c
65307+++ b/fs/nls/nls_koi8-ru.c
65308@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
65309 p_nls = load_nls("koi8-u");
65310
65311 if (p_nls) {
65312- table.charset2upper = p_nls->charset2upper;
65313- table.charset2lower = p_nls->charset2lower;
65314+ pax_open_kernel();
65315+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65316+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65317+ pax_close_kernel();
65318 return register_nls(&table);
65319 }
65320
65321diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
65322index cf27550..6c70f29d 100644
65323--- a/fs/notify/fanotify/fanotify_user.c
65324+++ b/fs/notify/fanotify/fanotify_user.c
65325@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65326
65327 fd = fanotify_event_metadata.fd;
65328 ret = -EFAULT;
65329- if (copy_to_user(buf, &fanotify_event_metadata,
65330- fanotify_event_metadata.event_len))
65331+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65332+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65333 goto out_close_fd;
65334
65335 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
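
The fanotify fix refuses to copy more bytes to userspace than the kernel-side metadata struct actually holds: event_len is validated against the size of the source object before copy_to_user(). A minimal model of the guard; the struct here is a stand-in for the real fanotify_event_metadata:

#include <stdio.h>
#include <string.h>

struct event_metadata_model {
	unsigned int event_len;	/* claimed length; must not exceed sizeof(*this) */
	int fd;
};

static int copy_event(void *ubuf, const struct event_metadata_model *ev)
{
	if (ev->event_len > sizeof(*ev))
		return -1;			/* would overread the source */
	memcpy(ubuf, ev, ev->event_len);	/* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	struct event_metadata_model ev = { .event_len = sizeof(ev), .fd = 3 };
	char buf[sizeof(ev)];
	printf("ok=%d\n", copy_event(buf, &ev) == 0);	/* ok=1 */
	ev.event_len = 4096;
	printf("ok=%d\n", copy_event(buf, &ev) == 0);	/* ok=0 */
	return 0;
}
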
65336diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65337index a95d8e0..a91a5fd 100644
65338--- a/fs/notify/notification.c
65339+++ b/fs/notify/notification.c
65340@@ -48,7 +48,7 @@
65341 #include <linux/fsnotify_backend.h>
65342 #include "fsnotify.h"
65343
65344-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65345+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65346
65347 /**
65348 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65349@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65350 */
65351 u32 fsnotify_get_cookie(void)
65352 {
65353- return atomic_inc_return(&fsnotify_sync_cookie);
65354+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65355 }
65356 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65357
65358diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65359index 9e38daf..5727cae 100644
65360--- a/fs/ntfs/dir.c
65361+++ b/fs/ntfs/dir.c
65362@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65363 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65364 ~(s64)(ndir->itype.index.block_size - 1)));
65365 /* Bounds checks. */
65366- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65367+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65368 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65369 "inode 0x%lx or driver bug.", vdir->i_ino);
65370 goto err_out;
65371diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65372index 1da9b2d..9cca092a 100644
65373--- a/fs/ntfs/file.c
65374+++ b/fs/ntfs/file.c
65375@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65376 char *addr;
65377 size_t total = 0;
65378 unsigned len;
65379- int left;
65380+ unsigned left;
65381
65382 do {
65383 len = PAGE_CACHE_SIZE - ofs;
65384diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65385index 9e1e112..241a52a 100644
65386--- a/fs/ntfs/super.c
65387+++ b/fs/ntfs/super.c
65388@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65389 if (!silent)
65390 ntfs_error(sb, "Primary boot sector is invalid.");
65391 } else if (!silent)
65392- ntfs_error(sb, read_err_str, "primary");
65393+ ntfs_error(sb, read_err_str, "%s", "primary");
65394 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65395 if (bh_primary)
65396 brelse(bh_primary);
65397@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65398 goto hotfix_primary_boot_sector;
65399 brelse(bh_backup);
65400 } else if (!silent)
65401- ntfs_error(sb, read_err_str, "backup");
65402+ ntfs_error(sb, read_err_str, "%s", "backup");
65403 /* Try to read NT3.51- backup boot sector. */
65404 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
65405 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
65406@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65407 "sector.");
65408 brelse(bh_backup);
65409 } else if (!silent)
65410- ntfs_error(sb, read_err_str, "backup");
65411+ ntfs_error(sb, read_err_str, "%s", "backup");
65412 /* We failed. Cleanup and return. */
65413 if (bh_primary)
65414 brelse(bh_primary);
65415diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
65416index 0440134..d52c93a 100644
65417--- a/fs/ocfs2/localalloc.c
65418+++ b/fs/ocfs2/localalloc.c
65419@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
65420 goto bail;
65421 }
65422
65423- atomic_inc(&osb->alloc_stats.moves);
65424+ atomic_inc_unchecked(&osb->alloc_stats.moves);
65425
65426 bail:
65427 if (handle)
65428diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
65429index 460c6c3..b4ef513 100644
65430--- a/fs/ocfs2/ocfs2.h
65431+++ b/fs/ocfs2/ocfs2.h
65432@@ -247,11 +247,11 @@ enum ocfs2_vol_state
65433
65434 struct ocfs2_alloc_stats
65435 {
65436- atomic_t moves;
65437- atomic_t local_data;
65438- atomic_t bitmap_data;
65439- atomic_t bg_allocs;
65440- atomic_t bg_extends;
65441+ atomic_unchecked_t moves;
65442+ atomic_unchecked_t local_data;
65443+ atomic_unchecked_t bitmap_data;
65444+ atomic_unchecked_t bg_allocs;
65445+ atomic_unchecked_t bg_extends;
65446 };
65447
65448 enum ocfs2_local_alloc_state
65449diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
65450index ee541f9..df3a500 100644
65451--- a/fs/ocfs2/refcounttree.c
65452+++ b/fs/ocfs2/refcounttree.c
65453@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
65454 error = posix_acl_create(dir, &mode, &default_acl, &acl);
65455 if (error) {
65456 mlog_errno(error);
65457- goto out;
65458+ return error;
65459 }
65460
65461 error = ocfs2_create_inode_in_orphan(dir, mode,
65462diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65463index 0cb889a..6a26b24 100644
65464--- a/fs/ocfs2/suballoc.c
65465+++ b/fs/ocfs2/suballoc.c
65466@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65467 mlog_errno(status);
65468 goto bail;
65469 }
65470- atomic_inc(&osb->alloc_stats.bg_extends);
65471+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65472
65473 /* You should never ask for this much metadata */
65474 BUG_ON(bits_wanted >
65475@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65476 mlog_errno(status);
65477 goto bail;
65478 }
65479- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65480+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65481
65482 *suballoc_loc = res.sr_bg_blkno;
65483 *suballoc_bit_start = res.sr_bit_offset;
65484@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65485 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65486 res->sr_bits);
65487
65488- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65489+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65490
65491 BUG_ON(res->sr_bits != 1);
65492
65493@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65494 mlog_errno(status);
65495 goto bail;
65496 }
65497- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65498+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65499
65500 BUG_ON(res.sr_bits != 1);
65501
65502@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65503 cluster_start,
65504 num_clusters);
65505 if (!status)
65506- atomic_inc(&osb->alloc_stats.local_data);
65507+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65508 } else {
65509 if (min_clusters > (osb->bitmap_cpg - 1)) {
65510 /* The only paths asking for contiguousness
65511@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65512 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65513 res.sr_bg_blkno,
65514 res.sr_bit_offset);
65515- atomic_inc(&osb->alloc_stats.bitmap_data);
65516+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65517 *num_clusters = res.sr_bits;
65518 }
65519 }
65520diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65521index 2667518..24bcf79 100644
65522--- a/fs/ocfs2/super.c
65523+++ b/fs/ocfs2/super.c
65524@@ -308,11 +308,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65525 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65526 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65527 "Stats",
65528- atomic_read(&osb->alloc_stats.bitmap_data),
65529- atomic_read(&osb->alloc_stats.local_data),
65530- atomic_read(&osb->alloc_stats.bg_allocs),
65531- atomic_read(&osb->alloc_stats.moves),
65532- atomic_read(&osb->alloc_stats.bg_extends));
65533+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65534+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65535+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65536+ atomic_read_unchecked(&osb->alloc_stats.moves),
65537+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65538
65539 out += snprintf(buf + out, len - out,
65540 "%10s => State: %u Descriptor: %llu Size: %u bits "
65541@@ -2093,11 +2093,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65542
65543 mutex_init(&osb->system_file_mutex);
65544
65545- atomic_set(&osb->alloc_stats.moves, 0);
65546- atomic_set(&osb->alloc_stats.local_data, 0);
65547- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65548- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65549- atomic_set(&osb->alloc_stats.bg_extends, 0);
65550+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65551+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65552+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65553+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65554+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65555
65556 /* Copy the blockcheck stats from the superblock probe */
65557 osb->osb_ecc_stats = *stats;
65558diff --git a/fs/open.c b/fs/open.c
65559index 33f9cbf..8abe053 100644
65560--- a/fs/open.c
65561+++ b/fs/open.c
65562@@ -32,6 +32,8 @@
65563 #include <linux/dnotify.h>
65564 #include <linux/compat.h>
65565
65566+#define CREATE_TRACE_POINTS
65567+#include <trace/events/fs.h>
65568 #include "internal.h"
65569
65570 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65571@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65572 error = locks_verify_truncate(inode, NULL, length);
65573 if (!error)
65574 error = security_path_truncate(path);
65575+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65576+ error = -EACCES;
65577 if (!error)
65578 error = do_truncate(path->dentry, length, 0, NULL);
65579
65580@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65581 error = locks_verify_truncate(inode, f.file, length);
65582 if (!error)
65583 error = security_path_truncate(&f.file->f_path);
65584+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65585+ error = -EACCES;
65586 if (!error)
65587 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65588 sb_end_write(inode->i_sb);
65589@@ -392,6 +398,9 @@ retry:
65590 if (__mnt_is_readonly(path.mnt))
65591 res = -EROFS;
65592
65593+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65594+ res = -EACCES;
65595+
65596 out_path_release:
65597 path_put(&path);
65598 if (retry_estale(res, lookup_flags)) {
65599@@ -423,6 +432,8 @@ retry:
65600 if (error)
65601 goto dput_and_out;
65602
65603+ gr_log_chdir(path.dentry, path.mnt);
65604+
65605 set_fs_pwd(current->fs, &path);
65606
65607 dput_and_out:
65608@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65609 goto out_putf;
65610
65611 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65612+
65613+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65614+ error = -EPERM;
65615+
65616+ if (!error)
65617+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65618+
65619 if (!error)
65620 set_fs_pwd(current->fs, &f.file->f_path);
65621 out_putf:
65622@@ -481,7 +499,13 @@ retry:
65623 if (error)
65624 goto dput_and_out;
65625
65626+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65627+ goto dput_and_out;
65628+
65629 set_fs_root(current->fs, &path);
65630+
65631+ gr_handle_chroot_chdir(&path);
65632+
65633 error = 0;
65634 dput_and_out:
65635 path_put(&path);
65636@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65637 return error;
65638 retry_deleg:
65639 mutex_lock(&inode->i_mutex);
65640+
65641+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65642+ error = -EACCES;
65643+ goto out_unlock;
65644+ }
65645+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65646+ error = -EACCES;
65647+ goto out_unlock;
65648+ }
65649+
65650 error = security_path_chmod(path, mode);
65651 if (error)
65652 goto out_unlock;
65653@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65654 uid = make_kuid(current_user_ns(), user);
65655 gid = make_kgid(current_user_ns(), group);
65656
65657+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65658+ return -EACCES;
65659+
65660 newattrs.ia_valid = ATTR_CTIME;
65661 if (user != (uid_t) -1) {
65662 if (!uid_valid(uid))
65663@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65664 } else {
65665 fsnotify_open(f);
65666 fd_install(fd, f);
65667+ trace_do_sys_open(tmp->name, flags, mode);
65668 }
65669 }
65670 putname(tmp);
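
The fs/open.c hunks all follow the same idiom: each grsecurity hook runs only when no earlier check has failed, and a denial is folded into the existing error variable as -EACCES so the shared exit path still performs cleanup. A compilable sketch with stubbed hooks (the *_stub names are stand-ins, not kernel functions):

#include <stdio.h>
#include <errno.h>

static int security_path_truncate_stub(void) { return 0; } /* 0 = allowed */
static int gr_acl_handle_truncate_stub(void) { return 1; } /* nonzero = allowed */

static int vfs_truncate_model(void)
{
	int error = security_path_truncate_stub();
	/* the added check: only consulted if nothing failed before it */
	if (!error && !gr_acl_handle_truncate_stub())
		error = -EACCES;
	if (!error)
		printf("do_truncate() would run here\n");
	return error;
}

int main(void)
{
	return vfs_truncate_model() ? 1 : 0;
}
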
65671diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
65672index 5f0d199..13b74b9 100644
65673--- a/fs/overlayfs/super.c
65674+++ b/fs/overlayfs/super.c
65675@@ -172,7 +172,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
65676 {
65677 struct ovl_entry *oe = dentry->d_fsdata;
65678
65679- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
65680+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
65681 }
65682
65683 int ovl_want_write(struct dentry *dentry)
65684@@ -816,8 +816,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
65685
65686 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
65687 {
65688- struct path upperpath = { NULL, NULL };
65689- struct path workpath = { NULL, NULL };
65690+ struct path upperpath = { .dentry = NULL, .mnt = NULL };
65691+ struct path workpath = { .dentry = NULL, .mnt = NULL };
65692 struct dentry *root_dentry;
65693 struct ovl_entry *oe;
65694 struct ovl_fs *ufs;
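
The overlayfs change is purely about initializer style: a positional compound literal { NULL, NULL } silently depends on the member order of struct path, while the designated form names each field. The toy struct below mirrors the layout used in kernels of this vintage and is illustrative only.

#include <stddef.h>
#include <stdio.h>

struct dentry;    /* opaque, for illustration */
struct vfsmount;  /* opaque, for illustration */

struct path {
	struct vfsmount *mnt;
	struct dentry *dentry;
};

int main(void)
{
	/* positional: correct only as long as member order never changes */
	struct path a = { NULL, NULL };
	/* designated: order-independent and self-documenting */
	struct path b = { .dentry = NULL, .mnt = NULL };
	printf("%p %p\n", (void *)a.mnt, (void *)b.dentry);
	return 0;
}
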
65695diff --git a/fs/pipe.c b/fs/pipe.c
65696index 21981e5..3d5f55c 100644
65697--- a/fs/pipe.c
65698+++ b/fs/pipe.c
65699@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65700
65701 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65702 {
65703- if (pipe->files)
65704+ if (atomic_read(&pipe->files))
65705 mutex_lock_nested(&pipe->mutex, subclass);
65706 }
65707
65708@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65709
65710 void pipe_unlock(struct pipe_inode_info *pipe)
65711 {
65712- if (pipe->files)
65713+ if (atomic_read(&pipe->files))
65714 mutex_unlock(&pipe->mutex);
65715 }
65716 EXPORT_SYMBOL(pipe_unlock);
65717@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65718 }
65719 if (bufs) /* More to do? */
65720 continue;
65721- if (!pipe->writers)
65722+ if (!atomic_read(&pipe->writers))
65723 break;
65724- if (!pipe->waiting_writers) {
65725+ if (!atomic_read(&pipe->waiting_writers)) {
65726 /* syscall merging: Usually we must not sleep
65727 * if O_NONBLOCK is set, or if we got some data.
65728 * But if a writer sleeps in kernel space, then
65729@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65730
65731 __pipe_lock(pipe);
65732
65733- if (!pipe->readers) {
65734+ if (!atomic_read(&pipe->readers)) {
65735 send_sig(SIGPIPE, current, 0);
65736 ret = -EPIPE;
65737 goto out;
65738@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65739 for (;;) {
65740 int bufs;
65741
65742- if (!pipe->readers) {
65743+ if (!atomic_read(&pipe->readers)) {
65744 send_sig(SIGPIPE, current, 0);
65745 if (!ret)
65746 ret = -EPIPE;
65747@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65748 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65749 do_wakeup = 0;
65750 }
65751- pipe->waiting_writers++;
65752+ atomic_inc(&pipe->waiting_writers);
65753 pipe_wait(pipe);
65754- pipe->waiting_writers--;
65755+ atomic_dec(&pipe->waiting_writers);
65756 }
65757 out:
65758 __pipe_unlock(pipe);
65759@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65760 mask = 0;
65761 if (filp->f_mode & FMODE_READ) {
65762 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65763- if (!pipe->writers && filp->f_version != pipe->w_counter)
65764+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65765 mask |= POLLHUP;
65766 }
65767
65768@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65769 * Most Unices do not set POLLERR for FIFOs but on Linux they
65770 * behave exactly like pipes for poll().
65771 */
65772- if (!pipe->readers)
65773+ if (!atomic_read(&pipe->readers))
65774 mask |= POLLERR;
65775 }
65776
65777@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65778 int kill = 0;
65779
65780 spin_lock(&inode->i_lock);
65781- if (!--pipe->files) {
65782+ if (atomic_dec_and_test(&pipe->files)) {
65783 inode->i_pipe = NULL;
65784 kill = 1;
65785 }
65786@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65787
65788 __pipe_lock(pipe);
65789 if (file->f_mode & FMODE_READ)
65790- pipe->readers--;
65791+ atomic_dec(&pipe->readers);
65792 if (file->f_mode & FMODE_WRITE)
65793- pipe->writers--;
65794+ atomic_dec(&pipe->writers);
65795
65796- if (pipe->readers || pipe->writers) {
65797+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65798 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65799 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65800 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65801@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65802 kfree(pipe);
65803 }
65804
65805-static struct vfsmount *pipe_mnt __read_mostly;
65806+struct vfsmount *pipe_mnt __read_mostly;
65807
65808 /*
65809 * pipefs_dname() is called from d_path().
65810@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65811 goto fail_iput;
65812
65813 inode->i_pipe = pipe;
65814- pipe->files = 2;
65815- pipe->readers = pipe->writers = 1;
65816+ atomic_set(&pipe->files, 2);
65817+ atomic_set(&pipe->readers, 1);
65818+ atomic_set(&pipe->writers, 1);
65819 inode->i_fop = &pipefifo_fops;
65820
65821 /*
65822@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65823 spin_lock(&inode->i_lock);
65824 if (inode->i_pipe) {
65825 pipe = inode->i_pipe;
65826- pipe->files++;
65827+ atomic_inc(&pipe->files);
65828 spin_unlock(&inode->i_lock);
65829 } else {
65830 spin_unlock(&inode->i_lock);
65831 pipe = alloc_pipe_info();
65832 if (!pipe)
65833 return -ENOMEM;
65834- pipe->files = 1;
65835+ atomic_set(&pipe->files, 1);
65836 spin_lock(&inode->i_lock);
65837 if (unlikely(inode->i_pipe)) {
65838- inode->i_pipe->files++;
65839+ atomic_inc(&inode->i_pipe->files);
65840 spin_unlock(&inode->i_lock);
65841 free_pipe_info(pipe);
65842 pipe = inode->i_pipe;
65843@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65844 * opened, even when there is no process writing the FIFO.
65845 */
65846 pipe->r_counter++;
65847- if (pipe->readers++ == 0)
65848+ if (atomic_inc_return(&pipe->readers) == 1)
65849 wake_up_partner(pipe);
65850
65851- if (!is_pipe && !pipe->writers) {
65852+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65853 if ((filp->f_flags & O_NONBLOCK)) {
65854 /* suppress POLLHUP until we have
65855 * seen a writer */
65856@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65857 * errno=ENXIO when there is no process reading the FIFO.
65858 */
65859 ret = -ENXIO;
65860- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65861+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65862 goto err;
65863
65864 pipe->w_counter++;
65865- if (!pipe->writers++)
65866+ if (atomic_inc_return(&pipe->writers) == 1)
65867 wake_up_partner(pipe);
65868
65869- if (!is_pipe && !pipe->readers) {
65870+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65871 if (wait_for_partner(pipe, &pipe->r_counter))
65872 goto err_wr;
65873 }
65874@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65875 * the process can at least talk to itself.
65876 */
65877
65878- pipe->readers++;
65879- pipe->writers++;
65880+ atomic_inc(&pipe->readers);
65881+ atomic_inc(&pipe->writers);
65882 pipe->r_counter++;
65883 pipe->w_counter++;
65884- if (pipe->readers == 1 || pipe->writers == 1)
65885+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65886 wake_up_partner(pipe);
65887 break;
65888
65889@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65890 return 0;
65891
65892 err_rd:
65893- if (!--pipe->readers)
65894+ if (atomic_dec_and_test(&pipe->readers))
65895 wake_up_interruptible(&pipe->wait);
65896 ret = -ERESTARTSYS;
65897 goto err;
65898
65899 err_wr:
65900- if (!--pipe->writers)
65901+ if (atomic_dec_and_test(&pipe->writers))
65902 wake_up_interruptible(&pipe->wait);
65903 ret = -ERESTARTSYS;
65904 goto err;
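
The pipe conversion turns the plain int reader/writer/file counts into atomic_t so concurrent opens and releases cannot race on the counters. In C11 terms, atomic_inc_return(&x) == 1 detects the first opener and atomic_dec_and_test(&x) corresponds to fetch_sub returning 1; a userspace model of the accounting (sketch only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int readers;

static void open_reader(void)
{
	/* atomic_inc_return(...) == 1: we are the first reader */
	if (atomic_fetch_add(&readers, 1) + 1 == 1)
		printf("first reader: wake_up_partner()\n");
}

static void release_reader(void)
{
	/* atomic_dec_and_test(): previous value 1 means now zero */
	if (atomic_fetch_sub(&readers, 1) == 1)
		printf("last reader gone: wake writers, POLLHUP\n");
}

int main(void)
{
	open_reader();
	open_reader();
	release_reader();
	release_reader();
	return 0;
}
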
65905diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65906index 3a48bb7..403067b 100644
65907--- a/fs/posix_acl.c
65908+++ b/fs/posix_acl.c
65909@@ -20,6 +20,7 @@
65910 #include <linux/xattr.h>
65911 #include <linux/export.h>
65912 #include <linux/user_namespace.h>
65913+#include <linux/grsecurity.h>
65914
65915 struct posix_acl **acl_by_type(struct inode *inode, int type)
65916 {
65917@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65918 }
65919 }
65920 if (mode_p)
65921- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65922+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65923 return not_equiv;
65924 }
65925 EXPORT_SYMBOL(posix_acl_equiv_mode);
65926@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65927 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65928 }
65929
65930- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65931+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65932 return not_equiv;
65933 }
65934
65935@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65936 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65937 int err = -ENOMEM;
65938 if (clone) {
65939+ *mode_p &= ~gr_acl_umask();
65940+
65941 err = posix_acl_create_masq(clone, mode_p);
65942 if (err < 0) {
65943 posix_acl_release(clone);
65944@@ -663,11 +666,12 @@ struct posix_acl *
65945 posix_acl_from_xattr(struct user_namespace *user_ns,
65946 const void *value, size_t size)
65947 {
65948- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65949- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65950+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65951+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65952 int count;
65953 struct posix_acl *acl;
65954 struct posix_acl_entry *acl_e;
65955+ umode_t umask = gr_acl_umask();
65956
65957 if (!value)
65958 return NULL;
65959@@ -693,12 +697,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65960
65961 switch(acl_e->e_tag) {
65962 case ACL_USER_OBJ:
65963+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65964+ break;
65965 case ACL_GROUP_OBJ:
65966 case ACL_MASK:
65967+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65968+ break;
65969 case ACL_OTHER:
65970+ acl_e->e_perm &= ~(umask & S_IRWXO);
65971 break;
65972
65973 case ACL_USER:
65974+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65975 acl_e->e_uid =
65976 make_kuid(user_ns,
65977 le32_to_cpu(entry->e_id));
65978@@ -706,6 +716,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65979 goto fail;
65980 break;
65981 case ACL_GROUP:
65982+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65983 acl_e->e_gid =
65984 make_kgid(user_ns,
65985 le32_to_cpu(entry->e_id));
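
The posix_acl.c hunks clear the gr_acl_umask() bits from each ACL entry, shifting the user class of the umask down six bits and the group class down three so every tag is masked by its own permission triad. A standalone sketch of that arithmetic (the tag names here are local to the example, not the kernel's ACL_* constants):

#include <stdio.h>
#include <sys/stat.h>

enum acl_tag { ACL_USER_T, ACL_GROUP_T, ACL_OTHER_T };

static unsigned short apply_umask(enum acl_tag tag, unsigned short perm,
				  mode_t umask_bits)
{
	switch (tag) {
	case ACL_USER_T:  return perm & ~((umask_bits & S_IRWXU) >> 6);
	case ACL_GROUP_T: return perm & ~((umask_bits & S_IRWXG) >> 3);
	case ACL_OTHER_T: return perm & ~(umask_bits & S_IRWXO);
	}
	return perm;
}

int main(void)
{
	/* umask 027: group loses write, other loses everything */
	printf("user  rwx -> %o\n", (unsigned)apply_umask(ACL_USER_T, 07, 027));
	printf("group rwx -> %o\n", (unsigned)apply_umask(ACL_GROUP_T, 07, 027));
	printf("other rwx -> %o\n", (unsigned)apply_umask(ACL_OTHER_T, 07, 027));
	return 0;
}
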
65986diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65987index 2183fcf..3c32a98 100644
65988--- a/fs/proc/Kconfig
65989+++ b/fs/proc/Kconfig
65990@@ -30,7 +30,7 @@ config PROC_FS
65991
65992 config PROC_KCORE
65993 bool "/proc/kcore support" if !ARM
65994- depends on PROC_FS && MMU
65995+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65996 help
65997 Provides a virtual ELF core file of the live kernel. This can
65998 be read with gdb and other ELF tools. No modifications can be
65999@@ -38,8 +38,8 @@ config PROC_KCORE
66000
66001 config PROC_VMCORE
66002 bool "/proc/vmcore support"
66003- depends on PROC_FS && CRASH_DUMP
66004- default y
66005+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
66006+ default n
66007 help
66008 Exports the dump image of crashed kernel in ELF format.
66009
66010@@ -63,8 +63,8 @@ config PROC_SYSCTL
66011 limited in memory.
66012
66013 config PROC_PAGE_MONITOR
66014- default y
66015- depends on PROC_FS && MMU
66016+ default n
66017+ depends on PROC_FS && MMU && !GRKERNSEC
66018 bool "Enable /proc page monitoring" if EXPERT
66019 help
66020 Various /proc files exist to monitor process memory utilization:
66021diff --git a/fs/proc/array.c b/fs/proc/array.c
66022index 1295a00..4c91a6b 100644
66023--- a/fs/proc/array.c
66024+++ b/fs/proc/array.c
66025@@ -60,6 +60,7 @@
66026 #include <linux/tty.h>
66027 #include <linux/string.h>
66028 #include <linux/mman.h>
66029+#include <linux/grsecurity.h>
66030 #include <linux/proc_fs.h>
66031 #include <linux/ioport.h>
66032 #include <linux/uaccess.h>
66033@@ -322,6 +323,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66034 cpumask_pr_args(&task->cpus_allowed));
66035 }
66036
66037+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66038+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66039+{
66040+ if (p->mm)
66041+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66042+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66043+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66044+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66045+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66046+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66047+ else
66048+ seq_printf(m, "PaX:\t-----\n");
66049+}
66050+#endif
66051+
66052 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66053 struct pid *pid, struct task_struct *task)
66054 {
66055@@ -340,9 +356,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66056 task_cpus_allowed(m, task);
66057 cpuset_task_status_allowed(m, task);
66058 task_context_switch_counts(m, task);
66059+
66060+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66061+ task_pax(m, task);
66062+#endif
66063+
66064+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66065+ task_grsec_rbac(m, task);
66066+#endif
66067+
66068 return 0;
66069 }
66070
66071+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66072+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66073+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66074+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66075+#endif
66076+
66077 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66078 struct pid *pid, struct task_struct *task, int whole)
66079 {
66080@@ -364,6 +395,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66081 char tcomm[sizeof(task->comm)];
66082 unsigned long flags;
66083
66084+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66085+ if (current->exec_id != m->exec_id) {
66086+ gr_log_badprocpid("stat");
66087+ return 0;
66088+ }
66089+#endif
66090+
66091 state = *get_task_state(task);
66092 vsize = eip = esp = 0;
66093 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66094@@ -434,6 +472,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66095 gtime = task_gtime(task);
66096 }
66097
66098+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66099+ if (PAX_RAND_FLAGS(mm)) {
66100+ eip = 0;
66101+ esp = 0;
66102+ wchan = 0;
66103+ }
66104+#endif
66105+#ifdef CONFIG_GRKERNSEC_HIDESYM
66106+ wchan = 0;
66106+ eip = 0;
66107+ esp = 0;
66109+#endif
66110+
66111 /* scale priority and nice values from timeslices to -20..20 */
66112 /* to make it look like a "normal" Unix priority/nice value */
66113 priority = task_prio(task);
66114@@ -465,9 +516,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66115 seq_put_decimal_ull(m, ' ', vsize);
66116 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
66117 seq_put_decimal_ull(m, ' ', rsslim);
66118+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66119+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
66120+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
66121+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
66122+#else
66123 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
66124 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
66125 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
66126+#endif
66127 seq_put_decimal_ull(m, ' ', esp);
66128 seq_put_decimal_ull(m, ' ', eip);
66129 /* The signal information here is obsolete.
66130@@ -489,7 +546,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66131 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
66132 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
66133
66134- if (mm && permitted) {
66135+ if (mm && permitted
66136+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66137+ && !PAX_RAND_FLAGS(mm)
66138+#endif
66139+ ) {
66140 seq_put_decimal_ull(m, ' ', mm->start_data);
66141 seq_put_decimal_ull(m, ' ', mm->end_data);
66142 seq_put_decimal_ull(m, ' ', mm->start_brk);
66143@@ -527,8 +588,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66144 struct pid *pid, struct task_struct *task)
66145 {
66146 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
66147- struct mm_struct *mm = get_task_mm(task);
66148+ struct mm_struct *mm;
66149
66150+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66151+ if (current->exec_id != m->exec_id) {
66152+ gr_log_badprocpid("statm");
66153+ return 0;
66154+ }
66155+#endif
66156+ mm = get_task_mm(task);
66157 if (mm) {
66158 size = task_statm(mm, &shared, &text, &data, &resident);
66159 mmput(mm);
66160@@ -551,6 +619,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66161 return 0;
66162 }
66163
66164+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66165+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
66166+{
66167+ unsigned long flags;
66168+ u32 curr_ip = 0;
66169+
66170+ if (lock_task_sighand(task, &flags)) {
66171+ curr_ip = task->signal->curr_ip;
66172+ unlock_task_sighand(task, &flags);
66173+ }
66174+ return seq_printf(m, "%pI4\n", &curr_ip);
66175+}
66176+#endif
66177+
66178 #ifdef CONFIG_CHECKPOINT_RESTORE
66179 static struct pid *
66180 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
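
task_pax() renders each PaX feature as one letter, upper-case when the flag is set and lower-case when clear, producing the "PaX:\tPeMRs"-style line in /proc/PID/status. A compilable model of the rendering (the MF_PAX_* values below are illustrative, not the kernel's):

#include <stdio.h>

#define MF_PAX_PAGEEXEC 0x01  /* illustrative values only */
#define MF_PAX_EMUTRAMP 0x02
#define MF_PAX_MPROTECT 0x04
#define MF_PAX_RANDMMAP 0x08
#define MF_PAX_SEGMEXEC 0x10

static void print_pax(unsigned long flags)
{
	printf("PaX:\t%c%c%c%c%c\n",
	       flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
	       flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
	       flags & MF_PAX_MPROTECT ? 'M' : 'm',
	       flags & MF_PAX_RANDMMAP ? 'R' : 'r',
	       flags & MF_PAX_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
	print_pax(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP);
	return 0;
}
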
66181diff --git a/fs/proc/base.c b/fs/proc/base.c
66182index 3f3d7ae..68de109 100644
66183--- a/fs/proc/base.c
66184+++ b/fs/proc/base.c
66185@@ -113,6 +113,14 @@ struct pid_entry {
66186 union proc_op op;
66187 };
66188
66189+struct getdents_callback {
66190+ struct linux_dirent __user * current_dir;
66191+ struct linux_dirent __user * previous;
66192+ struct file * file;
66193+ int count;
66194+ int error;
66195+};
66196+
66197 #define NOD(NAME, MODE, IOP, FOP, OP) { \
66198 .name = (NAME), \
66199 .len = sizeof(NAME) - 1, \
66200@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
66201 return 0;
66202 }
66203
66204+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66205+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66206+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66207+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66208+#endif
66209+
66210 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66211 struct pid *pid, struct task_struct *task)
66212 {
66213 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
66214 if (mm && !IS_ERR(mm)) {
66215 unsigned int nwords = 0;
66216+
66217+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66218+ /* allow if we're currently ptracing this task */
66219+ if (PAX_RAND_FLAGS(mm) &&
66220+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
66221+ mmput(mm);
66222+ return 0;
66223+ }
66224+#endif
66225+
66226 do {
66227 nwords += 2;
66228 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
66229@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66230 }
66231
66232
66233-#ifdef CONFIG_KALLSYMS
66234+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66235 /*
66236 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
66237 * Returns the resolved symbol. If that fails, simply return the address.
66238@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
66239 mutex_unlock(&task->signal->cred_guard_mutex);
66240 }
66241
66242-#ifdef CONFIG_STACKTRACE
66243+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66244
66245 #define MAX_STACK_TRACE_DEPTH 64
66246
66247@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
66248 return 0;
66249 }
66250
66251-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66252+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66253 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66254 struct pid *pid, struct task_struct *task)
66255 {
66256@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66257 /************************************************************************/
66258
66259 /* permission checks */
66260-static int proc_fd_access_allowed(struct inode *inode)
66261+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
66262 {
66263 struct task_struct *task;
66264 int allowed = 0;
66265@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
66266 */
66267 task = get_proc_task(inode);
66268 if (task) {
66269- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66270+ if (log)
66271+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66272+ else
66273+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66274 put_task_struct(task);
66275 }
66276 return allowed;
66277@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
66278 struct task_struct *task,
66279 int hide_pid_min)
66280 {
66281+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66282+ return false;
66283+
66284+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66285+ rcu_read_lock();
66286+ {
66287+ const struct cred *tmpcred = current_cred();
66288+ const struct cred *cred = __task_cred(task);
66289+
66290+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
66291+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66292+ || in_group_p(grsec_proc_gid)
66293+#endif
66294+ ) {
66295+ rcu_read_unlock();
66296+ return true;
66297+ }
66298+ }
66299+ rcu_read_unlock();
66300+
66301+ if (!pid->hide_pid)
66302+ return false;
66303+#endif
66304+
66305 if (pid->hide_pid < hide_pid_min)
66306 return true;
66307 if (in_group_p(pid->pid_gid))
66308 return true;
66309+
66310 return ptrace_may_access(task, PTRACE_MODE_READ);
66311 }
66312
66313@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
66314 put_task_struct(task);
66315
66316 if (!has_perms) {
66317+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66318+ {
66319+#else
66320 if (pid->hide_pid == 2) {
66321+#endif
66322 /*
66323 * Let's make getdents(), stat(), and open()
66324 * consistent with each other. If a process
66325@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
66326
66327 if (task) {
66328 mm = mm_access(task, mode);
66329+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
66330+ mmput(mm);
66331+ mm = ERR_PTR(-EPERM);
66332+ }
66333 put_task_struct(task);
66334
66335 if (!IS_ERR_OR_NULL(mm)) {
66336@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66337 return PTR_ERR(mm);
66338
66339 file->private_data = mm;
66340+
66341+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66342+ file->f_version = current->exec_id;
66343+#endif
66344+
66345 return 0;
66346 }
66347
66348@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66349 ssize_t copied;
66350 char *page;
66351
66352+#ifdef CONFIG_GRKERNSEC
66353+ if (write)
66354+ return -EPERM;
66355+#endif
66356+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66357+ if (file->f_version != current->exec_id) {
66358+ gr_log_badprocpid("mem");
66359+ return 0;
66360+ }
66361+#endif
66362+
66363 if (!mm)
66364 return 0;
66365
66366@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66367 goto free;
66368
66369 while (count > 0) {
66370- int this_len = min_t(int, count, PAGE_SIZE);
66371+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66372
66373 if (write && copy_from_user(page, buf, this_len)) {
66374 copied = -EFAULT;
66375@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66376 if (!mm)
66377 return 0;
66378
66379+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66380+ if (file->f_version != current->exec_id) {
66381+ gr_log_badprocpid("environ");
66382+ return 0;
66383+ }
66384+#endif
66385+
66386 page = (char *)__get_free_page(GFP_TEMPORARY);
66387 if (!page)
66388 return -ENOMEM;
66389@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66390 goto free;
66391 while (count > 0) {
66392 size_t this_len, max_len;
66393- int retval;
66394+ ssize_t retval;
66395
66396 if (src >= (mm->env_end - mm->env_start))
66397 break;
66398@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66399 int error = -EACCES;
66400
66401 /* Are we allowed to snoop on the tasks file descriptors? */
66402- if (!proc_fd_access_allowed(inode))
66403+ if (!proc_fd_access_allowed(inode, 0))
66404 goto out;
66405
66406 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66407@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66408 struct path path;
66409
66410 /* Are we allowed to snoop on the tasks file descriptors? */
66411- if (!proc_fd_access_allowed(inode))
66412- goto out;
66413+ /* Logging this is needed for learning on Chromium to work properly,
66414+ but we don't want to flood the logs from 'ps', which does a readlink
66415+ on /proc/fd/2 of every task in the listing; nor do we want 'ps' to
66416+ learn CAP_SYS_PTRACE, as that isn't necessary for its basic functionality.
66417+ */
66418+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66419+ if (!proc_fd_access_allowed(inode, 0))
66420+ goto out;
66421+ } else {
66422+ if (!proc_fd_access_allowed(inode, 1))
66423+ goto out;
66424+ }
66425
66426 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66427 if (error)
66428@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66429 rcu_read_lock();
66430 cred = __task_cred(task);
66431 inode->i_uid = cred->euid;
66432+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66433+ inode->i_gid = grsec_proc_gid;
66434+#else
66435 inode->i_gid = cred->egid;
66436+#endif
66437 rcu_read_unlock();
66438 }
66439 security_task_to_inode(task, inode);
66440@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66441 return -ENOENT;
66442 }
66443 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66444+#ifdef CONFIG_GRKERNSEC_PROC_USER
66445+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66446+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66447+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66448+#endif
66449 task_dumpable(task)) {
66450 cred = __task_cred(task);
66451 stat->uid = cred->euid;
66452+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66453+ stat->gid = grsec_proc_gid;
66454+#else
66455 stat->gid = cred->egid;
66456+#endif
66457 }
66458 }
66459 rcu_read_unlock();
66460@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
66461
66462 if (task) {
66463 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66464+#ifdef CONFIG_GRKERNSEC_PROC_USER
66465+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66466+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66467+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66468+#endif
66469 task_dumpable(task)) {
66470 rcu_read_lock();
66471 cred = __task_cred(task);
66472 inode->i_uid = cred->euid;
66473+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66474+ inode->i_gid = grsec_proc_gid;
66475+#else
66476 inode->i_gid = cred->egid;
66477+#endif
66478 rcu_read_unlock();
66479 } else {
66480 inode->i_uid = GLOBAL_ROOT_UID;
66481@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66482 if (!task)
66483 goto out_no_task;
66484
66485+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66486+ goto out;
66487+
66488 /*
66489 * Yes, it does not scale. And it should not. Don't add
66490 * new entries into /proc/<tgid>/ without very good reasons.
66491@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66492 if (!task)
66493 return -ENOENT;
66494
66495+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66496+ goto out;
66497+
66498 if (!dir_emit_dots(file, ctx))
66499 goto out;
66500
66501@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66502 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66503 #endif
66504 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66505-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66506+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66507 ONE("syscall", S_IRUSR, proc_pid_syscall),
66508 #endif
66509 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66510@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66511 #ifdef CONFIG_SECURITY
66512 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66513 #endif
66514-#ifdef CONFIG_KALLSYMS
66515+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66516 ONE("wchan", S_IRUGO, proc_pid_wchan),
66517 #endif
66518-#ifdef CONFIG_STACKTRACE
66519+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66520 ONE("stack", S_IRUSR, proc_pid_stack),
66521 #endif
66522 #ifdef CONFIG_SCHEDSTATS
66523@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66524 #ifdef CONFIG_HARDWALL
66525 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66526 #endif
66527+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66528+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66529+#endif
66530 #ifdef CONFIG_USER_NS
66531 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66532 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66533@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
66534 if (!inode)
66535 goto out;
66536
66537+#ifdef CONFIG_GRKERNSEC_PROC_USER
66538+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66539+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66540+ inode->i_gid = grsec_proc_gid;
66541+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66542+#else
66543 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66544+#endif
66545 inode->i_op = &proc_tgid_base_inode_operations;
66546 inode->i_fop = &proc_tgid_base_operations;
66547 inode->i_flags|=S_IMMUTABLE;
66548@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66549 if (!task)
66550 goto out;
66551
66552+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66553+ goto out_put_task;
66554+
66555 result = proc_pid_instantiate(dir, dentry, task, NULL);
66556+out_put_task:
66557 put_task_struct(task);
66558 out:
66559 return ERR_PTR(result);
66560@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
66561 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66562 #endif
66563 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66564-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66565+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66566 ONE("syscall", S_IRUSR, proc_pid_syscall),
66567 #endif
66568 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66569@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
66570 #ifdef CONFIG_SECURITY
66571 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66572 #endif
66573-#ifdef CONFIG_KALLSYMS
66574+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66575 ONE("wchan", S_IRUGO, proc_pid_wchan),
66576 #endif
66577-#ifdef CONFIG_STACKTRACE
66578+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66579 ONE("stack", S_IRUSR, proc_pid_stack),
66580 #endif
66581 #ifdef CONFIG_SCHEDSTATS
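
Several of the base.c hunks stamp current->exec_id into f_version at open time and compare it on every read, so a /proc/PID/mem or environ descriptor opened before an execve reads back empty afterwards and trips gr_log_badprocpid(). A userspace model of that generation check (types and names are stand-ins):

#include <stdio.h>

struct task { unsigned long exec_id; };
struct openfile { unsigned long f_version; };

static void model_open(struct openfile *f, const struct task *t)
{
	f->f_version = t->exec_id;      /* stamp generation at open */
}

static int model_read(const struct openfile *f, const struct task *t)
{
	if (f->f_version != t->exec_id) {
		fprintf(stderr, "gr_log_badprocpid: stale fd after execve\n");
		return 0;               /* pretend the file is empty */
	}
	return 1;                       /* would copy data out */
}

int main(void)
{
	struct task t = { .exec_id = 1 };
	struct openfile f;
	model_open(&f, &t);
	printf("before execve: %d\n", model_read(&f, &t));
	t.exec_id = 2;                  /* task re-executed */
	printf("after  execve: %d\n", model_read(&f, &t));
	return 0;
}
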
66582diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66583index cbd82df..c0407d2 100644
66584--- a/fs/proc/cmdline.c
66585+++ b/fs/proc/cmdline.c
66586@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66587
66588 static int __init proc_cmdline_init(void)
66589 {
66590+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66591+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66592+#else
66593 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66594+#endif
66595 return 0;
66596 }
66597 fs_initcall(proc_cmdline_init);
66598diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66599index 50493ed..248166b 100644
66600--- a/fs/proc/devices.c
66601+++ b/fs/proc/devices.c
66602@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66603
66604 static int __init proc_devices_init(void)
66605 {
66606+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66607+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66608+#else
66609 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66610+#endif
66611 return 0;
66612 }
66613 fs_initcall(proc_devices_init);
66614diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66615index 8e5ad83..1f07a8c 100644
66616--- a/fs/proc/fd.c
66617+++ b/fs/proc/fd.c
66618@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66619 if (!task)
66620 return -ENOENT;
66621
66622- files = get_files_struct(task);
66623+ if (!gr_acl_handle_procpidmem(task))
66624+ files = get_files_struct(task);
66625 put_task_struct(task);
66626
66627 if (files) {
66628@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66629 */
66630 int proc_fd_permission(struct inode *inode, int mask)
66631 {
66632+ struct task_struct *task;
66633 int rv = generic_permission(inode, mask);
66634- if (rv == 0)
66635- return 0;
66636+
66637 if (task_tgid(current) == proc_pid(inode))
66638 rv = 0;
66639+
66640+ task = get_proc_task(inode);
66641+ if (task == NULL)
66642+ return rv;
66643+
66644+ if (gr_acl_handle_procpidmem(task))
66645+ rv = -EACCES;
66646+
66647+ put_task_struct(task);
66648+
66649 return rv;
66650 }
66651
66652diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66653index be65b20..2998ba8 100644
66654--- a/fs/proc/generic.c
66655+++ b/fs/proc/generic.c
66656@@ -22,6 +22,7 @@
66657 #include <linux/bitops.h>
66658 #include <linux/spinlock.h>
66659 #include <linux/completion.h>
66660+#include <linux/grsecurity.h>
66661 #include <asm/uaccess.h>
66662
66663 #include "internal.h"
66664@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66665 return proc_lookup_de(PDE(dir), dir, dentry);
66666 }
66667
66668+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66669+ unsigned int flags)
66670+{
66671+ if (gr_proc_is_restricted())
66672+ return ERR_PTR(-EACCES);
66673+
66674+ return proc_lookup_de(PDE(dir), dir, dentry);
66675+}
66676+
66677 /*
66678 * This returns non-zero if at EOF, so that the /proc
66679 * root directory can use this and check if it should
66680@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66681 return proc_readdir_de(PDE(inode), file, ctx);
66682 }
66683
66684+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66685+{
66686+ struct inode *inode = file_inode(file);
66687+
66688+ if (gr_proc_is_restricted())
66689+ return -EACCES;
66690+
66691+ return proc_readdir_de(PDE(inode), file, ctx);
66692+}
66693+
66694 /*
66695 * These are the generic /proc directory operations. They
66696 * use the in-memory "struct proc_dir_entry" tree to parse
66697@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66698 .iterate = proc_readdir,
66699 };
66700
66701+static const struct file_operations proc_dir_restricted_operations = {
66702+ .llseek = generic_file_llseek,
66703+ .read = generic_read_dir,
66704+ .iterate = proc_readdir_restrict,
66705+};
66706+
66707 /*
66708 * proc directories can do almost nothing..
66709 */
66710@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66711 .setattr = proc_notify_change,
66712 };
66713
66714+static const struct inode_operations proc_dir_restricted_inode_operations = {
66715+ .lookup = proc_lookup_restrict,
66716+ .getattr = proc_getattr,
66717+ .setattr = proc_notify_change,
66718+};
66719+
66720 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66721 {
66722 int ret;
66723@@ -441,6 +473,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66724 }
66725 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66726
66727+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66728+ struct proc_dir_entry *parent, void *data)
66729+{
66730+ struct proc_dir_entry *ent;
66731+
66732+ if (mode == 0)
66733+ mode = S_IRUGO | S_IXUGO;
66734+
66735+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66736+ if (ent) {
66737+ ent->data = data;
66738+ ent->restricted = 1;
66739+ ent->proc_fops = &proc_dir_restricted_operations;
66740+ ent->proc_iops = &proc_dir_restricted_inode_operations;
66741+ parent->nlink++;
66742+ if (proc_register(parent, ent) < 0) {
66743+ kfree(ent);
66744+ parent->nlink--;
66745+ ent = NULL;
66746+ }
66747+ }
66748+ return ent;
66749+}
66750+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66751+
66752 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66753 struct proc_dir_entry *parent)
66754 {
66755@@ -455,6 +512,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66756 }
66757 EXPORT_SYMBOL(proc_mkdir);
66758
66759+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66760+ struct proc_dir_entry *parent)
66761+{
66762+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66763+}
66764+EXPORT_SYMBOL(proc_mkdir_restrict);
66765+
66766 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66767 struct proc_dir_entry *parent,
66768 const struct file_operations *proc_fops,
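
proc_mkdir_data_restrict() wires a directory to a parallel pair of operations tables whose lookup and iterate entry points consult gr_proc_is_restricted() first and fail closed with -EACCES. The sketch below models the pattern with a stubbed predicate and a minimal ops struct (all names are illustrative):

#include <errno.h>
#include <stdio.h>

static int gr_proc_is_restricted_stub(void) { return 1; }

static int readdir_plain(void) { return 0; }

static int readdir_restrict(void)
{
	/* gate first, then fall through to the ordinary implementation */
	if (gr_proc_is_restricted_stub())
		return -EACCES;
	return readdir_plain();
}

struct dir_ops { int (*iterate)(void); };

static const struct dir_ops proc_dir_ops            = { .iterate = readdir_plain };
static const struct dir_ops proc_dir_restricted_ops = { .iterate = readdir_restrict };

int main(void)
{
	printf("plain:      %d\n", proc_dir_ops.iterate());
	printf("restricted: %d\n", proc_dir_restricted_ops.iterate());
	return 0;
}
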
66769diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66770index 7697b66..8d8e541 100644
66771--- a/fs/proc/inode.c
66772+++ b/fs/proc/inode.c
66773@@ -24,11 +24,17 @@
66774 #include <linux/mount.h>
66775 #include <linux/magic.h>
66776 #include <linux/namei.h>
66777+#include <linux/grsecurity.h>
66778
66779 #include <asm/uaccess.h>
66780
66781 #include "internal.h"
66782
66783+#ifdef CONFIG_PROC_SYSCTL
66784+extern const struct inode_operations proc_sys_inode_operations;
66785+extern const struct inode_operations proc_sys_dir_operations;
66786+#endif
66787+
66788 static void proc_evict_inode(struct inode *inode)
66789 {
66790 struct proc_dir_entry *de;
66791@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66792 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66793 sysctl_head_put(head);
66794 }
66795+
66796+#ifdef CONFIG_PROC_SYSCTL
66797+ if (inode->i_op == &proc_sys_inode_operations ||
66798+ inode->i_op == &proc_sys_dir_operations)
66799+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66800+#endif
66801+
66802 }
66803
66804 static struct kmem_cache * proc_inode_cachep;
66805@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66806 if (de->mode) {
66807 inode->i_mode = de->mode;
66808 inode->i_uid = de->uid;
66809+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66810+ inode->i_gid = grsec_proc_gid;
66811+#else
66812 inode->i_gid = de->gid;
66813+#endif
66814 }
66815 if (de->size)
66816 inode->i_size = de->size;
66817diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66818index c835b94..c9e01a3 100644
66819--- a/fs/proc/internal.h
66820+++ b/fs/proc/internal.h
66821@@ -47,9 +47,10 @@ struct proc_dir_entry {
66822 struct completion *pde_unload_completion;
66823 struct list_head pde_openers; /* who did ->open, but not ->release */
66824 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66825+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66826 u8 namelen;
66827 char name[];
66828-};
66829+} __randomize_layout;
66830
66831 union proc_op {
66832 int (*proc_get_link)(struct dentry *, struct path *);
66833@@ -67,7 +68,7 @@ struct proc_inode {
66834 struct ctl_table *sysctl_entry;
66835 const struct proc_ns_operations *ns_ops;
66836 struct inode vfs_inode;
66837-};
66838+} __randomize_layout;
66839
66840 /*
66841 * General functions
66842@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66843 struct pid *, struct task_struct *);
66844 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66845 struct pid *, struct task_struct *);
66846+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66847+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66848+ struct pid *, struct task_struct *);
66849+#endif
66850
66851 /*
66852 * base.c
66853@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66854 * generic.c
66855 */
66856 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66857+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66858 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66859 struct dentry *);
66860 extern int proc_readdir(struct file *, struct dir_context *);
66861+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66862 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66863
66864 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66865diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66866index a352d57..cb94a5c 100644
66867--- a/fs/proc/interrupts.c
66868+++ b/fs/proc/interrupts.c
66869@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66870
66871 static int __init proc_interrupts_init(void)
66872 {
66873+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66874+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66875+#else
66876 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66877+#endif
66878 return 0;
66879 }
66880 fs_initcall(proc_interrupts_init);
66881diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66882index 91a4e64..69f1a3e 100644
66883--- a/fs/proc/kcore.c
66884+++ b/fs/proc/kcore.c
66885@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66886 * the addresses in the elf_phdr on our list.
66887 */
66888 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66889- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66890+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66891+ if (tsz > buflen)
66892 tsz = buflen;
66893-
66894+
66895 while (buflen) {
66896 struct kcore_list *m;
66897
66898@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66899 kfree(elf_buf);
66900 } else {
66901 if (kern_addr_valid(start)) {
66902- unsigned long n;
66903+ char *elf_buf;
66904+ mm_segment_t oldfs;
66905
66906- n = copy_to_user(buffer, (char *)start, tsz);
66907- /*
66908- * We cannot distinguish between fault on source
66909- * and fault on destination. When this happens
66910- * we clear too and hope it will trigger the
66911- * EFAULT again.
66912- */
66913- if (n) {
66914- if (clear_user(buffer + tsz - n,
66915- n))
66916+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66917+ if (!elf_buf)
66918+ return -ENOMEM;
66919+ oldfs = get_fs();
66920+ set_fs(KERNEL_DS);
66921+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66922+ set_fs(oldfs);
66923+ if (copy_to_user(buffer, elf_buf, tsz)) {
66924+ kfree(elf_buf);
66925 return -EFAULT;
66926+ }
66927 }
66928+ set_fs(oldfs);
66929+ kfree(elf_buf);
66930 } else {
66931 if (clear_user(buffer, tsz))
66932 return -EFAULT;
66933@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66934
66935 static int open_kcore(struct inode *inode, struct file *filp)
66936 {
66937+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66938+ return -EPERM;
66939+#endif
66940 if (!capable(CAP_SYS_RAWIO))
66941 return -EPERM;
66942 if (kcore_need_update)
66943@@ -580,7 +587,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
66944 return NOTIFY_OK;
66945 }
66946
66947-static struct notifier_block kcore_callback_nb __meminitdata = {
66948+static struct notifier_block kcore_callback_nb __meminitconst = {
66949 .notifier_call = kcore_callback,
66950 .priority = 0,
66951 };
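
The read_kcore() rewrite stops copying straight from the kernel address to the user buffer; it bounces through a kmalloc'd buffer under set_fs(KERNEL_DS), so a fault while reading the source is detected separately from a fault writing the destination, replacing the old clear-and-hope fallback. In the userspace model below, memcpy stands in for __copy_from_user and copy_to_user; real kernel code must use those helpers:

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static ssize_t bounce_copy(char *user_buf, const char *src, size_t tsz)
{
	char *elf_buf = malloc(tsz);    /* stands in for kmalloc(tsz, GFP_KERNEL) */
	if (!elf_buf)
		return -ENOMEM;
	memcpy(elf_buf, src, tsz);      /* stands in for __copy_from_user */
	memcpy(user_buf, elf_buf, tsz); /* stands in for copy_to_user */
	free(elf_buf);
	return (ssize_t)tsz;
}

int main(void)
{
	char src[8] = "kcore!!";
	char dst[8];
	printf("%zd bytes: %s\n", bounce_copy(dst, src, sizeof(src)), dst);
	return 0;
}
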
66952diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66953index d3ebf2e..6ad42d1 100644
66954--- a/fs/proc/meminfo.c
66955+++ b/fs/proc/meminfo.c
66956@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66957 vmi.used >> 10,
66958 vmi.largest_chunk >> 10
66959 #ifdef CONFIG_MEMORY_FAILURE
66960- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66961+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66962 #endif
66963 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66964 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66965diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66966index d4a3574..b421ce9 100644
66967--- a/fs/proc/nommu.c
66968+++ b/fs/proc/nommu.c
66969@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66970
66971 if (file) {
66972 seq_pad(m, ' ');
66973- seq_path(m, &file->f_path, "");
66974+ seq_path(m, &file->f_path, "\n\\");
66975 }
66976
66977 seq_putc(m, '\n');
66978diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66979index 1bde894..22ac7eb 100644
66980--- a/fs/proc/proc_net.c
66981+++ b/fs/proc/proc_net.c
66982@@ -23,9 +23,27 @@
66983 #include <linux/nsproxy.h>
66984 #include <net/net_namespace.h>
66985 #include <linux/seq_file.h>
66986+#include <linux/grsecurity.h>
66987
66988 #include "internal.h"
66989
66990+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66991+static struct seq_operations *ipv6_seq_ops_addr;
66992+
66993+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66994+{
66995+ ipv6_seq_ops_addr = addr;
66996+}
66997+
66998+void unregister_ipv6_seq_ops_addr(void)
66999+{
67000+ ipv6_seq_ops_addr = NULL;
67001+}
67002+
67003+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
67004+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
67005+#endif
67006+
67007 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
67008 {
67009 return pde->parent->data;
67010@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
67011 return maybe_get_net(PDE_NET(PDE(inode)));
67012 }
67013
67014+extern const struct seq_operations dev_seq_ops;
67015+
67016 int seq_open_net(struct inode *ino, struct file *f,
67017 const struct seq_operations *ops, int size)
67018 {
67019@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67020
67021 BUG_ON(size < sizeof(*p));
67022
67023+ /* only permit access to /proc/net/dev */
67024+ if (
67025+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67026+ ops != ipv6_seq_ops_addr &&
67027+#endif
67028+ ops != &dev_seq_ops && gr_proc_is_restricted())
67029+ return -EACCES;
67030+
67031 net = get_proc_net(ino);
67032 if (net == NULL)
67033 return -ENXIO;
67034@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67035 int err;
67036 struct net *net;
67037
67038+ if (gr_proc_is_restricted())
67039+ return -EACCES;
67040+
67041 err = -ENXIO;
67042 net = get_proc_net(inode);
67043 if (net == NULL)
67044diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67045index f92d5dd..26398ac 100644
67046--- a/fs/proc/proc_sysctl.c
67047+++ b/fs/proc/proc_sysctl.c
67048@@ -11,13 +11,21 @@
67049 #include <linux/namei.h>
67050 #include <linux/mm.h>
67051 #include <linux/module.h>
67052+#include <linux/nsproxy.h>
67053+#ifdef CONFIG_GRKERNSEC
67054+#include <net/net_namespace.h>
67055+#endif
67056 #include "internal.h"
67057
67058+extern int gr_handle_chroot_sysctl(const int op);
67059+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67060+ const int op);
67061+
67062 static const struct dentry_operations proc_sys_dentry_operations;
67063 static const struct file_operations proc_sys_file_operations;
67064-static const struct inode_operations proc_sys_inode_operations;
67065+const struct inode_operations proc_sys_inode_operations;
67066 static const struct file_operations proc_sys_dir_file_operations;
67067-static const struct inode_operations proc_sys_dir_operations;
67068+const struct inode_operations proc_sys_dir_operations;
67069
67070 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67071 {
67072@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67073
67074 err = NULL;
67075 d_set_d_op(dentry, &proc_sys_dentry_operations);
67076+
67077+ gr_handle_proc_create(dentry, inode);
67078+
67079 d_add(dentry, inode);
67080
67081 out:
67082@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67083 struct inode *inode = file_inode(filp);
67084 struct ctl_table_header *head = grab_header(inode);
67085 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
67086+ int op = write ? MAY_WRITE : MAY_READ;
67087 ssize_t error;
67088 size_t res;
67089
67090@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67091 * and won't be until we finish.
67092 */
67093 error = -EPERM;
67094- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
67095+ if (sysctl_perm(head, table, op))
67096 goto out;
67097
67098 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
67099@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67100 if (!table->proc_handler)
67101 goto out;
67102
67103+#ifdef CONFIG_GRKERNSEC
67104+ error = -EPERM;
67105+ if (gr_handle_chroot_sysctl(op))
67106+ goto out;
67107+ dget(filp->f_path.dentry);
67108+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
67109+ dput(filp->f_path.dentry);
67110+ goto out;
67111+ }
67112+ dput(filp->f_path.dentry);
67113+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
67114+ goto out;
67115+ if (write) {
67116+ if (current->nsproxy->net_ns != table->extra2) {
67117+ if (!capable(CAP_SYS_ADMIN))
67118+ goto out;
67119+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
67120+ goto out;
67121+ }
67122+#endif
67123+
67124 /* careful: calling conventions are nasty here */
67125 res = count;
67126 error = table->proc_handler(table, write, buf, &res, ppos);
67127@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
67128 return false;
67129 } else {
67130 d_set_d_op(child, &proc_sys_dentry_operations);
67131+
67132+ gr_handle_proc_create(child, inode);
67133+
67134 d_add(child, inode);
67135 }
67136 } else {
67137@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
67138 if ((*pos)++ < ctx->pos)
67139 return true;
67140
67141+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
67142+ return 0;
67143+
67144 if (unlikely(S_ISLNK(table->mode)))
67145 res = proc_sys_link_fill_cache(file, ctx, head, table);
67146 else
67147@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
67148 if (IS_ERR(head))
67149 return PTR_ERR(head);
67150
67151+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
67152+ return -ENOENT;
67153+
67154 generic_fillattr(inode, stat);
67155 if (table)
67156 stat->mode = (stat->mode & S_IFMT) | table->mode;
67157@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
67158 .llseek = generic_file_llseek,
67159 };
67160
67161-static const struct inode_operations proc_sys_inode_operations = {
67162+const struct inode_operations proc_sys_inode_operations = {
67163 .permission = proc_sys_permission,
67164 .setattr = proc_sys_setattr,
67165 .getattr = proc_sys_getattr,
67166 };
67167
67168-static const struct inode_operations proc_sys_dir_operations = {
67169+const struct inode_operations proc_sys_dir_operations = {
67170 .lookup = proc_sys_lookup,
67171 .permission = proc_sys_permission,
67172 .setattr = proc_sys_setattr,
67173@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
67174 static struct ctl_dir *new_dir(struct ctl_table_set *set,
67175 const char *name, int namelen)
67176 {
67177- struct ctl_table *table;
67178+ ctl_table_no_const *table;
67179 struct ctl_dir *new;
67180 struct ctl_node *node;
67181 char *new_name;
67182@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
67183 return NULL;
67184
67185 node = (struct ctl_node *)(new + 1);
67186- table = (struct ctl_table *)(node + 1);
67187+ table = (ctl_table_no_const *)(node + 1);
67188 new_name = (char *)(table + 2);
67189 memcpy(new_name, name, namelen);
67190 new_name[namelen] = '\0';
67191@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
67192 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
67193 struct ctl_table_root *link_root)
67194 {
67195- struct ctl_table *link_table, *entry, *link;
67196+ ctl_table_no_const *link_table, *link;
67197+ struct ctl_table *entry;
67198 struct ctl_table_header *links;
67199 struct ctl_node *node;
67200 char *link_name;
67201@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
67202 return NULL;
67203
67204 node = (struct ctl_node *)(links + 1);
67205- link_table = (struct ctl_table *)(node + nr_entries);
67206+ link_table = (ctl_table_no_const *)(node + nr_entries);
67207 link_name = (char *)&link_table[nr_entries + 1];
67208
67209 for (link = link_table, entry = table; entry->procname; link++, entry++) {
67210@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67211 struct ctl_table_header ***subheader, struct ctl_table_set *set,
67212 struct ctl_table *table)
67213 {
67214- struct ctl_table *ctl_table_arg = NULL;
67215- struct ctl_table *entry, *files;
67216+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
67217+ struct ctl_table *entry;
67218 int nr_files = 0;
67219 int nr_dirs = 0;
67220 int err = -ENOMEM;
67221@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67222 nr_files++;
67223 }
67224
67225- files = table;
67226 /* If there are mixed files and directories we need a new table */
67227 if (nr_dirs && nr_files) {
67228- struct ctl_table *new;
67229+ ctl_table_no_const *new;
67230 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
67231 GFP_KERNEL);
67232 if (!files)
67233@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67234 /* Register everything except a directory full of subdirectories */
67235 if (nr_files || !nr_dirs) {
67236 struct ctl_table_header *header;
67237- header = __register_sysctl_table(set, path, files);
67238+ header = __register_sysctl_table(set, path, files ? files : table);
67239 if (!header) {
67240 kfree(ctl_table_arg);
67241 goto out;
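
The proc_sysctl.c hunks swap struct ctl_table for ctl_table_no_const at the few sites that build tables at runtime; under PaX's constification plugin the ordinary type is treated as read-only, and the _no_const alias marks the exceptions. A loose illustration of the split follows — the typedef here is a plain alias, unlike the plugin-driven original:

#include <stdio.h>

struct ctl_table { const char *procname; int data; };
typedef struct ctl_table ctl_table_no_const;   /* mutable alias (illustrative) */

static void register_table(const struct ctl_table *t)
{
	printf("registered %s\n", t->procname);
}

int main(void)
{
	static const struct ctl_table fixed = { "kernel_fixed", 1 };
	ctl_table_no_const dynamic = { 0 };        /* built up at runtime */
	dynamic.procname = "net_dynamic";
	register_table(&fixed);
	register_table(&dynamic);
	return 0;
}
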
67242diff --git a/fs/proc/root.c b/fs/proc/root.c
67243index e74ac9f..35e89f4 100644
67244--- a/fs/proc/root.c
67245+++ b/fs/proc/root.c
67246@@ -188,7 +188,15 @@ void __init proc_root_init(void)
67247 proc_mkdir("openprom", NULL);
67248 #endif
67249 proc_tty_init();
67250+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67251+#ifdef CONFIG_GRKERNSEC_PROC_USER
67252+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
67253+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67254+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
67255+#endif
67256+#else
67257 proc_mkdir("bus", NULL);
67258+#endif
67259 proc_sys_init();
67260 }
67261
67262diff --git a/fs/proc/stat.c b/fs/proc/stat.c
67263index 510413eb..34d9a8c 100644
67264--- a/fs/proc/stat.c
67265+++ b/fs/proc/stat.c
67266@@ -11,6 +11,7 @@
67267 #include <linux/irqnr.h>
67268 #include <linux/cputime.h>
67269 #include <linux/tick.h>
67270+#include <linux/grsecurity.h>
67271
67272 #ifndef arch_irq_stat_cpu
67273 #define arch_irq_stat_cpu(cpu) 0
67274@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
67275 u64 sum_softirq = 0;
67276 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
67277 struct timespec boottime;
67278+ int unrestricted = 1;
67279+
67280+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67281+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67282+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
67283+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67284+ && !in_group_p(grsec_proc_gid)
67285+#endif
67286+ )
67287+ unrestricted = 0;
67288+#endif
67289+#endif
67290
67291 user = nice = system = idle = iowait =
67292 irq = softirq = steal = 0;
67293@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
67294 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67295 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67296 idle += get_idle_time(i);
67297- iowait += get_iowait_time(i);
67298- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67299- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67300- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67301- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67302- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67303- sum += kstat_cpu_irqs_sum(i);
67304- sum += arch_irq_stat_cpu(i);
67305+ if (unrestricted) {
67306+ iowait += get_iowait_time(i);
67307+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67308+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67309+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67310+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67311+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67312+ sum += kstat_cpu_irqs_sum(i);
67313+ sum += arch_irq_stat_cpu(i);
67314+ for (j = 0; j < NR_SOFTIRQS; j++) {
67315+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67316
67317- for (j = 0; j < NR_SOFTIRQS; j++) {
67318- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67319-
67320- per_softirq_sums[j] += softirq_stat;
67321- sum_softirq += softirq_stat;
67322+ per_softirq_sums[j] += softirq_stat;
67323+ sum_softirq += softirq_stat;
67324+ }
67325 }
67326 }
67327- sum += arch_irq_stat();
67328+ if (unrestricted)
67329+ sum += arch_irq_stat();
67330
67331 seq_puts(p, "cpu ");
67332 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67333@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
67334 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67335 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67336 idle = get_idle_time(i);
67337- iowait = get_iowait_time(i);
67338- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67339- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67340- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67341- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67342- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67343+ if (unrestricted) {
67344+ iowait = get_iowait_time(i);
67345+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67346+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67347+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67348+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67349+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67350+ }
67351 seq_printf(p, "cpu%d", i);
67352 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67353 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67354@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67355
67356 /* sum again ? it could be updated? */
67357 for_each_irq_nr(j)
67358- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
67359+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
67360
67361 seq_printf(p,
67362 "\nctxt %llu\n"
67363@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67364 "processes %lu\n"
67365 "procs_running %lu\n"
67366 "procs_blocked %lu\n",
67367- nr_context_switches(),
67368+ unrestricted ? nr_context_switches() : 0ULL,
67369 (unsigned long)jif,
67370- total_forks,
67371- nr_running(),
67372- nr_iowait());
67373+ unrestricted ? total_forks : 0UL,
67374+ unrestricted ? nr_running() : 0UL,
67375+ unrestricted ? nr_iowait() : 0UL);
67376
67377 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67378
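/*
 * [Editor's note — illustrative sketch, not part of the patch.] The
 * show_stat() hunk computes one "unrestricted" flag up front and then
 * substitutes 0 for every sensitive counter, rather than scattering
 * permission checks through the output path. The shape in plain C
 * (the root-only policy below is a stand-in):
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int unrestricted = (geteuid() == 0);    /* stand-in policy */
        unsigned long long ctxt = 123456, forks = 789;

        /* unprivileged readers see zeros instead of real counters */
        printf("ctxt %llu\n", unrestricted ? ctxt : 0ULL);
        printf("processes %llu\n", unrestricted ? forks : 0ULL);
        return 0;
}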
67379diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67380index 6dee68d..1b4add0 100644
67381--- a/fs/proc/task_mmu.c
67382+++ b/fs/proc/task_mmu.c
67383@@ -13,12 +13,19 @@
67384 #include <linux/swap.h>
67385 #include <linux/swapops.h>
67386 #include <linux/mmu_notifier.h>
67387+#include <linux/grsecurity.h>
67388
67389 #include <asm/elf.h>
67390 #include <asm/uaccess.h>
67391 #include <asm/tlbflush.h>
67392 #include "internal.h"
67393
67394+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67395+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67396+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67397+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67398+#endif
67399+
67400 void task_mem(struct seq_file *m, struct mm_struct *mm)
67401 {
67402 unsigned long data, text, lib, swap, ptes, pmds;
67403@@ -57,8 +64,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67404 "VmLib:\t%8lu kB\n"
67405 "VmPTE:\t%8lu kB\n"
67406 "VmPMD:\t%8lu kB\n"
67407- "VmSwap:\t%8lu kB\n",
67408- hiwater_vm << (PAGE_SHIFT-10),
67409+ "VmSwap:\t%8lu kB\n"
67410+
67411+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67412+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67413+#endif
67414+
67415+ ,hiwater_vm << (PAGE_SHIFT-10),
67416 total_vm << (PAGE_SHIFT-10),
67417 mm->locked_vm << (PAGE_SHIFT-10),
67418 mm->pinned_vm << (PAGE_SHIFT-10),
67419@@ -68,7 +80,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67420 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67421 ptes >> 10,
67422 pmds >> 10,
67423- swap << (PAGE_SHIFT-10));
67424+ swap << (PAGE_SHIFT-10)
67425+
67426+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67427+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67428+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67429+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67430+#else
67431+ , mm->context.user_cs_base
67432+ , mm->context.user_cs_limit
67433+#endif
67434+#endif
67435+
67436+ );
67437 }
67438
67439 unsigned long task_vsize(struct mm_struct *mm)
67440@@ -285,13 +309,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67441 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67442 }
67443
67444- /* We don't show the stack guard page in /proc/maps */
67445+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67446+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
67447+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
67448+#else
67449 start = vma->vm_start;
67450- if (stack_guard_page_start(vma, start))
67451- start += PAGE_SIZE;
67452 end = vma->vm_end;
67453- if (stack_guard_page_end(vma, end))
67454- end -= PAGE_SIZE;
67455+#endif
67456
67457 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
67458 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
67459@@ -301,7 +325,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67460 flags & VM_WRITE ? 'w' : '-',
67461 flags & VM_EXEC ? 'x' : '-',
67462 flags & VM_MAYSHARE ? 's' : 'p',
67463+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67464+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
67465+#else
67466 pgoff,
67467+#endif
67468 MAJOR(dev), MINOR(dev), ino);
67469
67470 /*
67471@@ -310,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67472 */
67473 if (file) {
67474 seq_pad(m, ' ');
67475- seq_path(m, &file->f_path, "\n");
67476+ seq_path(m, &file->f_path, "\n\\");
67477 goto done;
67478 }
67479
67480@@ -341,8 +369,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67481 * Thread stack in /proc/PID/task/TID/maps or
67482 * the main process stack.
67483 */
67484- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67485- vma->vm_end >= mm->start_stack)) {
67486+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67487+ (vma->vm_start <= mm->start_stack &&
67488+ vma->vm_end >= mm->start_stack)) {
67489 name = "[stack]";
67490 } else {
67491 /* Thread stack in /proc/PID/maps */
67492@@ -362,6 +391,12 @@ done:
67493
67494 static int show_map(struct seq_file *m, void *v, int is_pid)
67495 {
67496+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67497+ if (current->exec_id != m->exec_id) {
67498+ gr_log_badprocpid("maps");
67499+ return 0;
67500+ }
67501+#endif
67502 show_map_vma(m, v, is_pid);
67503 m_cache_vma(m, v);
67504 return 0;
67505@@ -620,9 +655,18 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67506 .private = &mss,
67507 };
67508
67509+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67510+ if (current->exec_id != m->exec_id) {
67511+ gr_log_badprocpid("smaps");
67512+ return 0;
67513+ }
67514+#endif
67515 memset(&mss, 0, sizeof mss);
67516- /* mmap_sem is held in m_start */
67517- walk_page_vma(vma, &smaps_walk);
67518+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67519+ if (!PAX_RAND_FLAGS(vma->vm_mm))
67520+#endif
67521+ /* mmap_sem is held in m_start */
67522+ walk_page_vma(vma, &smaps_walk);
67523
67524 show_map_vma(m, vma, is_pid);
67525
67526@@ -641,7 +685,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67527 "KernelPageSize: %8lu kB\n"
67528 "MMUPageSize: %8lu kB\n"
67529 "Locked: %8lu kB\n",
67530+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67531+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67532+#else
67533 (vma->vm_end - vma->vm_start) >> 10,
67534+#endif
67535 mss.resident >> 10,
67536 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67537 mss.shared_clean >> 10,
67538@@ -1491,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67539 char buffer[64];
67540 int nid;
67541
67542+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67543+ if (current->exec_id != m->exec_id) {
67544+ gr_log_badprocpid("numa_maps");
67545+ return 0;
67546+ }
67547+#endif
67548+
67549 if (!mm)
67550 return 0;
67551
67552@@ -1505,11 +1560,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67553 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
67554 }
67555
67556+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67557+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67558+#else
67559 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67560+#endif
67561
67562 if (file) {
67563 seq_puts(m, " file=");
67564- seq_path(m, &file->f_path, "\n\t= ");
67565+ seq_path(m, &file->f_path, "\n\t\\= ");
67566 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67567 seq_puts(m, " heap");
67568 } else {
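/*
 * [Editor's note — illustrative sketch, not part of the patch.]
 * PAX_RAND_FLAGS() above zeroes printed addresses for any mm other
 * than the reader's own whose layout is randomized, so /proc files
 * cannot be used to defeat ASLR. A miniature with stand-in types:
 */
#include <stdio.h>

#define MF_RANDMMAP 0x1 /* stand-in for MF_PAX_RANDMMAP */

struct fake_mm { unsigned long flags, vm_start, vm_end; };

static int rand_flags(const struct fake_mm *mm, const struct fake_mm *cur)
{
        return mm != NULL && mm != cur && (mm->flags & MF_RANDMMAP);
}

static void show_vma(const struct fake_mm *mm, const struct fake_mm *cur)
{
        printf("%08lx-%08lx\n",
               rand_flags(mm, cur) ? 0UL : mm->vm_start,
               rand_flags(mm, cur) ? 0UL : mm->vm_end);
}

int main(void)
{
        struct fake_mm mine  = { MF_RANDMMAP, 0x00400000UL, 0x00401000UL };
        struct fake_mm other = { MF_RANDMMAP, 0xb7700000UL, 0xb7701000UL };

        show_vma(&mine, &mine);         /* own mapping stays visible */
        show_vma(&other, &mine);        /* foreign randomized mm -> zeros */
        return 0;
}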
67569diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67570index 599ec2e..f1413ae 100644
67571--- a/fs/proc/task_nommu.c
67572+++ b/fs/proc/task_nommu.c
67573@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67574 else
67575 bytes += kobjsize(mm);
67576
67577- if (current->fs && current->fs->users > 1)
67578+ if (current->fs && atomic_read(&current->fs->users) > 1)
67579 sbytes += kobjsize(current->fs);
67580 else
67581 bytes += kobjsize(current->fs);
67582@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67583
67584 if (file) {
67585 seq_pad(m, ' ');
67586- seq_path(m, &file->f_path, "");
67587+ seq_path(m, &file->f_path, "\n\\");
67588 } else if (mm) {
67589 pid_t tid = pid_of_stack(priv, vma, is_pid);
67590
67591diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67592index 4e61388..1a2523d 100644
67593--- a/fs/proc/vmcore.c
67594+++ b/fs/proc/vmcore.c
67595@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67596 nr_bytes = count;
67597
67598 /* If pfn is not ram, return zeros for sparse dump files */
67599- if (pfn_is_ram(pfn) == 0)
67600- memset(buf, 0, nr_bytes);
67601- else {
67602+ if (pfn_is_ram(pfn) == 0) {
67603+ if (userbuf) {
67604+ if (clear_user((char __force_user *)buf, nr_bytes))
67605+ return -EFAULT;
67606+ } else
67607+ memset(buf, 0, nr_bytes);
67608+ } else {
67609 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67610 offset, userbuf);
67611 if (tmp < 0)
67612@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67613 static int copy_to(void *target, void *src, size_t size, int userbuf)
67614 {
67615 if (userbuf) {
67616- if (copy_to_user((char __user *) target, src, size))
67617+ if (copy_to_user((char __force_user *) target, src, size))
67618 return -EFAULT;
67619 } else {
67620 memcpy(target, src, size);
67621@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67622 if (*fpos < m->offset + m->size) {
67623 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67624 start = m->paddr + *fpos - m->offset;
67625- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67626+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67627 if (tmp < 0)
67628 return tmp;
67629 buflen -= tsz;
67630@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67631 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67632 size_t buflen, loff_t *fpos)
67633 {
67634- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67635+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67636 }
67637
67638 /*
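/*
 * [Editor's note — illustrative sketch, not part of the patch.] The
 * read_from_oldmem() fix zeroes user destinations with clear_user(),
 * which can fault gracefully, instead of memset() on a user pointer.
 * A userspace stand-in for the branch-on-destination shape (the
 * "fault" here is just a NULL check):
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for clear_user(): a zeroing primitive that may fail */
static int checked_clear(char *ubuf, size_t n)
{
        if (!ubuf)
                return -EFAULT;
        memset(ubuf, 0, n);
        return 0;
}

static int fill_zeros(char *buf, size_t n, int userbuf)
{
        if (userbuf)
                return checked_clear(buf, n);   /* faultable path */
        memset(buf, 0, n);                      /* trusted kernel buffer */
        return 0;
}

int main(void)
{
        char kbuf[8];

        printf("kernel buf: %d\n", fill_zeros(kbuf, sizeof(kbuf), 0));
        printf("bad user buf: %d\n", fill_zeros(NULL, 8, 1));
        return 0;
}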
67639diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67640index d3fb2b6..43a8140 100644
67641--- a/fs/qnx6/qnx6.h
67642+++ b/fs/qnx6/qnx6.h
67643@@ -74,7 +74,7 @@ enum {
67644 BYTESEX_BE,
67645 };
67646
67647-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67648+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67649 {
67650 if (sbi->s_bytesex == BYTESEX_LE)
67651 return le64_to_cpu((__force __le64)n);
67652@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67653 return (__force __fs64)cpu_to_be64(n);
67654 }
67655
67656-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67657+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67658 {
67659 if (sbi->s_bytesex == BYTESEX_LE)
67660 return le32_to_cpu((__force __le32)n);
67661diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67662index bb2869f..d34ada8 100644
67663--- a/fs/quota/netlink.c
67664+++ b/fs/quota/netlink.c
67665@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67666 void quota_send_warning(struct kqid qid, dev_t dev,
67667 const char warntype)
67668 {
67669- static atomic_t seq;
67670+ static atomic_unchecked_t seq;
67671 struct sk_buff *skb;
67672 void *msg_head;
67673 int ret;
67674@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67675 "VFS: Not enough memory to send quota warning.\n");
67676 return;
67677 }
67678- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67679+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67680 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67681 if (!msg_head) {
67682 printk(KERN_ERR
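/*
 * [Editor's note — illustrative sketch, not part of the patch.]
 * atomic_unchecked_t marks counters whose wraparound is harmless
 * (here, a netlink sequence number), exempting them from PaX
 * REFCOUNT's overflow trap. A C11 atomic with deliberate unsigned
 * wrap shows the intended semantics:
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq; /* sequence number: wrapping is fine */

static unsigned int next_seq(void)
{
        /* unsigned overflow is well-defined; no saturation wanted */
        return atomic_fetch_add_explicit(&seq, 1,
                                         memory_order_relaxed) + 1;
}

int main(void)
{
        atomic_store(&seq, UINT_MAX - 1);
        for (int i = 0; i < 3; i++)
                printf("seq=%u\n", next_seq()); /* UINT_MAX, 0, 1 */
        return 0;
}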
67683diff --git a/fs/read_write.c b/fs/read_write.c
67684index 8e1b687..bad2eec 100644
67685--- a/fs/read_write.c
67686+++ b/fs/read_write.c
67687@@ -553,7 +553,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67688
67689 old_fs = get_fs();
67690 set_fs(get_ds());
67691- p = (__force const char __user *)buf;
67692+ p = (const char __force_user *)buf;
67693 if (count > MAX_RW_COUNT)
67694 count = MAX_RW_COUNT;
67695 if (file->f_op->write)
67696diff --git a/fs/readdir.c b/fs/readdir.c
67697index ced6791..936687b 100644
67698--- a/fs/readdir.c
67699+++ b/fs/readdir.c
67700@@ -18,6 +18,7 @@
67701 #include <linux/security.h>
67702 #include <linux/syscalls.h>
67703 #include <linux/unistd.h>
67704+#include <linux/namei.h>
67705
67706 #include <asm/uaccess.h>
67707
67708@@ -71,6 +72,7 @@ struct old_linux_dirent {
67709 struct readdir_callback {
67710 struct dir_context ctx;
67711 struct old_linux_dirent __user * dirent;
67712+ struct file * file;
67713 int result;
67714 };
67715
67716@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67717 buf->result = -EOVERFLOW;
67718 return -EOVERFLOW;
67719 }
67720+
67721+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67722+ return 0;
67723+
67724 buf->result++;
67725 dirent = buf->dirent;
67726 if (!access_ok(VERIFY_WRITE, dirent,
67727@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67728 if (!f.file)
67729 return -EBADF;
67730
67731+ buf.file = f.file;
67732 error = iterate_dir(f.file, &buf.ctx);
67733 if (buf.result)
67734 error = buf.result;
67735@@ -145,6 +152,7 @@ struct getdents_callback {
67736 struct dir_context ctx;
67737 struct linux_dirent __user * current_dir;
67738 struct linux_dirent __user * previous;
67739+ struct file * file;
67740 int count;
67741 int error;
67742 };
67743@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67744 buf->error = -EOVERFLOW;
67745 return -EOVERFLOW;
67746 }
67747+
67748+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67749+ return 0;
67750+
67751 dirent = buf->previous;
67752 if (dirent) {
67753 if (__put_user(offset, &dirent->d_off))
67754@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67755 if (!f.file)
67756 return -EBADF;
67757
67758+ buf.file = f.file;
67759 error = iterate_dir(f.file, &buf.ctx);
67760 if (error >= 0)
67761 error = buf.error;
67762@@ -230,6 +243,7 @@ struct getdents_callback64 {
67763 struct dir_context ctx;
67764 struct linux_dirent64 __user * current_dir;
67765 struct linux_dirent64 __user * previous;
67766+ struct file *file;
67767 int count;
67768 int error;
67769 };
67770@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67771 buf->error = -EINVAL; /* only used if we fail.. */
67772 if (reclen > buf->count)
67773 return -EINVAL;
67774+
67775+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67776+ return 0;
67777+
67778 dirent = buf->previous;
67779 if (dirent) {
67780 if (__put_user(offset, &dirent->d_off))
67781@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67782 if (!f.file)
67783 return -EBADF;
67784
67785+ buf.file = f.file;
67786 error = iterate_dir(f.file, &buf.ctx);
67787 if (error >= 0)
67788 error = buf.error;
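/*
 * [Editor's note — illustrative sketch, not part of the patch.] Each
 * readdir callback struct embeds struct dir_context as its first
 * member so the fill function can recover per-open state (the added
 * "file" field) and return 0 to silently skip entries a policy hook
 * hides. A minimal round-trip with stand-in names:
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dir_ctx {
        int (*actor)(struct dir_ctx *, const char *);
};

struct readdir_cb {
        struct dir_ctx ctx;     /* must stay the first member */
        const char *hidden;     /* stand-in for per-open policy state */
        int emitted;
};

static int fillonedir(struct dir_ctx *ctx, const char *name)
{
        struct readdir_cb *buf = container_of(ctx, struct readdir_cb, ctx);

        if (!strcmp(name, buf->hidden))
                return 0;       /* policy: skip without an error */
        printf("%s\n", name);
        buf->emitted++;
        return 0;
}

int main(void)
{
        struct readdir_cb buf = { { fillonedir }, ".secret", 0 };
        const char *names[] = { "a", ".secret", "b" };

        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                buf.ctx.actor(&buf.ctx, names[i]);
        printf("emitted=%d\n", buf.emitted);
        return 0;
}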
67789diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67790index 9c02d96..6562c10 100644
67791--- a/fs/reiserfs/do_balan.c
67792+++ b/fs/reiserfs/do_balan.c
67793@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67794 return;
67795 }
67796
67797- atomic_inc(&fs_generation(tb->tb_sb));
67798+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67799 do_balance_starts(tb);
67800
67801 /*
67802diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67803index aca73dd..e3c558d 100644
67804--- a/fs/reiserfs/item_ops.c
67805+++ b/fs/reiserfs/item_ops.c
67806@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67807 }
67808
67809 static struct item_operations errcatch_ops = {
67810- errcatch_bytes_number,
67811- errcatch_decrement_key,
67812- errcatch_is_left_mergeable,
67813- errcatch_print_item,
67814- errcatch_check_item,
67815+ .bytes_number = errcatch_bytes_number,
67816+ .decrement_key = errcatch_decrement_key,
67817+ .is_left_mergeable = errcatch_is_left_mergeable,
67818+ .print_item = errcatch_print_item,
67819+ .check_item = errcatch_check_item,
67820
67821- errcatch_create_vi,
67822- errcatch_check_left,
67823- errcatch_check_right,
67824- errcatch_part_size,
67825- errcatch_unit_num,
67826- errcatch_print_vi
67827+ .create_vi = errcatch_create_vi,
67828+ .check_left = errcatch_check_left,
67829+ .check_right = errcatch_check_right,
67830+ .part_size = errcatch_part_size,
67831+ .unit_num = errcatch_unit_num,
67832+ .print_vi = errcatch_print_vi
67833 };
67834
67835 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
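/*
 * [Editor's note — illustrative sketch, not part of the patch.] The
 * errcatch_ops change replaces positional initializers with C99
 * designated ones; once GRKERNSEC_RANDSTRUCT reorders "ops" struct
 * members at compile time, positional initialization would bind
 * handlers to the wrong slots. Designated init is order-independent:
 */
#include <stdio.h>

struct item_ops {
        int (*bytes_number)(int);
        void (*print_item)(int);
};

static int errcatch_bytes_number(int x) { return -x; }
static void errcatch_print_item(int x) { printf("item %d\n", x); }

/* correct no matter how the members of struct item_ops are ordered */
static const struct item_ops errcatch_ops = {
        .print_item = errcatch_print_item,
        .bytes_number = errcatch_bytes_number,
};

int main(void)
{
        errcatch_ops.print_item(errcatch_ops.bytes_number(-7));
        return 0;
}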
67836diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67837index 621b9f3..af527fd 100644
67838--- a/fs/reiserfs/procfs.c
67839+++ b/fs/reiserfs/procfs.c
67840@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67841 "SMALL_TAILS " : "NO_TAILS ",
67842 replay_only(sb) ? "REPLAY_ONLY " : "",
67843 convert_reiserfs(sb) ? "CONV " : "",
67844- atomic_read(&r->s_generation_counter),
67845+ atomic_read_unchecked(&r->s_generation_counter),
67846 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67847 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67848 SF(s_good_search_by_key_reada), SF(s_bmaps),
67849diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67850index bb79cdd..fcf49ef 100644
67851--- a/fs/reiserfs/reiserfs.h
67852+++ b/fs/reiserfs/reiserfs.h
67853@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67854 /* Comment? -Hans */
67855 wait_queue_head_t s_wait;
67856 /* increased by one every time the tree gets re-balanced */
67857- atomic_t s_generation_counter;
67858+ atomic_unchecked_t s_generation_counter;
67859
67860 /* File system properties. Currently holds on-disk FS format */
67861 unsigned long s_properties;
67862@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67863 #define REISERFS_USER_MEM 1 /* user memory mode */
67864
67865 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67866-#define get_generation(s) atomic_read (&fs_generation(s))
67867+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67868 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67869 #define __fs_changed(gen,s) (gen != get_generation (s))
67870 #define fs_changed(gen,s) \
67871diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67872index 71fbbe3..eff29ba 100644
67873--- a/fs/reiserfs/super.c
67874+++ b/fs/reiserfs/super.c
67875@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67876 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67877 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67878 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67879+#ifdef CONFIG_REISERFS_FS_XATTR
67880+ /* turn on user xattrs by default */
67881+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67882+#endif
67883 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67884 sbi->s_alloc_options.preallocmin = 0;
67885 /* Preallocate by 16 blocks (17-1) at once */
67886diff --git a/fs/select.c b/fs/select.c
67887index f684c75..4117611 100644
67888--- a/fs/select.c
67889+++ b/fs/select.c
67890@@ -20,6 +20,7 @@
67891 #include <linux/export.h>
67892 #include <linux/slab.h>
67893 #include <linux/poll.h>
67894+#include <linux/security.h>
67895 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67896 #include <linux/file.h>
67897 #include <linux/fdtable.h>
67898@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67899 struct poll_list *walk = head;
67900 unsigned long todo = nfds;
67901
67902+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67903 if (nfds > rlimit(RLIMIT_NOFILE))
67904 return -EINVAL;
67905
67906diff --git a/fs/seq_file.c b/fs/seq_file.c
67907index 555f821..34684d7 100644
67908--- a/fs/seq_file.c
67909+++ b/fs/seq_file.c
67910@@ -12,6 +12,8 @@
67911 #include <linux/slab.h>
67912 #include <linux/cred.h>
67913 #include <linux/mm.h>
67914+#include <linux/sched.h>
67915+#include <linux/grsecurity.h>
67916
67917 #include <asm/uaccess.h>
67918 #include <asm/page.h>
67919@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67920
67921 static void *seq_buf_alloc(unsigned long size)
67922 {
67923- void *buf;
67924-
67925- /*
67926- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67927- * it's better to fall back to vmalloc() than to kill things.
67928- */
67929- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67930- if (!buf && size > PAGE_SIZE)
67931- buf = vmalloc(size);
67932- return buf;
67933+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67934 }
67935
67936 /**
67937@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67938 #ifdef CONFIG_USER_NS
67939 p->user_ns = file->f_cred->user_ns;
67940 #endif
67941+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67942+ p->exec_id = current->exec_id;
67943+#endif
67944
67945 /*
67946 * Wrappers around seq_open(e.g. swaps_open) need to be
67947@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67948 }
67949 EXPORT_SYMBOL(seq_open);
67950
67951+
67952+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67953+{
67954+ if (gr_proc_is_restricted())
67955+ return -EACCES;
67956+
67957+ return seq_open(file, op);
67958+}
67959+EXPORT_SYMBOL(seq_open_restrict);
67960+
67961 static int traverse(struct seq_file *m, loff_t offset)
67962 {
67963 loff_t pos = 0, index;
67964@@ -158,7 +164,7 @@ Eoverflow:
67965 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67966 {
67967 struct seq_file *m = file->private_data;
67968- size_t copied = 0;
67969+ ssize_t copied = 0;
67970 loff_t pos;
67971 size_t n;
67972 void *p;
67973@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
67974 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67975 void *data)
67976 {
67977- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67978+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67979 int res = -ENOMEM;
67980
67981 if (op) {
67982@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67983 }
67984 EXPORT_SYMBOL(single_open_size);
67985
67986+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67987+ void *data)
67988+{
67989+ if (gr_proc_is_restricted())
67990+ return -EACCES;
67991+
67992+ return single_open(file, show, data);
67993+}
67994+EXPORT_SYMBOL(single_open_restrict);
67995+
67996+
67997 int single_release(struct inode *inode, struct file *file)
67998 {
67999 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
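/*
 * [Editor's note — illustrative sketch, not part of the patch.]
 * seq_open_restrict()/single_open_restrict() above are thin gates:
 * they refuse restricted readers before delegating to the normal
 * open path, so no per-file state exists to tear down on denial.
 * The same shape with a stand-in policy:
 */
#include <errno.h>
#include <stdio.h>

static int proc_is_restricted(void) { return 1; }       /* stand-in */

static int seq_open_demo(const char *name)
{
        printf("opened %s\n", name);
        return 0;
}

static int seq_open_restrict_demo(const char *name)
{
        if (proc_is_restricted())
                return -EACCES; /* deny before allocating anything */
        return seq_open_demo(name);
}

int main(void)
{
        printf("rc=%d\n", seq_open_restrict_demo("/proc/foo"));
        return 0;
}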
68000diff --git a/fs/splice.c b/fs/splice.c
68001index 7968da9..275187d 100644
68002--- a/fs/splice.c
68003+++ b/fs/splice.c
68004@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68005 pipe_lock(pipe);
68006
68007 for (;;) {
68008- if (!pipe->readers) {
68009+ if (!atomic_read(&pipe->readers)) {
68010 send_sig(SIGPIPE, current, 0);
68011 if (!ret)
68012 ret = -EPIPE;
68013@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68014 page_nr++;
68015 ret += buf->len;
68016
68017- if (pipe->files)
68018+ if (atomic_read(&pipe->files))
68019 do_wakeup = 1;
68020
68021 if (!--spd->nr_pages)
68022@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68023 do_wakeup = 0;
68024 }
68025
68026- pipe->waiting_writers++;
68027+ atomic_inc(&pipe->waiting_writers);
68028 pipe_wait(pipe);
68029- pipe->waiting_writers--;
68030+ atomic_dec(&pipe->waiting_writers);
68031 }
68032
68033 pipe_unlock(pipe);
68034@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68035 old_fs = get_fs();
68036 set_fs(get_ds());
68037 /* The cast to a user pointer is valid due to the set_fs() */
68038- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68039+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68040 set_fs(old_fs);
68041
68042 return res;
68043@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68044 old_fs = get_fs();
68045 set_fs(get_ds());
68046 /* The cast to a user pointer is valid due to the set_fs() */
68047- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68048+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68049 set_fs(old_fs);
68050
68051 return res;
68052@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68053 goto err;
68054
68055 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68056- vec[i].iov_base = (void __user *) page_address(page);
68057+ vec[i].iov_base = (void __force_user *) page_address(page);
68058 vec[i].iov_len = this_len;
68059 spd.pages[i] = page;
68060 spd.nr_pages++;
68061@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68062 ops->release(pipe, buf);
68063 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68064 pipe->nrbufs--;
68065- if (pipe->files)
68066+ if (atomic_read(&pipe->files))
68067 sd->need_wakeup = true;
68068 }
68069
68070@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68071 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68072 {
68073 while (!pipe->nrbufs) {
68074- if (!pipe->writers)
68075+ if (!atomic_read(&pipe->writers))
68076 return 0;
68077
68078- if (!pipe->waiting_writers && sd->num_spliced)
68079+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
68080 return 0;
68081
68082 if (sd->flags & SPLICE_F_NONBLOCK)
68083@@ -1025,7 +1025,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
68084 ops->release(pipe, buf);
68085 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68086 pipe->nrbufs--;
68087- if (pipe->files)
68088+ if (atomic_read(&pipe->files))
68089 sd.need_wakeup = true;
68090 } else {
68091 buf->offset += ret;
68092@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
68093 * out of the pipe right after the splice_to_pipe(). So set
68094 * PIPE_READERS appropriately.
68095 */
68096- pipe->readers = 1;
68097+ atomic_set(&pipe->readers, 1);
68098
68099 current->splice_pipe = pipe;
68100 }
68101@@ -1482,6 +1482,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
68102
68103 partial[buffers].offset = off;
68104 partial[buffers].len = plen;
68105+ partial[buffers].private = 0;
68106
68107 off = 0;
68108 len -= plen;
68109@@ -1718,9 +1719,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68110 ret = -ERESTARTSYS;
68111 break;
68112 }
68113- if (!pipe->writers)
68114+ if (!atomic_read(&pipe->writers))
68115 break;
68116- if (!pipe->waiting_writers) {
68117+ if (!atomic_read(&pipe->waiting_writers)) {
68118 if (flags & SPLICE_F_NONBLOCK) {
68119 ret = -EAGAIN;
68120 break;
68121@@ -1752,7 +1753,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68122 pipe_lock(pipe);
68123
68124 while (pipe->nrbufs >= pipe->buffers) {
68125- if (!pipe->readers) {
68126+ if (!atomic_read(&pipe->readers)) {
68127 send_sig(SIGPIPE, current, 0);
68128 ret = -EPIPE;
68129 break;
68130@@ -1765,9 +1766,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68131 ret = -ERESTARTSYS;
68132 break;
68133 }
68134- pipe->waiting_writers++;
68135+ atomic_inc(&pipe->waiting_writers);
68136 pipe_wait(pipe);
68137- pipe->waiting_writers--;
68138+ atomic_dec(&pipe->waiting_writers);
68139 }
68140
68141 pipe_unlock(pipe);
68142@@ -1803,14 +1804,14 @@ retry:
68143 pipe_double_lock(ipipe, opipe);
68144
68145 do {
68146- if (!opipe->readers) {
68147+ if (!atomic_read(&opipe->readers)) {
68148 send_sig(SIGPIPE, current, 0);
68149 if (!ret)
68150 ret = -EPIPE;
68151 break;
68152 }
68153
68154- if (!ipipe->nrbufs && !ipipe->writers)
68155+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
68156 break;
68157
68158 /*
68159@@ -1907,7 +1908,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68160 pipe_double_lock(ipipe, opipe);
68161
68162 do {
68163- if (!opipe->readers) {
68164+ if (!atomic_read(&opipe->readers)) {
68165 send_sig(SIGPIPE, current, 0);
68166 if (!ret)
68167 ret = -EPIPE;
68168@@ -1952,7 +1953,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68169 * return EAGAIN if we have the potential of some data in the
68170 * future, otherwise just return 0
68171 */
68172- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
68173+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
68174 ret = -EAGAIN;
68175
68176 pipe_unlock(ipipe);
68177diff --git a/fs/stat.c b/fs/stat.c
68178index ae0c3ce..9ee641c 100644
68179--- a/fs/stat.c
68180+++ b/fs/stat.c
68181@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
68182 stat->gid = inode->i_gid;
68183 stat->rdev = inode->i_rdev;
68184 stat->size = i_size_read(inode);
68185- stat->atime = inode->i_atime;
68186- stat->mtime = inode->i_mtime;
68187+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68188+ stat->atime = inode->i_ctime;
68189+ stat->mtime = inode->i_ctime;
68190+ } else {
68191+ stat->atime = inode->i_atime;
68192+ stat->mtime = inode->i_mtime;
68193+ }
68194 stat->ctime = inode->i_ctime;
68195 stat->blksize = (1 << inode->i_blkbits);
68196 stat->blocks = inode->i_blocks;
68197@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
68198 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
68199 {
68200 struct inode *inode = path->dentry->d_inode;
68201+ int retval;
68202
68203- if (inode->i_op->getattr)
68204- return inode->i_op->getattr(path->mnt, path->dentry, stat);
68205+ if (inode->i_op->getattr) {
68206+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
68207+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68208+ stat->atime = stat->ctime;
68209+ stat->mtime = stat->ctime;
68210+ }
68211+ return retval;
68212+ }
68213
68214 generic_fillattr(inode, stat);
68215 return 0;
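/*
 * [Editor's note — illustrative sketch, not part of the patch.] For
 * devices usable as a side channel, the stat hunks report ctime in
 * place of atime/mtime unless the caller is privileged, so other
 * users cannot watch activity timing through stat(2). The
 * substitution in miniature (the types and helper are stand-ins):
 */
#include <stdio.h>
#include <time.h>

struct kstat_demo { time_t atime, mtime, ctime; };

static void fill_times(struct kstat_demo *st, time_t a, time_t m,
                       time_t c, int sidechannel, int privileged)
{
        st->ctime = c;
        if (sidechannel && !privileged) {
                st->atime = c;  /* hide the real access time */
                st->mtime = c;  /* hide the real modify time */
        } else {
                st->atime = a;
                st->mtime = m;
        }
}

int main(void)
{
        struct kstat_demo st;

        fill_times(&st, 1000, 2000, 3000, /*sidechannel=*/1, /*priv=*/0);
        printf("atime=%ld mtime=%ld ctime=%ld\n",
               (long)st.atime, (long)st.mtime, (long)st.ctime);
        return 0;
}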
68216diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
68217index 0b45ff4..edf9d3a 100644
68218--- a/fs/sysfs/dir.c
68219+++ b/fs/sysfs/dir.c
68220@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
68221 kfree(buf);
68222 }
68223
68224+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68225+extern int grsec_enable_sysfs_restrict;
68226+#endif
68227+
68228 /**
68229 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
68230 * @kobj: object we're creating directory for
68231@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
68232 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68233 {
68234 struct kernfs_node *parent, *kn;
68235+ const char *name;
68236+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
68237+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68238+ const char *parent_name;
68239+#endif
68240
68241 BUG_ON(!kobj);
68242
68243+ name = kobject_name(kobj);
68244+
68245 if (kobj->parent)
68246 parent = kobj->parent->sd;
68247 else
68248@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68249 if (!parent)
68250 return -ENOENT;
68251
68252- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
68253- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
68254+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68255+ parent_name = parent->name;
68256+ mode = S_IRWXU;
68257+
68258+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
68259+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
68260+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
68261+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
68262+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
68263+ if (!grsec_enable_sysfs_restrict)
68264+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
68265+#endif
68266+
68267+ kn = kernfs_create_dir_ns(parent, name,
68268+ mode, kobj, ns);
68269 if (IS_ERR(kn)) {
68270 if (PTR_ERR(kn) == -EEXIST)
68271- sysfs_warn_dup(parent, kobject_name(kobj));
68272+ sysfs_warn_dup(parent, name);
68273 return PTR_ERR(kn);
68274 }
68275
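/*
 * [Editor's note — illustrative sketch, not part of the patch.] With
 * GRKERNSEC_SYSFS_RESTRICT, new sysfs directories default to
 * root-only (S_IRWXU) and only a short whitelist of parent/name
 * pairs stays world-readable. A trimmed version of that check in
 * plain C:
 */
#include <stdio.h>
#include <string.h>

#define MODE_ROOT_ONLY  0700
#define MODE_WORLD_READ 0755

static int whitelisted(const char *parent, const char *name)
{
        return (!strcmp(parent, "") &&
                (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
               (!strcmp(parent, "devices") && !strcmp(name, "system")) ||
               (!strcmp(parent, "system") && !strcmp(name, "cpu"));
}

int main(void)
{
        printf("/devices -> %o\n",
               whitelisted("", "devices") ? MODE_WORLD_READ : MODE_ROOT_ONLY);
        printf("/block   -> %o\n",
               whitelisted("", "block") ? MODE_WORLD_READ : MODE_ROOT_ONLY);
        return 0;
}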
68276diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
68277index 69d4889..a810bd4 100644
68278--- a/fs/sysv/sysv.h
68279+++ b/fs/sysv/sysv.h
68280@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
68281 #endif
68282 }
68283
68284-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68285+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68286 {
68287 if (sbi->s_bytesex == BYTESEX_PDP)
68288 return PDP_swab((__force __u32)n);
68289diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
68290index fb08b0c..65fcc7e 100644
68291--- a/fs/ubifs/io.c
68292+++ b/fs/ubifs/io.c
68293@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
68294 return err;
68295 }
68296
68297-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68298+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68299 {
68300 int err;
68301
68302diff --git a/fs/udf/misc.c b/fs/udf/misc.c
68303index c175b4d..8f36a16 100644
68304--- a/fs/udf/misc.c
68305+++ b/fs/udf/misc.c
68306@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
68307
68308 u8 udf_tag_checksum(const struct tag *t)
68309 {
68310- u8 *data = (u8 *)t;
68311+ const u8 *data = (const u8 *)t;
68312 u8 checksum = 0;
68313 int i;
68314 for (i = 0; i < sizeof(struct tag); ++i)
68315diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
68316index 8d974c4..b82f6ec 100644
68317--- a/fs/ufs/swab.h
68318+++ b/fs/ufs/swab.h
68319@@ -22,7 +22,7 @@ enum {
68320 BYTESEX_BE
68321 };
68322
68323-static inline u64
68324+static inline u64 __intentional_overflow(-1)
68325 fs64_to_cpu(struct super_block *sbp, __fs64 n)
68326 {
68327 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68328@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
68329 return (__force __fs64)cpu_to_be64(n);
68330 }
68331
68332-static inline u32
68333+static inline u32 __intentional_overflow(-1)
68334 fs32_to_cpu(struct super_block *sbp, __fs32 n)
68335 {
68336 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68337diff --git a/fs/utimes.c b/fs/utimes.c
68338index aa138d6..5f3a811 100644
68339--- a/fs/utimes.c
68340+++ b/fs/utimes.c
68341@@ -1,6 +1,7 @@
68342 #include <linux/compiler.h>
68343 #include <linux/file.h>
68344 #include <linux/fs.h>
68345+#include <linux/security.h>
68346 #include <linux/linkage.h>
68347 #include <linux/mount.h>
68348 #include <linux/namei.h>
68349@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68350 }
68351 }
68352 retry_deleg:
68353+
68354+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68355+ error = -EACCES;
68356+ goto mnt_drop_write_and_out;
68357+ }
68358+
68359 mutex_lock(&inode->i_mutex);
68360 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68361 mutex_unlock(&inode->i_mutex);
68362diff --git a/fs/xattr.c b/fs/xattr.c
68363index 4ef6985..a6cd6567 100644
68364--- a/fs/xattr.c
68365+++ b/fs/xattr.c
68366@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68367 return rc;
68368 }
68369
68370+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68371+ssize_t
68372+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68373+{
68374+ struct inode *inode = dentry->d_inode;
68375+ ssize_t error;
68376+
68377+ error = inode_permission(inode, MAY_EXEC);
68378+ if (error)
68379+ return error;
68380+
68381+ if (inode->i_op->getxattr)
68382+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68383+ else
68384+ error = -EOPNOTSUPP;
68385+
68386+ return error;
68387+}
68388+EXPORT_SYMBOL(pax_getxattr);
68389+#endif
68390+
68391 ssize_t
68392 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68393 {
68394@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68395 * Extended attribute SET operations
68396 */
68397 static long
68398-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68399+setxattr(struct path *path, const char __user *name, const void __user *value,
68400 size_t size, int flags)
68401 {
68402 int error;
68403@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68404 posix_acl_fix_xattr_from_user(kvalue, size);
68405 }
68406
68407- error = vfs_setxattr(d, kname, kvalue, size, flags);
68408+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68409+ error = -EACCES;
68410+ goto out;
68411+ }
68412+
68413+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68414 out:
68415 if (vvalue)
68416 vfree(vvalue);
68417@@ -376,7 +402,7 @@ retry:
68418 return error;
68419 error = mnt_want_write(path.mnt);
68420 if (!error) {
68421- error = setxattr(path.dentry, name, value, size, flags);
68422+ error = setxattr(&path, name, value, size, flags);
68423 mnt_drop_write(path.mnt);
68424 }
68425 path_put(&path);
68426@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68427 audit_file(f.file);
68428 error = mnt_want_write_file(f.file);
68429 if (!error) {
68430- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
68431+ error = setxattr(&f.file->f_path, name, value, size, flags);
68432 mnt_drop_write_file(f.file);
68433 }
68434 fdput(f);
68435@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68436 * Extended attribute REMOVE operations
68437 */
68438 static long
68439-removexattr(struct dentry *d, const char __user *name)
68440+removexattr(struct path *path, const char __user *name)
68441 {
68442 int error;
68443 char kname[XATTR_NAME_MAX + 1];
68444@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
68445 if (error < 0)
68446 return error;
68447
68448- return vfs_removexattr(d, kname);
68449+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68450+ return -EACCES;
68451+
68452+ return vfs_removexattr(path->dentry, kname);
68453 }
68454
68455 static int path_removexattr(const char __user *pathname,
68456@@ -623,7 +652,7 @@ retry:
68457 return error;
68458 error = mnt_want_write(path.mnt);
68459 if (!error) {
68460- error = removexattr(path.dentry, name);
68461+ error = removexattr(&path, name);
68462 mnt_drop_write(path.mnt);
68463 }
68464 path_put(&path);
68465@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
68466 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
68467 {
68468 struct fd f = fdget(fd);
68469+ struct path *path;
68470 int error = -EBADF;
68471
68472 if (!f.file)
68473 return error;
68474+ path = &f.file->f_path;
68475 audit_file(f.file);
68476 error = mnt_want_write_file(f.file);
68477 if (!error) {
68478- error = removexattr(f.file->f_path.dentry, name);
68479+ error = removexattr(path, name);
68480 mnt_drop_write_file(f.file);
68481 }
68482 fdput(f);
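/*
 * [Editor's note — illustrative sketch, not part of the patch.] The
 * xattr hunks widen setxattr()/removexattr() from a dentry to a full
 * struct path so the policy hook can consult the vfsmount as well as
 * the file. A sketch of the widened signature (all types are
 * stand-ins):
 */
#include <errno.h>
#include <stdio.h>

struct dentry_demo { const char *name; };
struct vfsmount_demo { int readonly; };
struct path_demo {
        struct vfsmount_demo *mnt;
        struct dentry_demo *dentry;
};

/* stand-in for gr_acl_handle_setxattr(dentry, mnt) */
static int policy_allows(const struct path_demo *p)
{
        return !p->mnt->readonly;
}

static long setxattr_demo(const struct path_demo *path, const char *name)
{
        if (!policy_allows(path))
                return -EACCES; /* decision sees dentry AND mount */
        printf("set %s on %s\n", name, path->dentry->name);
        return 0;
}

int main(void)
{
        struct vfsmount_demo mnt = { .readonly = 0 };
        struct dentry_demo d = { .name = "file" };
        struct path_demo p = { &mnt, &d };

        printf("rc=%ld\n", setxattr_demo(&p, "user.test"));
        return 0;
}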
68483diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68484index 61ec015..7c18807 100644
68485--- a/fs/xfs/libxfs/xfs_bmap.c
68486+++ b/fs/xfs/libxfs/xfs_bmap.c
68487@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
68488
68489 #else
68490 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68491-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68492+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68493 #endif /* DEBUG */
68494
68495 /*
68496diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68497index 098cd78..724d3f8 100644
68498--- a/fs/xfs/xfs_dir2_readdir.c
68499+++ b/fs/xfs/xfs_dir2_readdir.c
68500@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
68501 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68502 filetype = dp->d_ops->sf_get_ftype(sfep);
68503 ctx->pos = off & 0x7fffffff;
68504- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68505+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68506+ char name[sfep->namelen];
68507+ memcpy(name, sfep->name, sfep->namelen);
68508+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68509+ return 0;
68510+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68511 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68512 return 0;
68513 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
68514diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68515index ac4feae..386d551 100644
68516--- a/fs/xfs/xfs_ioctl.c
68517+++ b/fs/xfs/xfs_ioctl.c
68518@@ -120,7 +120,7 @@ xfs_find_handle(
68519 }
68520
68521 error = -EFAULT;
68522- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68523+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68524 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68525 goto out_put;
68526
68527diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
68528index c31d2c2..6ec8f62 100644
68529--- a/fs/xfs/xfs_linux.h
68530+++ b/fs/xfs/xfs_linux.h
68531@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
68532 * of the compiler which do not like us using do_div in the middle
68533 * of large functions.
68534 */
68535-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68536+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68537 {
68538 __u32 mod;
68539
68540@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
68541 return 0;
68542 }
68543 #else
68544-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68545+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68546 {
68547 __u32 mod;
68548
68549diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68550new file mode 100644
68551index 0000000..31f8fe4
68552--- /dev/null
68553+++ b/grsecurity/Kconfig
68554@@ -0,0 +1,1182 @@
68555+#
68556+# grsecurity configuration
68557+#
68558+menu "Memory Protections"
68559+depends on GRKERNSEC
68560+
68561+config GRKERNSEC_KMEM
68562+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68563+ default y if GRKERNSEC_CONFIG_AUTO
68564+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68565+ help
68566+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
68567+ written to or read from, preventing modification or leakage of the
68568+ running kernel's contents. /dev/port will also not be allowed to be opened,
68569+ writing to /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68570+ If you have module support disabled, enabling this will close up several
68571+ ways that are currently used to insert malicious code into the running
68572+ kernel.
68573+
68574+ Even with this feature enabled, we still highly recommend that
68575+ you use the RBAC system, as it is still possible for an attacker to
68576+ modify the running kernel through other more obscure methods.
68577+
68578+ It is highly recommended that you say Y here if you meet all the
68579+ conditions above.
68580+
68581+config GRKERNSEC_VM86
68582+ bool "Restrict VM86 mode"
68583+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68584+ depends on X86_32
68585+
68586+ help
68587+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68588+ make use of a special execution mode on 32bit x86 processors called
68589+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68590+ video cards and will still work with this option enabled. The purpose
68591+ of the option is to prevent exploitation of emulation errors in
68592+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68593+ Nearly all users should be able to enable this option.
68594+
68595+config GRKERNSEC_IO
68596+ bool "Disable privileged I/O"
68597+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68598+ depends on X86
68599+ select RTC_CLASS
68600+ select RTC_INTF_DEV
68601+ select RTC_DRV_CMOS
68602+
68603+ help
68604+ If you say Y here, all ioperm and iopl calls will return an error.
68605+ Ioperm and iopl can be used to modify the running kernel.
68606+ Unfortunately, some programs need this access to operate properly,
68607+ the most notable of which are XFree86 and hwclock. hwclock can be
68608+ remedied by having RTC support in the kernel, so real-time
68609+ clock support is enabled if this option is enabled, to ensure
68610+ that hwclock operates correctly. If hwclock still does not work,
68611+ either update udev or symlink /dev/rtc to /dev/rtc0.
68612+
68613+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68614+ you may not be able to boot into a graphical environment with this
68615+ option enabled. In this case, you should use the RBAC system instead.
68616+
68617+config GRKERNSEC_BPF_HARDEN
68618+ bool "Harden BPF interpreter"
68619+ default y if GRKERNSEC_CONFIG_AUTO
68620+ help
68621+ Unlike previous versions of grsecurity that hardened both the BPF
68622+ interpreted code against corruption at rest and the JIT code
68623+ against JIT-spray attacks and attacker-controlled immediate values
68624+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68625+ and will ensure the interpreted code is read-only at rest. This feature
68626+ may be removed at a later time when eBPF stabilizes to entirely revert
68627+ back to the more secure pre-3.16 BPF interpreter/JIT.
68628+
68629+ If you're using KERNEXEC, it's recommended that you enable this option
68630+ to supplement the hardening of the kernel.
68631+
68632+config GRKERNSEC_PERF_HARDEN
68633+ bool "Disable unprivileged PERF_EVENTS usage by default"
68634+ default y if GRKERNSEC_CONFIG_AUTO
68635+ depends on PERF_EVENTS
68636+ help
68637+ If you say Y here, the range of acceptable values for the
68638+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68639+ default to a new value: 3. When the sysctl is set to this value, no
68640+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68641+
68642+ Though PERF_EVENTS can be used legitimately for performance monitoring
68643+ and low-level application profiling, it is forced on regardless of
68644+ configuration, has been at fault for several vulnerabilities, and
68645+ creates new opportunities for side channels and other information leaks.
68646+
68647+ This feature puts PERF_EVENTS into a secure default state and permits
68648+ the administrator to change out of it temporarily if unprivileged
68649+ application profiling is needed.
68650+
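/*
 * [Editor's note — illustrative sketch, not part of the patch.]
 * GRKERNSEC_PERF_HARDEN extends /proc/sys/kernel/perf_event_paranoid
 * with a new maximum value, 3, meaning no unprivileged perf at all.
 * A userspace probe of the current setting:
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (!f) {
                perror("perf_event_paranoid");
                return 1;
        }
        if (fscanf(f, "%d", &level) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("paranoid=%d (%s)\n", level,
               level >= 3 ? "unprivileged perf disabled"
                          : "some unprivileged perf allowed");
        return 0;
}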
68651+config GRKERNSEC_RAND_THREADSTACK
68652+ bool "Insert random gaps between thread stacks"
68653+ default y if GRKERNSEC_CONFIG_AUTO
68654+ depends on PAX_RANDMMAP && !PPC
68655+ help
68656+ If you say Y here, a random-sized gap will be enforced between allocated
68657+ thread stacks. Glibc's NPTL and other threading libraries that
68658+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68659+ The implementation currently provides 8 bits of entropy for the gap.
68660+
68661+ Many distributions do not compile threaded remote services with the
68662+ -fstack-check argument to GCC, causing the variable-sized stack-based
68663+ allocator, alloca(), to not probe the stack on allocation. This
68664+ permits an unbounded alloca() to skip over any guard page and potentially
68665+ modify another thread's stack reliably. An enforced random gap
68666+ reduces the reliability of such an attack and increases the chance
68667+ that such a read/write to another thread's stack instead lands in
68668+ an unmapped area, causing a crash and triggering grsecurity's
68669+ anti-bruteforcing logic.
68670+
68671+config GRKERNSEC_PROC_MEMMAP
68672+ bool "Harden ASLR against information leaks and entropy reduction"
68673+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68674+ depends on PAX_NOEXEC || PAX_ASLR
68675+ help
68676+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68677+ give no information about the addresses of its mappings if
68678+ PaX features that rely on random addresses are enabled on the task.
68679+ In addition to sanitizing this information and disabling other
68680+ dangerous sources of information, this option causes reads of sensitive
68681+ /proc/<pid> entries to be rejected where the file descriptor was opened in
68682+ a different task than the one performing the read. Such attempts are logged.
68683+ This option also limits argv/env strings for suid/sgid binaries
68684+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68685+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68686+ binaries to prevent alternative mmap layouts from being abused.
68687+
68688+ If you use PaX it is essential that you say Y here as it closes up
68689+ several holes that make full ASLR useless locally.
68690+
68691+
68692+config GRKERNSEC_KSTACKOVERFLOW
68693+ bool "Prevent kernel stack overflows"
68694+ default y if GRKERNSEC_CONFIG_AUTO
68695+ depends on !IA64 && 64BIT
68696+ help
68697+ If you say Y here, the kernel's process stacks will be allocated
68698+ with vmalloc instead of the kernel's default allocator. This
68699+ introduces guard pages that in combination with the alloca checking
68700+ of the STACKLEAK feature prevents all forms of kernel process stack
68701+ overflow abuse. Note that this is different from kernel stack
68702+ buffer overflows.
68703+
68704+config GRKERNSEC_BRUTE
68705+ bool "Deter exploit bruteforcing"
68706+ default y if GRKERNSEC_CONFIG_AUTO
68707+ help
68708+ If you say Y here, attempts to bruteforce exploits against forking
68709+ daemons such as apache or sshd, as well as against suid/sgid binaries
68710+ will be deterred. When a child of a forking daemon is killed by PaX
68711+ or crashes due to an illegal instruction or other suspicious signal,
68712+ the parent process will be delayed 30 seconds upon every subsequent
68713+ fork until the administrator is able to assess the situation and
68714+ restart the daemon.
68715+ In the suid/sgid case, the attempt is logged, the user has all their
68716+ existing instances of the suid/sgid binary terminated and will
68717+ be unable to execute any suid/sgid binaries for 15 minutes.
68718+
68719+ It is recommended that you also enable signal logging in the auditing
68720+ section so that logs are generated when a process triggers a suspicious
68721+ signal.
68722+ If the sysctl option is enabled, a sysctl option with name
68723+ "deter_bruteforce" is created.
68724+
68725+config GRKERNSEC_MODHARDEN
68726+ bool "Harden module auto-loading"
68727+ default y if GRKERNSEC_CONFIG_AUTO
68728+ depends on MODULES
68729+ help
68730+ If you say Y here, module auto-loading in response to use of some
68731+ feature implemented by an unloaded module will be restricted to
68732+ root users. Enabling this option helps defend against attacks
68733+ by unprivileged users who abuse the auto-loading behavior to
68734+ cause a vulnerable module to load that is then exploited.
68735+
68736+ If this option prevents a legitimate use of auto-loading for a
68737+ non-root user, the administrator can execute modprobe manually
68738+ with the exact name of the module mentioned in the alert log.
68739+ Alternatively, the administrator can add the module to the list
68740+ of modules loaded at boot by modifying init scripts.
68741+
68742+ Modification of init scripts will most likely be needed on
68743+ Ubuntu servers with encrypted home directory support enabled,
68744+ as the first non-root user logging in will cause the ecb(aes),
68745+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68746+
68747+config GRKERNSEC_HIDESYM
68748+ bool "Hide kernel symbols"
68749+ default y if GRKERNSEC_CONFIG_AUTO
68750+ select PAX_USERCOPY_SLABS
68751+ help
68752+ If you say Y here, getting information on loaded modules and
68753+ displaying all kernel symbols through a syscall will be restricted
68754+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68755+ /proc/kallsyms will be restricted to the root user. The RBAC
68756+ system can hide that entry even from root.
68757+
68758+ This option also prevents leaking of kernel addresses through
68759+ several /proc entries.
68760+
68761+ Note that this option is only effective provided the following
68762+ conditions are met:
68763+ 1) The kernel using grsecurity is not precompiled by some distribution
68764+ 2) You have also enabled GRKERNSEC_DMESG
68765+ 3) You are using the RBAC system and hiding other files such as your
68766+ kernel image and System.map. Alternatively, enabling this option
68767+ causes the permissions on /boot, /lib/modules, and the kernel
68768+ source directory to change at compile time to prevent
68769+ reading by non-root users.
68770+ If the above conditions are met, this option will aid in providing a
68771+ useful protection against local kernel exploitation of overflows
68772+ and arbitrary read/write vulnerabilities.
68773+
68774+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68775+ in addition to this feature.
68776+
68777+config GRKERNSEC_RANDSTRUCT
68778+ bool "Randomize layout of sensitive kernel structures"
68779+ default y if GRKERNSEC_CONFIG_AUTO
68780+ select GRKERNSEC_HIDESYM
68781+ select MODVERSIONS if MODULES
68782+ help
68783+ If you say Y here, the layouts of a number of sensitive kernel
68784+ structures (task, fs, cred, etc) and all structures composed entirely
68785+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68786+ This can introduce the requirement of an additional infoleak
68787+ vulnerability for exploits targeting these structure types.
68788+
68789+ Enabling this feature will introduce some performance impact, slightly
68790+ increase memory usage, and prevent the use of forensic tools like
68791+ Volatility against the system (unless the kernel source tree is left
68792+ uncleaned after kernel installation).
68793+
68794+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68795+ It remains after a make clean to allow for external modules to be compiled
68796+ with the existing seed and will be removed by a make mrproper or
68797+ make distclean.
68798+
68799+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68800+ to install the supporting headers explicitly in addition to the normal
68801+ gcc package.
68802+
68803+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68804+ bool "Use cacheline-aware structure randomization"
68805+ depends on GRKERNSEC_RANDSTRUCT
68806+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68807+ help
68808+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68809+ at restricting randomization to cacheline-sized groups of elements. It
68810+ will further not randomize bitfields in structures. This reduces the
68811+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68812+
68813+config GRKERNSEC_KERN_LOCKOUT
68814+ bool "Active kernel exploit response"
68815+ default y if GRKERNSEC_CONFIG_AUTO
68816+ depends on X86 || ARM || PPC || SPARC
68817+ help
68818+ If you say Y here, when a PaX alert is triggered due to suspicious
68819+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68820+ or an OOPS occurs due to bad memory accesses, instead of just
68821+ terminating the offending process (and potentially allowing
68822+ a subsequent exploit from the same user), we will take one of two
68823+ actions:
68824+ If the user was root, we will panic the system
68825+ If the user was non-root, we will log the attempt, terminate
68826+ all processes owned by the user, then prevent them from creating
68827+ any new processes until the system is restarted
68828+ This deters repeated kernel exploitation/bruteforcing attempts
68829+ and is useful for later forensics.
68830+
68831+config GRKERNSEC_OLD_ARM_USERLAND
68832+ bool "Old ARM userland compatibility"
68833+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68834+ help
68835+ If you say Y here, stubs of executable code to perform such operations
68836+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68837+ table. This is unfortunately needed for old ARM userland meant to run
68838+ across a wide range of processors. Without this option enabled,
68839+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68840+ which is enough for Linaro userlands or other userlands designed for v6
68841+ and newer ARM CPUs. It's recommended that you try without this option enabled
68842+ first, and only enable it if your userland does not boot (it will likely fail
68843+ at init time).
68844+
68845+endmenu
68846+menu "Role Based Access Control Options"
68847+depends on GRKERNSEC
68848+
68849+config GRKERNSEC_RBAC_DEBUG
68850+ bool
68851+
68852+config GRKERNSEC_NO_RBAC
68853+ bool "Disable RBAC system"
68854+ help
68855+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68856+ preventing the RBAC system from being enabled. You should only say Y
68857+ here if you have no intention of using the RBAC system, so as to prevent
68858+ an attacker with root access from misusing the RBAC system to hide files
68859+ and processes when loadable module support and /dev/[k]mem have been
68860+ locked down.
68861+
68862+config GRKERNSEC_ACL_HIDEKERN
68863+ bool "Hide kernel processes"
68864+ help
68865+ If you say Y here, all kernel threads will be hidden from all
68866+ processes but those whose subject has the "view hidden processes"
68867+ flag.
68868+
68869+config GRKERNSEC_ACL_MAXTRIES
68870+ int "Maximum tries before password lockout"
68871+ default 3
68872+ help
68873+ This option enforces the maximum number of times a user can attempt
68874+ to authorize themselves with the grsecurity RBAC system before being
68875+ denied the ability to attempt authorization again for a specified time.
68876+ The lower the number, the harder it will be to brute-force a password.
68877+
68878+config GRKERNSEC_ACL_TIMEOUT
68879+ int "Time to wait after max password tries, in seconds"
68880+ default 30
68881+ help
68882+ This option specifies the time the user must wait after attempting to
68883+ authorize to the RBAC system with the maximum number of invalid
68884+ passwords. The higher the number, the harder it will be to brute-force
68885+ a password.
68886+
68887+endmenu
68888+menu "Filesystem Protections"
68889+depends on GRKERNSEC
68890+
68891+config GRKERNSEC_PROC
68892+ bool "Proc restrictions"
68893+ default y if GRKERNSEC_CONFIG_AUTO
68894+ help
68895+ If you say Y here, the permissions of the /proc filesystem
68896+ will be altered to enhance system security and privacy. You MUST
68897+ choose either a user only restriction or a user and group restriction.
68898+ Depending upon the option you choose, you can either restrict users
68899+ to seeing only the processes they themselves run, or choose a group
68900+ whose members can view all processes and files normally restricted
68901+ to root. NOTE: If you're running identd or
68902+ ntpd as a non-root user, you will have to run it as the group you
68903+ specify here.
68904+
68905+config GRKERNSEC_PROC_USER
68906+ bool "Restrict /proc to user only"
68907+ depends on GRKERNSEC_PROC
68908+ help
68909+ If you say Y here, non-root users will only be able to view their own
68910+ processes, and will be restricted from viewing network-related
68911+ information as well as kernel symbol and module information.
68912+
68913+config GRKERNSEC_PROC_USERGROUP
68914+ bool "Allow special group"
68915+ default y if GRKERNSEC_CONFIG_AUTO
68916+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68917+ help
68918+ If you say Y here, you will be able to select a group that will be
68919+ able to view all processes and network-related information. If you've
68920+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68921+ remain hidden. This option is useful if you want to run identd as
68922+ a non-root user. The group you select may also be chosen at boot time
68923+ via "grsec_proc_gid=" on the kernel commandline.
68924+
68925+config GRKERNSEC_PROC_GID
68926+ int "GID for special group"
68927+ depends on GRKERNSEC_PROC_USERGROUP
68928+ default 1001
68929+
68930+config GRKERNSEC_PROC_ADD
68931+ bool "Additional restrictions"
68932+ default y if GRKERNSEC_CONFIG_AUTO
68933+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68934+ help
68935+ If you say Y here, additional restrictions will be placed on
68936+ /proc that keep normal users from viewing device information and
68937+ slabinfo information that could be useful for exploits.
68938+
68939+config GRKERNSEC_LINK
68940+ bool "Linking restrictions"
68941+ default y if GRKERNSEC_CONFIG_AUTO
68942+ help
68943+ If you say Y here, /tmp race exploits will be prevented, since users
68944+ will no longer be able to follow symlinks owned by other users in
68945+ world-writable +t directories (e.g. /tmp), unless the owner of the
68946+ symlink is the owner of the directory. Users will also not be
68947+ able to hardlink to files they do not own. If the sysctl option is
68948+ enabled, a sysctl option with name "linking_restrictions" is created.
68949+
68950+config GRKERNSEC_SYMLINKOWN
68951+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68952+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68953+ help
68954+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68955+ that prevents it from being used as a security feature. As Apache
68956+ verifies the symlink by performing a stat() against the target of
68957+ the symlink before it is followed, an attacker can set up a symlink
68958+ to point to a same-owned file, then replace the symlink with one
68959+ that targets another user's file just after Apache "validates" the
68960+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68961+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68962+ will be in place for the group you specify. If the sysctl option
68963+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68964+ created.
68965+
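To make the race concrete, here is a minimal user-space sketch (a hypothetical helper, not Apache's actual code) of the stat()-then-open() pattern; the window between the two calls is exactly what the kernel-enforced replacement closes:

/* Hypothetical sketch of a racy SymlinksIfOwnerMatch-style check. */
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int open_if_owner_matches(const char *path, uid_t dir_owner)
{
    struct stat st;

    if (stat(path, &st) < 0)        /* stats the symlink's *target* */
        return -1;
    if (st.st_uid != dir_owner)     /* "validate" the ownership */
        return -1;
    /* RACE WINDOW: the attacker re-points the symlink here */
    return open(path, O_RDONLY);    /* may now open another user's file */
}
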
68966+config GRKERNSEC_SYMLINKOWN_GID
68967+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68968+ depends on GRKERNSEC_SYMLINKOWN
68969+ default 1006
68970+ help
68971+ Setting this GID determines what group kernel-enforced
68972+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68973+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68974+
68975+config GRKERNSEC_FIFO
68976+ bool "FIFO restrictions"
68977+ default y if GRKERNSEC_CONFIG_AUTO
68978+ help
68979+ If you say Y here, users will not be able to write to FIFOs they don't
68980+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68981+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
68982+ option is enabled, a sysctl option with name "fifo_restrictions" is
68983+ created.
68984+
68985+config GRKERNSEC_SYSFS_RESTRICT
68986+ bool "Sysfs/debugfs restriction"
68987+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68988+ depends on SYSFS
68989+ help
68990+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68991+ any filesystem normally mounted under it (e.g. debugfs) will be
68992+ mostly accessible only by root. These filesystems generally provide access
68993+ to hardware and debug information that isn't appropriate for unprivileged
68994+ users of the system. Sysfs and debugfs have also become a large source
68995+ of new vulnerabilities, ranging from infoleaks to local compromise.
68996+ There has been very little security-focused oversight involved
68997+ in adding new exporters of information to these filesystems, so their
68998+ use is discouraged.
68999+ For reasons of compatibility, a few directories have been whitelisted
69000+ for access by non-root users:
69001+ /sys/fs/selinux
69002+ /sys/fs/fuse
69003+ /sys/devices/system/cpu
69004+
69005+config GRKERNSEC_ROFS
69006+ bool "Runtime read-only mount protection"
69007+ depends on SYSCTL
69008+ help
69009+ If you say Y here, a sysctl option with name "romount_protect" will
69010+ be created. By setting this option to 1 at runtime, filesystems
69011+ will be protected in the following ways:
69012+ * No new writable mounts will be allowed
69013+ * Existing read-only mounts cannot be remounted read/write
69014+ * Write operations will be denied on all block devices
69015+ This option acts independently of grsec_lock: once it is set to 1,
69016+ it cannot be turned off. Therefore, please be mindful of the resulting
69017+ behavior if this option is enabled in an init script on a read-only
69018+ filesystem.
69019+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
69020+ and GRKERNSEC_IO should be enabled and module loading disabled via
69021+ config or at runtime.
69022+ This feature is mainly intended for secure embedded systems.
69023+
69024+
69025+config GRKERNSEC_DEVICE_SIDECHANNEL
69026+ bool "Eliminate stat/notify-based device sidechannels"
69027+ default y if GRKERNSEC_CONFIG_AUTO
69028+ help
69029+ If you say Y here, timing analyses on block or character
69030+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
69031+ will be thwarted for unprivileged users. If a process without
69032+ CAP_MKNOD stats such a device, the last access and last modify times
69033+ will match the device's create time. No access or modify events
69034+ will be triggered through inotify/dnotify/fanotify for such devices.
69035+ This feature will prevent attacks that may at a minimum
69036+ allow an attacker to determine the administrator's password length.
69037+
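As a rough sketch of the sidechannel being closed (assuming /dev/ptmx as the probed device and an illustrative sampling interval), an unprivileged process could poll the device's access time to observe keystroke activity; with this option enabled the reported times stay fixed at the create time, so the loop learns nothing:

/* Illustrative-only poller: counts atime updates on a character device. */
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
    struct stat st;
    time_t last = 0;

    while (stat("/dev/ptmx", &st) == 0) {
        if (st.st_atime != last) {          /* atime bumped: activity seen */
            printf("activity at %ld\n", (long)st.st_atime);
            last = st.st_atime;
        }
        usleep(10 * 1000);                  /* 10ms sampling interval */
    }
    return 0;
}
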
69038+config GRKERNSEC_CHROOT
69039+ bool "Chroot jail restrictions"
69040+ default y if GRKERNSEC_CONFIG_AUTO
69041+ help
69042+ If you say Y here, you will be able to choose several options that will
69043+ make breaking out of a chrooted jail much more difficult. If you
69044+ encounter no software incompatibilities with the following options, it
69045+ is recommended that you enable each one.
69046+
69047+ Note that the chroot restrictions are not intended to apply to "chroots"
69048+ to directories that are simple bind mounts of the global root filesystem.
69049+ For several other reasons, a user shouldn't expect any significant
69050+ security by performing such a chroot.
69051+
69052+config GRKERNSEC_CHROOT_MOUNT
69053+ bool "Deny mounts"
69054+ default y if GRKERNSEC_CONFIG_AUTO
69055+ depends on GRKERNSEC_CHROOT
69056+ help
69057+ If you say Y here, processes inside a chroot will not be able to
69058+ mount or remount filesystems. If the sysctl option is enabled, a
69059+ sysctl option with name "chroot_deny_mount" is created.
69060+
69061+config GRKERNSEC_CHROOT_DOUBLE
69062+ bool "Deny double-chroots"
69063+ default y if GRKERNSEC_CONFIG_AUTO
69064+ depends on GRKERNSEC_CHROOT
69065+ help
69066+ If you say Y here, processes inside a chroot will not be able to chroot
69067+ again outside the chroot. This is a widely used method of breaking
69068+ out of a chroot jail and should not be allowed. If the sysctl
69069+ option is enabled, a sysctl option with name
69070+ "chroot_deny_chroot" is created.
69071+
69072+config GRKERNSEC_CHROOT_PIVOT
69073+ bool "Deny pivot_root in chroot"
69074+ default y if GRKERNSEC_CONFIG_AUTO
69075+ depends on GRKERNSEC_CHROOT
69076+ help
69077+ If you say Y here, processes inside a chroot will not be able to use
69078+ a function called pivot_root() that was introduced in Linux 2.3.41. It
69079+ works similar to chroot in that it changes the root filesystem. This
69080+ function could be misused in a chrooted process to attempt to break out
69081+ of the chroot, and therefore should not be allowed. If the sysctl
69082+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
69083+ created.
69084+
69085+config GRKERNSEC_CHROOT_CHDIR
69086+ bool "Enforce chdir(\"/\") on all chroots"
69087+ default y if GRKERNSEC_CONFIG_AUTO
69088+ depends on GRKERNSEC_CHROOT
69089+ help
69090+ If you say Y here, the current working directory of all newly-chrooted
69091+ applications will be set to the root directory of the chroot.
69092+ The man page on chroot(2) states:
69093+ Note that this call does not change the current working
69094+ directory, so that `.' can be outside the tree rooted at
69095+ `/'. In particular, the super-user can escape from a
69096+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
69097+
69098+ It is recommended that you say Y here, since it's not known to break
69099+ any software. If the sysctl option is enabled, a sysctl option with
69100+ name "chroot_enforce_chdir" is created.
69101+
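The escape quoted from the man page looks like the following in C; this is a minimal sketch (run as root, error handling omitted) of the breakout that the forced chdir("/") defeats:

/* Classic breakout: chroot() without chdir() leaves the CWD outside. */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
    mkdir("foo", 0755);
    chroot("foo");                  /* root changes, CWD does not */
    for (int i = 0; i < 64; i++)
        chdir("..");                /* walk up past the jail's root */
    chroot(".");                    /* re-root at the real "/" */
    execl("/bin/sh", "sh", (char *)NULL);
    perror("execl");
    return 1;
}
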
69102+config GRKERNSEC_CHROOT_CHMOD
69103+ bool "Deny (f)chmod +s"
69104+ default y if GRKERNSEC_CONFIG_AUTO
69105+ depends on GRKERNSEC_CHROOT
69106+ help
69107+ If you say Y here, processes inside a chroot will not be able to chmod
69108+ or fchmod files to make them have suid or sgid bits. This protects
69109+ against another published method of breaking a chroot. If the sysctl
69110+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
69111+ created.
69112+
69113+config GRKERNSEC_CHROOT_FCHDIR
69114+ bool "Deny fchdir and fhandle out of chroot"
69115+ default y if GRKERNSEC_CONFIG_AUTO
69116+ depends on GRKERNSEC_CHROOT
69117+ help
69118+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
69119+ to a file descriptor of the chrooting process that points to a directory
69120+ outside the chroot will be stopped. Additionally, this option prevents
69121+ use of the recently-created syscall for opening files by a guessable "file
69122+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
69123+ with name "chroot_deny_fchdir" is created.
69124+
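A minimal sketch (run as root, error handling omitted) of the fchdir()-based breakout this option stops; a directory descriptor opened before chroot() still references the outside tree:

/* Breakout via a directory fd kept open across chroot(). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/", O_RDONLY | O_DIRECTORY); /* handle to the real root */

    mkdir("jail", 0755);
    chroot("jail");
    chdir("/");                  /* properly inside the jail ... */
    fchdir(fd);                  /* ... yet the saved fd jumps back out */
    chroot(".");
    execl("/bin/sh", "sh", (char *)NULL);
    perror("execl");
    return 1;
}
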
69125+config GRKERNSEC_CHROOT_MKNOD
69126+ bool "Deny mknod"
69127+ default y if GRKERNSEC_CONFIG_AUTO
69128+ depends on GRKERNSEC_CHROOT
69129+ help
69130+ If you say Y here, processes inside a chroot will not be allowed to
69131+ mknod. The problem with using mknod inside a chroot is that it
69132+ would allow an attacker to create a device entry that is the same
69133+ as one on the physical root of your system, which could be
69134+ anything from the console device to a device for your hard drive (which
69135+ they could then use to wipe the drive or steal data). It is recommended
69136+ that you say Y here, unless you run into software incompatibilities.
69137+ If the sysctl option is enabled, a sysctl option with name
69138+ "chroot_deny_mknod" is created.
69139+
69140+config GRKERNSEC_CHROOT_SHMAT
69141+ bool "Deny shmat() out of chroot"
69142+ default y if GRKERNSEC_CONFIG_AUTO
69143+ depends on GRKERNSEC_CHROOT
69144+ help
69145+ If you say Y here, processes inside a chroot will not be able to attach
69146+ to shared memory segments that were created outside of the chroot jail.
69147+ It is recommended that you say Y here. If the sysctl option is enabled,
69148+ a sysctl option with name "chroot_deny_shmat" is created.
69149+
69150+config GRKERNSEC_CHROOT_UNIX
69151+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
69152+ default y if GRKERNSEC_CONFIG_AUTO
69153+ depends on GRKERNSEC_CHROOT
69154+ help
69155+ If you say Y here, processes inside a chroot will not be able to
69156+ connect to abstract (meaning not belonging to a filesystem) Unix
69157+ domain sockets that were bound outside of a chroot. It is recommended
69158+ that you say Y here. If the sysctl option is enabled, a sysctl option
69159+ with name "chroot_deny_unix" is created.
69160+
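For reference, an abstract address is one whose sun_path begins with a NUL byte, so the name lives in a kernel namespace rather than on any filesystem; a short sketch of binding one (hypothetical socket name) shows why filesystem confinement alone cannot block these sockets:

/* Binding an abstract AF_UNIX socket: no filesystem entry is created. */
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

int bind_abstract(const char *name)
{
    struct sockaddr_un addr;
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);

    if (fd < 0)
        return -1;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    /* addr.sun_path[0] stays '\0': the abstract-namespace marker */
    strncpy(addr.sun_path + 1, name, sizeof(addr.sun_path) - 2);
    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
        return -1;
    return fd;
}
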
69161+config GRKERNSEC_CHROOT_FINDTASK
69162+ bool "Protect outside processes"
69163+ default y if GRKERNSEC_CONFIG_AUTO
69164+ depends on GRKERNSEC_CHROOT
69165+ help
69166+ If you say Y here, processes inside a chroot will not be able to
69167+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
69168+ getsid, or view any process outside of the chroot. If the sysctl
69169+ option is enabled, a sysctl option with name "chroot_findtask" is
69170+ created.
69171+
69172+config GRKERNSEC_CHROOT_NICE
69173+ bool "Restrict priority changes"
69174+ default y if GRKERNSEC_CONFIG_AUTO
69175+ depends on GRKERNSEC_CHROOT
69176+ help
69177+ If you say Y here, processes inside a chroot will not be able to raise
69178+ the priority of processes in the chroot, or alter the priority of
69179+ processes outside the chroot. This provides more security than simply
69180+ removing CAP_SYS_NICE from the process' capability set. If the
69181+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
69182+ is created.
69183+
69184+config GRKERNSEC_CHROOT_SYSCTL
69185+ bool "Deny sysctl writes"
69186+ default y if GRKERNSEC_CONFIG_AUTO
69187+ depends on GRKERNSEC_CHROOT
69188+ help
69189+ If you say Y here, an attacker in a chroot will not be able to
69190+ write to sysctl entries, either by sysctl(2) or through a /proc
69191+ interface. It is strongly recommended that you say Y here. If the
69192+ sysctl option is enabled, a sysctl option with name
69193+ "chroot_deny_sysctl" is created.
69194+
69195+config GRKERNSEC_CHROOT_RENAME
69196+ bool "Deny bad renames"
69197+ default y if GRKERNSEC_CONFIG_AUTO
69198+ depends on GRKERNSEC_CHROOT
69199+ help
69200+ If you say Y here, an attacker in a chroot will not be able to
69201+ abuse the ability to create double chroots to break out of the
69202+ chroot by exploiting a race condition between a rename of a directory
69203+ within a chroot and an open of a symlink with relative path
69204+ components. This feature will likewise prevent an accomplice outside
69205+ a chroot from enabling a user inside the chroot to break out and make
69206+ use of their credentials on the global filesystem. Enabling this
69207+ feature is essential to prevent root users from breaking out of a
69208+ chroot. If the sysctl option is enabled, a sysctl option with name
69209+ "chroot_deny_bad_rename" is created.
69210+
69211+config GRKERNSEC_CHROOT_CAPS
69212+ bool "Capability restrictions"
69213+ default y if GRKERNSEC_CONFIG_AUTO
69214+ depends on GRKERNSEC_CHROOT
69215+ help
69216+ If you say Y here, the capabilities on all processes within a
69217+ chroot jail will be lowered to stop module insertion, raw i/o,
69218+ system and net admin tasks, rebooting the system, modifying immutable
69219+ files, modifying IPC owned by another, and changing the system time.
69220+ This is left as an option because it can break some apps. Disable this
69221+ if your chrooted apps are having problems performing those kinds of
69222+ tasks. If the sysctl option is enabled, a sysctl option with
69223+ name "chroot_caps" is created.
69224+
69225+config GRKERNSEC_CHROOT_INITRD
69226+ bool "Exempt initrd tasks from restrictions"
69227+ default y if GRKERNSEC_CONFIG_AUTO
69228+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
69229+ help
69230+ If you say Y here, tasks started prior to init will be exempted from
69231+ grsecurity's chroot restrictions. This option is mainly meant to
69232+ handle Plymouth, which performs privileged operations unnecessarily
69233+ in a chroot.
69234+
69235+endmenu
69236+menu "Kernel Auditing"
69237+depends on GRKERNSEC
69238+
69239+config GRKERNSEC_AUDIT_GROUP
69240+ bool "Single group for auditing"
69241+ help
69242+ If you say Y here, the exec and chdir logging features will only operate
69243+ on a group you specify. This option is recommended if you only want to
69244+ watch certain users instead of having a large amount of logs from the
69245+ entire system. If the sysctl option is enabled, a sysctl option with
69246+ name "audit_group" is created.
69247+
69248+config GRKERNSEC_AUDIT_GID
69249+ int "GID for auditing"
69250+ depends on GRKERNSEC_AUDIT_GROUP
69251+ default 1007
69252+
69253+config GRKERNSEC_EXECLOG
69254+ bool "Exec logging"
69255+ help
69256+ If you say Y here, all execve() calls will be logged (since the
69257+ other exec*() calls are frontends to execve(), all execution
69258+ will be logged). Useful for shell-servers that like to keep track
69259+ of their users. If the sysctl option is enabled, a sysctl option with
69260+ name "exec_logging" is created.
69261+ WARNING: This option, when enabled, will produce a LOT of logs, especially
69262+ on an active system.
69263+
69264+config GRKERNSEC_RESLOG
69265+ bool "Resource logging"
69266+ default y if GRKERNSEC_CONFIG_AUTO
69267+ help
69268+ If you say Y here, all attempts to overstep resource limits will
69269+ be logged with the resource name, the requested size, and the current
69270+ limit. It is highly recommended that you say Y here. If the sysctl
69271+ option is enabled, a sysctl option with name "resource_logging" is
69272+ created. If the RBAC system is enabled, the sysctl value is ignored.
69273+
69274+config GRKERNSEC_CHROOT_EXECLOG
69275+ bool "Log execs within chroot"
69276+ help
69277+ If you say Y here, all executions inside a chroot jail will be logged
69278+ to syslog. This can produce a large number of logs if certain
69279+ applications (e.g. djb's daemontools) are installed on the system, and
69280+ is therefore left as an option. If the sysctl option is enabled, a
69281+ sysctl option with name "chroot_execlog" is created.
69282+
69283+config GRKERNSEC_AUDIT_PTRACE
69284+ bool "Ptrace logging"
69285+ help
69286+ If you say Y here, all attempts to attach to a process via ptrace
69287+ will be logged. If the sysctl option is enabled, a sysctl option
69288+ with name "audit_ptrace" is created.
69289+
69290+config GRKERNSEC_AUDIT_CHDIR
69291+ bool "Chdir logging"
69292+ help
69293+ If you say Y here, all chdir() calls will be logged. If the sysctl
69294+ option is enabled, a sysctl option with name "audit_chdir" is created.
69295+
69296+config GRKERNSEC_AUDIT_MOUNT
69297+ bool "(Un)Mount logging"
69298+ help
69299+ If you say Y here, all mounts and unmounts will be logged. If the
69300+ sysctl option is enabled, a sysctl option with name "audit_mount" is
69301+ created.
69302+
69303+config GRKERNSEC_SIGNAL
69304+ bool "Signal logging"
69305+ default y if GRKERNSEC_CONFIG_AUTO
69306+ help
69307+ If you say Y here, certain important signals will be logged, such as
69308+ SIGSEGV, which will inform you when an error occurred in a program,
69309+ which in some cases could indicate a possible exploit attempt.
69310+ If the sysctl option is enabled, a sysctl option with name
69311+ "signal_logging" is created.
69312+
69313+config GRKERNSEC_FORKFAIL
69314+ bool "Fork failure logging"
69315+ help
69316+ If you say Y here, all failed fork() attempts will be logged.
69317+ This could suggest a fork bomb, or someone attempting to overstep
69318+ their process limit. If the sysctl option is enabled, a sysctl option
69319+ with name "forkfail_logging" is created.
69320+
69321+config GRKERNSEC_TIME
69322+ bool "Time change logging"
69323+ default y if GRKERNSEC_CONFIG_AUTO
69324+ help
69325+ If you say Y here, any changes of the system clock will be logged.
69326+ If the sysctl option is enabled, a sysctl option with name
69327+ "timechange_logging" is created.
69328+
69329+config GRKERNSEC_PROC_IPADDR
69330+ bool "/proc/<pid>/ipaddr support"
69331+ default y if GRKERNSEC_CONFIG_AUTO
69332+ help
69333+ If you say Y here, a new entry will be added to each /proc/<pid>
69334+ directory that contains the IP address of the person using the task.
69335+ The IP is carried across local TCP and AF_UNIX stream sockets.
69336+ This information can be useful for IDS/IPSes to perform remote response
69337+ to a local attack. The entry is readable by only the owner of the
69338+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69339+ the RBAC system), and thus does not create privacy concerns.
69340+
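A minimal sketch of consuming the entry, using /proc/self as one instance of the /proc/<pid>/ipaddr path named above:

/* Read the recorded originating IP for the current task. */
#include <stdio.h>

int main(void)
{
    char ip[64];
    FILE *f = fopen("/proc/self/ipaddr", "r");

    if (!f || !fgets(ip, sizeof(ip), f))
        return 1;
    printf("origin: %s", ip);
    fclose(f);
    return 0;
}
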
69341+config GRKERNSEC_RWXMAP_LOG
69342+ bool "Denied RWX mmap/mprotect logging"
69343+ default y if GRKERNSEC_CONFIG_AUTO
69344+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69345+ help
69346+ If you say Y here, calls to mmap() and mprotect() with explicit
69347+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69348+ denied by the PAX_MPROTECT feature. This feature will also
69349+ log other problematic scenarios that can occur when PAX_MPROTECT
69350+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69351+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69352+ is created.
69353+
69354+endmenu
69355+
69356+menu "Executable Protections"
69357+depends on GRKERNSEC
69358+
69359+config GRKERNSEC_DMESG
69360+ bool "Dmesg(8) restriction"
69361+ default y if GRKERNSEC_CONFIG_AUTO
69362+ help
69363+ If you say Y here, non-root users will not be able to use dmesg(8)
69364+ to view the contents of the kernel's circular log buffer.
69365+ The kernel's log buffer often contains kernel addresses and other
69366+ identifying information useful to an attacker in fingerprinting a
69367+ system for a targeted exploit.
69368+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69369+ created.
69370+
69371+config GRKERNSEC_HARDEN_PTRACE
69372+ bool "Deter ptrace-based process snooping"
69373+ default y if GRKERNSEC_CONFIG_AUTO
69374+ help
69375+ If you say Y here, TTY sniffers and other malicious monitoring
69376+ programs implemented through ptrace will be defeated. If you
69377+ have been using the RBAC system, this option has already been
69378+ enabled for several years for all users, with the ability to make
69379+ fine-grained exceptions.
69380+
69381+ This option only affects the ability of non-root users to ptrace
69382+ processes that are not descendants of the ptracing process.
69383+ This means that strace ./binary and gdb ./binary will still work,
69384+ but attaching to arbitrary processes will not. If the sysctl
69385+ option is enabled, a sysctl option with name "harden_ptrace" is
69386+ created.
69387+
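A small sketch of the boundary described above: a PTRACE_ATTACH to an arbitrary pid (target pid taken from argv purely for illustration) is expected to fail with EPERM for non-root users once harden_ptrace is enabled, while tracing one's own children keeps working:

/* Attempt PTRACE_ATTACH to an arbitrary pid. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
    pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 1;

    if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
        perror("PTRACE_ATTACH");    /* expect EPERM under harden_ptrace */
        return 1;
    }
    return 0;
}
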
69388+config GRKERNSEC_PTRACE_READEXEC
69389+ bool "Require read access to ptrace sensitive binaries"
69390+ default y if GRKERNSEC_CONFIG_AUTO
69391+ help
69392+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69393+ binaries. This option is useful in environments that
69394+ remove the read bits (e.g. file mode 4711) from suid binaries to
69395+ prevent infoleaking of their contents. This option adds
69396+ consistency to the use of that file mode, as the binary's contents
69397+ could otherwise be read out by the unprivileged user via ptrace.
69398+
69399+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69400+ is created.
69401+
69402+config GRKERNSEC_SETXID
69403+ bool "Enforce consistent multithreaded privileges"
69404+ default y if GRKERNSEC_CONFIG_AUTO
69405+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69406+ help
69407+ If you say Y here, a change from a root uid to a non-root uid
69408+ in a multithreaded application will cause the resulting uids,
69409+ gids, supplementary groups, and capabilities in that thread
69410+ to be propagated to the other threads of the process. In most
69411+ cases this is unnecessary, as glibc will emulate this behavior
69412+ on behalf of the application. Other libcs do not act in the
69413+ same way, allowing the other threads of the process to continue
69414+ running with root privileges. If the sysctl option is enabled,
69415+ a sysctl option with name "consistent_setxid" is created.
69416+
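A sketch of the inconsistency this option addresses, assuming a libc without glibc's cross-thread setxid emulation (or, as below, a raw syscall that bypasses it); when started as root, the worker thread can keep euid 0 after the main thread drops privileges:

/* Raw setuid only affects the calling thread without libc emulation. */
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void)arg;
    sleep(1);
    printf("worker euid: %d\n", (int)geteuid());  /* may still be 0 */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, worker, NULL);
    /* Raw syscall bypasses glibc's cross-thread setxid machinery. */
    syscall(SYS_setuid, (uid_t)65534);
    printf("main euid:   %d\n", (int)geteuid());
    pthread_join(t, NULL);
    return 0;
}
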
69417+config GRKERNSEC_HARDEN_IPC
69418+ bool "Disallow access to overly-permissive IPC objects"
69419+ default y if GRKERNSEC_CONFIG_AUTO
69420+ depends on SYSVIPC
69421+ help
69422+ If you say Y here, access to overly-permissive IPC objects (shared
69423+ memory, message queues, and semaphores) will be denied for processes
69424+ matching the following criteria, in addition to normal permission checks:
69425+ 1) If the IPC object is world-accessible and the euid doesn't match
69426+ that of the creator or current uid for the IPC object
69427+ 2) If the IPC object is group-accessible and the egid doesn't
69428+ match that of the creator or current gid for the IPC object
69429+ It's a common error to grant too much permission to these objects,
69430+ with impact ranging from denial of service and information leaking to
69431+ privilege escalation. This feature was developed in response to
69432+ research by Tim Brown:
69433+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69434+ who found hundreds of such insecure usages. Processes with
69435+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69436+ If the sysctl option is enabled, a sysctl option with name
69437+ "harden_ipc" is created.
69438+
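A minimal sketch of the overly-permissive pattern described above: a SysV shared memory segment created with 0666 mode bits, which any local user could normally shmat() to, and which this option would deny to unrelated uids/gids:

/* Creating a world-accessible SysV shared memory segment. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
    /* IPC_PRIVATE + 0666: any local user may normally attach */
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);

    if (id < 0) {
        perror("shmget");
        return 1;
    }
    printf("created world-accessible shm id %d\n", id);
    return 0;
}
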
69439+config GRKERNSEC_TPE
69440+ bool "Trusted Path Execution (TPE)"
69441+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69442+ help
69443+ If you say Y here, you will be able to choose a gid to add to the
69444+ supplementary groups of users you want to mark as "untrusted."
69445+ These users will not be able to execute any files that are not in
69446+ root-owned directories writable only by root. If the sysctl option
69447+ is enabled, a sysctl option with name "tpe" is created.
69448+
69449+config GRKERNSEC_TPE_ALL
69450+ bool "Partially restrict all non-root users"
69451+ depends on GRKERNSEC_TPE
69452+ help
69453+ If you say Y here, all non-root users will be covered under
69454+ a weaker TPE restriction. This is separate from, and in addition to,
69455+ the main TPE options that you have selected elsewhere. Thus, if a
69456+ "trusted" GID is chosen, this restriction applies to even that GID.
69457+ Under this restriction, all non-root users will only be allowed to
69458+ execute files in directories they own that are not group or
69459+ world-writable, or in directories owned by root and writable only by
69460+ root. If the sysctl option is enabled, a sysctl option with name
69461+ "tpe_restrict_all" is created.
69462+
69463+config GRKERNSEC_TPE_INVERT
69464+ bool "Invert GID option"
69465+ depends on GRKERNSEC_TPE
69466+ help
69467+ If you say Y here, the group you specify in the TPE configuration will
69468+ decide what group TPE restrictions will be *disabled* for. This
69469+ option is useful if you want TPE restrictions to be applied to most
69470+ users on the system. If the sysctl option is enabled, a sysctl option
69471+ with name "tpe_invert" is created. Unlike other sysctl options, this
69472+ entry will default to on for backward-compatibility.
69473+
69474+config GRKERNSEC_TPE_GID
69475+ int
69476+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69477+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69478+
69479+config GRKERNSEC_TPE_UNTRUSTED_GID
69480+ int "GID for TPE-untrusted users"
69481+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69482+ default 1005
69483+ help
69484+ Setting this GID determines what group TPE restrictions will be
69485+ *enabled* for. If the sysctl option is enabled, a sysctl option
69486+ with name "tpe_gid" is created.
69487+
69488+config GRKERNSEC_TPE_TRUSTED_GID
69489+ int "GID for TPE-trusted users"
69490+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69491+ default 1005
69492+ help
69493+ Setting this GID determines what group TPE restrictions will be
69494+ *disabled* for. If the sysctl option is enabled, a sysctl option
69495+ with name "tpe_gid" is created.
69496+
69497+endmenu
69498+menu "Network Protections"
69499+depends on GRKERNSEC
69500+
69501+config GRKERNSEC_BLACKHOLE
69502+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69503+ default y if GRKERNSEC_CONFIG_AUTO
69504+ depends on NET
69505+ help
69506+ If you say Y here, neither TCP resets nor ICMP
69507+ destination-unreachable packets will be sent in response to packets
69508+ sent to ports for which no associated listening process exists.
69509+ It will also prevent the sending of ICMP protocol unreachable packets
69510+ in response to packets with unknown protocols.
69511+ This feature supports both IPv4 and IPv6 and exempts the
69512+ loopback interface from blackholing. Enabling this feature
69513+ makes a host more resilient to DoS attacks and reduces network
69514+ visibility against scanners.
69515+
69516+ The blackhole feature as-implemented is equivalent to the FreeBSD
69517+ blackhole feature, as it prevents RST responses to all packets, not
69518+ just SYNs. Under most application behavior this causes no
69519+ problems, but applications (like haproxy) may not close certain
69520+ connections in a way that cleanly terminates them on the remote
69521+ end, leaving the remote host in LAST_ACK state. Because of this
69522+ side-effect and to prevent intentional LAST_ACK DoSes, this
69523+ feature also adds automatic mitigation against such attacks.
69524+ The mitigation drastically reduces the amount of time a socket
69525+ can spend in LAST_ACK state. If you're using haproxy and not
69526+ all servers it connects to have this option enabled, consider
69527+ disabling this feature on the haproxy host.
69528+
69529+ If the sysctl option is enabled, two sysctl options with names
69530+ "ip_blackhole" and "lastack_retries" will be created.
69531+ While "ip_blackhole" takes the standard zero/non-zero on/off
69532+ toggle, "lastack_retries" uses the same kinds of values as
69533+ "tcp_retries1" and "tcp_retries2". The default value of 4
69534+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69535+ state.
69536+
69537+config GRKERNSEC_NO_SIMULT_CONNECT
69538+ bool "Disable TCP Simultaneous Connect"
69539+ default y if GRKERNSEC_CONFIG_AUTO
69540+ depends on NET
69541+ help
69542+ If you say Y here, a feature by Willy Tarreau will be enabled that
69543+ removes a weakness in Linux's strict implementation of TCP that
69544+ allows two clients to connect to each other without either entering
69545+ a listening state. The weakness allows an attacker to easily prevent
69546+ a client from connecting to a known server provided the source port
69547+ for the connection is guessed correctly.
69548+
69549+ As the weakness could be used to prevent an antivirus or IPS from
69550+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69551+ it should be eliminated by enabling this option. Though Linux is
69552+ one of the few operating systems supporting simultaneous connect, it
69553+ has no legitimate use in practice and is rarely supported by firewalls.
69554+
69555+config GRKERNSEC_SOCKET
69556+ bool "Socket restrictions"
69557+ depends on NET
69558+ help
69559+ If you say Y here, you will be able to choose from several options.
69560+ If you assign a GID on your system and add it to the supplementary
69561+ groups of users you want to restrict socket access to, this patch
69562+ will perform up to three things, based on the option(s) you choose.
69563+
69564+config GRKERNSEC_SOCKET_ALL
69565+ bool "Deny any sockets to group"
69566+ depends on GRKERNSEC_SOCKET
69567+ help
69568+ If you say Y here, you will be able to choose a GID whose users will
69569+ be unable to connect to other hosts from your machine or run server
69570+ applications from your machine. If the sysctl option is enabled, a
69571+ sysctl option with name "socket_all" is created.
69572+
69573+config GRKERNSEC_SOCKET_ALL_GID
69574+ int "GID to deny all sockets for"
69575+ depends on GRKERNSEC_SOCKET_ALL
69576+ default 1004
69577+ help
69578+ Here you can choose the GID to disable socket access for. Remember to
69579+ add the users you want socket access disabled for to the GID
69580+ specified here. If the sysctl option is enabled, a sysctl option
69581+ with name "socket_all_gid" is created.
69582+
69583+config GRKERNSEC_SOCKET_CLIENT
69584+ bool "Deny client sockets to group"
69585+ depends on GRKERNSEC_SOCKET
69586+ help
69587+ If you say Y here, you will be able to choose a GID whose users will
69588+ be unable to connect to other hosts from your machine, but will be
69589+ able to run servers. If this option is enabled, all users in the group
69590+ you specify will have to use passive mode when initiating ftp transfers
69591+ from the shell on your machine. If the sysctl option is enabled, a
69592+ sysctl option with name "socket_client" is created.
69593+
69594+config GRKERNSEC_SOCKET_CLIENT_GID
69595+ int "GID to deny client sockets for"
69596+ depends on GRKERNSEC_SOCKET_CLIENT
69597+ default 1003
69598+ help
69599+ Here you can choose the GID to disable client socket access for.
69600+ Remember to add the users you want client socket access disabled for to
69601+ the GID specified here. If the sysctl option is enabled, a sysctl
69602+ option with name "socket_client_gid" is created.
69603+
69604+config GRKERNSEC_SOCKET_SERVER
69605+ bool "Deny server sockets to group"
69606+ depends on GRKERNSEC_SOCKET
69607+ help
69608+ If you say Y here, you will be able to choose a GID whose users will
69609+ be unable to run server applications from your machine. If the sysctl
69610+ option is enabled, a sysctl option with name "socket_server" is created.
69611+
69612+config GRKERNSEC_SOCKET_SERVER_GID
69613+ int "GID to deny server sockets for"
69614+ depends on GRKERNSEC_SOCKET_SERVER
69615+ default 1002
69616+ help
69617+ Here you can choose the GID to disable server socket access for.
69618+ Remember to add the users you want server socket access disabled for to
69619+ the GID specified here. If the sysctl option is enabled, a sysctl
69620+ option with name "socket_server_gid" is created.
69621+
69622+endmenu
69623+
69624+menu "Physical Protections"
69625+depends on GRKERNSEC
69626+
69627+config GRKERNSEC_DENYUSB
69628+ bool "Deny new USB connections after toggle"
69629+ default y if GRKERNSEC_CONFIG_AUTO
69630+ depends on SYSCTL && USB_SUPPORT
69631+ help
69632+ If you say Y here, a new sysctl option with name "deny_new_usb"
69633+ will be created. Setting its value to 1 will prevent any new
69634+ USB devices from being recognized by the OS. Any attempted USB
69635+ device insertion will be logged. This option is intended to be
69636+ used against custom USB devices designed to exploit vulnerabilities
69637+ in various USB device drivers.
69638+
69639+ For greatest effectiveness, this sysctl should be set after any
69640+ relevant init scripts. This option is safe to enable in distros
69641+ as each user can choose whether or not to toggle the sysctl.
69642+
69643+config GRKERNSEC_DENYUSB_FORCE
69644+ bool "Reject all USB devices not connected at boot"
69645+ select USB
69646+ depends on GRKERNSEC_DENYUSB
69647+ help
69648+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69649+ that doesn't involve a sysctl entry. This option should only be
69650+ enabled if you're sure you want to deny all new USB connections
69651+ at runtime and don't want to modify init scripts. This should not
69652+ be enabled by distros. It forces the core USB code to be built
69653+ into the kernel image so that all devices connected at boot time
69654+ can be recognized and new USB device connections can be prevented
69655+ prior to init running.
69656+
69657+endmenu
69658+
69659+menu "Sysctl Support"
69660+depends on GRKERNSEC && SYSCTL
69661+
69662+config GRKERNSEC_SYSCTL
69663+ bool "Sysctl support"
69664+ default y if GRKERNSEC_CONFIG_AUTO
69665+ help
69666+ If you say Y here, you will be able to change the options that
69667+ grsecurity runs with at bootup, without having to recompile your
69668+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69669+ to enable (1) or disable (0) various features. All the sysctl entries
69670+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69671+ All features enabled in the kernel configuration are disabled at boot
69672+ if you do not say Y to the "Turn on features by default" option.
69673+ All options should be set at startup, and the grsec_lock entry should
69674+ be set to a non-zero value after all the options are set.
69675+ *THIS IS EXTREMELY IMPORTANT*
69676+
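A minimal sketch of the recommended startup sequence, using entry names mentioned in this document (deter_bruteforce, grsec_lock) and the /proc/sys/kernel/grsecurity path from the help text; init scripts typically do the same with echo:

/* Set grsecurity sysctl toggles, then lock them in place. */
#include <stdio.h>

static int grsec_set(const char *name, const char *val)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fputs(val, f);
    return fclose(f);
}

int main(void)
{
    grsec_set("deter_bruteforce", "1");   /* enable individual features ... */
    grsec_set("grsec_lock", "1");         /* ... then make them immutable */
    return 0;
}
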
69677+config GRKERNSEC_SYSCTL_DISTRO
69678+ bool "Extra sysctl support for distro makers (READ HELP)"
69679+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69680+ help
69681+ If you say Y here, additional sysctl options will be created
69682+ for features that affect processes running as root. Therefore,
69683+ it is critical when using this option that the grsec_lock entry be
69684+ enabled after boot. Only distros that ship prebuilt kernel packages
69685+ with this option enabled and that can ensure grsec_lock is set
69686+ after boot should use this option.
69687+ *Failure to set grsec_lock after boot makes all grsec features
69688+ this option covers useless*
69689+
69690+ Currently this option creates the following sysctl entries:
69691+ "Disable Privileged I/O": "disable_priv_io"
69692+
69693+config GRKERNSEC_SYSCTL_ON
69694+ bool "Turn on features by default"
69695+ default y if GRKERNSEC_CONFIG_AUTO
69696+ depends on GRKERNSEC_SYSCTL
69697+ help
69698+ If you say Y here, instead of having all features enabled in the
69699+ kernel configuration disabled at boot time, the features will be
69700+ enabled at boot time. It is recommended you say Y here unless
69701+ there is some reason you would want all sysctl-tunable features to
69702+ be disabled by default. As mentioned elsewhere, it is important
69703+ to enable the grsec_lock entry once you have finished modifying
69704+ the sysctl entries.
69705+
69706+endmenu
69707+menu "Logging Options"
69708+depends on GRKERNSEC
69709+
69710+config GRKERNSEC_FLOODTIME
69711+ int "Seconds in between log messages (minimum)"
69712+ default 10
69713+ help
69714+ This option allows you to enforce the minimum number of seconds between
69715+ grsecurity log messages. The default should be suitable for most
69716+ people, however, if you choose to change it, choose a value small enough
69717+ to allow informative logs to be produced, but large enough to
69718+ prevent flooding.
69719+
69720+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69721+ any rate limiting on grsecurity log messages.
69722+
69723+config GRKERNSEC_FLOODBURST
69724+ int "Number of messages in a burst (maximum)"
69725+ default 6
69726+ help
69727+ This option allows you to choose the maximum number of messages allowed
69728+ within the flood time interval you chose in a separate option. The
69729+ default should be suitable for most people, however if you find that
69730+ many of your logs are being interpreted as flooding, you may want to
69731+ raise this value.
69732+
69733+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69734+ any rate limiting on grsecurity log messages.
69735+
69736+endmenu
69737diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69738new file mode 100644
69739index 0000000..30ababb
69740--- /dev/null
69741+++ b/grsecurity/Makefile
69742@@ -0,0 +1,54 @@
69743+# grsecurity - access control and security hardening for Linux
69744+# All code in this directory and various hooks located throughout the Linux kernel are
69745+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69746+# http://www.grsecurity.net spender@grsecurity.net
69747+#
69748+# This program is free software; you can redistribute it and/or
69749+# modify it under the terms of the GNU General Public License version 2
69750+# as published by the Free Software Foundation.
69751+#
69752+# This program is distributed in the hope that it will be useful,
69753+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69754+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69755+# GNU General Public License for more details.
69756+#
69757+# You should have received a copy of the GNU General Public License
69758+# along with this program; if not, write to the Free Software
69759+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69760+
69761+KBUILD_CFLAGS += -Werror
69762+
69763+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69764+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69765+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69766+ grsec_usb.o grsec_ipc.o grsec_proc.o
69767+
69768+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69769+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69770+ gracl_learn.o grsec_log.o gracl_policy.o
69771+ifdef CONFIG_COMPAT
69772+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69773+endif
69774+
69775+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69776+
69777+ifdef CONFIG_NET
69778+obj-y += grsec_sock.o
69779+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69780+endif
69781+
69782+ifndef CONFIG_GRKERNSEC
69783+obj-y += grsec_disabled.o
69784+endif
69785+
69786+ifdef CONFIG_GRKERNSEC_HIDESYM
69787+extra-y := grsec_hidesym.o
69788+$(obj)/grsec_hidesym.o:
69789+ @-chmod -f 500 /boot
69790+ @-chmod -f 500 /lib/modules
69791+ @-chmod -f 500 /lib64/modules
69792+ @-chmod -f 500 /lib32/modules
69793+ @-chmod -f 700 .
69794+ @-chmod -f 700 $(objtree)
69795+ @echo ' grsec: protected kernel image paths'
69796+endif
69797diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69798new file mode 100644
69799index 0000000..6c1e154
69800--- /dev/null
69801+++ b/grsecurity/gracl.c
69802@@ -0,0 +1,2749 @@
69803+#include <linux/kernel.h>
69804+#include <linux/module.h>
69805+#include <linux/sched.h>
69806+#include <linux/mm.h>
69807+#include <linux/file.h>
69808+#include <linux/fs.h>
69809+#include <linux/namei.h>
69810+#include <linux/mount.h>
69811+#include <linux/tty.h>
69812+#include <linux/proc_fs.h>
69813+#include <linux/lglock.h>
69814+#include <linux/slab.h>
69815+#include <linux/vmalloc.h>
69816+#include <linux/types.h>
69817+#include <linux/sysctl.h>
69818+#include <linux/netdevice.h>
69819+#include <linux/ptrace.h>
69820+#include <linux/gracl.h>
69821+#include <linux/gralloc.h>
69822+#include <linux/security.h>
69823+#include <linux/grinternal.h>
69824+#include <linux/pid_namespace.h>
69825+#include <linux/stop_machine.h>
69826+#include <linux/fdtable.h>
69827+#include <linux/percpu.h>
69828+#include <linux/lglock.h>
69829+#include <linux/hugetlb.h>
69830+#include <linux/posix-timers.h>
69831+#include <linux/prefetch.h>
69832+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69833+#include <linux/magic.h>
69834+#include <linux/pagemap.h>
69835+#include "../fs/btrfs/async-thread.h"
69836+#include "../fs/btrfs/ctree.h"
69837+#include "../fs/btrfs/btrfs_inode.h"
69838+#endif
69839+#include "../fs/mount.h"
69840+
69841+#include <asm/uaccess.h>
69842+#include <asm/errno.h>
69843+#include <asm/mman.h>
69844+
69845+#define FOR_EACH_ROLE_START(role) \
69846+ role = running_polstate.role_list; \
69847+ while (role) {
69848+
69849+#define FOR_EACH_ROLE_END(role) \
69850+ role = role->prev; \
69851+ }
69852+
69853+extern struct path gr_real_root;
69854+
69855+static struct gr_policy_state running_polstate;
69856+struct gr_policy_state *polstate = &running_polstate;
69857+extern struct gr_alloc_state *current_alloc_state;
69858+
69859+extern char *gr_shared_page[4];
69860+DEFINE_RWLOCK(gr_inode_lock);
69861+
69862+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69863+
69864+#ifdef CONFIG_NET
69865+extern struct vfsmount *sock_mnt;
69866+#endif
69867+
69868+extern struct vfsmount *pipe_mnt;
69869+extern struct vfsmount *shm_mnt;
69870+
69871+#ifdef CONFIG_HUGETLBFS
69872+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69873+#endif
69874+
69875+extern u16 acl_sp_role_value;
69876+extern struct acl_object_label *fakefs_obj_rw;
69877+extern struct acl_object_label *fakefs_obj_rwx;
69878+
69879+int gr_acl_is_enabled(void)
69880+{
69881+ return (gr_status & GR_READY);
69882+}
69883+
69884+void gr_enable_rbac_system(void)
69885+{
69886+ pax_open_kernel();
69887+ gr_status |= GR_READY;
69888+ pax_close_kernel();
69889+}
69890+
69891+int gr_rbac_disable(void *unused)
69892+{
69893+ pax_open_kernel();
69894+ gr_status &= ~GR_READY;
69895+ pax_close_kernel();
69896+
69897+ return 0;
69898+}
69899+
69900+static inline dev_t __get_dev(const struct dentry *dentry)
69901+{
69902+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69903+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69904+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69905+ else
69906+#endif
69907+ return dentry->d_sb->s_dev;
69908+}
69909+
69910+static inline u64 __get_ino(const struct dentry *dentry)
69911+{
69912+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69913+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69914+ return btrfs_ino(dentry->d_inode);
69915+ else
69916+#endif
69917+ return dentry->d_inode->i_ino;
69918+}
69919+
69920+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69921+{
69922+ return __get_dev(dentry);
69923+}
69924+
69925+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69926+{
69927+ return __get_ino(dentry);
69928+}
69929+
69930+static char gr_task_roletype_to_char(struct task_struct *task)
69931+{
69932+ switch (task->role->roletype &
69933+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69934+ GR_ROLE_SPECIAL)) {
69935+ case GR_ROLE_DEFAULT:
69936+ return 'D';
69937+ case GR_ROLE_USER:
69938+ return 'U';
69939+ case GR_ROLE_GROUP:
69940+ return 'G';
69941+ case GR_ROLE_SPECIAL:
69942+ return 'S';
69943+ }
69944+
69945+ return 'X';
69946+}
69947+
69948+char gr_roletype_to_char(void)
69949+{
69950+ return gr_task_roletype_to_char(current);
69951+}
69952+
69953+__inline__ int
69954+gr_acl_tpe_check(void)
69955+{
69956+ if (unlikely(!(gr_status & GR_READY)))
69957+ return 0;
69958+ if (current->role->roletype & GR_ROLE_TPE)
69959+ return 1;
69960+ else
69961+ return 0;
69962+}
69963+
69964+int
69965+gr_handle_rawio(const struct inode *inode)
69966+{
69967+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69968+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69969+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69970+ !capable(CAP_SYS_RAWIO))
69971+ return 1;
69972+#endif
69973+ return 0;
69974+}
69975+
69976+int
69977+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69978+{
69979+ if (likely(lena != lenb))
69980+ return 0;
69981+
69982+ return !memcmp(a, b, lena);
69983+}
69984+
69985+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69986+{
69987+ *buflen -= namelen;
69988+ if (*buflen < 0)
69989+ return -ENAMETOOLONG;
69990+ *buffer -= namelen;
69991+ memcpy(*buffer, str, namelen);
69992+ return 0;
69993+}
69994+
69995+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69996+{
69997+ return prepend(buffer, buflen, name->name, name->len);
69998+}
69999+
70000+static int prepend_path(const struct path *path, struct path *root,
70001+ char **buffer, int *buflen)
70002+{
70003+ struct dentry *dentry = path->dentry;
70004+ struct vfsmount *vfsmnt = path->mnt;
70005+ struct mount *mnt = real_mount(vfsmnt);
70006+ bool slash = false;
70007+ int error = 0;
70008+
70009+ while (dentry != root->dentry || vfsmnt != root->mnt) {
70010+ struct dentry * parent;
70011+
70012+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
70013+ /* Global root? */
70014+ if (!mnt_has_parent(mnt)) {
70015+ goto out;
70016+ }
70017+ dentry = mnt->mnt_mountpoint;
70018+ mnt = mnt->mnt_parent;
70019+ vfsmnt = &mnt->mnt;
70020+ continue;
70021+ }
70022+ parent = dentry->d_parent;
70023+ prefetch(parent);
70024+ spin_lock(&dentry->d_lock);
70025+ error = prepend_name(buffer, buflen, &dentry->d_name);
70026+ spin_unlock(&dentry->d_lock);
70027+ if (!error)
70028+ error = prepend(buffer, buflen, "/", 1);
70029+ if (error)
70030+ break;
70031+
70032+ slash = true;
70033+ dentry = parent;
70034+ }
70035+
70036+out:
70037+ if (!error && !slash)
70038+ error = prepend(buffer, buflen, "/", 1);
70039+
70040+ return error;
70041+}
70042+
70043+/* this must be called with mount_lock and rename_lock held */
70044+
70045+static char *__our_d_path(const struct path *path, struct path *root,
70046+ char *buf, int buflen)
70047+{
70048+ char *res = buf + buflen;
70049+ int error;
70050+
70051+ prepend(&res, &buflen, "\0", 1);
70052+ error = prepend_path(path, root, &res, &buflen);
70053+ if (error)
70054+ return ERR_PTR(error);
70055+
70056+ return res;
70057+}
70058+
70059+static char *
70060+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
70061+{
70062+ char *retval;
70063+
70064+ retval = __our_d_path(path, root, buf, buflen);
70065+ if (unlikely(IS_ERR(retval)))
70066+ retval = strcpy(buf, "<path too long>");
70067+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
70068+ retval[1] = '\0';
70069+
70070+ return retval;
70071+}
70072+
70073+static char *
70074+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70075+ char *buf, int buflen)
70076+{
70077+ struct path path;
70078+ char *res;
70079+
70080+ path.dentry = (struct dentry *)dentry;
70081+ path.mnt = (struct vfsmount *)vfsmnt;
70082+
70083+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
70084+ by the RBAC system */
70085+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
70086+
70087+ return res;
70088+}
70089+
70090+static char *
70091+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70092+ char *buf, int buflen)
70093+{
70094+ char *res;
70095+ struct path path;
70096+ struct path root;
70097+ struct task_struct *reaper = init_pid_ns.child_reaper;
70098+
70099+ path.dentry = (struct dentry *)dentry;
70100+ path.mnt = (struct vfsmount *)vfsmnt;
70101+
70102+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
70103+ get_fs_root(reaper->fs, &root);
70104+
70105+ read_seqlock_excl(&mount_lock);
70106+ write_seqlock(&rename_lock);
70107+ res = gen_full_path(&path, &root, buf, buflen);
70108+ write_sequnlock(&rename_lock);
70109+ read_sequnlock_excl(&mount_lock);
70110+
70111+ path_put(&root);
70112+ return res;
70113+}
70114+
70115+char *
70116+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70117+{
70118+ char *ret;
70119+ read_seqlock_excl(&mount_lock);
70120+ write_seqlock(&rename_lock);
70121+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70122+ PAGE_SIZE);
70123+ write_sequnlock(&rename_lock);
70124+ read_sequnlock_excl(&mount_lock);
70125+ return ret;
70126+}
70127+
70128+static char *
70129+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70130+{
70131+ char *ret;
70132+ char *buf;
70133+ int buflen;
70134+
70135+ read_seqlock_excl(&mount_lock);
70136+ write_seqlock(&rename_lock);
70137+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
70138+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
70139+ buflen = (int)(ret - buf);
70140+ if (buflen >= 5)
70141+ prepend(&ret, &buflen, "/proc", 5);
70142+ else
70143+ ret = strcpy(buf, "<path too long>");
70144+ write_sequnlock(&rename_lock);
70145+ read_sequnlock_excl(&mount_lock);
70146+ return ret;
70147+}
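/*
 * Example: a proc dentry resolving to "/42/status" relative to the proc
 * mount comes back as "/proc/42/status" after the prepend above; the
 * explicit buflen >= 5 check (not the 6 bytes of slack) is what
 * guarantees the 5-byte "/proc" prefix fits, with "<path too long>"
 * returned otherwise.
 */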
70148+
70149+char *
70150+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
70151+{
70152+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70153+ PAGE_SIZE);
70154+}
70155+
70156+char *
70157+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
70158+{
70159+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
70160+ PAGE_SIZE);
70161+}
70162+
70163+char *
70164+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
70165+{
70166+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
70167+ PAGE_SIZE);
70168+}
70169+
70170+char *
70171+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
70172+{
70173+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
70174+ PAGE_SIZE);
70175+}
70176+
70177+char *
70178+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
70179+{
70180+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
70181+ PAGE_SIZE);
70182+}
70183+
70184+__inline__ __u32
70185+to_gr_audit(const __u32 reqmode)
70186+{
70187+	/* strips any audit flags from the request, shifts the remaining
70188+	   permission flags left into their audit-flag positions, and adds
70189+	   append auditing as a special case if write is being requested */
70190+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
70191+}
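/*
 * Worked example, assuming the GR_AUDIT_* bits are the permission bits
 * shifted left by 10 (which is what the shift above relies on):
 * to_gr_audit(GR_READ | GR_WRITE) yields
 * GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND -- append auditing
 * rides along because a granted write implies the ability to append.
 */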
70192+
70193+struct acl_role_label *
70194+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
70195+ const gid_t gid)
70196+{
70197+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
70198+ struct acl_role_label *match;
70199+ struct role_allowed_ip *ipp;
70200+ unsigned int x;
70201+ u32 curr_ip = task->signal->saved_ip;
70202+
70203+ match = state->acl_role_set.r_hash[index];
70204+
70205+ while (match) {
70206+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
70207+ for (x = 0; x < match->domain_child_num; x++) {
70208+ if (match->domain_children[x] == uid)
70209+ goto found;
70210+ }
70211+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
70212+ break;
70213+ match = match->next;
70214+ }
70215+found:
70216+ if (match == NULL) {
70217+ try_group:
70218+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
70219+ match = state->acl_role_set.r_hash[index];
70220+
70221+ while (match) {
70222+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
70223+ for (x = 0; x < match->domain_child_num; x++) {
70224+ if (match->domain_children[x] == gid)
70225+ goto found2;
70226+ }
70227+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
70228+ break;
70229+ match = match->next;
70230+ }
70231+found2:
70232+ if (match == NULL)
70233+ match = state->default_role;
70234+ if (match->allowed_ips == NULL)
70235+ return match;
70236+ else {
70237+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70238+ if (likely
70239+ ((ntohl(curr_ip) & ipp->netmask) ==
70240+ (ntohl(ipp->addr) & ipp->netmask)))
70241+ return match;
70242+ }
70243+ match = state->default_role;
70244+ }
70245+ } else if (match->allowed_ips == NULL) {
70246+ return match;
70247+ } else {
70248+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70249+ if (likely
70250+ ((ntohl(curr_ip) & ipp->netmask) ==
70251+ (ntohl(ipp->addr) & ipp->netmask)))
70252+ return match;
70253+ }
70254+ goto try_group;
70255+ }
70256+
70257+ return match;
70258+}
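/*
 * Role resolution order implemented above:
 *   1. user role or user domain matching the task's uid
 *   2. group role or group domain matching the task's gid
 *   3. the policy's default role
 * A role with an allowed_ips list is only returned when the task's saved
 * source IP falls within one of the listed subnets -- e.g. with addr
 * 10.0.0.0 and netmask 0xffffff00, a saved_ip of 10.0.0.42 passes while
 * 10.0.1.42 does not.  A user role failing its IP check falls through to
 * the group lookup (try_group), and a failing group role falls back to
 * the default role.
 */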
70259+
70260+static struct acl_role_label *
70261+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
70262+ const gid_t gid)
70263+{
70264+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
70265+}
70266+
70267+struct acl_subject_label *
70268+lookup_acl_subj_label(const u64 ino, const dev_t dev,
70269+ const struct acl_role_label *role)
70270+{
70271+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70272+ struct acl_subject_label *match;
70273+
70274+ match = role->subj_hash[index];
70275+
70276+ while (match && (match->inode != ino || match->device != dev ||
70277+ (match->mode & GR_DELETED))) {
70278+ match = match->next;
70279+ }
70280+
70281+ if (match && !(match->mode & GR_DELETED))
70282+ return match;
70283+ else
70284+ return NULL;
70285+}
70286+
70287+struct acl_subject_label *
70288+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
70289+ const struct acl_role_label *role)
70290+{
70291+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70292+ struct acl_subject_label *match;
70293+
70294+ match = role->subj_hash[index];
70295+
70296+ while (match && (match->inode != ino || match->device != dev ||
70297+ !(match->mode & GR_DELETED))) {
70298+ match = match->next;
70299+ }
70300+
70301+ if (match && (match->mode & GR_DELETED))
70302+ return match;
70303+ else
70304+ return NULL;
70305+}
70306+
70307+static struct acl_object_label *
70308+lookup_acl_obj_label(const u64 ino, const dev_t dev,
70309+ const struct acl_subject_label *subj)
70310+{
70311+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70312+ struct acl_object_label *match;
70313+
70314+ match = subj->obj_hash[index];
70315+
70316+ while (match && (match->inode != ino || match->device != dev ||
70317+ (match->mode & GR_DELETED))) {
70318+ match = match->next;
70319+ }
70320+
70321+ if (match && !(match->mode & GR_DELETED))
70322+ return match;
70323+ else
70324+ return NULL;
70325+}
70326+
70327+static struct acl_object_label *
70328+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
70329+ const struct acl_subject_label *subj)
70330+{
70331+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70332+ struct acl_object_label *match;
70333+
70334+ match = subj->obj_hash[index];
70335+
70336+ while (match && (match->inode != ino || match->device != dev ||
70337+ !(match->mode & GR_DELETED))) {
70338+ match = match->next;
70339+ }
70340+
70341+ if (match && (match->mode & GR_DELETED))
70342+ return match;
70343+
70344+ match = subj->obj_hash[index];
70345+
70346+ while (match && (match->inode != ino || match->device != dev ||
70347+ (match->mode & GR_DELETED))) {
70348+ match = match->next;
70349+ }
70350+
70351+ if (match && !(match->mode & GR_DELETED))
70352+ return match;
70353+ else
70354+ return NULL;
70355+}
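/*
 * The create-time lookup above runs two passes on purpose: a pathname
 * being (re)created may correspond to a policy object whose old
 * inode/dev pair was unlinked earlier and flagged GR_DELETED, and that
 * entry -- later re-pointed at the new inode by update_acl_obj_label()
 * -- takes priority over any live entry for the same inode/dev.
 * lookup_name_entry_create() below applies the same deleted-first rule
 * to the name hash.
 */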
70356+
70357+struct name_entry *
70358+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70359+{
70360+ unsigned int len = strlen(name);
70361+ unsigned int key = full_name_hash(name, len);
70362+ unsigned int index = key % state->name_set.n_size;
70363+ struct name_entry *match;
70364+
70365+ match = state->name_set.n_hash[index];
70366+
70367+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70368+ match = match->next;
70369+
70370+ return match;
70371+}
70372+
70373+static struct name_entry *
70374+lookup_name_entry(const char *name)
70375+{
70376+ return __lookup_name_entry(&running_polstate, name);
70377+}
70378+
70379+static struct name_entry *
70380+lookup_name_entry_create(const char *name)
70381+{
70382+ unsigned int len = strlen(name);
70383+ unsigned int key = full_name_hash(name, len);
70384+ unsigned int index = key % running_polstate.name_set.n_size;
70385+ struct name_entry *match;
70386+
70387+ match = running_polstate.name_set.n_hash[index];
70388+
70389+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70390+ !match->deleted))
70391+ match = match->next;
70392+
70393+ if (match && match->deleted)
70394+ return match;
70395+
70396+ match = running_polstate.name_set.n_hash[index];
70397+
70398+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70399+ match->deleted))
70400+ match = match->next;
70401+
70402+ if (match && !match->deleted)
70403+ return match;
70404+ else
70405+ return NULL;
70406+}
70407+
70408+static struct inodev_entry *
70409+lookup_inodev_entry(const u64 ino, const dev_t dev)
70410+{
70411+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70412+ struct inodev_entry *match;
70413+
70414+ match = running_polstate.inodev_set.i_hash[index];
70415+
70416+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70417+ match = match->next;
70418+
70419+ return match;
70420+}
70421+
70422+void
70423+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70424+{
70425+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70426+ state->inodev_set.i_size);
70427+ struct inodev_entry **curr;
70428+
70429+ entry->prev = NULL;
70430+
70431+ curr = &state->inodev_set.i_hash[index];
70432+ if (*curr != NULL)
70433+ (*curr)->prev = entry;
70434+
70435+ entry->next = *curr;
70436+ *curr = entry;
70437+
70438+ return;
70439+}
70440+
70441+static void
70442+insert_inodev_entry(struct inodev_entry *entry)
70443+{
70444+ __insert_inodev_entry(&running_polstate, entry);
70445+}
70446+
70447+void
70448+insert_acl_obj_label(struct acl_object_label *obj,
70449+ struct acl_subject_label *subj)
70450+{
70451+ unsigned int index =
70452+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70453+ struct acl_object_label **curr;
70454+
70455+ obj->prev = NULL;
70456+
70457+ curr = &subj->obj_hash[index];
70458+ if (*curr != NULL)
70459+ (*curr)->prev = obj;
70460+
70461+ obj->next = *curr;
70462+ *curr = obj;
70463+
70464+ return;
70465+}
70466+
70467+void
70468+insert_acl_subj_label(struct acl_subject_label *obj,
70469+ struct acl_role_label *role)
70470+{
70471+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70472+ struct acl_subject_label **curr;
70473+
70474+ obj->prev = NULL;
70475+
70476+ curr = &role->subj_hash[index];
70477+ if (*curr != NULL)
70478+ (*curr)->prev = obj;
70479+
70480+ obj->next = *curr;
70481+ *curr = obj;
70482+
70483+ return;
70484+}
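/*
 * Minimal sketch (generic, illustrative types) of the head-insertion
 * pattern shared by the three insert_* helpers above:
 *
 *	struct node { struct node *prev, *next; };
 *
 *	static void head_insert(struct node **head, struct node *n)
 *	{
 *		n->prev = NULL;
 *		if (*head)
 *			(*head)->prev = n;
 *		n->next = *head;
 *		*head = n;
 *	}
 *
 * Entries always land at the front of their hash chain, so insertion is
 * O(1), and the prev links let the update_* helpers further down unlink
 * an entry in O(1) as well.
 */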
70485+
70486+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
70487+
70488+static int
70489+glob_match(const char *p, const char *n)
70490+{
70491+ char c;
70492+
70493+ while ((c = *p++) != '\0') {
70494+ switch (c) {
70495+ case '?':
70496+ if (*n == '\0')
70497+ return 1;
70498+ else if (*n == '/')
70499+ return 1;
70500+ break;
70501+ case '\\':
70502+ if (*n != c)
70503+ return 1;
70504+ break;
70505+ case '*':
70506+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70507+ if (*n == '/')
70508+ return 1;
70509+ else if (c == '?') {
70510+ if (*n == '\0')
70511+ return 1;
70512+ else
70513+ ++n;
70514+ }
70515+ }
70516+ if (c == '\0') {
70517+ return 0;
70518+ } else {
70519+ const char *endp;
70520+
70521+ if ((endp = strchr(n, '/')) == NULL)
70522+ endp = n + strlen(n);
70523+
70524+ if (c == '[') {
70525+ for (--p; n < endp; ++n)
70526+ if (!glob_match(p, n))
70527+ return 0;
70528+ } else if (c == '/') {
70529+ while (*n != '\0' && *n != '/')
70530+ ++n;
70531+ if (*n == '/' && !glob_match(p, n + 1))
70532+ return 0;
70533+ } else {
70534+ for (--p; n < endp; ++n)
70535+ if (*n == c && !glob_match(p, n))
70536+ return 0;
70537+ }
70538+
70539+ return 1;
70540+ }
70541+ case '[':
70542+ {
70543+ int not;
70544+ char cold;
70545+
70546+ if (*n == '\0' || *n == '/')
70547+ return 1;
70548+
70549+ not = (*p == '!' || *p == '^');
70550+ if (not)
70551+ ++p;
70552+
70553+ c = *p++;
70554+ for (;;) {
70555+ unsigned char fn = (unsigned char)*n;
70556+
70557+ if (c == '\0')
70558+ return 1;
70559+ else {
70560+ if (c == fn)
70561+ goto matched;
70562+ cold = c;
70563+ c = *p++;
70564+
70565+ if (c == '-' && *p != ']') {
70566+ unsigned char cend = *p++;
70567+
70568+ if (cend == '\0')
70569+ return 1;
70570+
70571+ if (cold <= fn && fn <= cend)
70572+ goto matched;
70573+
70574+ c = *p++;
70575+ }
70576+ }
70577+
70578+ if (c == ']')
70579+ break;
70580+ }
70581+ if (!not)
70582+ return 1;
70583+ break;
70584+ matched:
70585+ while (c != ']') {
70586+ if (c == '\0')
70587+ return 1;
70588+
70589+ c = *p++;
70590+ }
70591+ if (not)
70592+ return 1;
70593+ }
70594+ break;
70595+ default:
70596+ if (c != *n)
70597+ return 1;
70598+ }
70599+
70600+ ++n;
70601+ }
70602+
70603+ if (*n == '\0')
70604+ return 0;
70605+
70606+ if (*n == '/')
70607+ return 0;
70608+
70609+ return 1;
70610+}
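/*
 * glob_match() behavior by example (0 = match, 1 = no match).  '?', a
 * non-trailing '*', and '[...]' never match '/', keeping wildcards
 * within a single path component; only a trailing '*' (the c == '\0'
 * branch above) swallows the rest of the path, '/' included:
 *
 *	glob_match("/tmp/file?", "/tmp/file1")        -> 0
 *	glob_match("/tmp/file?", "/tmp/file/")        -> 1
 *	glob_match("/home/*.conf", "/home/a.conf")    -> 0
 *	glob_match("/home/*.conf", "/home/a/b.conf")  -> 1
 *	glob_match("/tmp/[a-c]x", "/tmp/bx")          -> 0
 */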
70611+
70612+static struct acl_object_label *
70613+chk_glob_label(struct acl_object_label *globbed,
70614+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70615+{
70616+ struct acl_object_label *tmp;
70617+
70618+ if (*path == NULL)
70619+ *path = gr_to_filename_nolock(dentry, mnt);
70620+
70621+ tmp = globbed;
70622+
70623+ while (tmp) {
70624+ if (!glob_match(tmp->filename, *path))
70625+ return tmp;
70626+ tmp = tmp->next;
70627+ }
70628+
70629+ return NULL;
70630+}
70631+
70632+static struct acl_object_label *
70633+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70634+ const u64 curr_ino, const dev_t curr_dev,
70635+ const struct acl_subject_label *subj, char **path, const int checkglob)
70636+{
70637+ struct acl_subject_label *tmpsubj;
70638+ struct acl_object_label *retval;
70639+ struct acl_object_label *retval2;
70640+
70641+ tmpsubj = (struct acl_subject_label *) subj;
70642+ read_lock(&gr_inode_lock);
70643+ do {
70644+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70645+ if (retval) {
70646+ if (checkglob && retval->globbed) {
70647+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70648+ if (retval2)
70649+ retval = retval2;
70650+ }
70651+ break;
70652+ }
70653+ } while ((tmpsubj = tmpsubj->parent_subject));
70654+ read_unlock(&gr_inode_lock);
70655+
70656+ return retval;
70657+}
70658+
70659+static __inline__ struct acl_object_label *
70660+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70661+ struct dentry *curr_dentry,
70662+ const struct acl_subject_label *subj, char **path, const int checkglob)
70663+{
70664+ int newglob = checkglob;
70665+ u64 inode;
70666+ dev_t device;
70667+
70668+	/* while we're still at the originally accessed path (not yet at one of its
70669+	   ancestors), don't do glob checking, as we don't want a / * rule to match
70670+	   instead of the / object itself.  Create lookups that call this function
70671+	   are exempt, though, since they look up on the parent and thus need
70672+	   globbing checks on all paths */
70673+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70674+ newglob = GR_NO_GLOB;
70675+
70676+ spin_lock(&curr_dentry->d_lock);
70677+ inode = __get_ino(curr_dentry);
70678+ device = __get_dev(curr_dentry);
70679+ spin_unlock(&curr_dentry->d_lock);
70680+
70681+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70682+}
70683+
70684+#ifdef CONFIG_HUGETLBFS
70685+static inline bool
70686+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70687+{
70688+ int i;
70689+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70690+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70691+ return true;
70692+ }
70693+
70694+ return false;
70695+}
70696+#endif
70697+
70698+static struct acl_object_label *
70699+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70700+ const struct acl_subject_label *subj, char *path, const int checkglob)
70701+{
70702+ struct dentry *dentry = (struct dentry *) l_dentry;
70703+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70704+ struct mount *real_mnt = real_mount(mnt);
70705+ struct acl_object_label *retval;
70706+ struct dentry *parent;
70707+
70708+ read_seqlock_excl(&mount_lock);
70709+ write_seqlock(&rename_lock);
70710+
70711+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70712+#ifdef CONFIG_NET
70713+ mnt == sock_mnt ||
70714+#endif
70715+#ifdef CONFIG_HUGETLBFS
70716+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70717+#endif
70718+		     /* ignore fs-internal (S_PRIVATE) inodes -- "ignore Eric Biederman" */
70719+ IS_PRIVATE(l_dentry->d_inode))) {
70720+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70721+ goto out;
70722+ }
70723+
70724+ for (;;) {
70725+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70726+ break;
70727+
70728+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70729+ if (!mnt_has_parent(real_mnt))
70730+ break;
70731+
70732+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70733+ if (retval != NULL)
70734+ goto out;
70735+
70736+ dentry = real_mnt->mnt_mountpoint;
70737+ real_mnt = real_mnt->mnt_parent;
70738+ mnt = &real_mnt->mnt;
70739+ continue;
70740+ }
70741+
70742+ parent = dentry->d_parent;
70743+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70744+ if (retval != NULL)
70745+ goto out;
70746+
70747+ dentry = parent;
70748+ }
70749+
70750+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70751+
70752+ /* gr_real_root is pinned so we don't have to hold a reference */
70753+ if (retval == NULL)
70754+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70755+out:
70756+ write_sequnlock(&rename_lock);
70757+ read_sequnlock_excl(&mount_lock);
70758+
70759+ BUG_ON(retval == NULL);
70760+
70761+ return retval;
70762+}
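/*
 * The walk above is most-specific-first: an access to /usr/bin/vim
 * consults the subject's objects for /usr/bin/vim, then /usr/bin, /usr
 * and finally /, crossing mount points via mnt_mountpoint/mnt_parent
 * just as prepend_path() does, with a last-resort lookup against the
 * real root.  Each step also follows parent_subject chains inside
 * __full_lookup(), and the BUG_ON() encodes the invariant that a loaded
 * policy always supplies a default object for "/".
 */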
70763+
70764+static __inline__ struct acl_object_label *
70765+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70766+ const struct acl_subject_label *subj)
70767+{
70768+ char *path = NULL;
70769+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70770+}
70771+
70772+static __inline__ struct acl_object_label *
70773+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70774+ const struct acl_subject_label *subj)
70775+{
70776+ char *path = NULL;
70777+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70778+}
70779+
70780+static __inline__ struct acl_object_label *
70781+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70782+ const struct acl_subject_label *subj, char *path)
70783+{
70784+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70785+}
70786+
70787+struct acl_subject_label *
70788+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70789+ const struct acl_role_label *role)
70790+{
70791+ struct dentry *dentry = (struct dentry *) l_dentry;
70792+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70793+ struct mount *real_mnt = real_mount(mnt);
70794+ struct acl_subject_label *retval;
70795+ struct dentry *parent;
70796+
70797+ read_seqlock_excl(&mount_lock);
70798+ write_seqlock(&rename_lock);
70799+
70800+ for (;;) {
70801+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70802+ break;
70803+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70804+ if (!mnt_has_parent(real_mnt))
70805+ break;
70806+
70807+ spin_lock(&dentry->d_lock);
70808+ read_lock(&gr_inode_lock);
70809+ retval =
70810+ lookup_acl_subj_label(__get_ino(dentry),
70811+ __get_dev(dentry), role);
70812+ read_unlock(&gr_inode_lock);
70813+ spin_unlock(&dentry->d_lock);
70814+ if (retval != NULL)
70815+ goto out;
70816+
70817+ dentry = real_mnt->mnt_mountpoint;
70818+ real_mnt = real_mnt->mnt_parent;
70819+ mnt = &real_mnt->mnt;
70820+ continue;
70821+ }
70822+
70823+ spin_lock(&dentry->d_lock);
70824+ read_lock(&gr_inode_lock);
70825+ retval = lookup_acl_subj_label(__get_ino(dentry),
70826+ __get_dev(dentry), role);
70827+ read_unlock(&gr_inode_lock);
70828+ parent = dentry->d_parent;
70829+ spin_unlock(&dentry->d_lock);
70830+
70831+ if (retval != NULL)
70832+ goto out;
70833+
70834+ dentry = parent;
70835+ }
70836+
70837+ spin_lock(&dentry->d_lock);
70838+ read_lock(&gr_inode_lock);
70839+ retval = lookup_acl_subj_label(__get_ino(dentry),
70840+ __get_dev(dentry), role);
70841+ read_unlock(&gr_inode_lock);
70842+ spin_unlock(&dentry->d_lock);
70843+
70844+ if (unlikely(retval == NULL)) {
70845+ /* gr_real_root is pinned, we don't need to hold a reference */
70846+ read_lock(&gr_inode_lock);
70847+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70848+ __get_dev(gr_real_root.dentry), role);
70849+ read_unlock(&gr_inode_lock);
70850+ }
70851+out:
70852+ write_sequnlock(&rename_lock);
70853+ read_sequnlock_excl(&mount_lock);
70854+
70855+ BUG_ON(retval == NULL);
70856+
70857+ return retval;
70858+}
70859+
70860+void
70861+assign_special_role(const char *rolename)
70862+{
70863+ struct acl_object_label *obj;
70864+ struct acl_role_label *r;
70865+ struct acl_role_label *assigned = NULL;
70866+ struct task_struct *tsk;
70867+ struct file *filp;
70868+
70869+ FOR_EACH_ROLE_START(r)
70870+ if (!strcmp(rolename, r->rolename) &&
70871+ (r->roletype & GR_ROLE_SPECIAL)) {
70872+ assigned = r;
70873+ break;
70874+ }
70875+ FOR_EACH_ROLE_END(r)
70876+
70877+ if (!assigned)
70878+ return;
70879+
70880+ read_lock(&tasklist_lock);
70881+ read_lock(&grsec_exec_file_lock);
70882+
70883+ tsk = current->real_parent;
70884+ if (tsk == NULL)
70885+ goto out_unlock;
70886+
70887+ filp = tsk->exec_file;
70888+ if (filp == NULL)
70889+ goto out_unlock;
70890+
70891+ tsk->is_writable = 0;
70892+ tsk->inherited = 0;
70893+
70894+ tsk->acl_sp_role = 1;
70895+ tsk->acl_role_id = ++acl_sp_role_value;
70896+ tsk->role = assigned;
70897+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70898+
70899+ /* ignore additional mmap checks for processes that are writable
70900+ by the default ACL */
70901+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70902+ if (unlikely(obj->mode & GR_WRITE))
70903+ tsk->is_writable = 1;
70904+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70905+ if (unlikely(obj->mode & GR_WRITE))
70906+ tsk->is_writable = 1;
70907+
70908+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70909+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70910+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70911+#endif
70912+
70913+out_unlock:
70914+ read_unlock(&grsec_exec_file_lock);
70915+ read_unlock(&tasklist_lock);
70916+ return;
70917+}
70918+
70919+
70920+static void
70921+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70922+{
70923+ struct task_struct *task = current;
70924+ const struct cred *cred = current_cred();
70925+
70926+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70927+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70928+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70929+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70930+
70931+ return;
70932+}
70933+
70934+static void
70935+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70936+{
70937+ struct task_struct *task = current;
70938+ const struct cred *cred = current_cred();
70939+
70940+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70941+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70942+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70943+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70944+
70945+ return;
70946+}
70947+
70948+static void
70949+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70950+{
70951+ struct task_struct *task = current;
70952+ const struct cred *cred = current_cred();
70953+
70954+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70955+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70956+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70957+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70958+
70959+ return;
70960+}
70961+
70962+static void
70963+gr_set_proc_res(struct task_struct *task)
70964+{
70965+ struct acl_subject_label *proc;
70966+ unsigned short i;
70967+
70968+ proc = task->acl;
70969+
70970+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70971+ return;
70972+
70973+ for (i = 0; i < RLIM_NLIMITS; i++) {
70974+ unsigned long rlim_cur, rlim_max;
70975+
70976+ if (!(proc->resmask & (1U << i)))
70977+ continue;
70978+
70979+ rlim_cur = proc->res[i].rlim_cur;
70980+ rlim_max = proc->res[i].rlim_max;
70981+
70982+ if (i == RLIMIT_NOFILE) {
70983+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70984+ if (rlim_cur > saved_sysctl_nr_open)
70985+ rlim_cur = saved_sysctl_nr_open;
70986+ if (rlim_max > saved_sysctl_nr_open)
70987+ rlim_max = saved_sysctl_nr_open;
70988+ }
70989+
70990+ task->signal->rlim[i].rlim_cur = rlim_cur;
70991+ task->signal->rlim[i].rlim_max = rlim_max;
70992+
70993+ if (i == RLIMIT_CPU)
70994+ update_rlimit_cpu(task, rlim_cur);
70995+ }
70996+
70997+ return;
70998+}
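/*
 * proc->resmask is a bitmap with one bit per RLIMIT_* index; a subject
 * resource line (roughly "RES_NOFILE 1024 2048" in gradm's policy
 * syntax) sets the corresponding bit plus the soft/hard pair in
 * proc->res[].  Decoded, one iteration of the loop above amounts to:
 *
 *	if (proc->resmask & (1U << RLIMIT_NOFILE)) {
 *		task->signal->rlim[RLIMIT_NOFILE].rlim_cur = proc->res[RLIMIT_NOFILE].rlim_cur;
 *		task->signal->rlim[RLIMIT_NOFILE].rlim_max = proc->res[RLIMIT_NOFILE].rlim_max;
 *	}
 *
 * with NOFILE additionally clamped to sysctl_nr_open so a policy can't
 * install a limit the VFS would reject.
 */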
70999+
71000+/* both of the functions below must be called with
71001+	rcu_read_lock();
71002+	read_lock(&tasklist_lock);
71003+	read_lock(&grsec_exec_file_lock);
71004+   held, except when __gr_get_subject_for_task() is reached via
71005+   gr_set_role_label() */
71006+
71007+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
71008+{
71009+ char *tmpname;
71010+ struct acl_subject_label *tmpsubj;
71011+ struct file *filp;
71012+ struct name_entry *nmatch;
71013+
71014+ filp = task->exec_file;
71015+ if (filp == NULL)
71016+ return NULL;
71017+
71018+	/* the following applies the correct subject to
71019+	   binaries that were already running when the
71020+	   RBAC system was enabled and that have been
71021+	   replaced or deleted since their execution
71022+	   -----
71023+	   when the RBAC system starts, the inode/dev
71024+	   from exec_file will be one the RBAC system
71025+	   is unaware of.  It only knows the inode/dev
71026+	   of the file presently on disk, or the absence
71027+	   of it.
71028+	*/
71029+
71030+ if (filename)
71031+ nmatch = __lookup_name_entry(state, filename);
71032+ else {
71033+ preempt_disable();
71034+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
71035+
71036+ nmatch = __lookup_name_entry(state, tmpname);
71037+ preempt_enable();
71038+ }
71039+ tmpsubj = NULL;
71040+ if (nmatch) {
71041+ if (nmatch->deleted)
71042+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
71043+ else
71044+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
71045+ }
71046+ /* this also works for the reload case -- if we don't match a potentially inherited subject
71047+ then we fall back to a normal lookup based on the binary's ino/dev
71048+ */
71049+ if (tmpsubj == NULL && fallback)
71050+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
71051+
71052+ return tmpsubj;
71053+}
71054+
71055+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
71056+{
71057+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
71058+}
71059+
71060+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
71061+{
71062+ struct acl_object_label *obj;
71063+ struct file *filp;
71064+
71065+ filp = task->exec_file;
71066+
71067+ task->acl = subj;
71068+ task->is_writable = 0;
71069+ /* ignore additional mmap checks for processes that are writable
71070+ by the default ACL */
71071+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
71072+ if (unlikely(obj->mode & GR_WRITE))
71073+ task->is_writable = 1;
71074+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71075+ if (unlikely(obj->mode & GR_WRITE))
71076+ task->is_writable = 1;
71077+
71078+ gr_set_proc_res(task);
71079+
71080+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71081+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71082+#endif
71083+}
71084+
71085+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
71086+{
71087+ __gr_apply_subject_to_task(&running_polstate, task, subj);
71088+}
71089+
71090+__u32
71091+gr_search_file(const struct dentry * dentry, const __u32 mode,
71092+ const struct vfsmount * mnt)
71093+{
71094+ __u32 retval = mode;
71095+ struct acl_subject_label *curracl;
71096+ struct acl_object_label *currobj;
71097+
71098+ if (unlikely(!(gr_status & GR_READY)))
71099+ return (mode & ~GR_AUDITS);
71100+
71101+ curracl = current->acl;
71102+
71103+ currobj = chk_obj_label(dentry, mnt, curracl);
71104+ retval = currobj->mode & mode;
71105+
71106+ /* if we're opening a specified transfer file for writing
71107+ (e.g. /dev/initctl), then transfer our role to init
71108+ */
71109+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
71110+ current->role->roletype & GR_ROLE_PERSIST)) {
71111+ struct task_struct *task = init_pid_ns.child_reaper;
71112+
71113+ if (task->role != current->role) {
71114+ struct acl_subject_label *subj;
71115+
71116+ task->acl_sp_role = 0;
71117+ task->acl_role_id = current->acl_role_id;
71118+ task->role = current->role;
71119+ rcu_read_lock();
71120+ read_lock(&grsec_exec_file_lock);
71121+ subj = gr_get_subject_for_task(task, NULL, 1);
71122+ gr_apply_subject_to_task(task, subj);
71123+ read_unlock(&grsec_exec_file_lock);
71124+ rcu_read_unlock();
71125+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
71126+ }
71127+ }
71128+
71129+ if (unlikely
71130+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
71131+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
71132+ __u32 new_mode = mode;
71133+
71134+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71135+
71136+ retval = new_mode;
71137+
71138+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
71139+ new_mode |= GR_INHERIT;
71140+
71141+ if (!(mode & GR_NOLEARN))
71142+ gr_log_learn(dentry, mnt, new_mode);
71143+ }
71144+
71145+ return retval;
71146+}
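/*
 * Worked example: requesting mode = GR_READ | GR_WRITE against an
 * object whose mode is GR_READ | GR_AUDIT_READ returns GR_READ, and the
 * caller treats the access as fully granted only if the return equals
 * the request minus the GR_AUDITS/GR_SUPPRESS bits.  Under
 * GR_LEARN/GR_INHERITLEARN the shortfall is not enforced: the stripped
 * request itself is returned and handed to gr_log_learn(), so gradm's
 * learning pass can later emit a rule that grants it.
 */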
71147+
71148+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
71149+ const struct dentry *parent,
71150+ const struct vfsmount *mnt)
71151+{
71152+ struct name_entry *match;
71153+ struct acl_object_label *matchpo;
71154+ struct acl_subject_label *curracl;
71155+ char *path;
71156+
71157+ if (unlikely(!(gr_status & GR_READY)))
71158+ return NULL;
71159+
71160+ preempt_disable();
71161+ path = gr_to_filename_rbac(new_dentry, mnt);
71162+ match = lookup_name_entry_create(path);
71163+
71164+ curracl = current->acl;
71165+
71166+ if (match) {
71167+ read_lock(&gr_inode_lock);
71168+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
71169+ read_unlock(&gr_inode_lock);
71170+
71171+ if (matchpo) {
71172+ preempt_enable();
71173+ return matchpo;
71174+ }
71175+ }
71176+
71177+ // lookup parent
71178+
71179+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
71180+
71181+ preempt_enable();
71182+ return matchpo;
71183+}
71184+
71185+__u32
71186+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
71187+ const struct vfsmount * mnt, const __u32 mode)
71188+{
71189+ struct acl_object_label *matchpo;
71190+ __u32 retval;
71191+
71192+ if (unlikely(!(gr_status & GR_READY)))
71193+ return (mode & ~GR_AUDITS);
71194+
71195+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
71196+
71197+ retval = matchpo->mode & mode;
71198+
71199+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
71200+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71201+ __u32 new_mode = mode;
71202+
71203+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71204+
71205+ gr_log_learn(new_dentry, mnt, new_mode);
71206+ return new_mode;
71207+ }
71208+
71209+ return retval;
71210+}
71211+
71212+__u32
71213+gr_check_link(const struct dentry * new_dentry,
71214+ const struct dentry * parent_dentry,
71215+ const struct vfsmount * parent_mnt,
71216+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
71217+{
71218+ struct acl_object_label *obj;
71219+ __u32 oldmode, newmode;
71220+ __u32 needmode;
71221+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
71222+ GR_DELETE | GR_INHERIT;
71223+
71224+ if (unlikely(!(gr_status & GR_READY)))
71225+ return (GR_CREATE | GR_LINK);
71226+
71227+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
71228+ oldmode = obj->mode;
71229+
71230+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
71231+ newmode = obj->mode;
71232+
71233+ needmode = newmode & checkmodes;
71234+
71235+ // old name for hardlink must have at least the permissions of the new name
71236+ if ((oldmode & needmode) != needmode)
71237+ goto bad;
71238+
71239+ // if old name had restrictions/auditing, make sure the new name does as well
71240+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
71241+
71242+ // don't allow hardlinking of suid/sgid/fcapped files without permission
71243+ if (is_privileged_binary(old_dentry))
71244+ needmode |= GR_SETID;
71245+
71246+ if ((newmode & needmode) != needmode)
71247+ goto bad;
71248+
71249+ // enforce minimum permissions
71250+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
71251+ return newmode;
71252+bad:
71253+ needmode = oldmode;
71254+ if (is_privileged_binary(old_dentry))
71255+ needmode |= GR_SETID;
71256+
71257+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
71258+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
71259+ return (GR_CREATE | GR_LINK);
71260+ } else if (newmode & GR_SUPPRESS)
71261+ return GR_SUPPRESS;
71262+ else
71263+ return 0;
71264+}
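/*
 * Hardlink rules above, by example (object modes illustrative): linking
 * an object readable-only at its old name to a new name whose object
 * grants read+write fails the first check, since the new name would
 * confer GR_WRITE the old name never had; auditing and GR_NOPTRACE
 * restrictions on the old name must likewise carry over to the new one,
 * linking a suid/sgid/fcapped binary requires GR_SETID on both names,
 * and the new name's object must grant both GR_CREATE and GR_LINK
 * outright.
 */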
71265+
71266+int
71267+gr_check_hidden_task(const struct task_struct *task)
71268+{
71269+ if (unlikely(!(gr_status & GR_READY)))
71270+ return 0;
71271+
71272+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
71273+ return 1;
71274+
71275+ return 0;
71276+}
71277+
71278+int
71279+gr_check_protected_task(const struct task_struct *task)
71280+{
71281+ if (unlikely(!(gr_status & GR_READY) || !task))
71282+ return 0;
71283+
71284+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71285+ task->acl != current->acl)
71286+ return 1;
71287+
71288+ return 0;
71289+}
71290+
71291+int
71292+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
71293+{
71294+ struct task_struct *p;
71295+ int ret = 0;
71296+
71297+ if (unlikely(!(gr_status & GR_READY) || !pid))
71298+ return ret;
71299+
71300+ read_lock(&tasklist_lock);
71301+ do_each_pid_task(pid, type, p) {
71302+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71303+ p->acl != current->acl) {
71304+ ret = 1;
71305+ goto out;
71306+ }
71307+ } while_each_pid_task(pid, type, p);
71308+out:
71309+ read_unlock(&tasklist_lock);
71310+
71311+ return ret;
71312+}
71313+
71314+void
71315+gr_copy_label(struct task_struct *tsk)
71316+{
71317+ struct task_struct *p = current;
71318+
71319+ tsk->inherited = p->inherited;
71320+ tsk->acl_sp_role = 0;
71321+ tsk->acl_role_id = p->acl_role_id;
71322+ tsk->acl = p->acl;
71323+ tsk->role = p->role;
71324+ tsk->signal->used_accept = 0;
71325+ tsk->signal->curr_ip = p->signal->curr_ip;
71326+ tsk->signal->saved_ip = p->signal->saved_ip;
71327+ if (p->exec_file)
71328+ get_file(p->exec_file);
71329+ tsk->exec_file = p->exec_file;
71330+ tsk->is_writable = p->is_writable;
71331+ if (unlikely(p->signal->used_accept)) {
71332+ p->signal->curr_ip = 0;
71333+ p->signal->saved_ip = 0;
71334+ }
71335+
71336+ return;
71337+}
71338+
71339+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71340+
71341+int
71342+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71343+{
71344+ unsigned int i;
71345+ __u16 num;
71346+ uid_t *uidlist;
71347+ uid_t curuid;
71348+ int realok = 0;
71349+ int effectiveok = 0;
71350+ int fsok = 0;
71351+ uid_t globalreal, globaleffective, globalfs;
71352+
71353+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71354+ struct user_struct *user;
71355+
71356+ if (!uid_valid(real))
71357+ goto skipit;
71358+
71359+ /* find user based on global namespace */
71360+
71361+ globalreal = GR_GLOBAL_UID(real);
71362+
71363+ user = find_user(make_kuid(&init_user_ns, globalreal));
71364+ if (user == NULL)
71365+ goto skipit;
71366+
71367+ if (gr_process_kernel_setuid_ban(user)) {
71368+ /* for find_user */
71369+ free_uid(user);
71370+ return 1;
71371+ }
71372+
71373+ /* for find_user */
71374+ free_uid(user);
71375+
71376+skipit:
71377+#endif
71378+
71379+ if (unlikely(!(gr_status & GR_READY)))
71380+ return 0;
71381+
71382+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71383+ gr_log_learn_uid_change(real, effective, fs);
71384+
71385+ num = current->acl->user_trans_num;
71386+ uidlist = current->acl->user_transitions;
71387+
71388+ if (uidlist == NULL)
71389+ return 0;
71390+
71391+ if (!uid_valid(real)) {
71392+ realok = 1;
71393+ globalreal = (uid_t)-1;
71394+ } else {
71395+ globalreal = GR_GLOBAL_UID(real);
71396+ }
71397+ if (!uid_valid(effective)) {
71398+ effectiveok = 1;
71399+ globaleffective = (uid_t)-1;
71400+ } else {
71401+ globaleffective = GR_GLOBAL_UID(effective);
71402+ }
71403+ if (!uid_valid(fs)) {
71404+ fsok = 1;
71405+ globalfs = (uid_t)-1;
71406+ } else {
71407+ globalfs = GR_GLOBAL_UID(fs);
71408+ }
71409+
71410+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71411+ for (i = 0; i < num; i++) {
71412+ curuid = uidlist[i];
71413+ if (globalreal == curuid)
71414+ realok = 1;
71415+ if (globaleffective == curuid)
71416+ effectiveok = 1;
71417+ if (globalfs == curuid)
71418+ fsok = 1;
71419+ }
71420+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71421+ for (i = 0; i < num; i++) {
71422+ curuid = uidlist[i];
71423+ if (globalreal == curuid)
71424+ break;
71425+ if (globaleffective == curuid)
71426+ break;
71427+ if (globalfs == curuid)
71428+ break;
71429+ }
71430+ /* not in deny list */
71431+ if (i == num) {
71432+ realok = 1;
71433+ effectiveok = 1;
71434+ fsok = 1;
71435+ }
71436+ }
71437+
71438+ if (realok && effectiveok && fsok)
71439+ return 0;
71440+ else {
71441+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71442+ return 1;
71443+ }
71444+}
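/*
 * user_transitions by example: a subject compiled with
 * "user_transition_allow root nobody" gets GR_ID_ALLOW and
 * uidlist = { 0, 65534 }; a setresuid() then succeeds only if each of
 * the real/effective/fs uids is either left unchanged (invalid kuid,
 * treated as ok above) or appears in the list, and the first offending
 * uid is logged.  GR_ID_DENY inverts the test: any of the three ids
 * appearing in the list rejects the transition.
 */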
71445+
71446+int
71447+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71448+{
71449+ unsigned int i;
71450+ __u16 num;
71451+ gid_t *gidlist;
71452+ gid_t curgid;
71453+ int realok = 0;
71454+ int effectiveok = 0;
71455+ int fsok = 0;
71456+ gid_t globalreal, globaleffective, globalfs;
71457+
71458+ if (unlikely(!(gr_status & GR_READY)))
71459+ return 0;
71460+
71461+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71462+ gr_log_learn_gid_change(real, effective, fs);
71463+
71464+ num = current->acl->group_trans_num;
71465+ gidlist = current->acl->group_transitions;
71466+
71467+ if (gidlist == NULL)
71468+ return 0;
71469+
71470+ if (!gid_valid(real)) {
71471+ realok = 1;
71472+ globalreal = (gid_t)-1;
71473+ } else {
71474+ globalreal = GR_GLOBAL_GID(real);
71475+ }
71476+ if (!gid_valid(effective)) {
71477+ effectiveok = 1;
71478+ globaleffective = (gid_t)-1;
71479+ } else {
71480+ globaleffective = GR_GLOBAL_GID(effective);
71481+ }
71482+ if (!gid_valid(fs)) {
71483+ fsok = 1;
71484+ globalfs = (gid_t)-1;
71485+ } else {
71486+ globalfs = GR_GLOBAL_GID(fs);
71487+ }
71488+
71489+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71490+ for (i = 0; i < num; i++) {
71491+ curgid = gidlist[i];
71492+ if (globalreal == curgid)
71493+ realok = 1;
71494+ if (globaleffective == curgid)
71495+ effectiveok = 1;
71496+ if (globalfs == curgid)
71497+ fsok = 1;
71498+ }
71499+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71500+ for (i = 0; i < num; i++) {
71501+ curgid = gidlist[i];
71502+ if (globalreal == curgid)
71503+ break;
71504+ if (globaleffective == curgid)
71505+ break;
71506+ if (globalfs == curgid)
71507+ break;
71508+ }
71509+ /* not in deny list */
71510+ if (i == num) {
71511+ realok = 1;
71512+ effectiveok = 1;
71513+ fsok = 1;
71514+ }
71515+ }
71516+
71517+ if (realok && effectiveok && fsok)
71518+ return 0;
71519+ else {
71520+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71521+ return 1;
71522+ }
71523+}
71524+
71525+extern int gr_acl_is_capable(const int cap);
71526+
71527+void
71528+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71529+{
71530+ struct acl_role_label *role = task->role;
71531+ struct acl_role_label *origrole = role;
71532+ struct acl_subject_label *subj = NULL;
71533+ struct acl_object_label *obj;
71534+ struct file *filp;
71535+ uid_t uid;
71536+ gid_t gid;
71537+
71538+ if (unlikely(!(gr_status & GR_READY)))
71539+ return;
71540+
71541+ uid = GR_GLOBAL_UID(kuid);
71542+ gid = GR_GLOBAL_GID(kgid);
71543+
71544+ filp = task->exec_file;
71545+
71546+ /* kernel process, we'll give them the kernel role */
71547+ if (unlikely(!filp)) {
71548+ task->role = running_polstate.kernel_role;
71549+ task->acl = running_polstate.kernel_role->root_label;
71550+ return;
71551+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71552+ /* save the current ip at time of role lookup so that the proper
71553+ IP will be learned for role_allowed_ip */
71554+ task->signal->saved_ip = task->signal->curr_ip;
71555+ role = lookup_acl_role_label(task, uid, gid);
71556+ }
71557+
71558+ /* don't change the role if we're not a privileged process */
71559+ if (role && task->role != role &&
71560+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71561+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71562+ return;
71563+
71564+ task->role = role;
71565+
71566+ if (task->inherited) {
71567+ /* if we reached our subject through inheritance, then first see
71568+ if there's a subject of the same name in the new role that has
71569+ an object that would result in the same inherited subject
71570+ */
71571+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
71572+ if (subj) {
71573+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
71574+ if (!(obj->mode & GR_INHERIT))
71575+ subj = NULL;
71576+ }
71577+
71578+ }
71579+ if (subj == NULL) {
71580+ /* otherwise:
71581+ perform subject lookup in possibly new role
71582+ we can use this result below in the case where role == task->role
71583+ */
71584+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71585+ }
71586+
71587+	/* only switch subjects when the uid/gid change moved us to a
71588+	   different role, or when the normal lookup agrees with the current
71589+	   subject anyway (and we aren't inherit-learning); if the role is
71590+	   unchanged and the lookup disagrees, we arrived at the current
71591+	   subject via inheritance, and switching would lose that inherited
71592+	   subject */
71593+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
71594+ (subj == task->acl)))
71595+ task->acl = subj;
71596+
71597+ /* leave task->inherited unaffected */
71598+
71599+ task->is_writable = 0;
71600+
71601+ /* ignore additional mmap checks for processes that are writable
71602+ by the default ACL */
71603+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71604+ if (unlikely(obj->mode & GR_WRITE))
71605+ task->is_writable = 1;
71606+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71607+ if (unlikely(obj->mode & GR_WRITE))
71608+ task->is_writable = 1;
71609+
71610+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71611+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71612+#endif
71613+
71614+ gr_set_proc_res(task);
71615+
71616+ return;
71617+}
71618+
71619+int
71620+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71621+ const int unsafe_flags)
71622+{
71623+ struct task_struct *task = current;
71624+ struct acl_subject_label *newacl;
71625+ struct acl_object_label *obj;
71626+ __u32 retmode;
71627+
71628+ if (unlikely(!(gr_status & GR_READY)))
71629+ return 0;
71630+
71631+ newacl = chk_subj_label(dentry, mnt, task->role);
71632+
71633+	/* special handling for the case where an admin role ran
71634+	   strace -f -p <pid> and that pid then performed an exec
71635+	*/
71636+ rcu_read_lock();
71637+ read_lock(&tasklist_lock);
71638+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71639+ (task->parent->acl->mode & GR_POVERRIDE))) {
71640+ read_unlock(&tasklist_lock);
71641+ rcu_read_unlock();
71642+ goto skip_check;
71643+ }
71644+ read_unlock(&tasklist_lock);
71645+ rcu_read_unlock();
71646+
71647+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71648+ !(task->role->roletype & GR_ROLE_GOD) &&
71649+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71650+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71651+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71652+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71653+ else
71654+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71655+ return -EACCES;
71656+ }
71657+
71658+skip_check:
71659+
71660+ obj = chk_obj_label(dentry, mnt, task->acl);
71661+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71662+
71663+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71664+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71665+ if (obj->nested)
71666+ task->acl = obj->nested;
71667+ else
71668+ task->acl = newacl;
71669+ task->inherited = 0;
71670+ } else {
71671+ task->inherited = 1;
71672+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71673+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71674+ }
71675+
71676+ task->is_writable = 0;
71677+
71678+ /* ignore additional mmap checks for processes that are writable
71679+ by the default ACL */
71680+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71681+ if (unlikely(obj->mode & GR_WRITE))
71682+ task->is_writable = 1;
71683+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71684+ if (unlikely(obj->mode & GR_WRITE))
71685+ task->is_writable = 1;
71686+
71687+ gr_set_proc_res(task);
71688+
71689+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71690+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71691+#endif
71692+ return 0;
71693+}
71694+
71695+/* always called with valid inodev ptr */
71696+static void
71697+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71698+{
71699+ struct acl_object_label *matchpo;
71700+ struct acl_subject_label *matchps;
71701+ struct acl_subject_label *subj;
71702+ struct acl_role_label *role;
71703+ unsigned int x;
71704+
71705+ FOR_EACH_ROLE_START(role)
71706+ FOR_EACH_SUBJECT_START(role, subj, x)
71707+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71708+ matchpo->mode |= GR_DELETED;
71709+ FOR_EACH_SUBJECT_END(subj,x)
71710+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71711+ /* nested subjects aren't in the role's subj_hash table */
71712+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71713+ matchpo->mode |= GR_DELETED;
71714+ FOR_EACH_NESTED_SUBJECT_END(subj)
71715+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71716+ matchps->mode |= GR_DELETED;
71717+ FOR_EACH_ROLE_END(role)
71718+
71719+ inodev->nentry->deleted = 1;
71720+
71721+ return;
71722+}
71723+
71724+void
71725+gr_handle_delete(const u64 ino, const dev_t dev)
71726+{
71727+ struct inodev_entry *inodev;
71728+
71729+ if (unlikely(!(gr_status & GR_READY)))
71730+ return;
71731+
71732+ write_lock(&gr_inode_lock);
71733+ inodev = lookup_inodev_entry(ino, dev);
71734+ if (inodev != NULL)
71735+ do_handle_delete(inodev, ino, dev);
71736+ write_unlock(&gr_inode_lock);
71737+
71738+ return;
71739+}
71740+
71741+static void
71742+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71743+ const u64 newinode, const dev_t newdevice,
71744+ struct acl_subject_label *subj)
71745+{
71746+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71747+ struct acl_object_label *match;
71748+
71749+ match = subj->obj_hash[index];
71750+
71751+ while (match && (match->inode != oldinode ||
71752+ match->device != olddevice ||
71753+ !(match->mode & GR_DELETED)))
71754+ match = match->next;
71755+
71756+ if (match && (match->inode == oldinode)
71757+ && (match->device == olddevice)
71758+ && (match->mode & GR_DELETED)) {
71759+ if (match->prev == NULL) {
71760+ subj->obj_hash[index] = match->next;
71761+ if (match->next != NULL)
71762+ match->next->prev = NULL;
71763+ } else {
71764+ match->prev->next = match->next;
71765+ if (match->next != NULL)
71766+ match->next->prev = match->prev;
71767+ }
71768+ match->prev = NULL;
71769+ match->next = NULL;
71770+ match->inode = newinode;
71771+ match->device = newdevice;
71772+ match->mode &= ~GR_DELETED;
71773+
71774+ insert_acl_obj_label(match, subj);
71775+ }
71776+
71777+ return;
71778+}
71779+
71780+static void
71781+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71782+ const u64 newinode, const dev_t newdevice,
71783+ struct acl_role_label *role)
71784+{
71785+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71786+ struct acl_subject_label *match;
71787+
71788+ match = role->subj_hash[index];
71789+
71790+ while (match && (match->inode != oldinode ||
71791+ match->device != olddevice ||
71792+ !(match->mode & GR_DELETED)))
71793+ match = match->next;
71794+
71795+ if (match && (match->inode == oldinode)
71796+ && (match->device == olddevice)
71797+ && (match->mode & GR_DELETED)) {
71798+ if (match->prev == NULL) {
71799+ role->subj_hash[index] = match->next;
71800+ if (match->next != NULL)
71801+ match->next->prev = NULL;
71802+ } else {
71803+ match->prev->next = match->next;
71804+ if (match->next != NULL)
71805+ match->next->prev = match->prev;
71806+ }
71807+ match->prev = NULL;
71808+ match->next = NULL;
71809+ match->inode = newinode;
71810+ match->device = newdevice;
71811+ match->mode &= ~GR_DELETED;
71812+
71813+ insert_acl_subj_label(match, role);
71814+ }
71815+
71816+ return;
71817+}
71818+
71819+static void
71820+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71821+ const u64 newinode, const dev_t newdevice)
71822+{
71823+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71824+ struct inodev_entry *match;
71825+
71826+ match = running_polstate.inodev_set.i_hash[index];
71827+
71828+ while (match && (match->nentry->inode != oldinode ||
71829+ match->nentry->device != olddevice || !match->nentry->deleted))
71830+ match = match->next;
71831+
71832+ if (match && (match->nentry->inode == oldinode)
71833+ && (match->nentry->device == olddevice) &&
71834+ match->nentry->deleted) {
71835+ if (match->prev == NULL) {
71836+ running_polstate.inodev_set.i_hash[index] = match->next;
71837+ if (match->next != NULL)
71838+ match->next->prev = NULL;
71839+ } else {
71840+ match->prev->next = match->next;
71841+ if (match->next != NULL)
71842+ match->next->prev = match->prev;
71843+ }
71844+ match->prev = NULL;
71845+ match->next = NULL;
71846+ match->nentry->inode = newinode;
71847+ match->nentry->device = newdevice;
71848+ match->nentry->deleted = 0;
71849+
71850+ insert_inodev_entry(match);
71851+ }
71852+
71853+ return;
71854+}
71855+
71856+static void
71857+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71858+{
71859+ struct acl_subject_label *subj;
71860+ struct acl_role_label *role;
71861+ unsigned int x;
71862+
71863+ FOR_EACH_ROLE_START(role)
71864+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71865+
71866+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71867+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
71868+				subj->inode = ino;
71869+				subj->device = dev;
71870+			}
71871+ /* nested subjects aren't in the role's subj_hash table */
71872+ update_acl_obj_label(matchn->inode, matchn->device,
71873+ ino, dev, subj);
71874+ FOR_EACH_NESTED_SUBJECT_END(subj)
71875+ FOR_EACH_SUBJECT_START(role, subj, x)
71876+ update_acl_obj_label(matchn->inode, matchn->device,
71877+ ino, dev, subj);
71878+ FOR_EACH_SUBJECT_END(subj,x)
71879+ FOR_EACH_ROLE_END(role)
71880+
71881+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71882+
71883+ return;
71884+}
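/*
 * __do_handle_create() is the create/rename counterpart to
 * do_handle_delete(): every subject, nested subject and object whose
 * recorded inode/dev still matches the old name entry (matchn) is
 * re-pointed at the freshly instantiated ino/dev, and finally the
 * inodev hash entry itself is moved and un-deleted.
 */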
71885+
71886+static void
71887+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71888+ const struct vfsmount *mnt)
71889+{
71890+ u64 ino = __get_ino(dentry);
71891+ dev_t dev = __get_dev(dentry);
71892+
71893+ __do_handle_create(matchn, ino, dev);
71894+
71895+ return;
71896+}
71897+
71898+void
71899+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71900+{
71901+ struct name_entry *matchn;
71902+
71903+ if (unlikely(!(gr_status & GR_READY)))
71904+ return;
71905+
71906+ preempt_disable();
71907+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71908+
71909+ if (unlikely((unsigned long)matchn)) {
71910+ write_lock(&gr_inode_lock);
71911+ do_handle_create(matchn, dentry, mnt);
71912+ write_unlock(&gr_inode_lock);
71913+ }
71914+ preempt_enable();
71915+
71916+ return;
71917+}
71918+
71919+void
71920+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71921+{
71922+ struct name_entry *matchn;
71923+
71924+ if (unlikely(!(gr_status & GR_READY)))
71925+ return;
71926+
71927+ preempt_disable();
71928+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71929+
71930+ if (unlikely((unsigned long)matchn)) {
71931+ write_lock(&gr_inode_lock);
71932+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71933+ write_unlock(&gr_inode_lock);
71934+ }
71935+ preempt_enable();
71936+
71937+ return;
71938+}
71939+
71940+void
71941+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71942+ struct dentry *old_dentry,
71943+ struct dentry *new_dentry,
71944+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71945+{
71946+ struct name_entry *matchn;
71947+ struct name_entry *matchn2 = NULL;
71948+ struct inodev_entry *inodev;
71949+ struct inode *inode = new_dentry->d_inode;
71950+ u64 old_ino = __get_ino(old_dentry);
71951+ dev_t old_dev = __get_dev(old_dentry);
71952+ unsigned int exchange = flags & RENAME_EXCHANGE;
71953+
71954+ /* vfs_rename swaps the name and parent link for old_dentry and
71955+    new_dentry.
71956+    At this point, old_dentry has the new name, parent link, and inode
71957+    for the renamed file.
71958+    If a file is being replaced by the rename, new_dentry has the inode
71959+    and name of the replaced file.
71960+  */
71961+
71962+ if (unlikely(!(gr_status & GR_READY)))
71963+ return;
71964+
71965+ preempt_disable();
71966+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71967+
71968+ /* exchange cases:
71969+    - a filename exists for the source but not the dest:
71970+      recreate the source
71971+    - a filename exists for the dest but not the source:
71972+      recreate the dest
71973+    - a filename exists for both source and dest:
71974+      delete source and dest, then create source and dest
71975+    - a filename exists for neither source nor dest:
71976+      no updates needed
71977+
71978+    the name entry lookups get us the old inode/dev associated with
71979+    each name, so do the deletes first (where possible) so that the
71980+    subsequent creates pick up the right entries
71981+  */
71982+
71983+ if (exchange)
71984+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71985+
71986+ /* we wouldn't have to check d_inode if it weren't for
71987+ NFS silly-renaming
71988+ */
71989+
71990+ write_lock(&gr_inode_lock);
71991+ if (unlikely((replace || exchange) && inode)) {
71992+ u64 new_ino = __get_ino(new_dentry);
71993+ dev_t new_dev = __get_dev(new_dentry);
71994+
71995+ inodev = lookup_inodev_entry(new_ino, new_dev);
71996+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71997+ do_handle_delete(inodev, new_ino, new_dev);
71998+ }
71999+
72000+ inodev = lookup_inodev_entry(old_ino, old_dev);
72001+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
72002+ do_handle_delete(inodev, old_ino, old_dev);
72003+
72004+ if (unlikely(matchn != NULL))
72005+ do_handle_create(matchn, old_dentry, mnt);
72006+
72007+ if (unlikely(matchn2 != NULL))
72008+ do_handle_create(matchn2, new_dentry, mnt);
72009+
72010+ write_unlock(&gr_inode_lock);
72011+ preempt_enable();
72012+
72013+ return;
72014+}
72015+
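+/* Editor's note -- how the four exchange cases from the comment above map
+ * onto the lookups: matchn covers the source name, matchn2 the dest name.
+ *
+ *   matchn   matchn2   effect
+ *   set      NULL      recreate source only
+ *   NULL     set       recreate dest only
+ *   set      set       delete both stale entries, then create both
+ *   NULL     NULL      the deletes may still prune stale inodev entries
+ */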
72016+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
72017+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
72018+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
72019+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
72020+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
72021+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
72022+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
72023+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
72024+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
72025+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
72026+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
72027+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
72028+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
72029+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
72030+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
72031+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
72032+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
72033+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
72034+};
72035+
72036+void
72037+gr_learn_resource(const struct task_struct *task,
72038+ const int res, const unsigned long wanted, const int gt)
72039+{
72040+ struct acl_subject_label *acl;
72041+ const struct cred *cred;
72042+
72043+ if (unlikely((gr_status & GR_READY) &&
72044+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
72045+ goto skip_reslog;
72046+
72047+ gr_log_resource(task, res, wanted, gt);
72048+skip_reslog:
72049+
72050+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
72051+ return;
72052+
72053+ acl = task->acl;
72054+
72055+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
72056+ !(acl->resmask & (1U << (unsigned short) res))))
72057+ return;
72058+
72059+ if (wanted >= acl->res[res].rlim_cur) {
72060+ unsigned long res_add;
72061+
72062+ res_add = wanted + res_learn_bumps[res];
72063+
72064+ acl->res[res].rlim_cur = res_add;
72065+
72066+ if (wanted > acl->res[res].rlim_max)
72067+ acl->res[res].rlim_max = res_add;
72068+
72069+ /* only log the subject filename, since resource logging is supported for
72070+ single-subject learning only */
72071+ rcu_read_lock();
72072+ cred = __task_cred(task);
72073+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72074+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
72075+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
72076+ "", (unsigned long) res, &task->signal->saved_ip);
72077+ rcu_read_unlock();
72078+ }
72079+
72080+ return;
72081+}
72082+EXPORT_SYMBOL_GPL(gr_learn_resource);
72083+#endif
72084+
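+/* Editor's note -- a worked example of the bump arithmetic above, using
+ * hypothetical numbers: if a learning subject hits RLIMIT_NOFILE with
+ * wanted == 1024 and rlim_cur == 1024, then
+ *     res_add = 1024 + GR_RLIM_NOFILE_BUMP;
+ * becomes the new rlim_cur (and the new rlim_max too, if wanted exceeded
+ * the old rlim_max), so the learned policy keeps headroom above the
+ * highest usage actually observed.
+ */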
72085+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
72086+void
72087+pax_set_initial_flags(struct linux_binprm *bprm)
72088+{
72089+ struct task_struct *task = current;
72090+ struct acl_subject_label *proc;
72091+ unsigned long flags;
72092+
72093+ if (unlikely(!(gr_status & GR_READY)))
72094+ return;
72095+
72096+ flags = pax_get_flags(task);
72097+
72098+ proc = task->acl;
72099+
72100+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
72101+ flags &= ~MF_PAX_PAGEEXEC;
72102+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
72103+ flags &= ~MF_PAX_SEGMEXEC;
72104+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
72105+ flags &= ~MF_PAX_RANDMMAP;
72106+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
72107+ flags &= ~MF_PAX_EMUTRAMP;
72108+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
72109+ flags &= ~MF_PAX_MPROTECT;
72110+
72111+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
72112+ flags |= MF_PAX_PAGEEXEC;
72113+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
72114+ flags |= MF_PAX_SEGMEXEC;
72115+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
72116+ flags |= MF_PAX_RANDMMAP;
72117+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
72118+ flags |= MF_PAX_EMUTRAMP;
72119+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
72120+ flags |= MF_PAX_MPROTECT;
72121+
72122+ pax_set_flags(task, flags);
72123+
72124+ return;
72125+}
72126+#endif
72127+
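+/* Editor's note -- a worked example of the flag merge above, assuming a
+ * subject whose pax_flags set only GR_PAX_ENABLE_PAGEEXEC and
+ * GR_PAX_DISABLE_MPROTECT:
+ *     flags = (pax_get_flags(task) & ~MF_PAX_MPROTECT) | MF_PAX_PAGEEXEC;
+ * the subject's explicit enables/disables override the inherited per-task
+ * flags, while features the subject doesn't mention are left untouched.
+ */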
72128+int
72129+gr_handle_proc_ptrace(struct task_struct *task)
72130+{
72131+ struct file *filp;
72132+ struct task_struct *tmp = task;
72133+ struct task_struct *curtemp = current;
72134+ __u32 retmode;
72135+
72136+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72137+ if (unlikely(!(gr_status & GR_READY)))
72138+ return 0;
72139+#endif
72140+
72141+ read_lock(&tasklist_lock);
72142+ read_lock(&grsec_exec_file_lock);
72143+ filp = task->exec_file;
72144+
72145+ while (task_pid_nr(tmp) > 0) {
72146+ if (tmp == curtemp)
72147+ break;
72148+ tmp = tmp->real_parent;
72149+ }
72150+
72151+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72152+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
72153+ read_unlock(&grsec_exec_file_lock);
72154+ read_unlock(&tasklist_lock);
72155+ return 1;
72156+ }
72157+
72158+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72159+ if (!(gr_status & GR_READY)) {
72160+ read_unlock(&grsec_exec_file_lock);
72161+ read_unlock(&tasklist_lock);
72162+ return 0;
72163+ }
72164+#endif
72165+
72166+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
72167+ read_unlock(&grsec_exec_file_lock);
72168+ read_unlock(&tasklist_lock);
72169+
72170+ if (retmode & GR_NOPTRACE)
72171+ return 1;
72172+
72173+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
72174+ && (current->acl != task->acl || (current->acl != current->role->root_label
72175+ && task_pid_nr(current) != task_pid_nr(task))))
72176+ return 1;
72177+
72178+ return 0;
72179+}
72180+
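+#if 0
+/* Editor's illustrative sketch, not part of the original patch: the
+ * ancestry walk used by gr_handle_proc_ptrace() above and by
+ * gr_handle_ptrace() below, factored into a predicate.  It assumes the
+ * caller holds tasklist_lock, as both call sites do.
+ */
+static int gr_is_ancestor(struct task_struct *task,
+			  struct task_struct *ancestor)
+{
+	while (task_pid_nr(task) > 0) {
+		if (task == ancestor)
+			return 1;	/* found on the real_parent chain */
+		task = task->real_parent;
+	}
+	return 0;			/* reached the idle task: not found */
+}
+#endif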
72181+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
72182+{
72183+ if (unlikely(!(gr_status & GR_READY)))
72184+ return;
72185+
72186+ if (!(current->role->roletype & GR_ROLE_GOD))
72187+ return;
72188+
72189+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
72190+ p->role->rolename, gr_task_roletype_to_char(p),
72191+ p->acl->filename);
72192+}
72193+
72194+int
72195+gr_handle_ptrace(struct task_struct *task, const long request)
72196+{
72197+ struct task_struct *tmp = task;
72198+ struct task_struct *curtemp = current;
72199+ __u32 retmode;
72200+
72201+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72202+ if (unlikely(!(gr_status & GR_READY)))
72203+ return 0;
72204+#endif
72205+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72206+ read_lock(&tasklist_lock);
72207+ while (task_pid_nr(tmp) > 0) {
72208+ if (tmp == curtemp)
72209+ break;
72210+ tmp = tmp->real_parent;
72211+ }
72212+
72213+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72214+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
72215+ read_unlock(&tasklist_lock);
72216+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72217+ return 1;
72218+ }
72219+ read_unlock(&tasklist_lock);
72220+ }
72221+
72222+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72223+ if (!(gr_status & GR_READY))
72224+ return 0;
72225+#endif
72226+
72227+ read_lock(&grsec_exec_file_lock);
72228+ if (unlikely(!task->exec_file)) {
72229+ read_unlock(&grsec_exec_file_lock);
72230+ return 0;
72231+ }
72232+
72233+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
72234+ read_unlock(&grsec_exec_file_lock);
72235+
72236+ if (retmode & GR_NOPTRACE) {
72237+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72238+ return 1;
72239+ }
72240+
72241+ if (retmode & GR_PTRACERD) {
72242+ switch (request) {
72243+ case PTRACE_SEIZE:
72244+ case PTRACE_POKETEXT:
72245+ case PTRACE_POKEDATA:
72246+ case PTRACE_POKEUSR:
72247+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
72248+ case PTRACE_SETREGS:
72249+ case PTRACE_SETFPREGS:
72250+#endif
72251+#ifdef CONFIG_X86
72252+ case PTRACE_SETFPXREGS:
72253+#endif
72254+#ifdef CONFIG_ALTIVEC
72255+ case PTRACE_SETVRREGS:
72256+#endif
72257+ return 1;
72258+ default:
72259+ return 0;
72260+ }
72261+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
72262+ !(current->role->roletype & GR_ROLE_GOD) &&
72263+ (current->acl != task->acl)) {
72264+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72265+ return 1;
72266+ }
72267+
72268+ return 0;
72269+}
72270+
72271+static int is_writable_mmap(const struct file *filp)
72272+{
72273+ struct task_struct *task = current;
72274+ struct acl_object_label *obj, *obj2;
72275+
72276+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
72277+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
72278+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72279+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
72280+ task->role->root_label);
72281+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
72282+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
72283+ return 1;
72284+ }
72285+ }
72286+ return 0;
72287+}
72288+
72289+int
72290+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
72291+{
72292+ __u32 mode;
72293+
72294+ if (unlikely(!file || !(prot & PROT_EXEC)))
72295+ return 1;
72296+
72297+ if (is_writable_mmap(file))
72298+ return 0;
72299+
72300+ mode =
72301+ gr_search_file(file->f_path.dentry,
72302+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72303+ file->f_path.mnt);
72304+
72305+ if (!gr_tpe_allow(file))
72306+ return 0;
72307+
72308+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72309+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72310+ return 0;
72311+ } else if (unlikely(!(mode & GR_EXEC))) {
72312+ return 0;
72313+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72314+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72315+ return 1;
72316+ }
72317+
72318+ return 1;
72319+}
72320+
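+/* Editor's note -- decision table for the GR_EXEC check above (the same
+ * cascade appears in gr_acl_handle_mprotect() below with a different
+ * audit message):
+ *
+ *   mode bits present          result  log
+ *   GR_EXEC + GR_AUDIT_EXEC    allow   audit message
+ *   GR_EXEC only               allow   none
+ *   neither + GR_SUPPRESS      deny    none (suppressed)
+ *   neither, no GR_SUPPRESS    deny    denial message
+ */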
72321+int
72322+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72323+{
72324+ __u32 mode;
72325+
72326+ if (unlikely(!file || !(prot & PROT_EXEC)))
72327+ return 1;
72328+
72329+ if (is_writable_mmap(file))
72330+ return 0;
72331+
72332+ mode =
72333+ gr_search_file(file->f_path.dentry,
72334+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72335+ file->f_path.mnt);
72336+
72337+ if (!gr_tpe_allow(file))
72338+ return 0;
72339+
72340+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72341+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72342+ return 0;
72343+ } else if (unlikely(!(mode & GR_EXEC))) {
72344+ return 0;
72345+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72346+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72347+ return 1;
72348+ }
72349+
72350+ return 1;
72351+}
72352+
72353+void
72354+gr_acl_handle_psacct(struct task_struct *task, const long code)
72355+{
72356+ unsigned long runtime, cputime;
72357+ cputime_t utime, stime;
72358+ unsigned int wday, cday;
72359+ __u8 whr, chr;
72360+ __u8 wmin, cmin;
72361+ __u8 wsec, csec;
72362+ struct timespec curtime, starttime;
72363+
72364+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72365+ !(task->acl->mode & GR_PROCACCT)))
72366+ return;
72367+
72368+ curtime = ns_to_timespec(ktime_get_ns());
72369+ starttime = ns_to_timespec(task->start_time);
72370+ runtime = curtime.tv_sec - starttime.tv_sec;
72371+ wday = runtime / (60 * 60 * 24);
72372+ runtime -= wday * (60 * 60 * 24);
72373+ whr = runtime / (60 * 60);
72374+ runtime -= whr * (60 * 60);
72375+ wmin = runtime / 60;
72376+ runtime -= wmin * 60;
72377+ wsec = runtime;
72378+
72379+ task_cputime(task, &utime, &stime);
72380+ cputime = cputime_to_secs(utime + stime);
72381+ cday = cputime / (60 * 60 * 24);
72382+ cputime -= cday * (60 * 60 * 24);
72383+ chr = cputime / (60 * 60);
72384+ cputime -= chr * (60 * 60);
72385+ cmin = cputime / 60;
72386+ cputime -= cmin * 60;
72387+ csec = cputime;
72388+
72389+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72390+
72391+ return;
72392+}
72393+
72394+#ifdef CONFIG_TASKSTATS
72395+int gr_is_taskstats_denied(int pid)
72396+{
72397+ struct task_struct *task;
72398+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72399+ const struct cred *cred;
72400+#endif
72401+ int ret = 0;
72402+
72403+ /* restrict taskstats viewing to un-chrooted root users, and require
72404+    the 'view' subject flag as well if the RBAC system is enabled
72405+  */
72406+
72407+ rcu_read_lock();
72408+ read_lock(&tasklist_lock);
72409+ task = find_task_by_vpid(pid);
72410+ if (task) {
72411+#ifdef CONFIG_GRKERNSEC_CHROOT
72412+ if (proc_is_chrooted(task))
72413+ ret = -EACCES;
72414+#endif
72415+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72416+ cred = __task_cred(task);
72417+#ifdef CONFIG_GRKERNSEC_PROC_USER
72418+ if (gr_is_global_nonroot(cred->uid))
72419+ ret = -EACCES;
72420+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72421+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72422+ ret = -EACCES;
72423+#endif
72424+#endif
72425+ if (gr_status & GR_READY) {
72426+ if (!(task->acl->mode & GR_VIEW))
72427+ ret = -EACCES;
72428+ }
72429+ } else
72430+ ret = -ENOENT;
72431+
72432+ read_unlock(&tasklist_lock);
72433+ rcu_read_unlock();
72434+
72435+ return ret;
72436+}
72437+#endif
72438+
72439+/* AUXV entries are filled via a descendant of search_binary_handler
72440+ after we've already applied the subject for the target
72441+*/
72442+int gr_acl_enable_at_secure(void)
72443+{
72444+ if (unlikely(!(gr_status & GR_READY)))
72445+ return 0;
72446+
72447+ if (current->acl->mode & GR_ATSECURE)
72448+ return 1;
72449+
72450+ return 0;
72451+}
72452+
72453+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
72454+{
72455+ struct task_struct *task = current;
72456+ struct dentry *dentry = file->f_path.dentry;
72457+ struct vfsmount *mnt = file->f_path.mnt;
72458+ struct acl_object_label *obj, *tmp;
72459+ struct acl_subject_label *subj;
72460+ unsigned int bufsize;
72461+ int is_not_root;
72462+ char *path;
72463+ dev_t dev = __get_dev(dentry);
72464+
72465+ if (unlikely(!(gr_status & GR_READY)))
72466+ return 1;
72467+
72468+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72469+ return 1;
72470+
72471+ /* ignore private (IS_PRIVATE) inodes, which are exempt from security checks */
72472+ if (IS_PRIVATE(dentry->d_inode))
72473+ return 1;
72474+
72475+ subj = task->acl;
72476+ read_lock(&gr_inode_lock);
72477+ do {
72478+ obj = lookup_acl_obj_label(ino, dev, subj);
72479+ if (obj != NULL) {
72480+ read_unlock(&gr_inode_lock);
72481+ return (obj->mode & GR_FIND) ? 1 : 0;
72482+ }
72483+ } while ((subj = subj->parent_subject));
72484+ read_unlock(&gr_inode_lock);
72485+
72486+ /* this is purely an optimization, since we're looking up an object
72487+    for the directory the readdir is being performed on:
72488+    if it's possible for any globbed object to match the entry we're
72489+    filling into the directory, then the object found here will be
72490+    an anchor point with the globbed objects attached
72491+  */
72492+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72493+ if (obj->globbed == NULL)
72494+ return (obj->mode & GR_FIND) ? 1 : 0;
72495+
72496+ is_not_root = ((obj->filename[0] == '/') &&
72497+ (obj->filename[1] == '\0')) ? 0 : 1;
72498+ bufsize = PAGE_SIZE - namelen - is_not_root;
72499+
72500+ /* single unsigned comparison catches both bufsize > PAGE_SIZE and bufsize == 0 */
72501+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72502+ return 1;
72503+
72504+ preempt_disable();
72505+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72506+ bufsize);
72507+
72508+ bufsize = strlen(path);
72509+
72510+ /* if base is "/", don't append an additional slash */
72511+ if (is_not_root)
72512+ *(path + bufsize) = '/';
72513+ memcpy(path + bufsize + is_not_root, name, namelen);
72514+ *(path + bufsize + namelen + is_not_root) = '\0';
72515+
72516+ tmp = obj->globbed;
72517+ while (tmp) {
72518+ if (!glob_match(tmp->filename, path)) {
72519+ preempt_enable();
72520+ return (tmp->mode & GR_FIND) ? 1 : 0;
72521+ }
72522+ tmp = tmp->next;
72523+ }
72524+ preempt_enable();
72525+ return (obj->mode & GR_FIND) ? 1 : 0;
72526+}
72527+
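+/* Editor's note -- a worked example of the path assembly above, with a
+ * hypothetical directory object "/tmp" (is_not_root == 1) and entry name
+ * "f" (namelen == 1): d_real_path() writes "/tmp" into the per-cpu page,
+ * bufsize becomes strlen("/tmp") == 4, a '/' is stored at offset 4, and
+ * "f" plus the trailing NUL land at offsets 5 and 6, yielding "/tmp/f",
+ * which is then tested against each globbed object via glob_match().
+ */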
72528+void gr_put_exec_file(struct task_struct *task)
72529+{
72530+ struct file *filp;
72531+
72532+ write_lock(&grsec_exec_file_lock);
72533+ filp = task->exec_file;
72534+ task->exec_file = NULL;
72535+ write_unlock(&grsec_exec_file_lock);
72536+
72537+ if (filp)
72538+ fput(filp);
72539+
72540+ return;
72541+}
72542+
72543+
72544+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72545+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72546+#endif
72547+#ifdef CONFIG_SECURITY
72548+EXPORT_SYMBOL_GPL(gr_check_user_change);
72549+EXPORT_SYMBOL_GPL(gr_check_group_change);
72550+#endif
72551+
72552diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72553new file mode 100644
72554index 0000000..18ffbbd
72555--- /dev/null
72556+++ b/grsecurity/gracl_alloc.c
72557@@ -0,0 +1,105 @@
72558+#include <linux/kernel.h>
72559+#include <linux/mm.h>
72560+#include <linux/slab.h>
72561+#include <linux/vmalloc.h>
72562+#include <linux/gracl.h>
72563+#include <linux/grsecurity.h>
72564+
72565+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72566+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72567+
72568+static __inline__ int
72569+alloc_pop(void)
72570+{
72571+ if (current_alloc_state->alloc_stack_next == 1)
72572+ return 0;
72573+
72574+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72575+
72576+ current_alloc_state->alloc_stack_next--;
72577+
72578+ return 1;
72579+}
72580+
72581+static __inline__ int
72582+alloc_push(void *buf)
72583+{
72584+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72585+ return 1;
72586+
72587+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72588+
72589+ current_alloc_state->alloc_stack_next++;
72590+
72591+ return 0;
72592+}
72593+
72594+void *
72595+acl_alloc(unsigned long len)
72596+{
72597+ void *ret = NULL;
72598+
72599+ if (!len || len > PAGE_SIZE)
72600+ goto out;
72601+
72602+ ret = kmalloc(len, GFP_KERNEL);
72603+
72604+ if (ret) {
72605+ if (alloc_push(ret)) {
72606+ kfree(ret);
72607+ ret = NULL;
72608+ }
72609+ }
72610+
72611+out:
72612+ return ret;
72613+}
72614+
72615+void *
72616+acl_alloc_num(unsigned long num, unsigned long len)
72617+{
72618+ if (!len || (num > (PAGE_SIZE / len)))
72619+ return NULL;
72620+
72621+ return acl_alloc(num * len);
72622+}
72623+
72624+void
72625+acl_free_all(void)
72626+{
72627+ if (!current_alloc_state->alloc_stack)
72628+ return;
72629+
72630+ while (alloc_pop()) ;
72631+
72632+ if (current_alloc_state->alloc_stack) {
72633+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72634+ kfree(current_alloc_state->alloc_stack);
72635+ else
72636+ vfree(current_alloc_state->alloc_stack);
72637+ }
72638+
72639+ current_alloc_state->alloc_stack = NULL;
72640+ current_alloc_state->alloc_stack_size = 1;
72641+ current_alloc_state->alloc_stack_next = 1;
72642+
72643+ return;
72644+}
72645+
72646+int
72647+acl_alloc_stack_init(unsigned long size)
72648+{
72649+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72650+ current_alloc_state->alloc_stack =
72651+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72652+ else
72653+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72654+
72655+ current_alloc_state->alloc_stack_size = size;
72656+ current_alloc_state->alloc_stack_next = 1;
72657+
72658+ if (!current_alloc_state->alloc_stack)
72659+ return 0;
72660+ else
72661+ return 1;
72662+}
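+#if 0
+/* Editor's illustrative sketch, not part of the original patch: the
+ * typical lifecycle of this allocator as used by a policy loader.
+ * "struct my_entry" is a hypothetical stand-in for a policy structure.
+ * Every allocation made after acl_alloc_stack_init() is tracked, so a
+ * single acl_free_all() unwinds them all on any failure path.
+ */
+static int example_policy_alloc(void)
+{
+	struct my_entry *e;
+
+	if (!acl_alloc_stack_init(128))		/* room to track 128 pointers */
+		return -ENOMEM;
+
+	e = acl_alloc(sizeof(*e));		/* must be <= PAGE_SIZE */
+	if (e == NULL) {
+		acl_free_all();	/* frees tracked allocations and the stack */
+		return -ENOMEM;
+	}
+
+	/* ... populate e; later failures also just call acl_free_all() ... */
+	return 0;
+}
+#endif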
72663diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72664new file mode 100644
72665index 0000000..1a94c11
72666--- /dev/null
72667+++ b/grsecurity/gracl_cap.c
72668@@ -0,0 +1,127 @@
72669+#include <linux/kernel.h>
72670+#include <linux/module.h>
72671+#include <linux/sched.h>
72672+#include <linux/gracl.h>
72673+#include <linux/grsecurity.h>
72674+#include <linux/grinternal.h>
72675+
72676+extern const char *captab_log[];
72677+extern int captab_log_entries;
72678+
72679+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72680+{
72681+ struct acl_subject_label *curracl;
72682+
72683+ if (!gr_acl_is_enabled())
72684+ return 1;
72685+
72686+ curracl = task->acl;
72687+
72688+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72689+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72690+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72691+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72692+ gr_to_filename(task->exec_file->f_path.dentry,
72693+ task->exec_file->f_path.mnt) : curracl->filename,
72694+ curracl->filename, 0UL,
72695+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72696+ return 1;
72697+ }
72698+
72699+ return 0;
72700+}
72701+
72702+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72703+{
72704+ struct acl_subject_label *curracl;
72705+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72706+ kernel_cap_t cap_audit = __cap_empty_set;
72707+
72708+ if (!gr_acl_is_enabled())
72709+ return 1;
72710+
72711+ curracl = task->acl;
72712+
72713+ cap_drop = curracl->cap_lower;
72714+ cap_mask = curracl->cap_mask;
72715+ cap_audit = curracl->cap_invert_audit;
72716+
72717+ while ((curracl = curracl->parent_subject)) {
72718+ /* if the cap isn't covered by the computed mask yet but is specified
72719+    at this subject level, this level decides the cap: add it to the
72720+    computed mask, and if this level lowers the cap (or inverts its
72721+    audit), propagate that into cap_drop (resp. cap_audit) as well
72722+  */
72723+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72724+ cap_raise(cap_mask, cap);
72725+ if (cap_raised(curracl->cap_lower, cap))
72726+ cap_raise(cap_drop, cap);
72727+ if (cap_raised(curracl->cap_invert_audit, cap))
72728+ cap_raise(cap_audit, cap);
72729+ }
72730+ }
72731+
72732+ if (!cap_raised(cap_drop, cap)) {
72733+ if (cap_raised(cap_audit, cap))
72734+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72735+ return 1;
72736+ }
72737+
72738+ /* in the general case, only learn the capability use if the process
72739+    actually has the capability; the two uses of gr_learn_cap in sys.c
72740+    are an exception to this rule, ensuring that a full-learned policy
72741+    covers any role transition performed by a privileged process
72742+  */
72743+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72744+ return 1;
72745+
72746+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72747+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72748+
72749+ return 0;
72750+}
72751+
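+/* Editor's note -- a worked example of the parent_subject walk above,
+ * assuming a hypothetical two-level nesting: the leaf subject says
+ * nothing about CAP_NET_ADMIN in its cap_mask, but its parent specifies
+ * the cap and lowers it.  The walk then raises CAP_NET_ADMIN in both
+ * cap_mask and cap_drop, so the check is denied (barring learning mode)
+ * even though the leaf subject itself never mentioned the cap: the
+ * nearest level that specifies a capability wins.
+ */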
72752+int
72753+gr_acl_is_capable(const int cap)
72754+{
72755+ return gr_task_acl_is_capable(current, current_cred(), cap);
72756+}
72757+
72758+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72759+{
72760+ struct acl_subject_label *curracl;
72761+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72762+
72763+ if (!gr_acl_is_enabled())
72764+ return 1;
72765+
72766+ curracl = task->acl;
72767+
72768+ cap_drop = curracl->cap_lower;
72769+ cap_mask = curracl->cap_mask;
72770+
72771+ while ((curracl = curracl->parent_subject)) {
72772+ /* if the cap isn't covered by the computed mask yet but is specified
72773+    at this subject level, this level decides the cap: add it to the
72774+    computed mask, and if this level lowers the cap, add it to the
72775+    set of dropped capabilities as well
72776+  */
72777+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72778+ cap_raise(cap_mask, cap);
72779+ if (cap_raised(curracl->cap_lower, cap))
72780+ cap_raise(cap_drop, cap);
72781+ }
72782+ }
72783+
72784+ if (!cap_raised(cap_drop, cap))
72785+ return 1;
72786+
72787+ return 0;
72788+}
72789+
72790+int
72791+gr_acl_is_capable_nolog(const int cap)
72792+{
72793+ return gr_task_acl_is_capable_nolog(current, cap);
72794+}
72795+
72796diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72797new file mode 100644
72798index 0000000..a43dd06
72799--- /dev/null
72800+++ b/grsecurity/gracl_compat.c
72801@@ -0,0 +1,269 @@
72802+#include <linux/kernel.h>
72803+#include <linux/gracl.h>
72804+#include <linux/compat.h>
72805+#include <linux/gracl_compat.h>
72806+
72807+#include <asm/uaccess.h>
72808+
72809+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72810+{
72811+ struct gr_arg_wrapper_compat uwrapcompat;
72812+
72813+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72814+ return -EFAULT;
72815+
72816+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72817+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72818+ return -EINVAL;
72819+
72820+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72821+ uwrap->version = uwrapcompat.version;
72822+ uwrap->size = sizeof(struct gr_arg);
72823+
72824+ return 0;
72825+}
72826+
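+/* Editor's note: every copy_*_compat() helper in this file follows the
+ * pattern established above -- copy_from_user() the packed 32-bit layout,
+ * then widen it field by field: compat_ptr() converts a compat_uptr_t
+ * into a native kernel pointer, while scalar fields are assigned directly
+ * and widened implicitly.
+ */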
72827+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72828+{
72829+ struct gr_arg_compat argcompat;
72830+
72831+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72832+ return -EFAULT;
72833+
72834+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72835+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72836+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72837+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72838+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72839+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72840+
72841+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72842+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72843+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72844+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72845+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72846+ arg->segv_device = argcompat.segv_device;
72847+ arg->segv_inode = argcompat.segv_inode;
72848+ arg->segv_uid = argcompat.segv_uid;
72849+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72850+ arg->mode = argcompat.mode;
72851+
72852+ return 0;
72853+}
72854+
72855+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72856+{
72857+ struct acl_object_label_compat objcompat;
72858+
72859+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72860+ return -EFAULT;
72861+
72862+ obj->filename = compat_ptr(objcompat.filename);
72863+ obj->inode = objcompat.inode;
72864+ obj->device = objcompat.device;
72865+ obj->mode = objcompat.mode;
72866+
72867+ obj->nested = compat_ptr(objcompat.nested);
72868+ obj->globbed = compat_ptr(objcompat.globbed);
72869+
72870+ obj->prev = compat_ptr(objcompat.prev);
72871+ obj->next = compat_ptr(objcompat.next);
72872+
72873+ return 0;
72874+}
72875+
72876+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72877+{
72878+ unsigned int i;
72879+ struct acl_subject_label_compat subjcompat;
72880+
72881+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72882+ return -EFAULT;
72883+
72884+ subj->filename = compat_ptr(subjcompat.filename);
72885+ subj->inode = subjcompat.inode;
72886+ subj->device = subjcompat.device;
72887+ subj->mode = subjcompat.mode;
72888+ subj->cap_mask = subjcompat.cap_mask;
72889+ subj->cap_lower = subjcompat.cap_lower;
72890+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72891+
72892+ for (i = 0; i < GR_NLIMITS; i++) {
72893+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72894+ subj->res[i].rlim_cur = RLIM_INFINITY;
72895+ else
72896+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72897+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72898+ subj->res[i].rlim_max = RLIM_INFINITY;
72899+ else
72900+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72901+ }
72902+ subj->resmask = subjcompat.resmask;
72903+
72904+ subj->user_trans_type = subjcompat.user_trans_type;
72905+ subj->group_trans_type = subjcompat.group_trans_type;
72906+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72907+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72908+ subj->user_trans_num = subjcompat.user_trans_num;
72909+ subj->group_trans_num = subjcompat.group_trans_num;
72910+
72911+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72912+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72913+ subj->ip_type = subjcompat.ip_type;
72914+ subj->ips = compat_ptr(subjcompat.ips);
72915+ subj->ip_num = subjcompat.ip_num;
72916+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72917+
72918+ subj->crashes = subjcompat.crashes;
72919+ subj->expires = subjcompat.expires;
72920+
72921+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72922+ subj->hash = compat_ptr(subjcompat.hash);
72923+ subj->prev = compat_ptr(subjcompat.prev);
72924+ subj->next = compat_ptr(subjcompat.next);
72925+
72926+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72927+ subj->obj_hash_size = subjcompat.obj_hash_size;
72928+ subj->pax_flags = subjcompat.pax_flags;
72929+
72930+ return 0;
72931+}
72932+
72933+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72934+{
72935+ struct acl_role_label_compat rolecompat;
72936+
72937+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72938+ return -EFAULT;
72939+
72940+ role->rolename = compat_ptr(rolecompat.rolename);
72941+ role->uidgid = rolecompat.uidgid;
72942+ role->roletype = rolecompat.roletype;
72943+
72944+ role->auth_attempts = rolecompat.auth_attempts;
72945+ role->expires = rolecompat.expires;
72946+
72947+ role->root_label = compat_ptr(rolecompat.root_label);
72948+ role->hash = compat_ptr(rolecompat.hash);
72949+
72950+ role->prev = compat_ptr(rolecompat.prev);
72951+ role->next = compat_ptr(rolecompat.next);
72952+
72953+ role->transitions = compat_ptr(rolecompat.transitions);
72954+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72955+ role->domain_children = compat_ptr(rolecompat.domain_children);
72956+ role->domain_child_num = rolecompat.domain_child_num;
72957+
72958+ role->umask = rolecompat.umask;
72959+
72960+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72961+ role->subj_hash_size = rolecompat.subj_hash_size;
72962+
72963+ return 0;
72964+}
72965+
72966+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72967+{
72968+ struct role_allowed_ip_compat roleip_compat;
72969+
72970+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72971+ return -EFAULT;
72972+
72973+ roleip->addr = roleip_compat.addr;
72974+ roleip->netmask = roleip_compat.netmask;
72975+
72976+ roleip->prev = compat_ptr(roleip_compat.prev);
72977+ roleip->next = compat_ptr(roleip_compat.next);
72978+
72979+ return 0;
72980+}
72981+
72982+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72983+{
72984+ struct role_transition_compat trans_compat;
72985+
72986+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72987+ return -EFAULT;
72988+
72989+ trans->rolename = compat_ptr(trans_compat.rolename);
72990+
72991+ trans->prev = compat_ptr(trans_compat.prev);
72992+ trans->next = compat_ptr(trans_compat.next);
72993+
72994+ return 0;
72995+
72997+}
72998+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72999+{
73000+ struct gr_hash_struct_compat hash_compat;
73001+
73002+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
73003+ return -EFAULT;
73004+
73005+ hash->table = compat_ptr(hash_compat.table);
73006+ hash->nametable = compat_ptr(hash_compat.nametable);
73007+ hash->first = compat_ptr(hash_compat.first);
73008+
73009+ hash->table_size = hash_compat.table_size;
73010+ hash->used_size = hash_compat.used_size;
73011+
73012+ hash->type = hash_compat.type;
73013+
73014+ return 0;
73015+}
73016+
73017+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
73018+{
73019+ compat_uptr_t ptrcompat;
73020+
73021+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
73022+ return -EFAULT;
73023+
73024+ *(void **)ptr = compat_ptr(ptrcompat);
73025+
73026+ return 0;
73027+}
73028+
73029+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73030+{
73031+ struct acl_ip_label_compat ip_compat;
73032+
73033+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
73034+ return -EFAULT;
73035+
73036+ ip->iface = compat_ptr(ip_compat.iface);
73037+ ip->addr = ip_compat.addr;
73038+ ip->netmask = ip_compat.netmask;
73039+ ip->low = ip_compat.low;
73040+ ip->high = ip_compat.high;
73041+ ip->mode = ip_compat.mode;
73042+ ip->type = ip_compat.type;
73043+
73044+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
73045+
73046+ ip->prev = compat_ptr(ip_compat.prev);
73047+ ip->next = compat_ptr(ip_compat.next);
73048+
73049+ return 0;
73050+}
73051+
73052+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73053+{
73054+ struct sprole_pw_compat pw_compat;
73055+
73056+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
73057+ return -EFAULT;
73058+
73059+ pw->rolename = compat_ptr(pw_compat.rolename);
73060+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
73061+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
73062+
73063+ return 0;
73064+}
73065+
73066+size_t get_gr_arg_wrapper_size_compat(void)
73067+{
73068+ return sizeof(struct gr_arg_wrapper_compat);
73069+}
73070+
73071diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
73072new file mode 100644
73073index 0000000..8ee8e4f
73074--- /dev/null
73075+++ b/grsecurity/gracl_fs.c
73076@@ -0,0 +1,447 @@
73077+#include <linux/kernel.h>
73078+#include <linux/sched.h>
73079+#include <linux/types.h>
73080+#include <linux/fs.h>
73081+#include <linux/file.h>
73082+#include <linux/stat.h>
73083+#include <linux/grsecurity.h>
73084+#include <linux/grinternal.h>
73085+#include <linux/gracl.h>
73086+
73087+umode_t
73088+gr_acl_umask(void)
73089+{
73090+ if (unlikely(!gr_acl_is_enabled()))
73091+ return 0;
73092+
73093+ return current->role->umask;
73094+}
73095+
73096+__u32
73097+gr_acl_handle_hidden_file(const struct dentry * dentry,
73098+ const struct vfsmount * mnt)
73099+{
73100+ __u32 mode;
73101+
73102+ if (unlikely(d_is_negative(dentry)))
73103+ return GR_FIND;
73104+
73105+ mode =
73106+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
73107+
73108+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
73109+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73110+ return mode;
73111+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
73112+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73113+ return 0;
73114+ } else if (unlikely(!(mode & GR_FIND)))
73115+ return 0;
73116+
73117+ return GR_FIND;
73118+}
73119+
73120+__u32
73121+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73122+ int acc_mode)
73123+{
73124+ __u32 reqmode = GR_FIND;
73125+ __u32 mode;
73126+
73127+ if (unlikely(d_is_negative(dentry)))
73128+ return reqmode;
73129+
73130+ if (acc_mode & MAY_APPEND)
73131+ reqmode |= GR_APPEND;
73132+ else if (acc_mode & MAY_WRITE)
73133+ reqmode |= GR_WRITE;
73134+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
73135+ reqmode |= GR_READ;
73136+
73137+ mode =
73138+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73139+ mnt);
73140+
73141+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73142+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73143+ reqmode & GR_READ ? " reading" : "",
73144+ reqmode & GR_WRITE ? " writing" : reqmode &
73145+ GR_APPEND ? " appending" : "");
73146+ return reqmode;
73147+ } else
73148+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73149+ {
73150+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73151+ reqmode & GR_READ ? " reading" : "",
73152+ reqmode & GR_WRITE ? " writing" : reqmode &
73153+ GR_APPEND ? " appending" : "");
73154+ return 0;
73155+ } else if (unlikely((mode & reqmode) != reqmode))
73156+ return 0;
73157+
73158+ return reqmode;
73159+}
73160+
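+/* Editor's note -- example mappings of acc_mode to reqmode in the check
+ * above: O_WRONLY|O_APPEND yields GR_FIND | GR_APPEND (append is tracked
+ * as its own right rather than as GR_WRITE), O_RDWR on a regular file
+ * yields GR_FIND | GR_READ | GR_WRITE, and O_RDONLY on a directory stays
+ * GR_FIND only, since directory reads are covered by the readdir checks.
+ */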
73161+__u32
73162+gr_acl_handle_creat(const struct dentry * dentry,
73163+ const struct dentry * p_dentry,
73164+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73165+ const int imode)
73166+{
73167+ __u32 reqmode = GR_WRITE | GR_CREATE;
73168+ __u32 mode;
73169+
73170+ if (acc_mode & MAY_APPEND)
73171+ reqmode |= GR_APPEND;
73172+ /* if a directory was required or the directory already exists,
73173+    don't count this open as a read */
73174+ if ((acc_mode & MAY_READ) &&
73175+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
73176+ reqmode |= GR_READ;
73177+ if ((open_flags & O_CREAT) &&
73178+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73179+ reqmode |= GR_SETID;
73180+
73181+ mode =
73182+ gr_check_create(dentry, p_dentry, p_mnt,
73183+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73184+
73185+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73186+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73187+ reqmode & GR_READ ? " reading" : "",
73188+ reqmode & GR_WRITE ? " writing" : reqmode &
73189+ GR_APPEND ? " appending" : "");
73190+ return reqmode;
73191+ } else
73192+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73193+ {
73194+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73195+ reqmode & GR_READ ? " reading" : "",
73196+ reqmode & GR_WRITE ? " writing" : reqmode &
73197+ GR_APPEND ? " appending" : "");
73198+ return 0;
73199+ } else if (unlikely((mode & reqmode) != reqmode))
73200+ return 0;
73201+
73202+ return reqmode;
73203+}
73204+
73205+__u32
73206+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
73207+ const int fmode)
73208+{
73209+ __u32 mode, reqmode = GR_FIND;
73210+
73211+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
73212+ reqmode |= GR_EXEC;
73213+ if (fmode & S_IWOTH)
73214+ reqmode |= GR_WRITE;
73215+ if (fmode & S_IROTH)
73216+ reqmode |= GR_READ;
73217+
73218+ mode =
73219+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73220+ mnt);
73221+
73222+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73223+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73224+ reqmode & GR_READ ? " reading" : "",
73225+ reqmode & GR_WRITE ? " writing" : "",
73226+ reqmode & GR_EXEC ? " executing" : "");
73227+ return reqmode;
73228+ } else
73229+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73230+ {
73231+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73232+ reqmode & GR_READ ? " reading" : "",
73233+ reqmode & GR_WRITE ? " writing" : "",
73234+ reqmode & GR_EXEC ? " executing" : "");
73235+ return 0;
73236+ } else if (unlikely((mode & reqmode) != reqmode))
73237+ return 0;
73238+
73239+ return reqmode;
73240+}
73241+
73242+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
73243+{
73244+ __u32 mode;
73245+
73246+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
73247+
73248+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73249+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
73250+ return mode;
73251+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73252+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
73253+ return 0;
73254+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73255+ return 0;
73256+
73257+ return (reqmode);
73258+}
73259+
73260+__u32
73261+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73262+{
73263+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
73264+}
73265+
73266+__u32
73267+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
73268+{
73269+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
73270+}
73271+
73272+__u32
73273+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
73274+{
73275+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
73276+}
73277+
73278+__u32
73279+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
73280+{
73281+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
73282+}
73283+
73284+__u32
73285+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
73286+ umode_t *modeptr)
73287+{
73288+ umode_t mode;
73289+
73290+ *modeptr &= ~gr_acl_umask();
73291+ mode = *modeptr;
73292+
73293+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
73294+ return 1;
73295+
73296+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
73297+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
73298+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
73299+ GR_CHMOD_ACL_MSG);
73300+ } else {
73301+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
73302+ }
73303+}
73304+
73305+__u32
73306+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
73307+{
73308+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
73309+}
73310+
73311+__u32
73312+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
73313+{
73314+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
73315+}
73316+
73317+__u32
73318+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
73319+{
73320+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
73321+}
73322+
73323+__u32
73324+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
73325+{
73326+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
73327+}
73328+
73329+__u32
73330+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
73331+{
73332+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
73333+ GR_UNIXCONNECT_ACL_MSG);
73334+}
73335+
73336+/* hardlinks require at minimum create and link permission;
73337+   any additional privilege required is based on the
73338+   privilege of the file being linked to
73339+*/
73340+__u32
73341+gr_acl_handle_link(const struct dentry * new_dentry,
73342+ const struct dentry * parent_dentry,
73343+ const struct vfsmount * parent_mnt,
73344+ const struct dentry * old_dentry,
73345+ const struct vfsmount * old_mnt, const struct filename *to)
73346+{
73347+ __u32 mode;
73348+ __u32 needmode = GR_CREATE | GR_LINK;
73349+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73350+
73351+ mode =
73352+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73353+ old_mnt);
73354+
73355+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73356+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73357+ return mode;
73358+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73359+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73360+ return 0;
73361+ } else if (unlikely((mode & needmode) != needmode))
73362+ return 0;
73363+
73364+ return 1;
73365+}
73366+
73367+__u32
73368+gr_acl_handle_symlink(const struct dentry * new_dentry,
73369+ const struct dentry * parent_dentry,
73370+ const struct vfsmount * parent_mnt, const struct filename *from)
73371+{
73372+ __u32 needmode = GR_WRITE | GR_CREATE;
73373+ __u32 mode;
73374+
73375+ mode =
73376+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73377+ GR_CREATE | GR_AUDIT_CREATE |
73378+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73379+
73380+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73381+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73382+ return mode;
73383+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73384+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73385+ return 0;
73386+ } else if (unlikely((mode & needmode) != needmode))
73387+ return 0;
73388+
73389+ return (GR_WRITE | GR_CREATE);
73390+}
73391+
73392+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73393+{
73394+ __u32 mode;
73395+
73396+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73397+
73398+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73399+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73400+ return mode;
73401+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73402+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73403+ return 0;
73404+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73405+ return 0;
73406+
73407+ return (reqmode);
73408+}
73409+
73410+__u32
73411+gr_acl_handle_mknod(const struct dentry * new_dentry,
73412+ const struct dentry * parent_dentry,
73413+ const struct vfsmount * parent_mnt,
73414+ const int mode)
73415+{
73416+ __u32 reqmode = GR_WRITE | GR_CREATE;
73417+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73418+ reqmode |= GR_SETID;
73419+
73420+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73421+ reqmode, GR_MKNOD_ACL_MSG);
73422+}
73423+
73424+__u32
73425+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73426+ const struct dentry *parent_dentry,
73427+ const struct vfsmount *parent_mnt)
73428+{
73429+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73430+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73431+}
73432+
73433+#define RENAME_CHECK_SUCCESS(old, new) \
73434+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73435+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73436+
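+/* Editor's note: RENAME_CHECK_SUCCESS() requires that *both* lookups
+ * granted both GR_READ and GR_WRITE -- e.g. comp1 == (GR_WRITE|GR_CREATE)
+ * alone fails because GR_READ is missing.  The audit and suppress bits
+ * ride along in comp1/comp2 and are examined separately below.
+ */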
73437+int
73438+gr_acl_handle_rename(struct dentry *new_dentry,
73439+ struct dentry *parent_dentry,
73440+ const struct vfsmount *parent_mnt,
73441+ struct dentry *old_dentry,
73442+ struct inode *old_parent_inode,
73443+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73444+{
73445+ __u32 comp1, comp2;
73446+ int error = 0;
73447+
73448+ if (unlikely(!gr_acl_is_enabled()))
73449+ return 0;
73450+
73451+ if (flags & RENAME_EXCHANGE) {
73452+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73453+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73454+ GR_SUPPRESS, parent_mnt);
73455+ comp2 =
73456+ gr_search_file(old_dentry,
73457+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73458+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73459+ } else if (d_is_negative(new_dentry)) {
73460+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73461+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73462+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73463+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73464+ GR_DELETE | GR_AUDIT_DELETE |
73465+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73466+ GR_SUPPRESS, old_mnt);
73467+ } else {
73468+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73469+ GR_CREATE | GR_DELETE |
73470+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73471+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73472+ GR_SUPPRESS, parent_mnt);
73473+ comp2 =
73474+ gr_search_file(old_dentry,
73475+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73476+ GR_DELETE | GR_AUDIT_DELETE |
73477+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73478+ }
73479+
73480+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73481+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73482+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73483+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73484+ && !(comp2 & GR_SUPPRESS)) {
73485+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73486+ error = -EACCES;
73487+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73488+ error = -EACCES;
73489+
73490+ return error;
73491+}
73492+
73493+void
73494+gr_acl_handle_exit(void)
73495+{
73496+ u16 id;
73497+ char *rolename;
73498+
73499+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73500+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73501+ id = current->acl_role_id;
73502+ rolename = current->role->rolename;
73503+ gr_set_acls(1);
73504+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73505+ }
73506+
73507+ gr_put_exec_file(current);
73508+ return;
73509+}
73510+
73511+int
73512+gr_acl_handle_procpidmem(const struct task_struct *task)
73513+{
73514+ if (unlikely(!gr_acl_is_enabled()))
73515+ return 0;
73516+
73517+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
73518+ !(current->acl->mode & GR_POVERRIDE) &&
73519+ !(current->role->roletype & GR_ROLE_GOD))
73520+ return -EACCES;
73521+
73522+ return 0;
73523+}
73524diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73525new file mode 100644
73526index 0000000..f056b81
73527--- /dev/null
73528+++ b/grsecurity/gracl_ip.c
73529@@ -0,0 +1,386 @@
73530+#include <linux/kernel.h>
73531+#include <asm/uaccess.h>
73532+#include <asm/errno.h>
73533+#include <net/sock.h>
73534+#include <linux/file.h>
73535+#include <linux/fs.h>
73536+#include <linux/net.h>
73537+#include <linux/in.h>
73538+#include <linux/skbuff.h>
73539+#include <linux/ip.h>
73540+#include <linux/udp.h>
73541+#include <linux/types.h>
73542+#include <linux/sched.h>
73543+#include <linux/netdevice.h>
73544+#include <linux/inetdevice.h>
73545+#include <linux/gracl.h>
73546+#include <linux/grsecurity.h>
73547+#include <linux/grinternal.h>
73548+
73549+#define GR_BIND 0x01
73550+#define GR_CONNECT 0x02
73551+#define GR_INVERT 0x04
73552+#define GR_BINDOVERRIDE 0x08
73553+#define GR_CONNECTOVERRIDE 0x10
73554+#define GR_SOCK_FAMILY 0x20
73555+
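+/* Editor's note: these are the per-rule mode bits carried by socket ACL
+ * entries and tested below.  The subject side stores allowed families
+ * and protocols as packed bitmaps, e.g.
+ *     curr->sock_families[domain / 32] & (1U << (domain % 32))
+ * so AF_INET (domain == 2) is word 0, bit 2; ip_proto[] uses the same
+ * layout indexed by protocol number.
+ */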
73556+static const char * gr_protocols[IPPROTO_MAX] = {
73557+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73558+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73559+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73560+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73561+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73562+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73563+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73564+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73565+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73566+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73567+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73568+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73569+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73570+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73571+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73572+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73573+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
73574+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73575+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73576+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73577+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73578+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73579+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73580+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73581+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73582+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73583+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73584+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73585+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73586+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73587+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73588+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73589+ };
73590+
73591+static const char * gr_socktypes[SOCK_MAX] = {
73592+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73593+ "unknown:7", "unknown:8", "unknown:9", "packet"
73594+ };
73595+
73596+static const char * gr_sockfamilies[AF_MAX+1] = {
73597+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73598+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73599+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
73600+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
73601+ };
73602+
73603+const char *
73604+gr_proto_to_name(unsigned char proto)
73605+{
73606+ return gr_protocols[proto];
73607+}
73608+
73609+const char *
73610+gr_socktype_to_name(unsigned char type)
73611+{
73612+ return gr_socktypes[type];
73613+}
73614+
73615+const char *
73616+gr_sockfamily_to_name(unsigned char family)
73617+{
73618+ return gr_sockfamilies[family];
73619+}
73620+
73621+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73622+
73623+int
73624+gr_search_socket(const int domain, const int type, const int protocol)
73625+{
73626+ struct acl_subject_label *curr;
73627+ const struct cred *cred = current_cred();
73628+
73629+ if (unlikely(!gr_acl_is_enabled()))
73630+ goto exit;
73631+
73632+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73633+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73634+ goto exit; // let the kernel handle it
73635+
73636+ curr = current->acl;
73637+
73638+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73639+ /* the family is allowed; if this is PF_INET, allow it only if
73640+ the extra sock type/protocol checks pass */
73641+ if (domain == PF_INET)
73642+ goto inet_check;
73643+ goto exit;
73644+ } else {
73645+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73646+ __u32 fakeip = 0;
73647+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73648+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73649+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73650+ gr_to_filename(current->exec_file->f_path.dentry,
73651+ current->exec_file->f_path.mnt) :
73652+ curr->filename, curr->filename,
73653+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73654+ &current->signal->saved_ip);
73655+ goto exit;
73656+ }
73657+ goto exit_fail;
73658+ }
73659+
73660+inet_check:
73661+ /* the rest of this checking is for IPv4 only */
73662+ if (!curr->ips)
73663+ goto exit;
73664+
73665+ if ((curr->ip_type & (1U << type)) &&
73666+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73667+ goto exit;
73668+
73669+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73670+ /* we don't place acls on raw sockets, and sometimes
73671+ dgram/ip sockets are opened for ioctl and not
73672+ bind/connect, so we'll fake a bind learn log */
73673+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73674+ __u32 fakeip = 0;
73675+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73676+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73677+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73678+ gr_to_filename(current->exec_file->f_path.dentry,
73679+ current->exec_file->f_path.mnt) :
73680+ curr->filename, curr->filename,
73681+ &fakeip, 0, type,
73682+ protocol, GR_CONNECT, &current->signal->saved_ip);
73683+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73684+ __u32 fakeip = 0;
73685+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73686+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73687+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73688+ gr_to_filename(current->exec_file->f_path.dentry,
73689+ current->exec_file->f_path.mnt) :
73690+ curr->filename, curr->filename,
73691+ &fakeip, 0, type,
73692+ protocol, GR_BIND, &current->signal->saved_ip);
73693+ }
73694+ /* we'll log when they use connect or bind */
73695+ goto exit;
73696+ }
73697+
73698+exit_fail:
73699+ if (domain == PF_INET)
73700+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73701+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73702+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73703+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73704+ gr_socktype_to_name(type), protocol);
73705+
73706+ return 0;
73707+exit:
73708+ return 1;
73709+}
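/* The family and protocol tests above are plain bitset membership checks.
   A minimal standalone sketch of the same idiom, assuming a 256-bit set
   packed into eight 32-bit words (mirroring sock_families/ip_proto): */

#include <stdio.h>
#include <stdint.h>

static inline void set_bit32(uint32_t *set, unsigned int n)
{
	set[n / 32] |= 1U << (n % 32);
}

static inline int test_bit32(const uint32_t *set, unsigned int n)
{
	return !!(set[n / 32] & (1U << (n % 32)));
}

int main(void)
{
	uint32_t allowed_protos[8] = { 0 };

	set_bit32(allowed_protos, 6);	/* IPPROTO_TCP */
	printf("tcp allowed: %d\n", test_bit32(allowed_protos, 6));	/* 1 */
	printf("udp allowed: %d\n", test_bit32(allowed_protos, 17));	/* 0 */
	return 0;
}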
73710+
73711+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73712+{
73713+ if ((ip->mode & mode) &&
73714+ (ip_port >= ip->low) &&
73715+ (ip_port <= ip->high) &&
73716+ ((ntohl(ip_addr) & our_netmask) ==
73717+ (ntohl(our_addr) & our_netmask))
73718+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73719+ && (ip->type & (1U << type))) {
73720+ if (ip->mode & GR_INVERT)
73721+ return 2; // specifically denied
73722+ else
73723+ return 1; // allowed
73724+ }
73725+
73726+ return 0; // not specifically allowed, may continue parsing
73727+}
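/* A minimal userspace model of check_ip_policy() above, assuming
   simplified stand-ins for struct acl_ip_label and the GR_* mode bits;
   it mirrors the port-range, masked-address and proto/type bitmap tests
   and the GR_INVERT "specifically denied" verdict: */

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#define MODE_CONNECT 0x1	/* stand-in for GR_CONNECT */
#define MODE_INVERT  0x2	/* stand-in for GR_INVERT */

struct ip_rule {
	uint32_t addr;		/* rule address, network order */
	uint32_t netmask;	/* host order, like ip->netmask */
	uint16_t low, high;	/* allowed port range */
	uint32_t proto[8];	/* 256-bit protocol bitmap */
	uint32_t type;		/* socket-type bitmap */
	int mode;
};

/* returns 0 = no match, 1 = allowed, 2 = specifically denied */
static int rule_check(const struct ip_rule *r, uint32_t ip_addr,
		      uint16_t port, uint8_t proto, int type, int mode)
{
	if (!(r->mode & mode) || port < r->low || port > r->high)
		return 0;
	if ((ntohl(ip_addr) & r->netmask) != (ntohl(r->addr) & r->netmask))
		return 0;
	if (!(r->proto[proto / 32] & (1U << (proto % 32))) ||
	    !(r->type & (1U << type)))
		return 0;
	return (r->mode & MODE_INVERT) ? 2 : 1;
}

int main(void)
{
	struct ip_rule r = { .netmask = 0xffffff00, .low = 80, .high = 80,
			     .mode = MODE_CONNECT, .type = 1U << 1 /* stream */ };
	uint32_t dst;

	inet_pton(AF_INET, "192.168.1.0", &r.addr);
	r.proto[6 / 32] |= 1U << (6 % 32);	/* allow TCP (6) */

	inet_pton(AF_INET, "192.168.1.42", &dst);
	printf("verdict=%d\n", rule_check(&r, dst, 80, 6, 1, MODE_CONNECT));	/* 1 */
	return 0;
}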
73728+
73729+static int
73730+gr_search_connectbind(const int full_mode, struct sock *sk,
73731+ struct sockaddr_in *addr, const int type)
73732+{
73733+ char iface[IFNAMSIZ] = {0};
73734+ struct acl_subject_label *curr;
73735+ struct acl_ip_label *ip;
73736+ struct inet_sock *isk;
73737+ struct net_device *dev;
73738+ struct in_device *idev;
73739+ unsigned long i;
73740+ int ret;
73741+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73742+ __u32 ip_addr = 0;
73743+ __u32 our_addr;
73744+ __u32 our_netmask;
73745+ char *p;
73746+ __u16 ip_port = 0;
73747+ const struct cred *cred = current_cred();
73748+
73749+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73750+ return 0;
73751+
73752+ curr = current->acl;
73753+ isk = inet_sk(sk);
73754+
73755+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
73756+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73757+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73758+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73759+ struct sockaddr_in saddr;
73760+ int err;
73761+
73762+ saddr.sin_family = AF_INET;
73763+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73764+ saddr.sin_port = isk->inet_sport;
73765+
73766+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73767+ if (err)
73768+ return err;
73769+
73770+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73771+ if (err)
73772+ return err;
73773+ }
73774+
73775+ if (!curr->ips)
73776+ return 0;
73777+
73778+ ip_addr = addr->sin_addr.s_addr;
73779+ ip_port = ntohs(addr->sin_port);
73780+
73781+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73782+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73783+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73784+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73785+ gr_to_filename(current->exec_file->f_path.dentry,
73786+ current->exec_file->f_path.mnt) :
73787+ curr->filename, curr->filename,
73788+ &ip_addr, ip_port, type,
73789+ sk->sk_protocol, mode, &current->signal->saved_ip);
73790+ return 0;
73791+ }
73792+
73793+ for (i = 0; i < curr->ip_num; i++) {
73794+ ip = *(curr->ips + i);
73795+ if (ip->iface != NULL) {
73796+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73797+ p = strchr(iface, ':');
73798+ if (p != NULL)
73799+ *p = '\0';
73800+ dev = dev_get_by_name(sock_net(sk), iface);
73801+ if (dev == NULL)
73802+ continue;
73803+ idev = in_dev_get(dev);
73804+ if (idev == NULL) {
73805+ dev_put(dev);
73806+ continue;
73807+ }
73808+ rcu_read_lock();
73809+ for_ifa(idev) {
73810+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73811+ our_addr = ifa->ifa_address;
73812+ our_netmask = 0xffffffff;
73813+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73814+ if (ret == 1) {
73815+ rcu_read_unlock();
73816+ in_dev_put(idev);
73817+ dev_put(dev);
73818+ return 0;
73819+ } else if (ret == 2) {
73820+ rcu_read_unlock();
73821+ in_dev_put(idev);
73822+ dev_put(dev);
73823+ goto denied;
73824+ }
73825+ }
73826+ } endfor_ifa(idev);
73827+ rcu_read_unlock();
73828+ in_dev_put(idev);
73829+ dev_put(dev);
73830+ } else {
73831+ our_addr = ip->addr;
73832+ our_netmask = ip->netmask;
73833+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73834+ if (ret == 1)
73835+ return 0;
73836+ else if (ret == 2)
73837+ goto denied;
73838+ }
73839+ }
73840+
73841+denied:
73842+ if (mode == GR_BIND)
73843+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73844+ else if (mode == GR_CONNECT)
73845+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73846+
73847+ return -EACCES;
73848+}
73849+
73850+int
73851+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73852+{
73853+ /* always allow disconnection of dgram sockets with connect */
73854+ if (addr->sin_family == AF_UNSPEC)
73855+ return 0;
73856+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73857+}
73858+
73859+int
73860+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73861+{
73862+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73863+}
73864+
73865+int gr_search_listen(struct socket *sock)
73866+{
73867+ struct sock *sk = sock->sk;
73868+ struct sockaddr_in addr;
73869+
73870+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73871+ addr.sin_port = inet_sk(sk)->inet_sport;
73872+
73873+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73874+}
73875+
73876+int gr_search_accept(struct socket *sock)
73877+{
73878+ struct sock *sk = sock->sk;
73879+ struct sockaddr_in addr;
73880+
73881+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73882+ addr.sin_port = inet_sk(sk)->inet_sport;
73883+
73884+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73885+}
73886+
73887+int
73888+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73889+{
73890+ if (addr)
73891+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73892+ else {
73893+ struct sockaddr_in sin;
73894+ const struct inet_sock *inet = inet_sk(sk);
73895+
73896+ sin.sin_addr.s_addr = inet->inet_daddr;
73897+ sin.sin_port = inet->inet_dport;
73898+
73899+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73900+ }
73901+}
73902+
73903+int
73904+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73905+{
73906+ struct sockaddr_in sin;
73907+
73908+ if (unlikely(skb->len < sizeof (struct udphdr)))
73909+ return 0; // skip this packet
73910+
73911+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73912+ sin.sin_port = udp_hdr(skb)->source;
73913+
73914+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73915+}
73916diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73917new file mode 100644
73918index 0000000..25f54ef
73919--- /dev/null
73920+++ b/grsecurity/gracl_learn.c
73921@@ -0,0 +1,207 @@
73922+#include <linux/kernel.h>
73923+#include <linux/mm.h>
73924+#include <linux/sched.h>
73925+#include <linux/poll.h>
73926+#include <linux/string.h>
73927+#include <linux/file.h>
73928+#include <linux/types.h>
73929+#include <linux/vmalloc.h>
73930+#include <linux/grinternal.h>
73931+
73932+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73933+ size_t count, loff_t *ppos);
73934+extern int gr_acl_is_enabled(void);
73935+
73936+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73937+static int gr_learn_attached;
73938+
73939+/* use a 512k buffer */
73940+#define LEARN_BUFFER_SIZE (512 * 1024)
73941+
73942+static DEFINE_SPINLOCK(gr_learn_lock);
73943+static DEFINE_MUTEX(gr_learn_user_mutex);
73944+
73945+/* we need to maintain two buffers, so that the kernel context of grlearn
73946+ uses a mutex around the userspace copying, while the other kernel contexts
73947+ use a spinlock when copying into the buffer, since they cannot sleep
73948+*/
73949+static char *learn_buffer;
73950+static char *learn_buffer_user;
73951+static int learn_buffer_len;
73952+static int learn_buffer_user_len;
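/* A userspace model of the two-buffer scheme described above, assuming
   pthread locks stand in for the kernel mutex and spinlock: producers
   that must not sleep append under the cheap lock, while the single
   reader performs its slow copy-out under the sleeping lock only: */

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define BUF_SIZE (512 * 1024)

static pthread_spinlock_t buf_lock;	/* analogue of gr_learn_lock */
static pthread_mutex_t user_lock = PTHREAD_MUTEX_INITIALIZER;
static char buf[BUF_SIZE], user_buf[BUF_SIZE];
static int buf_len, user_len;

static void append_entry(const char *s)	/* non-sleeping producer side */
{
	size_t n = strlen(s) + 1;

	pthread_spin_lock(&buf_lock);
	if (buf_len + n <= BUF_SIZE) {
		memcpy(buf + buf_len, s, n);
		buf_len += n;
	}
	pthread_spin_unlock(&buf_lock);
}

static int drain_entries(void)		/* sleeping consumer side */
{
	pthread_mutex_lock(&user_lock);
	pthread_spin_lock(&buf_lock);
	memcpy(user_buf, buf, buf_len);	/* quick copy under the spinlock */
	user_len = buf_len;
	buf_len = 0;
	pthread_spin_unlock(&buf_lock);
	fwrite(user_buf, 1, user_len, stdout);	/* slow part, mutex only */
	pthread_mutex_unlock(&user_lock);
	return user_len;
}

int main(void)
{
	pthread_spin_init(&buf_lock, 0);
	append_entry("role=admin subject=/usr/bin/foo\n");
	return drain_entries() > 0 ? 0 : 1;
}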
73953+
73954+static ssize_t
73955+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73956+{
73957+ DECLARE_WAITQUEUE(wait, current);
73958+ ssize_t retval = 0;
73959+
73960+ add_wait_queue(&learn_wait, &wait);
73961+ set_current_state(TASK_INTERRUPTIBLE);
73962+ do {
73963+ mutex_lock(&gr_learn_user_mutex);
73964+ spin_lock(&gr_learn_lock);
73965+ if (learn_buffer_len)
73966+ break;
73967+ spin_unlock(&gr_learn_lock);
73968+ mutex_unlock(&gr_learn_user_mutex);
73969+ if (file->f_flags & O_NONBLOCK) {
73970+ retval = -EAGAIN;
73971+ goto out;
73972+ }
73973+ if (signal_pending(current)) {
73974+ retval = -ERESTARTSYS;
73975+ goto out;
73976+ }
73977+
73978+ schedule();
73979+ } while (1);
73980+
73981+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73982+ learn_buffer_user_len = learn_buffer_len;
73983+ retval = learn_buffer_len;
73984+ learn_buffer_len = 0;
73985+
73986+ spin_unlock(&gr_learn_lock);
73987+
73988+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73989+ retval = -EFAULT;
73990+
73991+ mutex_unlock(&gr_learn_user_mutex);
73992+out:
73993+ set_current_state(TASK_RUNNING);
73994+ remove_wait_queue(&learn_wait, &wait);
73995+ return retval;
73996+}
73997+
73998+static unsigned int
73999+poll_learn(struct file * file, poll_table * wait)
74000+{
74001+ poll_wait(file, &learn_wait, wait);
74002+
74003+ if (learn_buffer_len)
74004+ return (POLLIN | POLLRDNORM);
74005+
74006+ return 0;
74007+}
74008+
74009+void
74010+gr_clear_learn_entries(void)
74011+{
74012+ char *tmp;
74013+
74014+ mutex_lock(&gr_learn_user_mutex);
74015+ spin_lock(&gr_learn_lock);
74016+ tmp = learn_buffer;
74017+ learn_buffer = NULL;
74018+ spin_unlock(&gr_learn_lock);
74019+ if (tmp)
74020+ vfree(tmp);
74021+ if (learn_buffer_user != NULL) {
74022+ vfree(learn_buffer_user);
74023+ learn_buffer_user = NULL;
74024+ }
74025+ learn_buffer_len = 0;
74026+ mutex_unlock(&gr_learn_user_mutex);
74027+
74028+ return;
74029+}
74030+
74031+void
74032+gr_add_learn_entry(const char *fmt, ...)
74033+{
74034+ va_list args;
74035+ unsigned int len;
74036+
74037+ if (!gr_learn_attached)
74038+ return;
74039+
74040+ spin_lock(&gr_learn_lock);
74041+
74042+ /* leave a gap at the end so we know when it's "full" but don't have to
74043+ compute the exact length of the string we're trying to append
74044+ */
74045+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
74046+ spin_unlock(&gr_learn_lock);
74047+ wake_up_interruptible(&learn_wait);
74048+ return;
74049+ }
74050+ if (learn_buffer == NULL) {
74051+ spin_unlock(&gr_learn_lock);
74052+ return;
74053+ }
74054+
74055+ va_start(args, fmt);
74056+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
74057+ va_end(args);
74058+
74059+ learn_buffer_len += len + 1;
74060+
74061+ spin_unlock(&gr_learn_lock);
74062+ wake_up_interruptible(&learn_wait);
74063+
74064+ return;
74065+}
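/* A sketch of the high-watermark trick used by gr_add_learn_entry(),
   assuming a plain static buffer: by refusing appends once len crosses
   SIZE - SLACK, any single vsnprintf() result of up to SLACK bytes is
   known to fit, so the string never needs to be measured in advance: */

#include <stdarg.h>
#include <stdio.h>

#define SIZE  (512 * 1024)
#define SLACK 16384

static char buffer[SIZE];
static int len;

static int append(const char *fmt, ...)
{
	va_list ap;
	int n;

	if (len > SIZE - SLACK)		/* treat the buffer as "full" early */
		return -1;

	va_start(ap, fmt);
	n = vsnprintf(buffer + len, SIZE - len, fmt, ap);
	va_end(ap);

	len += n + 1;			/* keep the NUL, as the kernel code does */
	return n;
}

int main(void)
{
	append("%s:%d", "learned-entry", 42);
	printf("buffer holds %d bytes\n", len);
	return 0;
}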
74066+
74067+static int
74068+open_learn(struct inode *inode, struct file *file)
74069+{
74070+ if (file->f_mode & FMODE_READ && gr_learn_attached)
74071+ return -EBUSY;
74072+ if (file->f_mode & FMODE_READ) {
74073+ int retval = 0;
74074+ mutex_lock(&gr_learn_user_mutex);
74075+ if (learn_buffer == NULL)
74076+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
74077+ if (learn_buffer_user == NULL)
74078+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
74079+ if (learn_buffer == NULL) {
74080+ retval = -ENOMEM;
74081+ goto out_error;
74082+ }
74083+ if (learn_buffer_user == NULL) {
74084+ retval = -ENOMEM;
74085+ goto out_error;
74086+ }
74087+ learn_buffer_len = 0;
74088+ learn_buffer_user_len = 0;
74089+ gr_learn_attached = 1;
74090+out_error:
74091+ mutex_unlock(&gr_learn_user_mutex);
74092+ return retval;
74093+ }
74094+ return 0;
74095+}
74096+
74097+static int
74098+close_learn(struct inode *inode, struct file *file)
74099+{
74100+ if (file->f_mode & FMODE_READ) {
74101+ char *tmp = NULL;
74102+ mutex_lock(&gr_learn_user_mutex);
74103+ spin_lock(&gr_learn_lock);
74104+ tmp = learn_buffer;
74105+ learn_buffer = NULL;
74106+ spin_unlock(&gr_learn_lock);
74107+ if (tmp)
74108+ vfree(tmp);
74109+ if (learn_buffer_user != NULL) {
74110+ vfree(learn_buffer_user);
74111+ learn_buffer_user = NULL;
74112+ }
74113+ learn_buffer_len = 0;
74114+ learn_buffer_user_len = 0;
74115+ gr_learn_attached = 0;
74116+ mutex_unlock(&gr_learn_user_mutex);
74117+ }
74118+
74119+ return 0;
74120+}
74121+
74122+const struct file_operations grsec_fops = {
74123+ .read = read_learn,
74124+ .write = write_grsec_handler,
74125+ .open = open_learn,
74126+ .release = close_learn,
74127+ .poll = poll_learn,
74128+};
74129diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
74130new file mode 100644
74131index 0000000..fd26052
74132--- /dev/null
74133+++ b/grsecurity/gracl_policy.c
74134@@ -0,0 +1,1781 @@
74135+#include <linux/kernel.h>
74136+#include <linux/module.h>
74137+#include <linux/sched.h>
74138+#include <linux/mm.h>
74139+#include <linux/file.h>
74140+#include <linux/fs.h>
74141+#include <linux/namei.h>
74142+#include <linux/mount.h>
74143+#include <linux/tty.h>
74144+#include <linux/proc_fs.h>
74145+#include <linux/lglock.h>
74146+#include <linux/slab.h>
74147+#include <linux/vmalloc.h>
74148+#include <linux/types.h>
74149+#include <linux/sysctl.h>
74150+#include <linux/netdevice.h>
74151+#include <linux/ptrace.h>
74152+#include <linux/gracl.h>
74153+#include <linux/gralloc.h>
74154+#include <linux/security.h>
74155+#include <linux/grinternal.h>
74156+#include <linux/pid_namespace.h>
74157+#include <linux/stop_machine.h>
74158+#include <linux/fdtable.h>
74159+#include <linux/percpu.h>
74160+#include <linux/lglock.h>
74161+#include <linux/hugetlb.h>
74162+#include <linux/posix-timers.h>
74163+#include "../fs/mount.h"
74164+
74165+#include <asm/uaccess.h>
74166+#include <asm/errno.h>
74167+#include <asm/mman.h>
74168+
74169+extern struct gr_policy_state *polstate;
74170+
74171+#define FOR_EACH_ROLE_START(role) \
74172+ role = polstate->role_list; \
74173+ while (role) {
74174+
74175+#define FOR_EACH_ROLE_END(role) \
74176+ role = role->prev; \
74177+ }
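/* For reference, a use of these macros such as (assuming a hypothetical
   handle() callback):

	FOR_EACH_ROLE_START(role)
		if (role->roletype & GR_ROLE_SPECIAL)
			handle(role);
	FOR_EACH_ROLE_END(role)

   expands to a walk of the singly linked role_list, newest role first,
   following the ->prev links:

	role = polstate->role_list;
	while (role) {
		if (role->roletype & GR_ROLE_SPECIAL)
			handle(role);
		role = role->prev;
	}
*/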
74178+
74179+struct path gr_real_root;
74180+
74181+extern struct gr_alloc_state *current_alloc_state;
74182+
74183+u16 acl_sp_role_value;
74184+
74185+static DEFINE_MUTEX(gr_dev_mutex);
74186+
74187+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74188+extern void gr_clear_learn_entries(void);
74189+
74190+struct gr_arg *gr_usermode __read_only;
74191+unsigned char *gr_system_salt __read_only;
74192+unsigned char *gr_system_sum __read_only;
74193+
74194+static unsigned int gr_auth_attempts = 0;
74195+static unsigned long gr_auth_expires = 0UL;
74196+
74197+struct acl_object_label *fakefs_obj_rw;
74198+struct acl_object_label *fakefs_obj_rwx;
74199+
74200+extern int gr_init_uidset(void);
74201+extern void gr_free_uidset(void);
74202+extern void gr_remove_uid(uid_t uid);
74203+extern int gr_find_uid(uid_t uid);
74204+
74205+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
74206+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
74207+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
74208+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
74209+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
74210+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
74211+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
74212+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
74213+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
74214+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
74215+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
74216+extern void assign_special_role(const char *rolename);
74217+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
74218+extern int gr_rbac_disable(void *unused);
74219+extern void gr_enable_rbac_system(void);
74220+
74221+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
74222+{
74223+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
74224+ return -EFAULT;
74225+
74226+ return 0;
74227+}
74228+
74229+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74230+{
74231+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
74232+ return -EFAULT;
74233+
74234+ return 0;
74235+}
74236+
74237+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
74238+{
74239+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
74240+ return -EFAULT;
74241+
74242+ return 0;
74243+}
74244+
74245+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
74246+{
74247+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
74248+ return -EFAULT;
74249+
74250+ return 0;
74251+}
74252+
74253+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74254+{
74255+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
74256+ return -EFAULT;
74257+
74258+ return 0;
74259+}
74260+
74261+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74262+{
74263+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
74264+ return -EFAULT;
74265+
74266+ return 0;
74267+}
74268+
74269+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
74270+{
74271+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
74272+ return -EFAULT;
74273+
74274+ return 0;
74275+}
74276+
74277+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
74278+{
74279+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
74280+ return -EFAULT;
74281+
74282+ return 0;
74283+}
74284+
74285+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
74286+{
74287+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
74288+ return -EFAULT;
74289+
74290+ return 0;
74291+}
74292+
74293+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
74294+{
74295+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
74296+ return -EFAULT;
74297+
74298+ if ((uwrap->version != GRSECURITY_VERSION) ||
74299+ (uwrap->size != sizeof(struct gr_arg)))
74300+ return -EINVAL;
74301+
74302+ return 0;
74303+}
74304+
74305+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
74306+{
74307+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
74308+ return -EFAULT;
74309+
74310+ return 0;
74311+}
74312+
74313+static size_t get_gr_arg_wrapper_size_normal(void)
74314+{
74315+ return sizeof(struct gr_arg_wrapper);
74316+}
74317+
74318+#ifdef CONFIG_COMPAT
74319+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
74320+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
74321+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
74322+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
74323+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
74324+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
74325+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
74326+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
74327+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
74328+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
74329+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
74330+extern size_t get_gr_arg_wrapper_size_compat(void);
74331+
74332+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
74333+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
74334+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
74335+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
74336+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74337+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74338+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74339+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74340+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74341+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74342+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74343+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74344+
74345+#else
74346+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74347+#define copy_gr_arg copy_gr_arg_normal
74348+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74349+#define copy_acl_object_label copy_acl_object_label_normal
74350+#define copy_acl_subject_label copy_acl_subject_label_normal
74351+#define copy_acl_role_label copy_acl_role_label_normal
74352+#define copy_acl_ip_label copy_acl_ip_label_normal
74353+#define copy_pointer_from_array copy_pointer_from_array_normal
74354+#define copy_sprole_pw copy_sprole_pw_normal
74355+#define copy_role_transition copy_role_transition_normal
74356+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74357+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74358+#endif
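/* A compact userspace sketch of the dispatch pattern above, assuming a
   hypothetical struct entry and copy routines: when a compat (32-bit)
   caller is possible, the copy_* names are function pointers selected
   once at load time, and otherwise the #defines make them compile away
   to direct calls to the *_normal variants: */

#include <stdio.h>
#include <string.h>

struct entry { long value; };

static int copy_entry_normal(struct entry *dst, const void *src)
{
	memcpy(&dst->value, src, sizeof(long));	/* native layout */
	return 0;
}

static int copy_entry_compat(struct entry *dst, const void *src)
{
	int v32;

	memcpy(&v32, src, sizeof(v32));		/* narrower compat layout */
	dst->value = v32;
	return 0;
}

static int (*copy_entry)(struct entry *, const void *) = copy_entry_normal;

int main(void)
{
	struct entry e;
	int compat_raw = 42;

	copy_entry = copy_entry_compat;		/* flipped once at "load" */
	copy_entry(&e, &compat_raw);
	printf("%ld\n", e.value);		/* 42 */
	return 0;
}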
74359+
74360+static struct acl_subject_label *
74361+lookup_subject_map(const struct acl_subject_label *userp)
74362+{
74363+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74364+ struct subject_map *match;
74365+
74366+ match = polstate->subj_map_set.s_hash[index];
74367+
74368+ while (match && match->user != userp)
74369+ match = match->next;
74370+
74371+ if (match != NULL)
74372+ return match->kernel;
74373+ else
74374+ return NULL;
74375+}
74376+
74377+static void
74378+insert_subj_map_entry(struct subject_map *subjmap)
74379+{
74380+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74381+ struct subject_map **curr;
74382+
74383+ subjmap->prev = NULL;
74384+
74385+ curr = &polstate->subj_map_set.s_hash[index];
74386+ if (*curr != NULL)
74387+ (*curr)->prev = subjmap;
74388+
74389+ subjmap->next = *curr;
74390+ *curr = subjmap;
74391+
74392+ return;
74393+}
74394+
74395+static void
74396+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74397+{
74398+ unsigned int index =
74399+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74400+ struct acl_role_label **curr;
74401+ struct acl_role_label *tmp, *tmp2;
74402+
74403+ curr = &polstate->acl_role_set.r_hash[index];
74404+
74405+ /* simple case, slot is empty, just set it to our role */
74406+ if (*curr == NULL) {
74407+ *curr = role;
74408+ } else {
74409+ /* example:
74410+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74411+ 2 -> 3
74412+ */
74413+ /* first check to see if we can already be reached via this slot */
74414+ tmp = *curr;
74415+ while (tmp && tmp != role)
74416+ tmp = tmp->next;
74417+ if (tmp == role) {
74418+ /* we don't need to add ourselves to this slot's chain */
74419+ return;
74420+ }
74421+ /* we need to add ourselves to this chain; there are two cases */
74422+ if (role->next == NULL) {
74423+ /* simple case, append the current chain to our role */
74424+ role->next = *curr;
74425+ *curr = role;
74426+ } else {
74427+ /* 1 -> 2 -> 3 -> 4
74428+ 2 -> 3 -> 4
74429+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74430+ */
74431+ /* trickier case: walk our role's chain until we find
74432+ the role for the start of the current slot's chain */
74433+ tmp = role;
74434+ tmp2 = *curr;
74435+ while (tmp->next && tmp->next != tmp2)
74436+ tmp = tmp->next;
74437+ if (tmp->next == tmp2) {
74438+ /* from example above, we found 3, so just
74439+ replace this slot's chain with ours */
74440+ *curr = role;
74441+ } else {
74442+ /* we didn't find a subset of our role's chain
74443+ in the current slot's chain, so append their
74444+ chain to ours, and set us as the first role in
74445+ the slot's chain
74446+
74447+ we could fold this case with the case above,
74448+ but making it explicit for clarity
74449+ */
74450+ tmp->next = tmp2;
74451+ *curr = role;
74452+ }
74453+ }
74454+ }
74455+
74456+ return;
74457+}
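/* A self-contained toy model of the chain merge above, assuming bare
   nodes with only an id and a next pointer: inserting a role whose own
   chain already ends in the slot's chain must neither duplicate nodes
   nor create a cycle: */

#include <stdio.h>

struct node { int id; struct node *next; };

static void insert_chain(struct node **slot, struct node *role)
{
	struct node *tmp, *tmp2;

	if (*slot == NULL) {
		*slot = role;
		return;
	}

	for (tmp = *slot; tmp && tmp != role; tmp = tmp->next)
		;
	if (tmp == role)		/* already reachable from this slot */
		return;

	if (role->next == NULL) {	/* simple case: prepend our role */
		role->next = *slot;
		*slot = role;
		return;
	}

	/* walk our chain looking for the head of the slot's chain */
	tmp = role;
	tmp2 = *slot;
	while (tmp->next && tmp->next != tmp2)
		tmp = tmp->next;
	if (tmp->next != tmp2)
		tmp->next = tmp2;	/* no overlap: append their chain */
	*slot = role;			/* either way, we now head the slot */
}

int main(void)
{
	struct node n3 = { 3, NULL }, n2 = { 2, &n3 }, n1 = { 1, &n2 };
	struct node *slot = &n2;	/* slot currently holds 2 -> 3 */

	insert_chain(&slot, &n1);	/* add 1 -> 2 -> 3 */
	for (struct node *p = slot; p; p = p->next)
		printf("%d ", p->id);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}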
74458+
74459+static void
74460+insert_acl_role_label(struct acl_role_label *role)
74461+{
74462+ int i;
74463+
74464+ if (polstate->role_list == NULL) {
74465+ polstate->role_list = role;
74466+ role->prev = NULL;
74467+ } else {
74468+ role->prev = polstate->role_list;
74469+ polstate->role_list = role;
74470+ }
74471+
74472+ /* used for hash chains */
74473+ role->next = NULL;
74474+
74475+ if (role->roletype & GR_ROLE_DOMAIN) {
74476+ for (i = 0; i < role->domain_child_num; i++)
74477+ __insert_acl_role_label(role, role->domain_children[i]);
74478+ } else
74479+ __insert_acl_role_label(role, role->uidgid);
74480+}
74481+
74482+static int
74483+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
74484+{
74485+ struct name_entry **curr, *nentry;
74486+ struct inodev_entry *ientry;
74487+ unsigned int len = strlen(name);
74488+ unsigned int key = full_name_hash(name, len);
74489+ unsigned int index = key % polstate->name_set.n_size;
74490+
74491+ curr = &polstate->name_set.n_hash[index];
74492+
74493+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74494+ curr = &((*curr)->next);
74495+
74496+ if (*curr != NULL)
74497+ return 1;
74498+
74499+ nentry = acl_alloc(sizeof (struct name_entry));
74500+ if (nentry == NULL)
74501+ return 0;
74502+ ientry = acl_alloc(sizeof (struct inodev_entry));
74503+ if (ientry == NULL)
74504+ return 0;
74505+ ientry->nentry = nentry;
74506+
74507+ nentry->key = key;
74508+ nentry->name = name;
74509+ nentry->inode = inode;
74510+ nentry->device = device;
74511+ nentry->len = len;
74512+ nentry->deleted = deleted;
74513+
74514+ nentry->prev = NULL;
74515+ curr = &polstate->name_set.n_hash[index];
74516+ if (*curr != NULL)
74517+ (*curr)->prev = nentry;
74518+ nentry->next = *curr;
74519+ *curr = nentry;
74520+
74521+ /* insert us into the table searchable by inode/dev */
74522+ __insert_inodev_entry(polstate, ientry);
74523+
74524+ return 1;
74525+}
74526+
74527+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
74528+
74529+static void *
74530+create_table(__u32 * len, int elementsize)
74531+{
74532+ unsigned int table_sizes[] = {
74533+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74534+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74535+ 4194301, 8388593, 16777213, 33554393, 67108859
74536+ };
74537+ void *newtable = NULL;
74538+ unsigned int pwr = 0;
74539+
74540+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74541+ table_sizes[pwr] <= *len)
74542+ pwr++;
74543+
74544+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74545+ return newtable;
74546+
74547+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74548+ newtable =
74549+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74550+ else
74551+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74552+
74553+ *len = table_sizes[pwr];
74554+
74555+ return newtable;
74556+}
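/* A sketch of the size-selection rule above (the overflow and too-large
   guards omitted), assuming the same prime table: with chained hashing
   and n expected entries, taking the first prime strictly greater than n
   keeps the load factor lambda = n/size just under 1, i.e. roughly one
   entry per bucket on average: */

#include <stdio.h>

static unsigned int pick_size(unsigned int n)
{
	static const unsigned int primes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
	};
	unsigned int i = 0;

	while (i < sizeof(primes) / sizeof(primes[0]) - 1 && primes[i] <= n)
		i++;
	return primes[i];
}

int main(void)
{
	unsigned int n = 1000, size = pick_size(n);

	printf("%u entries -> %u buckets (lambda %.2f)\n",
	       n, size, (double)n / size);	/* 1000 -> 1021, lambda 0.98 */
	return 0;
}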
74557+
74558+static int
74559+init_variables(const struct gr_arg *arg, bool reload)
74560+{
74561+ struct task_struct *reaper = init_pid_ns.child_reaper;
74562+ unsigned int stacksize;
74563+
74564+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74565+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74566+ polstate->name_set.n_size = arg->role_db.num_objects;
74567+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74568+
74569+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74570+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74571+ return 1;
74572+
74573+ if (!reload) {
74574+ if (!gr_init_uidset())
74575+ return 1;
74576+ }
74577+
74578+ /* set up the stack that holds allocation info */
74579+
74580+ stacksize = arg->role_db.num_pointers + 5;
74581+
74582+ if (!acl_alloc_stack_init(stacksize))
74583+ return 1;
74584+
74585+ if (!reload) {
74586+ /* grab reference for the real root dentry and vfsmount */
74587+ get_fs_root(reaper->fs, &gr_real_root);
74588+
74589+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74590+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74591+#endif
74592+
74593+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74594+ if (fakefs_obj_rw == NULL)
74595+ return 1;
74596+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74597+
74598+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74599+ if (fakefs_obj_rwx == NULL)
74600+ return 1;
74601+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74602+ }
74603+
74604+ polstate->subj_map_set.s_hash =
74605+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74606+ polstate->acl_role_set.r_hash =
74607+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74608+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74609+ polstate->inodev_set.i_hash =
74610+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74611+
74612+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74613+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74614+ return 1;
74615+
74616+ memset(polstate->subj_map_set.s_hash, 0,
74617+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74618+ memset(polstate->acl_role_set.r_hash, 0,
74619+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74620+ memset(polstate->name_set.n_hash, 0,
74621+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74622+ memset(polstate->inodev_set.i_hash, 0,
74623+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74624+
74625+ return 0;
74626+}
74627+
74628+/* free information not needed after startup;
74629+ currently this contains the user->kernel pointer mappings for subjects
74630+*/
74631+
74632+static void
74633+free_init_variables(void)
74634+{
74635+ __u32 i;
74636+
74637+ if (polstate->subj_map_set.s_hash) {
74638+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74639+ if (polstate->subj_map_set.s_hash[i]) {
74640+ kfree(polstate->subj_map_set.s_hash[i]);
74641+ polstate->subj_map_set.s_hash[i] = NULL;
74642+ }
74643+ }
74644+
74645+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74646+ PAGE_SIZE)
74647+ kfree(polstate->subj_map_set.s_hash);
74648+ else
74649+ vfree(polstate->subj_map_set.s_hash);
74650+ }
74651+
74652+ return;
74653+}
74654+
74655+static void
74656+free_variables(bool reload)
74657+{
74658+ struct acl_subject_label *s;
74659+ struct acl_role_label *r;
74660+ struct task_struct *task, *task2;
74661+ unsigned int x;
74662+
74663+ if (!reload) {
74664+ gr_clear_learn_entries();
74665+
74666+ read_lock(&tasklist_lock);
74667+ do_each_thread(task2, task) {
74668+ task->acl_sp_role = 0;
74669+ task->acl_role_id = 0;
74670+ task->inherited = 0;
74671+ task->acl = NULL;
74672+ task->role = NULL;
74673+ } while_each_thread(task2, task);
74674+ read_unlock(&tasklist_lock);
74675+
74676+ kfree(fakefs_obj_rw);
74677+ fakefs_obj_rw = NULL;
74678+ kfree(fakefs_obj_rwx);
74679+ fakefs_obj_rwx = NULL;
74680+
74681+ /* release the reference to the real root dentry and vfsmount */
74682+ path_put(&gr_real_root);
74683+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74684+ }
74685+
74686+ /* free all object hash tables */
74687+
74688+ FOR_EACH_ROLE_START(r)
74689+ if (r->subj_hash == NULL)
74690+ goto next_role;
74691+ FOR_EACH_SUBJECT_START(r, s, x)
74692+ if (s->obj_hash == NULL)
74693+ break;
74694+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74695+ kfree(s->obj_hash);
74696+ else
74697+ vfree(s->obj_hash);
74698+ FOR_EACH_SUBJECT_END(s, x)
74699+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74700+ if (s->obj_hash == NULL)
74701+ break;
74702+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74703+ kfree(s->obj_hash);
74704+ else
74705+ vfree(s->obj_hash);
74706+ FOR_EACH_NESTED_SUBJECT_END(s)
74707+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74708+ kfree(r->subj_hash);
74709+ else
74710+ vfree(r->subj_hash);
74711+ r->subj_hash = NULL;
74712+next_role:
74713+ FOR_EACH_ROLE_END(r)
74714+
74715+ acl_free_all();
74716+
74717+ if (polstate->acl_role_set.r_hash) {
74718+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74719+ PAGE_SIZE)
74720+ kfree(polstate->acl_role_set.r_hash);
74721+ else
74722+ vfree(polstate->acl_role_set.r_hash);
74723+ }
74724+ if (polstate->name_set.n_hash) {
74725+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74726+ PAGE_SIZE)
74727+ kfree(polstate->name_set.n_hash);
74728+ else
74729+ vfree(polstate->name_set.n_hash);
74730+ }
74731+
74732+ if (polstate->inodev_set.i_hash) {
74733+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74734+ PAGE_SIZE)
74735+ kfree(polstate->inodev_set.i_hash);
74736+ else
74737+ vfree(polstate->inodev_set.i_hash);
74738+ }
74739+
74740+ if (!reload)
74741+ gr_free_uidset();
74742+
74743+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74744+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74745+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74746+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74747+
74748+ polstate->default_role = NULL;
74749+ polstate->kernel_role = NULL;
74750+ polstate->role_list = NULL;
74751+
74752+ return;
74753+}
74754+
74755+static struct acl_subject_label *
74756+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74757+
74758+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74759+{
74760+ unsigned int len = strnlen_user(*name, maxlen);
74761+ char *tmp;
74762+
74763+ if (!len || len >= maxlen)
74764+ return -EINVAL;
74765+
74766+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74767+ return -ENOMEM;
74768+
74769+ if (copy_from_user(tmp, *name, len))
74770+ return -EFAULT;
74771+
74772+ tmp[len-1] = '\0';
74773+ *name = tmp;
74774+
74775+ return 0;
74776+}
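/* A userspace model of the bounded copy in alloc_and_copy_string(),
   assuming strnlen() stands in for strnlen_user() (which also counts
   the terminating NUL): measure with a hard cap, reject over-long
   input, and force NUL termination after the copy rather than trusting
   the source: */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *copy_bounded(const char *src, size_t maxlen)
{
	size_t len = strnlen(src, maxlen) + 1;	/* + 1 counts the NUL */
	char *tmp;

	if (len >= maxlen)	/* the kernel also rejects len == 0, which
				   strnlen_user() uses to signal a fault */
		return NULL;

	tmp = malloc(len);
	if (tmp == NULL)
		return NULL;

	memcpy(tmp, src, len);
	tmp[len - 1] = '\0';	/* never trust the source's terminator */
	return tmp;
}

int main(void)
{
	char *s = copy_bounded("/usr/bin/foo", 4096);

	puts(s ? s : "rejected");
	free(s);
	return 0;
}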
74777+
74778+static int
74779+copy_user_glob(struct acl_object_label *obj)
74780+{
74781+ struct acl_object_label *g_tmp, **guser;
74782+ int error;
74783+
74784+ if (obj->globbed == NULL)
74785+ return 0;
74786+
74787+ guser = &obj->globbed;
74788+ while (*guser) {
74789+ g_tmp = (struct acl_object_label *)
74790+ acl_alloc(sizeof (struct acl_object_label));
74791+ if (g_tmp == NULL)
74792+ return -ENOMEM;
74793+
74794+ if (copy_acl_object_label(g_tmp, *guser))
74795+ return -EFAULT;
74796+
74797+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74798+ if (error)
74799+ return error;
74800+
74801+ *guser = g_tmp;
74802+ guser = &(g_tmp->next);
74803+ }
74804+
74805+ return 0;
74806+}
74807+
74808+static int
74809+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74810+ struct acl_role_label *role)
74811+{
74812+ struct acl_object_label *o_tmp;
74813+ int ret;
74814+
74815+ while (userp) {
74816+ if ((o_tmp = (struct acl_object_label *)
74817+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74818+ return -ENOMEM;
74819+
74820+ if (copy_acl_object_label(o_tmp, userp))
74821+ return -EFAULT;
74822+
74823+ userp = o_tmp->prev;
74824+
74825+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74826+ if (ret)
74827+ return ret;
74828+
74829+ insert_acl_obj_label(o_tmp, subj);
74830+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74831+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74832+ return -ENOMEM;
74833+
74834+ ret = copy_user_glob(o_tmp);
74835+ if (ret)
74836+ return ret;
74837+
74838+ if (o_tmp->nested) {
74839+ int already_copied;
74840+
74841+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74842+ if (IS_ERR(o_tmp->nested))
74843+ return PTR_ERR(o_tmp->nested);
74844+
74845+ /* insert into nested subject list if we haven't copied this one yet
74846+ to prevent duplicate entries */
74847+ if (!already_copied) {
74848+ o_tmp->nested->next = role->hash->first;
74849+ role->hash->first = o_tmp->nested;
74850+ }
74851+ }
74852+ }
74853+
74854+ return 0;
74855+}
74856+
74857+static __u32
74858+count_user_subjs(struct acl_subject_label *userp)
74859+{
74860+ struct acl_subject_label s_tmp;
74861+ __u32 num = 0;
74862+
74863+ while (userp) {
74864+ if (copy_acl_subject_label(&s_tmp, userp))
74865+ break;
74866+
74867+ userp = s_tmp.prev;
74868+ num++;
74869+ }
74869+
74870+ return num;
74871+}
74872+
74873+static int
74874+copy_user_allowedips(struct acl_role_label *rolep)
74875+{
74876+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74877+
74878+ ruserip = rolep->allowed_ips;
74879+
74880+ while (ruserip) {
74881+ rlast = rtmp;
74882+
74883+ if ((rtmp = (struct role_allowed_ip *)
74884+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74885+ return -ENOMEM;
74886+
74887+ if (copy_role_allowed_ip(rtmp, ruserip))
74888+ return -EFAULT;
74889+
74890+ ruserip = rtmp->prev;
74891+
74892+ if (!rlast) {
74893+ rtmp->prev = NULL;
74894+ rolep->allowed_ips = rtmp;
74895+ } else {
74896+ rlast->next = rtmp;
74897+ rtmp->prev = rlast;
74898+ }
74899+
74900+ if (!ruserip)
74901+ rtmp->next = NULL;
74902+ }
74903+
74904+ return 0;
74905+}
74906+
74907+static int
74908+copy_user_transitions(struct acl_role_label *rolep)
74909+{
74910+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74911+ int error;
74912+
74913+ rusertp = rolep->transitions;
74914+
74915+ while (rusertp) {
74916+ rlast = rtmp;
74917+
74918+ if ((rtmp = (struct role_transition *)
74919+ acl_alloc(sizeof (struct role_transition))) == NULL)
74920+ return -ENOMEM;
74921+
74922+ if (copy_role_transition(rtmp, rusertp))
74923+ return -EFAULT;
74924+
74925+ rusertp = rtmp->prev;
74926+
74927+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74928+ if (error)
74929+ return error;
74930+
74931+ if (!rlast) {
74932+ rtmp->prev = NULL;
74933+ rolep->transitions = rtmp;
74934+ } else {
74935+ rlast->next = rtmp;
74936+ rtmp->prev = rlast;
74937+ }
74938+
74939+ if (!rusertp)
74940+ rtmp->next = NULL;
74941+ }
74942+
74943+ return 0;
74944+}
74945+
74946+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74947+{
74948+ struct acl_object_label o_tmp;
74949+ __u32 num = 0;
74950+
74951+ while (userp) {
74952+ if (copy_acl_object_label(&o_tmp, userp))
74953+ break;
74954+
74955+ userp = o_tmp.prev;
74956+ num++;
74957+ }
74958+
74959+ return num;
74960+}
74961+
74962+static struct acl_subject_label *
74963+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74964+{
74965+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74966+ __u32 num_objs;
74967+ struct acl_ip_label **i_tmp, *i_utmp2;
74968+ struct gr_hash_struct ghash;
74969+ struct subject_map *subjmap;
74970+ unsigned int i_num;
74971+ int err;
74972+
74973+ if (already_copied != NULL)
74974+ *already_copied = 0;
74975+
74976+ s_tmp = lookup_subject_map(userp);
74977+
74978+ /* we've already copied this subject into the kernel; just return
74979+ the reference to it and don't copy it over again
74980+ */
74981+ if (s_tmp) {
74982+ if (already_copied != NULL)
74983+ *already_copied = 1;
74984+ return(s_tmp);
74985+ }
74986+
74987+ if ((s_tmp = (struct acl_subject_label *)
74988+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74989+ return ERR_PTR(-ENOMEM);
74990+
74991+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74992+ if (subjmap == NULL)
74993+ return ERR_PTR(-ENOMEM);
74994+
74995+ subjmap->user = userp;
74996+ subjmap->kernel = s_tmp;
74997+ insert_subj_map_entry(subjmap);
74998+
74999+ if (copy_acl_subject_label(s_tmp, userp))
75000+ return ERR_PTR(-EFAULT);
75001+
75002+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
75003+ if (err)
75004+ return ERR_PTR(err);
75005+
75006+ if (!strcmp(s_tmp->filename, "/"))
75007+ role->root_label = s_tmp;
75008+
75009+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
75010+ return ERR_PTR(-EFAULT);
75011+
75012+ /* copy user and group transition tables */
75013+
75014+ if (s_tmp->user_trans_num) {
75015+ uid_t *uidlist;
75016+
75017+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
75018+ if (uidlist == NULL)
75019+ return ERR_PTR(-ENOMEM);
75020+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
75021+ return ERR_PTR(-EFAULT);
75022+
75023+ s_tmp->user_transitions = uidlist;
75024+ }
75025+
75026+ if (s_tmp->group_trans_num) {
75027+ gid_t *gidlist;
75028+
75029+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
75030+ if (gidlist == NULL)
75031+ return ERR_PTR(-ENOMEM);
75032+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
75033+ return ERR_PTR(-EFAULT);
75034+
75035+ s_tmp->group_transitions = gidlist;
75036+ }
75037+
75038+ /* set up object hash table */
75039+ num_objs = count_user_objs(ghash.first);
75040+
75041+ s_tmp->obj_hash_size = num_objs;
75042+ s_tmp->obj_hash =
75043+ (struct acl_object_label **)
75044+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
75045+
75046+ if (!s_tmp->obj_hash)
75047+ return ERR_PTR(-ENOMEM);
75048+
75049+ memset(s_tmp->obj_hash, 0,
75050+ s_tmp->obj_hash_size *
75051+ sizeof (struct acl_object_label *));
75052+
75053+ /* add in objects */
75054+ err = copy_user_objs(ghash.first, s_tmp, role);
75055+
75056+ if (err)
75057+ return ERR_PTR(err);
75058+
75059+ /* set pointer for parent subject */
75060+ if (s_tmp->parent_subject) {
75061+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
75062+
75063+ if (IS_ERR(s_tmp2))
75064+ return s_tmp2;
75065+
75066+ s_tmp->parent_subject = s_tmp2;
75067+ }
75068+
75069+ /* add in ip acls */
75070+
75071+ if (!s_tmp->ip_num) {
75072+ s_tmp->ips = NULL;
75073+ goto insert;
75074+ }
75075+
75076+ i_tmp =
75077+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
75078+ sizeof (struct acl_ip_label *));
75079+
75080+ if (!i_tmp)
75081+ return ERR_PTR(-ENOMEM);
75082+
75083+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
75084+ *(i_tmp + i_num) =
75085+ (struct acl_ip_label *)
75086+ acl_alloc(sizeof (struct acl_ip_label));
75087+ if (!*(i_tmp + i_num))
75088+ return ERR_PTR(-ENOMEM);
75089+
75090+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
75091+ return ERR_PTR(-EFAULT);
75092+
75093+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
75094+ return ERR_PTR(-EFAULT);
75095+
75096+ if ((*(i_tmp + i_num))->iface == NULL)
75097+ continue;
75098+
75099+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
75100+ if (err)
75101+ return ERR_PTR(err);
75102+ }
75103+
75104+ s_tmp->ips = i_tmp;
75105+
75106+insert:
75107+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
75108+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
75109+ return ERR_PTR(-ENOMEM);
75110+
75111+ return s_tmp;
75112+}
75113+
75114+static int
75115+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
75116+{
75117+ struct acl_subject_label s_pre;
75118+ struct acl_subject_label * ret;
75119+ int err;
75120+
75121+ while (userp) {
75122+ if (copy_acl_subject_label(&s_pre, userp))
75123+ return -EFAULT;
75124+
75125+ ret = do_copy_user_subj(userp, role, NULL);
75126+
75127+ err = PTR_ERR(ret);
75128+ if (IS_ERR(ret))
75129+ return err;
75130+
75131+ insert_acl_subj_label(ret, role);
75132+
75133+ userp = s_pre.prev;
75134+ }
75135+
75136+ return 0;
75137+}
75138+
75139+static int
75140+copy_user_acl(struct gr_arg *arg)
75141+{
75142+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
75143+ struct acl_subject_label *subj_list;
75144+ struct sprole_pw *sptmp;
75145+ struct gr_hash_struct *ghash;
75146+ uid_t *domainlist;
75147+ unsigned int r_num;
75148+ int err = 0;
75149+ __u16 i;
75150+ __u32 num_subjs;
75151+
75152+ /* we need a default and kernel role */
75153+ if (arg->role_db.num_roles < 2)
75154+ return -EINVAL;
75155+
75156+ /* copy special role authentication info from userspace */
75157+
75158+ polstate->num_sprole_pws = arg->num_sprole_pws;
75159+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
75160+
75161+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
75162+ return -ENOMEM;
75163+
75164+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75165+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
75166+ if (!sptmp)
75167+ return -ENOMEM;
75168+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
75169+ return -EFAULT;
75170+
75171+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
75172+ if (err)
75173+ return err;
75174+
75175+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75176+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
75177+#endif
75178+
75179+ polstate->acl_special_roles[i] = sptmp;
75180+ }
75181+
75182+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
75183+
75184+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
75185+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
75186+
75187+ if (!r_tmp)
75188+ return -ENOMEM;
75189+
75190+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
75191+ return -EFAULT;
75192+
75193+ if (copy_acl_role_label(r_tmp, r_utmp2))
75194+ return -EFAULT;
75195+
75196+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
75197+ if (err)
75198+ return err;
75199+
75200+ if (!strcmp(r_tmp->rolename, "default")
75201+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
75202+ polstate->default_role = r_tmp;
75203+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
75204+ polstate->kernel_role = r_tmp;
75205+ }
75206+
75207+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
75208+ return -ENOMEM;
75209+
75210+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
75211+ return -EFAULT;
75212+
75213+ r_tmp->hash = ghash;
75214+
75215+ num_subjs = count_user_subjs(r_tmp->hash->first);
75216+
75217+ r_tmp->subj_hash_size = num_subjs;
75218+ r_tmp->subj_hash =
75219+ (struct acl_subject_label **)
75220+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
75221+
75222+ if (!r_tmp->subj_hash)
75223+ return -ENOMEM;
75224+
75225+ err = copy_user_allowedips(r_tmp);
75226+ if (err)
75227+ return err;
75228+
75229+ /* copy domain info */
75230+ if (r_tmp->domain_children != NULL) {
75231+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
75232+ if (domainlist == NULL)
75233+ return -ENOMEM;
75234+
75235+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
75236+ return -EFAULT;
75237+
75238+ r_tmp->domain_children = domainlist;
75239+ }
75240+
75241+ err = copy_user_transitions(r_tmp);
75242+ if (err)
75243+ return err;
75244+
75245+ memset(r_tmp->subj_hash, 0,
75246+ r_tmp->subj_hash_size *
75247+ sizeof (struct acl_subject_label *));
75248+
75249+ /* acquire the list of subjects, then NULL out
75250+ the list prior to parsing the subjects for this role,
75251+ as during this parsing the list is replaced with a list
75252+ of *nested* subjects for the role
75253+ */
75254+ subj_list = r_tmp->hash->first;
75255+
75256+ /* set nested subject list to null */
75257+ r_tmp->hash->first = NULL;
75258+
75259+ err = copy_user_subjs(subj_list, r_tmp);
75260+
75261+ if (err)
75262+ return err;
75263+
75264+ insert_acl_role_label(r_tmp);
75265+ }
75266+
75267+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
75268+ return -EINVAL;
75269+
75270+ return err;
75271+}
75272+
75273+static int gracl_reload_apply_policies(void *reload)
75274+{
75275+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
75276+ struct task_struct *task, *task2;
75277+ struct acl_role_label *role, *rtmp;
75278+ struct acl_subject_label *subj;
75279+ const struct cred *cred;
75280+ int role_applied;
75281+ int ret = 0;
75282+
75283+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
75284+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
75285+
75286+ /* first make sure we'll be able to apply the new policy cleanly */
75287+ do_each_thread(task2, task) {
75288+ if (task->exec_file == NULL)
75289+ continue;
75290+ role_applied = 0;
75291+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75292+ /* preserve special roles */
75293+ FOR_EACH_ROLE_START(role)
75294+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75295+ rtmp = task->role;
75296+ task->role = role;
75297+ role_applied = 1;
75298+ break;
75299+ }
75300+ FOR_EACH_ROLE_END(role)
75301+ }
75302+ if (!role_applied) {
75303+ cred = __task_cred(task);
75304+ rtmp = task->role;
75305+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75306+ }
75307+ /* this handles non-nested inherited subjects; nested subjects will still
75308+ be dropped currently */
75309+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
75310+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
75311+ /* change the role back so that we've made no modifications to the policy */
75312+ task->role = rtmp;
75313+
75314+ if (subj == NULL || task->tmpacl == NULL) {
75315+ ret = -EINVAL;
75316+ goto out;
75317+ }
75318+ } while_each_thread(task2, task);
75319+
75320+ /* now actually apply the policy */
75321+
75322+ do_each_thread(task2, task) {
75323+ if (task->exec_file) {
75324+ role_applied = 0;
75325+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75326+ /* preserve special roles */
75327+ FOR_EACH_ROLE_START(role)
75328+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75329+ task->role = role;
75330+ role_applied = 1;
75331+ break;
75332+ }
75333+ FOR_EACH_ROLE_END(role)
75334+ }
75335+ if (!role_applied) {
75336+ cred = __task_cred(task);
75337+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75338+ }
75339+ /* this handles non-nested inherited subjects; nested subjects will still
75340+ be dropped currently */
75341+ if (!reload_state->oldmode && task->inherited)
75342+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
75343+ else {
75344+ /* looked up and tagged to the task previously */
75345+ subj = task->tmpacl;
75346+ }
75347+ /* subj will be non-null */
75348+ __gr_apply_subject_to_task(polstate, task, subj);
75349+ if (reload_state->oldmode) {
75350+ task->acl_role_id = 0;
75351+ task->acl_sp_role = 0;
75352+ task->inherited = 0;
75353+ }
75354+ } else {
75355+ // it's a kernel process
75356+ task->role = polstate->kernel_role;
75357+ task->acl = polstate->kernel_role->root_label;
75358+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75359+ task->acl->mode &= ~GR_PROCFIND;
75360+#endif
75361+ }
75362+ } while_each_thread(task2, task);
75363+
75364+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75365+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75366+
75367+out:
75368+
75369+ return ret;
75370+}
75371+
75372+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75373+{
75374+ struct gr_reload_state new_reload_state = { };
75375+ int err;
75376+
75377+ new_reload_state.oldpolicy_ptr = polstate;
75378+ new_reload_state.oldalloc_ptr = current_alloc_state;
75379+ new_reload_state.oldmode = oldmode;
75380+
75381+ current_alloc_state = &new_reload_state.newalloc;
75382+ polstate = &new_reload_state.newpolicy;
75383+
75384+ /* everything relevant is now saved off; copy in the new policy */
75385+ if (init_variables(args, true)) {
75386+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75387+ err = -ENOMEM;
75388+ goto error;
75389+ }
75390+
75391+ err = copy_user_acl(args);
75392+ free_init_variables();
75393+ if (err)
75394+ goto error;
75395+ /* the new policy is copied in, with the old policy available via saved_state.
75396+ First go through applying roles, making sure to preserve special roles;
75397+ then apply the new subjects, preserving inherited and nested subjects,
75398+ though currently only inherited subjects will be preserved
75399+ */
75400+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75401+ if (err)
75402+ goto error;
75403+
75404+ /* we've now applied the new policy, so restore the old policy state to free it */
75405+ polstate = &new_reload_state.oldpolicy;
75406+ current_alloc_state = &new_reload_state.oldalloc;
75407+ free_variables(true);
75408+
75409+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75410+ to running_polstate/current_alloc_state inside stop_machine
75411+ */
75412+ err = 0;
75413+ goto out;
75414+error:
75415+ /* if loading the new policy fails, we'll just keep the previous
75416+ policy set around
75417+ */
75418+ free_variables(true);
75419+
75420+ /* doesn't affect runtime, but maintains consistent state */
75421+out:
75422+ polstate = new_reload_state.oldpolicy_ptr;
75423+ current_alloc_state = new_reload_state.oldalloc_ptr;
75424+
75425+ return err;
75426+}
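
A note on the pattern above: gracl_reload() builds the new policy while the global polstate/current_alloc_state pointers temporarily reference scratch storage, publishes it inside stop_machine(), then repoints the globals at the old state just long enough to free it. Below is a minimal user-space sketch of that pointer-swap discipline; all names are illustrative, not kernel APIs.

    #include <stdio.h>
    #include <string.h>

    struct state { int generation; };

    static struct state live;          /* analogue of the running policy */
    static struct state *cur = &live;  /* analogue of the polstate pointer */

    /* stand-in for gracl_reload(): build aside, publish, free old, restore */
    static int reload(int next_gen)
    {
            struct state *saved = cur;
            struct state scratch = { 0 };
            struct state old;

            cur = &scratch;               /* new state is built off to the side */
            cur->generation = next_gen;
            if (next_gen < 0)             /* stand-in for copy_user_acl() failure */
                    goto restore;

            old = *saved;                 /* publish: what stop_machine() does here */
            *saved = scratch;

            cur = &old;                   /* point at the old state to tear it down */
            memset(cur, 0, sizeof(*cur)); /* stand-in for free_variables(true) */
    restore:
            cur = saved;                  /* the pointer is always restored */
            return (next_gen < 0) ? -1 : 0;
    }

    int main(void)
    {
            int err = reload(7);

            printf("reload: %d, generation now %d\n", err, cur->generation);
            return err ? 1 : 0;
    }

As in the real function, the error path skips the publish step entirely, so a failed reload leaves the previous policy untouched.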
75427+
75428+static int
75429+gracl_init(struct gr_arg *args)
75430+{
75431+ int error = 0;
75432+
75433+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75434+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75435+
75436+ if (init_variables(args, false)) {
75437+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75438+ error = -ENOMEM;
75439+ goto out;
75440+ }
75441+
75442+ error = copy_user_acl(args);
75443+ free_init_variables();
75444+ if (error)
75445+ goto out;
75446+
75447+ error = gr_set_acls(0);
75448+ if (error)
75449+ goto out;
75450+
75451+ gr_enable_rbac_system();
75452+
75453+ return 0;
75454+
75455+out:
75456+ free_variables(false);
75457+ return error;
75458+}
75459+
75460+static int
75461+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75462+ unsigned char **sum)
75463+{
75464+ struct acl_role_label *r;
75465+ struct role_allowed_ip *ipp;
75466+ struct role_transition *trans;
75467+ unsigned int i;
75468+ int found = 0;
75469+ u32 curr_ip = current->signal->curr_ip;
75470+
75471+ current->signal->saved_ip = curr_ip;
75472+
75473+ /* check transition table */
75474+
75475+ for (trans = current->role->transitions; trans; trans = trans->next) {
75476+ if (!strcmp(rolename, trans->rolename)) {
75477+ found = 1;
75478+ break;
75479+ }
75480+ }
75481+
75482+ if (!found)
75483+ return 0;
75484+
75485+ /* handle special roles that do not require authentication,
75486+ and check the source IP */
75487+
75488+ FOR_EACH_ROLE_START(r)
75489+ if (!strcmp(rolename, r->rolename) &&
75490+ (r->roletype & GR_ROLE_SPECIAL)) {
75491+ found = 0;
75492+ if (r->allowed_ips != NULL) {
75493+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75494+ if ((ntohl(curr_ip) & ipp->netmask) ==
75495+ (ntohl(ipp->addr) & ipp->netmask))
75496+ found = 1;
75497+ }
75498+ } else
75499+ found = 2;
75500+ if (!found)
75501+ return 0;
75502+
75503+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75504+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75505+ *salt = NULL;
75506+ *sum = NULL;
75507+ return 1;
75508+ }
75509+ }
75510+ FOR_EACH_ROLE_END(r)
75511+
75512+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75513+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75514+ *salt = polstate->acl_special_roles[i]->salt;
75515+ *sum = polstate->acl_special_roles[i]->sum;
75516+ return 1;
75517+ }
75518+ }
75519+
75520+ return 0;
75521+}
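
The allowed_ips walk above is ordinary CIDR matching: a client address is accepted when it agrees with the configured network address under that entry's netmask. A user-space restatement of the comparison follows; the string-based helper interface is illustrative.

    #include <arpa/inet.h>
    #include <stdio.h>

    /* mirrors the allowed-ip test: compare both addresses under the mask */
    static int ip_allowed(const char *ip, const char *net, unsigned int maskbits)
    {
            unsigned int mask = maskbits ? ~0U << (32 - maskbits) : 0;
            struct in_addr a, n;

            inet_pton(AF_INET, ip, &a);
            inet_pton(AF_INET, net, &n);
            return (ntohl(a.s_addr) & mask) == (ntohl(n.s_addr) & mask);
    }

    int main(void)
    {
            printf("%d\n", ip_allowed("192.168.1.77", "192.168.1.0", 24)); /* 1 */
            printf("%d\n", ip_allowed("10.0.0.5",     "192.168.1.0", 24)); /* 0 */
            return 0;
    }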
75522+
75523+int gr_check_secure_terminal(struct task_struct *task)
75524+{
75525+ struct task_struct *p, *p2, *p3;
75526+ struct files_struct *files;
75527+ struct fdtable *fdt;
75528+ struct file *our_file = NULL, *file;
75529+ int i;
75530+
75531+ if (task->signal->tty == NULL)
75532+ return 1;
75533+
75534+ files = get_files_struct(task);
75535+ if (files != NULL) {
75536+ rcu_read_lock();
75537+ fdt = files_fdtable(files);
75538+ for (i=0; i < fdt->max_fds; i++) {
75539+ file = fcheck_files(files, i);
75540+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75541+ get_file(file);
75542+ our_file = file;
75543+ }
75544+ }
75545+ rcu_read_unlock();
75546+ put_files_struct(files);
75547+ }
75548+
75549+ if (our_file == NULL)
75550+ return 1;
75551+
75552+ read_lock(&tasklist_lock);
75553+ do_each_thread(p2, p) {
75554+ files = get_files_struct(p);
75555+ if (files == NULL ||
75556+ (p->signal && p->signal->tty == task->signal->tty)) {
75557+ if (files != NULL)
75558+ put_files_struct(files);
75559+ continue;
75560+ }
75561+ rcu_read_lock();
75562+ fdt = files_fdtable(files);
75563+ for (i=0; i < fdt->max_fds; i++) {
75564+ file = fcheck_files(files, i);
75565+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75566+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75567+ p3 = task;
75568+ while (task_pid_nr(p3) > 0) {
75569+ if (p3 == p)
75570+ break;
75571+ p3 = p3->real_parent;
75572+ }
75573+ if (p3 == p)
75574+ break;
75575+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75576+ gr_handle_alertkill(p);
75577+ rcu_read_unlock();
75578+ put_files_struct(files);
75579+ read_unlock(&tasklist_lock);
75580+ fput(our_file);
75581+ return 0;
75582+ }
75583+ }
75584+ rcu_read_unlock();
75585+ put_files_struct(files);
75586+ } while_each_thread(p2, p);
75587+ read_unlock(&tasklist_lock);
75588+
75589+ fput(our_file);
75590+ return 1;
75591+}
75592+
75593+ssize_t
75594+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75595+{
75596+ struct gr_arg_wrapper uwrap;
75597+ unsigned char *sprole_salt = NULL;
75598+ unsigned char *sprole_sum = NULL;
75599+ int error = 0;
75600+ int error2 = 0;
75601+ size_t req_count = 0;
75602+ unsigned char oldmode = 0;
75603+
75604+ mutex_lock(&gr_dev_mutex);
75605+
75606+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75607+ error = -EPERM;
75608+ goto out;
75609+ }
75610+
75611+#ifdef CONFIG_COMPAT
75612+ pax_open_kernel();
75613+ if (is_compat_task()) {
75614+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75615+ copy_gr_arg = &copy_gr_arg_compat;
75616+ copy_acl_object_label = &copy_acl_object_label_compat;
75617+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75618+ copy_acl_role_label = &copy_acl_role_label_compat;
75619+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75620+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75621+ copy_role_transition = &copy_role_transition_compat;
75622+ copy_sprole_pw = &copy_sprole_pw_compat;
75623+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75624+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75625+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75626+ } else {
75627+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75628+ copy_gr_arg = &copy_gr_arg_normal;
75629+ copy_acl_object_label = &copy_acl_object_label_normal;
75630+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75631+ copy_acl_role_label = &copy_acl_role_label_normal;
75632+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75633+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75634+ copy_role_transition = &copy_role_transition_normal;
75635+ copy_sprole_pw = &copy_sprole_pw_normal;
75636+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75637+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75638+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75639+ }
75640+ pax_close_kernel();
75641+#endif
75642+
75643+ req_count = get_gr_arg_wrapper_size();
75644+
75645+ if (count != req_count) {
75646+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75647+ error = -EINVAL;
75648+ goto out;
75649+ }
75650+
75651+
75652+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75653+ gr_auth_expires = 0;
75654+ gr_auth_attempts = 0;
75655+ }
75656+
75657+ error = copy_gr_arg_wrapper(buf, &uwrap);
75658+ if (error)
75659+ goto out;
75660+
75661+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75662+ if (error)
75663+ goto out;
75664+
75665+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75666+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75667+ time_after(gr_auth_expires, get_seconds())) {
75668+ error = -EBUSY;
75669+ goto out;
75670+ }
75671+
75672+ /* if a non-root user is trying to do anything other than use a
75673+ special role, do not attempt authentication and do not count the
75674+ attempt towards authentication lockout
75675+ */
75676+
75677+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75678+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75679+ gr_is_global_nonroot(current_uid())) {
75680+ error = -EPERM;
75681+ goto out;
75682+ }
75683+
75684+ /* ensure pw and special role name are null-terminated */
75685+
75686+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75687+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75688+
75689+ /* Okay.
75690+ * We now have enough of the argument structure (we have yet
75691+ * to copy_from_user the tables themselves). Copy the tables
75692+ * only if we need them, i.e. for loading operations. */
75693+
75694+ switch (gr_usermode->mode) {
75695+ case GR_STATUS:
75696+ if (gr_acl_is_enabled()) {
75697+ error = 1;
75698+ if (!gr_check_secure_terminal(current))
75699+ error = 3;
75700+ } else
75701+ error = 2;
75702+ goto out;
75703+ case GR_SHUTDOWN:
75704+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75705+ stop_machine(gr_rbac_disable, NULL, NULL);
75706+ free_variables(false);
75707+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75708+ memset(gr_system_salt, 0, GR_SALT_LEN);
75709+ memset(gr_system_sum, 0, GR_SHA_LEN);
75710+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75711+ } else if (gr_acl_is_enabled()) {
75712+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75713+ error = -EPERM;
75714+ } else {
75715+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75716+ error = -EAGAIN;
75717+ }
75718+ break;
75719+ case GR_ENABLE:
75720+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75721+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75722+ else {
75723+ if (gr_acl_is_enabled())
75724+ error = -EAGAIN;
75725+ else
75726+ error = error2;
75727+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75728+ }
75729+ break;
75730+ case GR_OLDRELOAD:
75731+ oldmode = 1; /* fall through to GR_RELOAD */
75732+ case GR_RELOAD:
75733+ if (!gr_acl_is_enabled()) {
75734+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75735+ error = -EAGAIN;
75736+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75737+ error2 = gracl_reload(gr_usermode, oldmode);
75738+ if (!error2)
75739+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75740+ else {
75741+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75742+ error = error2;
75743+ }
75744+ } else {
75745+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75746+ error = -EPERM;
75747+ }
75748+ break;
75749+ case GR_SEGVMOD:
75750+ if (unlikely(!gr_acl_is_enabled())) {
75751+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75752+ error = -EAGAIN;
75753+ break;
75754+ }
75755+
75756+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75757+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75758+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75759+ struct acl_subject_label *segvacl;
75760+ segvacl =
75761+ lookup_acl_subj_label(gr_usermode->segv_inode,
75762+ gr_usermode->segv_device,
75763+ current->role);
75764+ if (segvacl) {
75765+ segvacl->crashes = 0;
75766+ segvacl->expires = 0;
75767+ }
75768+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75769+ gr_remove_uid(gr_usermode->segv_uid);
75770+ }
75771+ } else {
75772+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75773+ error = -EPERM;
75774+ }
75775+ break;
75776+ case GR_SPROLE:
75777+ case GR_SPROLEPAM:
75778+ if (unlikely(!gr_acl_is_enabled())) {
75779+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75780+ error = -EAGAIN;
75781+ break;
75782+ }
75783+
75784+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75785+ current->role->expires = 0;
75786+ current->role->auth_attempts = 0;
75787+ }
75788+
75789+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75790+ time_after(current->role->expires, get_seconds())) {
75791+ error = -EBUSY;
75792+ goto out;
75793+ }
75794+
75795+ if (lookup_special_role_auth
75796+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75797+ && ((!sprole_salt && !sprole_sum)
75798+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75799+ char *p = "";
75800+ assign_special_role(gr_usermode->sp_role);
75801+ read_lock(&tasklist_lock);
75802+ if (current->real_parent)
75803+ p = current->real_parent->role->rolename;
75804+ read_unlock(&tasklist_lock);
75805+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75806+ p, acl_sp_role_value);
75807+ } else {
75808+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75809+ error = -EPERM;
75810+ if(!(current->role->auth_attempts++))
75811+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75812+
75813+ goto out;
75814+ }
75815+ break;
75816+ case GR_UNSPROLE:
75817+ if (unlikely(!gr_acl_is_enabled())) {
75818+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75819+ error = -EAGAIN;
75820+ break;
75821+ }
75822+
75823+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75824+ char *p = "";
75825+ int i = 0;
75826+
75827+ read_lock(&tasklist_lock);
75828+ if (current->real_parent) {
75829+ p = current->real_parent->role->rolename;
75830+ i = current->real_parent->acl_role_id;
75831+ }
75832+ read_unlock(&tasklist_lock);
75833+
75834+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75835+ gr_set_acls(1);
75836+ } else {
75837+ error = -EPERM;
75838+ goto out;
75839+ }
75840+ break;
75841+ default:
75842+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75843+ error = -EINVAL;
75844+ break;
75845+ }
75846+
75847+ if (error != -EPERM)
75848+ goto out;
75849+
75850+ if(!(gr_auth_attempts++))
75851+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75852+
75853+ out:
75854+ mutex_unlock(&gr_dev_mutex);
75855+
75856+ if (!error)
75857+ error = req_count;
75858+
75859+ return error;
75860+}
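
For context, the handler above is reached through a single write() of a fixed-size wrapper to the grsec device node; everything else is pulled in via the copy_* hooks selected at the top. A hypothetical user-space sketch of the calling convention follows; the struct layouts, field values, and device path are illustrative stand-ins, not the real definitions from linux/gracl.h.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* illustrative stand-ins only -- the real layouts live in linux/gracl.h */
    struct fake_gr_arg { unsigned short mode; char pw[128]; };
    struct fake_gr_arg_wrapper {
            struct fake_gr_arg *arg;
            unsigned int version;
            unsigned int size;
    };

    int main(void)
    {
            struct fake_gr_arg arg = { .mode = 0 /* stand-in for GR_STATUS */ };
            struct fake_gr_arg_wrapper wrap = {
                    .arg = &arg, .version = 1, .size = sizeof(wrap)
            };
            int fd = open("/dev/grsec", O_WRONLY); /* path is an assumption */
            ssize_t n;

            if (fd < 0) { perror("open"); return 1; }
            /* count must equal get_gr_arg_wrapper_size() or the handler
               rejects the write with -EINVAL */
            n = write(fd, &wrap, sizeof(wrap));
            printf("write returned %zd\n", n);
            close(fd);
            return n < 0;
    }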
75861+
75862+int
75863+gr_set_acls(const int type)
75864+{
75865+ struct task_struct *task, *task2;
75866+ struct acl_role_label *role = current->role;
75867+ struct acl_subject_label *subj;
75868+ __u16 acl_role_id = current->acl_role_id;
75869+ const struct cred *cred;
75870+ int ret;
75871+
75872+ rcu_read_lock();
75873+ read_lock(&tasklist_lock);
75874+ read_lock(&grsec_exec_file_lock);
75875+ do_each_thread(task2, task) {
75876+ /* check to see if we're called from the exit handler;
75877+ if so, only replace ACLs that have inherited the admin
75878+ ACL */
75879+
75880+ if (type && (task->role != role ||
75881+ task->acl_role_id != acl_role_id))
75882+ continue;
75883+
75884+ task->acl_role_id = 0;
75885+ task->acl_sp_role = 0;
75886+ task->inherited = 0;
75887+
75888+ if (task->exec_file) {
75889+ cred = __task_cred(task);
75890+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75891+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75892+ if (subj == NULL) {
75893+ ret = -EINVAL;
75894+ read_unlock(&grsec_exec_file_lock);
75895+ read_unlock(&tasklist_lock);
75896+ rcu_read_unlock();
75897+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75898+ return ret;
75899+ }
75900+ __gr_apply_subject_to_task(polstate, task, subj);
75901+ } else {
75902+ // it's a kernel process
75903+ task->role = polstate->kernel_role;
75904+ task->acl = polstate->kernel_role->root_label;
75905+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75906+ task->acl->mode &= ~GR_PROCFIND;
75907+#endif
75908+ }
75909+ } while_each_thread(task2, task);
75910+ read_unlock(&grsec_exec_file_lock);
75911+ read_unlock(&tasklist_lock);
75912+ rcu_read_unlock();
75913+
75914+ return 0;
75915+}
75916diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75917new file mode 100644
75918index 0000000..39645c9
75919--- /dev/null
75920+++ b/grsecurity/gracl_res.c
75921@@ -0,0 +1,68 @@
75922+#include <linux/kernel.h>
75923+#include <linux/sched.h>
75924+#include <linux/gracl.h>
75925+#include <linux/grinternal.h>
75926+
75927+static const char *restab_log[] = {
75928+ [RLIMIT_CPU] = "RLIMIT_CPU",
75929+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75930+ [RLIMIT_DATA] = "RLIMIT_DATA",
75931+ [RLIMIT_STACK] = "RLIMIT_STACK",
75932+ [RLIMIT_CORE] = "RLIMIT_CORE",
75933+ [RLIMIT_RSS] = "RLIMIT_RSS",
75934+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75935+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75936+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75937+ [RLIMIT_AS] = "RLIMIT_AS",
75938+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75939+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75940+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75941+ [RLIMIT_NICE] = "RLIMIT_NICE",
75942+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75943+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75944+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75945+};
75946+
75947+void
75948+gr_log_resource(const struct task_struct *task,
75949+ const int res, const unsigned long wanted, const int gt)
75950+{
75951+ const struct cred *cred;
75952+ unsigned long rlim;
75953+
75954+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75955+ return;
75956+
75957+ // not yet supported resource
75958+ if (unlikely(!restab_log[res]))
75959+ return;
75960+
75961+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75962+ rlim = task_rlimit_max(task, res);
75963+ else
75964+ rlim = task_rlimit(task, res);
75965+
75966+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75967+ return;
75968+
75969+ rcu_read_lock();
75970+ cred = __task_cred(task);
75971+
75972+ if (res == RLIMIT_NPROC &&
75973+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75974+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75975+ goto out_rcu_unlock;
75976+ else if (res == RLIMIT_MEMLOCK &&
75977+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75978+ goto out_rcu_unlock;
75979+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75980+ goto out_rcu_unlock;
75981+ rcu_read_unlock();
75982+
75983+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75984+
75985+ return;
75986+out_rcu_unlock:
75987+ rcu_read_unlock();
75988+ return;
75989+}
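
The gt parameter above only changes whether hitting the limit exactly counts as a violation. A minimal restatement of the early-return test in gr_log_resource(); the helper name is illustrative.

    #include <stdio.h>

    #define RLIM_INFINITY (~0UL)

    /* returns 1 when the request should be logged as exceeding the limit */
    static int exceeds(unsigned long wanted, unsigned long rlim, int gt)
    {
            if (rlim == RLIM_INFINITY)
                    return 0;
            return gt ? wanted > rlim : wanted >= rlim;
    }

    int main(void)
    {
            /* with gt=1, hitting the limit exactly is still allowed ... */
            printf("%d\n", exceeds(100, 100, 1));   /* 0 */
            /* ... with gt=0 it is already a violation */
            printf("%d\n", exceeds(100, 100, 0));   /* 1 */
            return 0;
    }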
75990diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75991new file mode 100644
75992index 0000000..218b66b
75993--- /dev/null
75994+++ b/grsecurity/gracl_segv.c
75995@@ -0,0 +1,324 @@
75996+#include <linux/kernel.h>
75997+#include <linux/mm.h>
75998+#include <asm/uaccess.h>
75999+#include <asm/errno.h>
76000+#include <asm/mman.h>
76001+#include <net/sock.h>
76002+#include <linux/file.h>
76003+#include <linux/fs.h>
76004+#include <linux/net.h>
76005+#include <linux/in.h>
76006+#include <linux/slab.h>
76007+#include <linux/types.h>
76008+#include <linux/sched.h>
76009+#include <linux/timer.h>
76010+#include <linux/gracl.h>
76011+#include <linux/grsecurity.h>
76012+#include <linux/grinternal.h>
76013+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76014+#include <linux/magic.h>
76015+#include <linux/pagemap.h>
76016+#include "../fs/btrfs/async-thread.h"
76017+#include "../fs/btrfs/ctree.h"
76018+#include "../fs/btrfs/btrfs_inode.h"
76019+#endif
76020+
76021+static struct crash_uid *uid_set;
76022+static unsigned short uid_used;
76023+static DEFINE_SPINLOCK(gr_uid_lock);
76024+extern rwlock_t gr_inode_lock;
76025+extern struct acl_subject_label *
76026+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
76027+ struct acl_role_label *role);
76028+
76029+static inline dev_t __get_dev(const struct dentry *dentry)
76030+{
76031+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76032+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76033+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
76034+ else
76035+#endif
76036+ return dentry->d_sb->s_dev;
76037+}
76038+
76039+static inline u64 __get_ino(const struct dentry *dentry)
76040+{
76041+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76042+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76043+ return btrfs_ino(dentry->d_inode);
76044+ else
76045+#endif
76046+ return dentry->d_inode->i_ino;
76047+}
76048+
76049+int
76050+gr_init_uidset(void)
76051+{
76052+ uid_set =
76053+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
76054+ uid_used = 0;
76055+
76056+ return uid_set ? 1 : 0;
76057+}
76058+
76059+void
76060+gr_free_uidset(void)
76061+{
76062+ if (uid_set) {
76063+ struct crash_uid *tmpset;
76064+ spin_lock(&gr_uid_lock);
76065+ tmpset = uid_set;
76066+ uid_set = NULL;
76067+ uid_used = 0;
76068+ spin_unlock(&gr_uid_lock);
76069+ if (tmpset)
76070+ kfree(tmpset);
76071+ }
76072+
76073+ return;
76074+}
76075+
76076+int
76077+gr_find_uid(const uid_t uid)
76078+{
76079+ struct crash_uid *tmp = uid_set;
76080+ uid_t buid;
76081+ int low = 0, high = uid_used - 1, mid;
76082+
76083+ while (high >= low) {
76084+ mid = (low + high) >> 1;
76085+ buid = tmp[mid].uid;
76086+ if (buid == uid)
76087+ return mid;
76088+ if (buid > uid)
76089+ high = mid - 1;
76090+ if (buid < uid)
76091+ low = mid + 1;
76092+ }
76093+
76094+ return -1;
76095+}
76096+
76097+static __inline__ void
76098+gr_insertsort(void)
76099+{
76100+ unsigned short i, j;
76101+ struct crash_uid index;
76102+
76103+ for (i = 1; i < uid_used; i++) {
76104+ index = uid_set[i];
76105+ j = i;
76106+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
76107+ uid_set[j] = uid_set[j - 1];
76108+ j--;
76109+ }
76110+ uid_set[j] = index;
76111+ }
76112+
76113+ return;
76114+}
76115+
76116+static __inline__ void
76117+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
76118+{
76119+ int loc;
76120+ uid_t uid = GR_GLOBAL_UID(kuid);
76121+
76122+ if (uid_used == GR_UIDTABLE_MAX)
76123+ return;
76124+
76125+ loc = gr_find_uid(uid);
76126+
76127+ if (loc >= 0) {
76128+ uid_set[loc].expires = expires;
76129+ return;
76130+ }
76131+
76132+ uid_set[uid_used].uid = uid;
76133+ uid_set[uid_used].expires = expires;
76134+ uid_used++;
76135+
76136+ gr_insertsort();
76137+
76138+ return;
76139+}
76140+
76141+void
76142+gr_remove_uid(const unsigned short loc)
76143+{
76144+ unsigned short i;
76145+
76146+ for (i = loc + 1; i < uid_used; i++)
76147+ uid_set[i - 1] = uid_set[i];
76148+
76149+ uid_used--;
76150+
76151+ return;
76152+}
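
Taken together, gr_find_uid(), gr_insertsort()/gr_insert_uid(), and gr_remove_uid() implement a small sorted-array table: insertion appends and re-sorts, lookup is a binary search, and removal compacts in place. A compact user-space analogue of the same design, with sizes and element types simplified:

    #include <stdio.h>

    #define TABLE_MAX 16

    static unsigned int set[TABLE_MAX];
    static unsigned short used;

    static int find(unsigned int uid)     /* binary search, as gr_find_uid() */
    {
            int low = 0, high = (int)used - 1;

            while (high >= low) {
                    int mid = (low + high) / 2;
                    if (set[mid] == uid)
                            return mid;
                    if (set[mid] > uid)
                            high = mid - 1;
                    else
                            low = mid + 1;
            }
            return -1;
    }

    static void insert(unsigned int uid)  /* append + insertion sort */
    {
            int j;

            if (used == TABLE_MAX || find(uid) >= 0)
                    return;
            for (j = used++; j > 0 && set[j - 1] > uid; j--)
                    set[j] = set[j - 1];
            set[j] = uid;
    }

    int main(void)
    {
            insert(1000); insert(42); insert(65534);
            printf("index of 42: %d\n", find(42)); /* 0: table kept sorted */
            return 0;
    }

Keeping the table sorted on insert makes every later expiry lookup in gr_check_crash_uid() logarithmic rather than linear.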
76153+
76154+int
76155+gr_check_crash_uid(const kuid_t kuid)
76156+{
76157+ int loc;
76158+ int ret = 0;
76159+ uid_t uid;
76160+
76161+ if (unlikely(!gr_acl_is_enabled()))
76162+ return 0;
76163+
76164+ uid = GR_GLOBAL_UID(kuid);
76165+
76166+ spin_lock(&gr_uid_lock);
76167+ loc = gr_find_uid(uid);
76168+
76169+ if (loc < 0)
76170+ goto out_unlock;
76171+
76172+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
76173+ gr_remove_uid(loc);
76174+ else
76175+ ret = 1;
76176+
76177+out_unlock:
76178+ spin_unlock(&gr_uid_lock);
76179+ return ret;
76180+}
76181+
76182+static __inline__ int
76183+proc_is_setxid(const struct cred *cred)
76184+{
76185+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
76186+ !uid_eq(cred->uid, cred->fsuid))
76187+ return 1;
76188+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
76189+ !gid_eq(cred->gid, cred->fsgid))
76190+ return 1;
76191+
76192+ return 0;
76193+}
76194+
76195+extern int gr_fake_force_sig(int sig, struct task_struct *t);
76196+
76197+void
76198+gr_handle_crash(struct task_struct *task, const int sig)
76199+{
76200+ struct acl_subject_label *curr;
76201+ struct task_struct *tsk, *tsk2;
76202+ const struct cred *cred;
76203+ const struct cred *cred2;
76204+
76205+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
76206+ return;
76207+
76208+ if (unlikely(!gr_acl_is_enabled()))
76209+ return;
76210+
76211+ curr = task->acl;
76212+
76213+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
76214+ return;
76215+
76216+ if (time_before_eq(curr->expires, get_seconds())) {
76217+ curr->expires = 0;
76218+ curr->crashes = 0;
76219+ }
76220+
76221+ curr->crashes++;
76222+
76223+ if (!curr->expires)
76224+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
76225+
76226+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76227+ time_after(curr->expires, get_seconds())) {
76228+ rcu_read_lock();
76229+ cred = __task_cred(task);
76230+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
76231+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76232+ spin_lock(&gr_uid_lock);
76233+ gr_insert_uid(cred->uid, curr->expires);
76234+ spin_unlock(&gr_uid_lock);
76235+ curr->expires = 0;
76236+ curr->crashes = 0;
76237+ read_lock(&tasklist_lock);
76238+ do_each_thread(tsk2, tsk) {
76239+ cred2 = __task_cred(tsk);
76240+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
76241+ gr_fake_force_sig(SIGKILL, tsk);
76242+ } while_each_thread(tsk2, tsk);
76243+ read_unlock(&tasklist_lock);
76244+ } else {
76245+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76246+ read_lock(&tasklist_lock);
76247+ read_lock(&grsec_exec_file_lock);
76248+ do_each_thread(tsk2, tsk) {
76249+ if (likely(tsk != task)) {
76250+ // if this thread has the same subject as the one that triggered
76251+ // RES_CRASH and it's the same binary, kill it
76252+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
76253+ gr_fake_force_sig(SIGKILL, tsk);
76254+ }
76255+ } while_each_thread(tsk2, tsk);
76256+ read_unlock(&grsec_exec_file_lock);
76257+ read_unlock(&tasklist_lock);
76258+ }
76259+ rcu_read_unlock();
76260+ }
76261+
76262+ return;
76263+}
76264+
76265+int
76266+gr_check_crash_exec(const struct file *filp)
76267+{
76268+ struct acl_subject_label *curr;
76269+ struct dentry *dentry;
76270+
76271+ if (unlikely(!gr_acl_is_enabled()))
76272+ return 0;
76273+
76274+ read_lock(&gr_inode_lock);
76275+ dentry = filp->f_path.dentry;
76276+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
76277+ current->role);
76278+ read_unlock(&gr_inode_lock);
76279+
76280+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
76281+ (!curr->crashes && !curr->expires))
76282+ return 0;
76283+
76284+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76285+ time_after(curr->expires, get_seconds()))
76286+ return 1;
76287+ else if (time_before_eq(curr->expires, get_seconds())) {
76288+ curr->crashes = 0;
76289+ curr->expires = 0;
76290+ }
76291+
76292+ return 0;
76293+}
76294+
76295+void
76296+gr_handle_alertkill(struct task_struct *task)
76297+{
76298+ struct acl_subject_label *curracl;
76299+ __u32 curr_ip;
76300+ struct task_struct *p, *p2;
76301+
76302+ if (unlikely(!gr_acl_is_enabled()))
76303+ return;
76304+
76305+ curracl = task->acl;
76306+ curr_ip = task->signal->curr_ip;
76307+
76308+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
76309+ read_lock(&tasklist_lock);
76310+ do_each_thread(p2, p) {
76311+ if (p->signal->curr_ip == curr_ip)
76312+ gr_fake_force_sig(SIGKILL, p);
76313+ } while_each_thread(p2, p);
76314+ read_unlock(&tasklist_lock);
76315+ } else if (curracl->mode & GR_KILLPROC)
76316+ gr_fake_force_sig(SIGKILL, task);
76317+
76318+ return;
76319+}
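
The RES_CRASH handling in gr_handle_crash() above amounts to a windowed counter: at most res[GR_CRASH_RES].rlim_cur crashes are tolerated per rlim_max seconds before the lockout fires. A user-space sketch of that counter; field names mirror the subject label, and the time source is simplified.

    #include <stdio.h>
    #include <time.h>

    struct crash_window {
            unsigned long crashes;
            time_t expires;
            unsigned long max_crashes;   /* res[GR_CRASH_RES].rlim_cur analogue */
            unsigned long window_secs;   /* res[GR_CRASH_RES].rlim_max analogue */
    };

    /* returns 1 when the crash rate has been exceeded within the window */
    static int record_crash(struct crash_window *w, time_t now)
    {
            if (w->expires <= now) {     /* window lapsed: reset the counter */
                    w->expires = 0;
                    w->crashes = 0;
            }
            w->crashes++;
            if (!w->expires)
                    w->expires = now + w->window_secs;
            return w->crashes >= w->max_crashes && w->expires > now;
    }

    int main(void)
    {
            struct crash_window w = { .crashes = 0, .expires = 0,
                                      .max_crashes = 3, .window_secs = 30 };
            time_t now = time(NULL);

            printf("%d %d %d\n", record_crash(&w, now),
                   record_crash(&w, now), record_crash(&w, now)); /* 0 0 1 */
            return 0;
    }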
76320diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
76321new file mode 100644
76322index 0000000..6b0c9cc
76323--- /dev/null
76324+++ b/grsecurity/gracl_shm.c
76325@@ -0,0 +1,40 @@
76326+#include <linux/kernel.h>
76327+#include <linux/mm.h>
76328+#include <linux/sched.h>
76329+#include <linux/file.h>
76330+#include <linux/ipc.h>
76331+#include <linux/gracl.h>
76332+#include <linux/grsecurity.h>
76333+#include <linux/grinternal.h>
76334+
76335+int
76336+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76337+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76338+{
76339+ struct task_struct *task;
76340+
76341+ if (!gr_acl_is_enabled())
76342+ return 1;
76343+
76344+ rcu_read_lock();
76345+ read_lock(&tasklist_lock);
76346+
76347+ task = find_task_by_vpid(shm_cprid);
76348+
76349+ if (unlikely(!task))
76350+ task = find_task_by_vpid(shm_lapid);
76351+
76352+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
76353+ (task_pid_nr(task) == shm_lapid)) &&
76354+ (task->acl->mode & GR_PROTSHM) &&
76355+ (task->acl != current->acl))) {
76356+ read_unlock(&tasklist_lock);
76357+ rcu_read_unlock();
76358+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76359+ return 0;
76360+ }
76361+ read_unlock(&tasklist_lock);
76362+ rcu_read_unlock();
76363+
76364+ return 1;
76365+}
76366diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76367new file mode 100644
76368index 0000000..bc0be01
76369--- /dev/null
76370+++ b/grsecurity/grsec_chdir.c
76371@@ -0,0 +1,19 @@
76372+#include <linux/kernel.h>
76373+#include <linux/sched.h>
76374+#include <linux/fs.h>
76375+#include <linux/file.h>
76376+#include <linux/grsecurity.h>
76377+#include <linux/grinternal.h>
76378+
76379+void
76380+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76381+{
76382+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76383+ if ((grsec_enable_chdir && grsec_enable_group &&
76384+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76385+ !grsec_enable_group)) {
76386+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76387+ }
76388+#endif
76389+ return;
76390+}
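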
76391diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76392new file mode 100644
76393index 0000000..114ea4f
76394--- /dev/null
76395+++ b/grsecurity/grsec_chroot.c
76396@@ -0,0 +1,467 @@
76397+#include <linux/kernel.h>
76398+#include <linux/module.h>
76399+#include <linux/sched.h>
76400+#include <linux/file.h>
76401+#include <linux/fs.h>
76402+#include <linux/mount.h>
76403+#include <linux/types.h>
76404+#include "../fs/mount.h"
76405+#include <linux/grsecurity.h>
76406+#include <linux/grinternal.h>
76407+
76408+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76409+int gr_init_ran;
76410+#endif
76411+
76412+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
76413+{
76414+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76415+ struct dentry *tmpd = dentry;
76416+
76417+ read_seqlock_excl(&mount_lock);
76418+ write_seqlock(&rename_lock);
76419+
76420+ while (tmpd != mnt->mnt_root) {
76421+ atomic_inc(&tmpd->chroot_refcnt);
76422+ tmpd = tmpd->d_parent;
76423+ }
76424+ atomic_inc(&tmpd->chroot_refcnt);
76425+
76426+ write_sequnlock(&rename_lock);
76427+ read_sequnlock_excl(&mount_lock);
76428+#endif
76429+}
76430+
76431+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
76432+{
76433+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76434+ struct dentry *tmpd = dentry;
76435+
76436+ read_seqlock_excl(&mount_lock);
76437+ write_seqlock(&rename_lock);
76438+
76439+ while (tmpd != mnt->mnt_root) {
76440+ atomic_dec(&tmpd->chroot_refcnt);
76441+ tmpd = tmpd->d_parent;
76442+ }
76443+ atomic_dec(&tmpd->chroot_refcnt);
76444+
76445+ write_sequnlock(&rename_lock);
76446+ read_sequnlock_excl(&mount_lock);
76447+#endif
76448+}
76449+
76450+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76451+static struct dentry *get_closest_chroot(struct dentry *dentry)
76452+{
76453+ write_seqlock(&rename_lock);
76454+ do {
76455+ if (atomic_read(&dentry->chroot_refcnt)) {
76456+ write_sequnlock(&rename_lock);
76457+ return dentry;
76458+ }
76459+ dentry = dentry->d_parent;
76460+ } while (!IS_ROOT(dentry));
76461+ write_sequnlock(&rename_lock);
76462+ return NULL;
76463+}
76464+#endif
76465+
76466+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
76467+ struct dentry *newdentry, struct vfsmount *newmnt)
76468+{
76469+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76470+ struct dentry *chroot;
76471+
76472+ if (unlikely(!grsec_enable_chroot_rename))
76473+ return 0;
76474+
76475+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
76476+ return 0;
76477+
76478+ chroot = get_closest_chroot(olddentry);
76479+
76480+ if (chroot == NULL)
76481+ return 0;
76482+
76483+ if (is_subdir(newdentry, chroot))
76484+ return 0;
76485+
76486+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
76487+
76488+ return 1;
76489+#else
76490+ return 0;
76491+#endif
76492+}
76493+
76494+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76495+{
76496+#ifdef CONFIG_GRKERNSEC
76497+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76498+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76499+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76500+ && gr_init_ran
76501+#endif
76502+ )
76503+ task->gr_is_chrooted = 1;
76504+ else {
76505+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76506+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76507+ gr_init_ran = 1;
76508+#endif
76509+ task->gr_is_chrooted = 0;
76510+ }
76511+
76512+ task->gr_chroot_dentry = path->dentry;
76513+#endif
76514+ return;
76515+}
76516+
76517+void gr_clear_chroot_entries(struct task_struct *task)
76518+{
76519+#ifdef CONFIG_GRKERNSEC
76520+ task->gr_is_chrooted = 0;
76521+ task->gr_chroot_dentry = NULL;
76522+#endif
76523+ return;
76524+}
76525+
76526+int
76527+gr_handle_chroot_unix(const pid_t pid)
76528+{
76529+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76530+ struct task_struct *p;
76531+
76532+ if (unlikely(!grsec_enable_chroot_unix))
76533+ return 1;
76534+
76535+ if (likely(!proc_is_chrooted(current)))
76536+ return 1;
76537+
76538+ rcu_read_lock();
76539+ read_lock(&tasklist_lock);
76540+ p = find_task_by_vpid_unrestricted(pid);
76541+ if (unlikely(p && !have_same_root(current, p))) {
76542+ read_unlock(&tasklist_lock);
76543+ rcu_read_unlock();
76544+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76545+ return 0;
76546+ }
76547+ read_unlock(&tasklist_lock);
76548+ rcu_read_unlock();
76549+#endif
76550+ return 1;
76551+}
76552+
76553+int
76554+gr_handle_chroot_nice(void)
76555+{
76556+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76557+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76558+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76559+ return -EPERM;
76560+ }
76561+#endif
76562+ return 0;
76563+}
76564+
76565+int
76566+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76567+{
76568+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76569+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76570+ && proc_is_chrooted(current)) {
76571+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76572+ return -EACCES;
76573+ }
76574+#endif
76575+ return 0;
76576+}
76577+
76578+int
76579+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76580+{
76581+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76582+ struct task_struct *p;
76583+ int ret = 0;
76584+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76585+ return ret;
76586+
76587+ read_lock(&tasklist_lock);
76588+ do_each_pid_task(pid, type, p) {
76589+ if (!have_same_root(current, p)) {
76590+ ret = 1;
76591+ goto out;
76592+ }
76593+ } while_each_pid_task(pid, type, p);
76594+out:
76595+ read_unlock(&tasklist_lock);
76596+ return ret;
76597+#endif
76598+ return 0;
76599+}
76600+
76601+int
76602+gr_pid_is_chrooted(struct task_struct *p)
76603+{
76604+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76605+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76606+ return 0;
76607+
76608+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76609+ !have_same_root(current, p)) {
76610+ return 1;
76611+ }
76612+#endif
76613+ return 0;
76614+}
76615+
76616+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76617+
76618+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76619+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76620+{
76621+ struct path path, currentroot;
76622+ int ret = 0;
76623+
76624+ path.dentry = (struct dentry *)u_dentry;
76625+ path.mnt = (struct vfsmount *)u_mnt;
76626+ get_fs_root(current->fs, &currentroot);
76627+ if (path_is_under(&path, &currentroot))
76628+ ret = 1;
76629+ path_put(&currentroot);
76630+
76631+ return ret;
76632+}
76633+#endif
76634+
76635+int
76636+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76637+{
76638+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76639+ if (!grsec_enable_chroot_fchdir)
76640+ return 1;
76641+
76642+ if (!proc_is_chrooted(current))
76643+ return 1;
76644+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76645+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76646+ return 0;
76647+ }
76648+#endif
76649+ return 1;
76650+}
76651+
76652+int
76653+gr_chroot_fhandle(void)
76654+{
76655+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76656+ if (!grsec_enable_chroot_fchdir)
76657+ return 1;
76658+
76659+ if (!proc_is_chrooted(current))
76660+ return 1;
76661+ else {
76662+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76663+ return 0;
76664+ }
76665+#endif
76666+ return 1;
76667+}
76668+
76669+int
76670+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76671+ const u64 shm_createtime)
76672+{
76673+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76674+ struct task_struct *p;
76675+
76676+ if (unlikely(!grsec_enable_chroot_shmat))
76677+ return 1;
76678+
76679+ if (likely(!proc_is_chrooted(current)))
76680+ return 1;
76681+
76682+ rcu_read_lock();
76683+ read_lock(&tasklist_lock);
76684+
76685+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76686+ if (time_before_eq64(p->start_time, shm_createtime)) {
76687+ if (have_same_root(current, p)) {
76688+ goto allow;
76689+ } else {
76690+ read_unlock(&tasklist_lock);
76691+ rcu_read_unlock();
76692+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76693+ return 0;
76694+ }
76695+ }
76696+ /* creator exited or pid was reused; fall through to the next check */
76697+ }
76698+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76699+ if (unlikely(!have_same_root(current, p))) {
76700+ read_unlock(&tasklist_lock);
76701+ rcu_read_unlock();
76702+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76703+ return 0;
76704+ }
76705+ }
76706+
76707+allow:
76708+ read_unlock(&tasklist_lock);
76709+ rcu_read_unlock();
76710+#endif
76711+ return 1;
76712+}
76713+
76714+void
76715+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76716+{
76717+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76718+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76719+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76720+#endif
76721+ return;
76722+}
76723+
76724+int
76725+gr_handle_chroot_mknod(const struct dentry *dentry,
76726+ const struct vfsmount *mnt, const int mode)
76727+{
76728+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76729+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76730+ proc_is_chrooted(current)) {
76731+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76732+ return -EPERM;
76733+ }
76734+#endif
76735+ return 0;
76736+}
76737+
76738+int
76739+gr_handle_chroot_mount(const struct dentry *dentry,
76740+ const struct vfsmount *mnt, const char *dev_name)
76741+{
76742+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76743+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76744+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76745+ return -EPERM;
76746+ }
76747+#endif
76748+ return 0;
76749+}
76750+
76751+int
76752+gr_handle_chroot_pivot(void)
76753+{
76754+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76755+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76756+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76757+ return -EPERM;
76758+ }
76759+#endif
76760+ return 0;
76761+}
76762+
76763+int
76764+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76765+{
76766+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76767+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76768+ !gr_is_outside_chroot(dentry, mnt)) {
76769+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76770+ return -EPERM;
76771+ }
76772+#endif
76773+ return 0;
76774+}
76775+
76776+extern const char *captab_log[];
76777+extern int captab_log_entries;
76778+
76779+int
76780+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76781+{
76782+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76783+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76784+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76785+ if (cap_raised(chroot_caps, cap)) {
76786+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76787+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76788+ }
76789+ return 0;
76790+ }
76791+ }
76792+#endif
76793+ return 1;
76794+}
76795+
76796+int
76797+gr_chroot_is_capable(const int cap)
76798+{
76799+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76800+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76801+#endif
76802+ return 1;
76803+}
76804+
76805+int
76806+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76807+{
76808+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76809+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76810+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76811+ if (cap_raised(chroot_caps, cap)) {
76812+ return 0;
76813+ }
76814+ }
76815+#endif
76816+ return 1;
76817+}
76818+
76819+int
76820+gr_chroot_is_capable_nolog(const int cap)
76821+{
76822+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76823+ return gr_task_chroot_is_capable_nolog(current, cap);
76824+#endif
76825+ return 1;
76826+}
76827+
76828+int
76829+gr_handle_chroot_sysctl(const int op)
76830+{
76831+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76832+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76833+ proc_is_chrooted(current))
76834+ return -EACCES;
76835+#endif
76836+ return 0;
76837+}
76838+
76839+void
76840+gr_handle_chroot_chdir(const struct path *path)
76841+{
76842+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76843+ if (grsec_enable_chroot_chdir)
76844+ set_fs_pwd(current->fs, path);
76845+#endif
76846+ return;
76847+}
76848+
76849+int
76850+gr_handle_chroot_chmod(const struct dentry *dentry,
76851+ const struct vfsmount *mnt, const int mode)
76852+{
76853+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76854+ /* allow chmod +s on directories, but not files */
76855+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76856+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76857+ proc_is_chrooted(current)) {
76858+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76859+ return -EPERM;
76860+ }
76861+#endif
76862+ return 0;
76863+}
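
The capability checks in grsec_chroot.c reduce to a single bitmask test: any capability raised in GR_CHROOT_CAPS is denied to chrooted tasks (with an optional audit log in the logging variant). A minimal sketch of that filtering; the mask below is an illustrative subset, and the real GR_CHROOT_CAPS is defined elsewhere in this patch.

    #include <stdio.h>

    #define CAP_SYS_ADMIN   21
    #define CAP_MKNOD       27
    #define CAP_TO_MASK(c)  (1ULL << (c))

    /* illustrative subset of GR_CHROOT_CAPS */
    static const unsigned long long chroot_denied =
            CAP_TO_MASK(CAP_SYS_ADMIN) | CAP_TO_MASK(CAP_MKNOD);

    /* analogue of gr_task_chroot_is_capable_nolog(): 1 = allowed */
    static int chroot_capable(int in_chroot, int cap)
    {
            if (in_chroot && (chroot_denied & CAP_TO_MASK(cap)))
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", chroot_capable(1, CAP_SYS_ADMIN)); /* 0: denied */
            printf("%d\n", chroot_capable(0, CAP_SYS_ADMIN)); /* 1: unconfined */
            return 0;
    }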
76864diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76865new file mode 100644
76866index 0000000..946f750
76867--- /dev/null
76868+++ b/grsecurity/grsec_disabled.c
76869@@ -0,0 +1,445 @@
76870+#include <linux/kernel.h>
76871+#include <linux/module.h>
76872+#include <linux/sched.h>
76873+#include <linux/file.h>
76874+#include <linux/fs.h>
76875+#include <linux/kdev_t.h>
76876+#include <linux/net.h>
76877+#include <linux/in.h>
76878+#include <linux/ip.h>
76879+#include <linux/skbuff.h>
76880+#include <linux/sysctl.h>
76881+
76882+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76883+void
76884+pax_set_initial_flags(struct linux_binprm *bprm)
76885+{
76886+ return;
76887+}
76888+#endif
76889+
76890+#ifdef CONFIG_SYSCTL
76891+__u32
76892+gr_handle_sysctl(const struct ctl_table * table, const int op)
76893+{
76894+ return 0;
76895+}
76896+#endif
76897+
76898+#ifdef CONFIG_TASKSTATS
76899+int gr_is_taskstats_denied(int pid)
76900+{
76901+ return 0;
76902+}
76903+#endif
76904+
76905+int
76906+gr_acl_is_enabled(void)
76907+{
76908+ return 0;
76909+}
76910+
76911+int
76912+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76913+{
76914+ return 0;
76915+}
76916+
76917+void
76918+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76919+{
76920+ return;
76921+}
76922+
76923+int
76924+gr_handle_rawio(const struct inode *inode)
76925+{
76926+ return 0;
76927+}
76928+
76929+void
76930+gr_acl_handle_psacct(struct task_struct *task, const long code)
76931+{
76932+ return;
76933+}
76934+
76935+int
76936+gr_handle_ptrace(struct task_struct *task, const long request)
76937+{
76938+ return 0;
76939+}
76940+
76941+int
76942+gr_handle_proc_ptrace(struct task_struct *task)
76943+{
76944+ return 0;
76945+}
76946+
76947+int
76948+gr_set_acls(const int type)
76949+{
76950+ return 0;
76951+}
76952+
76953+int
76954+gr_check_hidden_task(const struct task_struct *tsk)
76955+{
76956+ return 0;
76957+}
76958+
76959+int
76960+gr_check_protected_task(const struct task_struct *task)
76961+{
76962+ return 0;
76963+}
76964+
76965+int
76966+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76967+{
76968+ return 0;
76969+}
76970+
76971+void
76972+gr_copy_label(struct task_struct *tsk)
76973+{
76974+ return;
76975+}
76976+
76977+void
76978+gr_set_pax_flags(struct task_struct *task)
76979+{
76980+ return;
76981+}
76982+
76983+int
76984+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76985+ const int unsafe_share)
76986+{
76987+ return 0;
76988+}
76989+
76990+void
76991+gr_handle_delete(const u64 ino, const dev_t dev)
76992+{
76993+ return;
76994+}
76995+
76996+void
76997+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76998+{
76999+ return;
77000+}
77001+
77002+void
77003+gr_handle_crash(struct task_struct *task, const int sig)
77004+{
77005+ return;
77006+}
77007+
77008+int
77009+gr_check_crash_exec(const struct file *filp)
77010+{
77011+ return 0;
77012+}
77013+
77014+int
77015+gr_check_crash_uid(const kuid_t uid)
77016+{
77017+ return 0;
77018+}
77019+
77020+void
77021+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77022+ struct dentry *old_dentry,
77023+ struct dentry *new_dentry,
77024+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
77025+{
77026+ return;
77027+}
77028+
77029+int
77030+gr_search_socket(const int family, const int type, const int protocol)
77031+{
77032+ return 1;
77033+}
77034+
77035+int
77036+gr_search_connectbind(const int mode, const struct socket *sock,
77037+ const struct sockaddr_in *addr)
77038+{
77039+ return 0;
77040+}
77041+
77042+void
77043+gr_handle_alertkill(struct task_struct *task)
77044+{
77045+ return;
77046+}
77047+
77048+__u32
77049+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
77050+{
77051+ return 1;
77052+}
77053+
77054+__u32
77055+gr_acl_handle_hidden_file(const struct dentry * dentry,
77056+ const struct vfsmount * mnt)
77057+{
77058+ return 1;
77059+}
77060+
77061+__u32
77062+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
77063+ int acc_mode)
77064+{
77065+ return 1;
77066+}
77067+
77068+__u32
77069+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
77070+{
77071+ return 1;
77072+}
77073+
77074+__u32
77075+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
77076+{
77077+ return 1;
77078+}
77079+
77080+int
77081+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
77082+ unsigned int *vm_flags)
77083+{
77084+ return 1;
77085+}
77086+
77087+__u32
77088+gr_acl_handle_truncate(const struct dentry * dentry,
77089+ const struct vfsmount * mnt)
77090+{
77091+ return 1;
77092+}
77093+
77094+__u32
77095+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
77096+{
77097+ return 1;
77098+}
77099+
77100+__u32
77101+gr_acl_handle_access(const struct dentry * dentry,
77102+ const struct vfsmount * mnt, const int fmode)
77103+{
77104+ return 1;
77105+}
77106+
77107+__u32
77108+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
77109+ umode_t *mode)
77110+{
77111+ return 1;
77112+}
77113+
77114+__u32
77115+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
77116+{
77117+ return 1;
77118+}
77119+
77120+__u32
77121+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
77122+{
77123+ return 1;
77124+}
77125+
77126+__u32
77127+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
77128+{
77129+ return 1;
77130+}
77131+
77132+void
77133+grsecurity_init(void)
77134+{
77135+ return;
77136+}
77137+
77138+umode_t gr_acl_umask(void)
77139+{
77140+ return 0;
77141+}
77142+
77143+__u32
77144+gr_acl_handle_mknod(const struct dentry * new_dentry,
77145+ const struct dentry * parent_dentry,
77146+ const struct vfsmount * parent_mnt,
77147+ const int mode)
77148+{
77149+ return 1;
77150+}
77151+
77152+__u32
77153+gr_acl_handle_mkdir(const struct dentry * new_dentry,
77154+ const struct dentry * parent_dentry,
77155+ const struct vfsmount * parent_mnt)
77156+{
77157+ return 1;
77158+}
77159+
77160+__u32
77161+gr_acl_handle_symlink(const struct dentry * new_dentry,
77162+ const struct dentry * parent_dentry,
77163+ const struct vfsmount * parent_mnt, const struct filename *from)
77164+{
77165+ return 1;
77166+}
77167+
77168+__u32
77169+gr_acl_handle_link(const struct dentry * new_dentry,
77170+ const struct dentry * parent_dentry,
77171+ const struct vfsmount * parent_mnt,
77172+ const struct dentry * old_dentry,
77173+ const struct vfsmount * old_mnt, const struct filename *to)
77174+{
77175+ return 1;
77176+}
77177+
77178+int
77179+gr_acl_handle_rename(const struct dentry *new_dentry,
77180+ const struct dentry *parent_dentry,
77181+ const struct vfsmount *parent_mnt,
77182+ const struct dentry *old_dentry,
77183+ const struct inode *old_parent_inode,
77184+ const struct vfsmount *old_mnt, const struct filename *newname,
77185+ unsigned int flags)
77186+{
77187+ return 0;
77188+}
77189+
77190+int
77191+gr_acl_handle_filldir(const struct file *file, const char *name,
77192+ const int namelen, const u64 ino)
77193+{
77194+ return 1;
77195+}
77196+
77197+int
77198+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77199+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
77200+{
77201+ return 1;
77202+}
77203+
77204+int
77205+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
77206+{
77207+ return 0;
77208+}
77209+
77210+int
77211+gr_search_accept(const struct socket *sock)
77212+{
77213+ return 0;
77214+}
77215+
77216+int
77217+gr_search_listen(const struct socket *sock)
77218+{
77219+ return 0;
77220+}
77221+
77222+int
77223+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
77224+{
77225+ return 0;
77226+}
77227+
77228+__u32
77229+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
77230+{
77231+ return 1;
77232+}
77233+
77234+__u32
77235+gr_acl_handle_creat(const struct dentry * dentry,
77236+ const struct dentry * p_dentry,
77237+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
77238+ const int imode)
77239+{
77240+ return 1;
77241+}
77242+
77243+void
77244+gr_acl_handle_exit(void)
77245+{
77246+ return;
77247+}
77248+
77249+int
77250+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
77251+{
77252+ return 1;
77253+}
77254+
77255+void
77256+gr_set_role_label(const kuid_t uid, const kgid_t gid)
77257+{
77258+ return;
77259+}
77260+
77261+int
77262+gr_acl_handle_procpidmem(const struct task_struct *task)
77263+{
77264+ return 0;
77265+}
77266+
77267+int
77268+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
77269+{
77270+ return 0;
77271+}
77272+
77273+int
77274+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
77275+{
77276+ return 0;
77277+}
77278+
77279+int
77280+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
77281+{
77282+ return 0;
77283+}
77284+
77285+int
77286+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
77287+{
77288+ return 0;
77289+}
77290+
77291+int gr_acl_enable_at_secure(void)
77292+{
77293+ return 0;
77294+}
77295+
77296+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77297+{
77298+ return dentry->d_sb->s_dev;
77299+}
77300+
77301+u64 gr_get_ino_from_dentry(struct dentry *dentry)
77302+{
77303+ return dentry->d_inode->i_ino;
77304+}
77305+
77306+void gr_put_exec_file(struct task_struct *task)
77307+{
77308+ return;
77309+}
77310+
77311+#ifdef CONFIG_SECURITY
77312+EXPORT_SYMBOL_GPL(gr_check_user_change);
77313+EXPORT_SYMBOL_GPL(gr_check_group_change);
77314+#endif
77315diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
77316new file mode 100644
77317index 0000000..fb7531e
77318--- /dev/null
77319+++ b/grsecurity/grsec_exec.c
77320@@ -0,0 +1,189 @@
77321+#include <linux/kernel.h>
77322+#include <linux/sched.h>
77323+#include <linux/file.h>
77324+#include <linux/binfmts.h>
77325+#include <linux/fs.h>
77326+#include <linux/types.h>
77327+#include <linux/grdefs.h>
77328+#include <linux/grsecurity.h>
77329+#include <linux/grinternal.h>
77330+#include <linux/capability.h>
77331+#include <linux/module.h>
77332+#include <linux/compat.h>
77333+
77334+#include <asm/uaccess.h>
77335+
77336+#ifdef CONFIG_GRKERNSEC_EXECLOG
77337+static char gr_exec_arg_buf[132];
77338+static DEFINE_MUTEX(gr_exec_arg_mutex);
77339+#endif
77340+
77341+struct user_arg_ptr {
77342+#ifdef CONFIG_COMPAT
77343+ bool is_compat;
77344+#endif
77345+ union {
77346+ const char __user *const __user *native;
77347+#ifdef CONFIG_COMPAT
77348+ const compat_uptr_t __user *compat;
77349+#endif
77350+ } ptr;
77351+};
77352+
77353+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
77354+
77355+void
77356+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
77357+{
77358+#ifdef CONFIG_GRKERNSEC_EXECLOG
77359+ char *grarg = gr_exec_arg_buf;
77360+ unsigned int i, x, execlen = 0;
77361+ char c;
77362+
77363+ if (!((grsec_enable_execlog && grsec_enable_group &&
77364+ in_group_p(grsec_audit_gid))
77365+ || (grsec_enable_execlog && !grsec_enable_group)))
77366+ return;
77367+
77368+ mutex_lock(&gr_exec_arg_mutex);
77369+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
77370+
77371+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
77372+ const char __user *p;
77373+ unsigned int len;
77374+
77375+ p = get_user_arg_ptr(argv, i);
77376+ if (IS_ERR(p))
77377+ goto log;
77378+
77379+ len = strnlen_user(p, 128 - execlen);
77380+ if (len > 128 - execlen)
77381+ len = 128 - execlen;
77382+ else if (len > 0)
77383+ len--;
77384+ if (copy_from_user(grarg + execlen, p, len))
77385+ goto log;
77386+
77387+ /* rewrite unprintable characters */
77388+ for (x = 0; x < len; x++) {
77389+ c = *(grarg + execlen + x);
77390+ if (c < 32 || c > 126)
77391+ *(grarg + execlen + x) = ' ';
77392+ }
77393+
77394+ execlen += len;
77395+ *(grarg + execlen) = ' ';
77396+ *(grarg + execlen + 1) = '\0';
77397+ execlen++;
77398+ }
77399+
77400+ log:
77401+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
77402+ bprm->file->f_path.mnt, grarg);
77403+ mutex_unlock(&gr_exec_arg_mutex);
77404+#endif
77405+ return;
77406+}
77407+
77408+#ifdef CONFIG_GRKERNSEC
77409+extern int gr_acl_is_capable(const int cap);
77410+extern int gr_acl_is_capable_nolog(const int cap);
77411+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77412+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
77413+extern int gr_chroot_is_capable(const int cap);
77414+extern int gr_chroot_is_capable_nolog(const int cap);
77415+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77416+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77417+#endif
77418+
77419+const char *captab_log[] = {
77420+ "CAP_CHOWN",
77421+ "CAP_DAC_OVERRIDE",
77422+ "CAP_DAC_READ_SEARCH",
77423+ "CAP_FOWNER",
77424+ "CAP_FSETID",
77425+ "CAP_KILL",
77426+ "CAP_SETGID",
77427+ "CAP_SETUID",
77428+ "CAP_SETPCAP",
77429+ "CAP_LINUX_IMMUTABLE",
77430+ "CAP_NET_BIND_SERVICE",
77431+ "CAP_NET_BROADCAST",
77432+ "CAP_NET_ADMIN",
77433+ "CAP_NET_RAW",
77434+ "CAP_IPC_LOCK",
77435+ "CAP_IPC_OWNER",
77436+ "CAP_SYS_MODULE",
77437+ "CAP_SYS_RAWIO",
77438+ "CAP_SYS_CHROOT",
77439+ "CAP_SYS_PTRACE",
77440+ "CAP_SYS_PACCT",
77441+ "CAP_SYS_ADMIN",
77442+ "CAP_SYS_BOOT",
77443+ "CAP_SYS_NICE",
77444+ "CAP_SYS_RESOURCE",
77445+ "CAP_SYS_TIME",
77446+ "CAP_SYS_TTY_CONFIG",
77447+ "CAP_MKNOD",
77448+ "CAP_LEASE",
77449+ "CAP_AUDIT_WRITE",
77450+ "CAP_AUDIT_CONTROL",
77451+ "CAP_SETFCAP",
77452+ "CAP_MAC_OVERRIDE",
77453+ "CAP_MAC_ADMIN",
77454+ "CAP_SYSLOG",
77455+ "CAP_WAKE_ALARM",
77456+ "CAP_BLOCK_SUSPEND",
77457+ "CAP_AUDIT_READ"
77458+};
77459+
77460+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
77461+
77462+int gr_is_capable(const int cap)
77463+{
77464+#ifdef CONFIG_GRKERNSEC
77465+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77466+ return 1;
77467+ return 0;
77468+#else
77469+ return 1;
77470+#endif
77471+}
77472+
77473+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77474+{
77475+#ifdef CONFIG_GRKERNSEC
77476+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77477+ return 1;
77478+ return 0;
77479+#else
77480+ return 1;
77481+#endif
77482+}
77483+
77484+int gr_is_capable_nolog(const int cap)
77485+{
77486+#ifdef CONFIG_GRKERNSEC
77487+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77488+ return 1;
77489+ return 0;
77490+#else
77491+ return 1;
77492+#endif
77493+}
77494+
77495+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77496+{
77497+#ifdef CONFIG_GRKERNSEC
77498+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77499+ return 1;
77500+ return 0;
77501+#else
77502+ return 1;
77503+#endif
77504+}
77505+
77506+EXPORT_SYMBOL_GPL(gr_is_capable);
77507+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77508+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77509+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
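
A minimal userspace sketch of the argument-sanitizing copy loop in gr_handle_exec_args() above (illustrative, not part of the patch): arguments are packed into a fixed 132-byte buffer with at most 128 bytes of payload, unprintable bytes are rewritten to spaces, and each argument is followed by a space separator. The kernel's strnlen_user()/copy_from_user() handling is replaced with plain libc calls.

#include <stdio.h>
#include <string.h>

static char arg_buf[132];	/* mirrors gr_exec_arg_buf */

static const char *pack_args(int argc, char **argv)
{
	unsigned int i, x, len, execlen = 0;

	memset(arg_buf, 0, sizeof(arg_buf));
	for (i = 0; i < (unsigned int)argc && execlen < 128; i++) {
		len = strlen(argv[i]);	/* no NUL adjustment needed here */
		if (len > 128 - execlen)
			len = 128 - execlen;
		memcpy(arg_buf + execlen, argv[i], len);
		/* rewrite unprintable characters, as the patch does */
		for (x = 0; x < len; x++) {
			char c = arg_buf[execlen + x];
			if (c < 32 || c > 126)
				arg_buf[execlen + x] = ' ';
		}
		execlen += len;
		arg_buf[execlen] = ' ';
		arg_buf[execlen + 1] = '\0';
		execlen++;
	}
	return arg_buf;
}

int main(int argc, char **argv)
{
	printf("exec log line: %s\n", pack_args(argc, argv));
	return 0;
}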
77510diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77511new file mode 100644
77512index 0000000..06cc6ea
77513--- /dev/null
77514+++ b/grsecurity/grsec_fifo.c
77515@@ -0,0 +1,24 @@
77516+#include <linux/kernel.h>
77517+#include <linux/sched.h>
77518+#include <linux/fs.h>
77519+#include <linux/file.h>
77520+#include <linux/grinternal.h>
77521+
77522+int
77523+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77524+ const struct dentry *dir, const int flag, const int acc_mode)
77525+{
77526+#ifdef CONFIG_GRKERNSEC_FIFO
77527+ const struct cred *cred = current_cred();
77528+
77529+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77530+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77531+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77532+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77533+ if (!inode_permission(dentry->d_inode, acc_mode))
77534+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77535+ return -EACCES;
77536+ }
77537+#endif
77538+ return 0;
77539+}
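
A userspace sketch of the gr_handle_fifo() policy above (illustrative, not part of the patch): opening a FIFO that sits in a sticky directory is refused when the FIFO is owned by neither the directory owner nor the opening user. The O_EXCL and inode_permission() refinements are omitted, and the kernel's fsuid is approximated by the effective uid.

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int fifo_would_be_denied(const char *fifo, const char *dir)
{
	struct stat fst, dst;

	if (stat(fifo, &fst) || stat(dir, &dst))
		return 0;	/* can't tell; the real check runs at open() */
	return S_ISFIFO(fst.st_mode) &&
	       (dst.st_mode & S_ISVTX) &&	/* sticky directory */
	       fst.st_uid != dst.st_uid &&
	       fst.st_uid != geteuid();		/* fsuid in the kernel check */
}

int main(void)
{
	printf("/tmp/fifo denied: %d\n",
	       fifo_would_be_denied("/tmp/fifo", "/tmp"));
	return 0;
}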
77540diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77541new file mode 100644
77542index 0000000..8ca18bf
77543--- /dev/null
77544+++ b/grsecurity/grsec_fork.c
77545@@ -0,0 +1,23 @@
77546+#include <linux/kernel.h>
77547+#include <linux/sched.h>
77548+#include <linux/grsecurity.h>
77549+#include <linux/grinternal.h>
77550+#include <linux/errno.h>
77551+
77552+void
77553+gr_log_forkfail(const int retval)
77554+{
77555+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77556+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77557+ switch (retval) {
77558+ case -EAGAIN:
77559+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77560+ break;
77561+ case -ENOMEM:
77562+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77563+ break;
77564+ }
77565+ }
77566+#endif
77567+ return;
77568+}
77569diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77570new file mode 100644
77571index 0000000..4ed9e7d
77572--- /dev/null
77573+++ b/grsecurity/grsec_init.c
77574@@ -0,0 +1,290 @@
77575+#include <linux/kernel.h>
77576+#include <linux/sched.h>
77577+#include <linux/mm.h>
77578+#include <linux/gracl.h>
77579+#include <linux/slab.h>
77580+#include <linux/vmalloc.h>
77581+#include <linux/percpu.h>
77582+#include <linux/module.h>
77583+
77584+int grsec_enable_ptrace_readexec;
77585+int grsec_enable_setxid;
77586+int grsec_enable_symlinkown;
77587+kgid_t grsec_symlinkown_gid;
77588+int grsec_enable_brute;
77589+int grsec_enable_link;
77590+int grsec_enable_dmesg;
77591+int grsec_enable_harden_ptrace;
77592+int grsec_enable_harden_ipc;
77593+int grsec_enable_fifo;
77594+int grsec_enable_execlog;
77595+int grsec_enable_signal;
77596+int grsec_enable_forkfail;
77597+int grsec_enable_audit_ptrace;
77598+int grsec_enable_time;
77599+int grsec_enable_group;
77600+kgid_t grsec_audit_gid;
77601+int grsec_enable_chdir;
77602+int grsec_enable_mount;
77603+int grsec_enable_rofs;
77604+int grsec_deny_new_usb;
77605+int grsec_enable_chroot_findtask;
77606+int grsec_enable_chroot_mount;
77607+int grsec_enable_chroot_shmat;
77608+int grsec_enable_chroot_fchdir;
77609+int grsec_enable_chroot_double;
77610+int grsec_enable_chroot_pivot;
77611+int grsec_enable_chroot_chdir;
77612+int grsec_enable_chroot_chmod;
77613+int grsec_enable_chroot_mknod;
77614+int grsec_enable_chroot_nice;
77615+int grsec_enable_chroot_execlog;
77616+int grsec_enable_chroot_caps;
77617+int grsec_enable_chroot_rename;
77618+int grsec_enable_chroot_sysctl;
77619+int grsec_enable_chroot_unix;
77620+int grsec_enable_tpe;
77621+kgid_t grsec_tpe_gid;
77622+int grsec_enable_blackhole;
77623+#ifdef CONFIG_IPV6_MODULE
77624+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77625+#endif
77626+int grsec_lastack_retries;
77627+int grsec_enable_tpe_all;
77628+int grsec_enable_tpe_invert;
77629+int grsec_enable_socket_all;
77630+kgid_t grsec_socket_all_gid;
77631+int grsec_enable_socket_client;
77632+kgid_t grsec_socket_client_gid;
77633+int grsec_enable_socket_server;
77634+kgid_t grsec_socket_server_gid;
77635+int grsec_resource_logging;
77636+int grsec_disable_privio;
77637+int grsec_enable_log_rwxmaps;
77638+int grsec_lock;
77639+
77640+DEFINE_SPINLOCK(grsec_alert_lock);
77641+unsigned long grsec_alert_wtime = 0;
77642+unsigned long grsec_alert_fyet = 0;
77643+
77644+DEFINE_SPINLOCK(grsec_audit_lock);
77645+
77646+DEFINE_RWLOCK(grsec_exec_file_lock);
77647+
77648+char *gr_shared_page[4];
77649+
77650+char *gr_alert_log_fmt;
77651+char *gr_audit_log_fmt;
77652+char *gr_alert_log_buf;
77653+char *gr_audit_log_buf;
77654+
77655+extern struct gr_arg *gr_usermode;
77656+extern unsigned char *gr_system_salt;
77657+extern unsigned char *gr_system_sum;
77658+
77659+void __init
77660+grsecurity_init(void)
77661+{
77662+ int j;
77663+ /* create the per-cpu shared pages */
77664+
77665+#ifdef CONFIG_X86
77666+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77667+#endif
77668+
77669+ for (j = 0; j < 4; j++) {
77670+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77671+ if (gr_shared_page[j] == NULL) {
77672+ panic("Unable to allocate grsecurity shared page");
77673+ return;
77674+ }
77675+ }
77676+
77677+ /* allocate log buffers */
77678+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77679+ if (!gr_alert_log_fmt) {
77680+ panic("Unable to allocate grsecurity alert log format buffer");
77681+ return;
77682+ }
77683+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77684+ if (!gr_audit_log_fmt) {
77685+ panic("Unable to allocate grsecurity audit log format buffer");
77686+ return;
77687+ }
77688+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77689+ if (!gr_alert_log_buf) {
77690+ panic("Unable to allocate grsecurity alert log buffer");
77691+ return;
77692+ }
77693+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77694+ if (!gr_audit_log_buf) {
77695+ panic("Unable to allocate grsecurity audit log buffer");
77696+ return;
77697+ }
77698+
77699+ /* allocate memory for authentication structure */
77700+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77701+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77702+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77703+
77704+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77705+ panic("Unable to allocate grsecurity authentication structure");
77706+ return;
77707+ }
77708+
77709+#ifdef CONFIG_GRKERNSEC_IO
77710+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77711+ grsec_disable_privio = 1;
77712+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77713+ grsec_disable_privio = 1;
77714+#else
77715+ grsec_disable_privio = 0;
77716+#endif
77717+#endif
77718+
77719+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77720+ /* for backward compatibility, tpe_invert always defaults to on if
77721+ enabled in the kernel
77722+ */
77723+ grsec_enable_tpe_invert = 1;
77724+#endif
77725+
77726+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77727+#ifndef CONFIG_GRKERNSEC_SYSCTL
77728+ grsec_lock = 1;
77729+#endif
77730+
77731+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77732+ grsec_enable_log_rwxmaps = 1;
77733+#endif
77734+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77735+ grsec_enable_group = 1;
77736+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77737+#endif
77738+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77739+ grsec_enable_ptrace_readexec = 1;
77740+#endif
77741+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77742+ grsec_enable_chdir = 1;
77743+#endif
77744+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77745+ grsec_enable_harden_ptrace = 1;
77746+#endif
77747+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77748+ grsec_enable_harden_ipc = 1;
77749+#endif
77750+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77751+ grsec_enable_mount = 1;
77752+#endif
77753+#ifdef CONFIG_GRKERNSEC_LINK
77754+ grsec_enable_link = 1;
77755+#endif
77756+#ifdef CONFIG_GRKERNSEC_BRUTE
77757+ grsec_enable_brute = 1;
77758+#endif
77759+#ifdef CONFIG_GRKERNSEC_DMESG
77760+ grsec_enable_dmesg = 1;
77761+#endif
77762+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77763+ grsec_enable_blackhole = 1;
77764+ grsec_lastack_retries = 4;
77765+#endif
77766+#ifdef CONFIG_GRKERNSEC_FIFO
77767+ grsec_enable_fifo = 1;
77768+#endif
77769+#ifdef CONFIG_GRKERNSEC_EXECLOG
77770+ grsec_enable_execlog = 1;
77771+#endif
77772+#ifdef CONFIG_GRKERNSEC_SETXID
77773+ grsec_enable_setxid = 1;
77774+#endif
77775+#ifdef CONFIG_GRKERNSEC_SIGNAL
77776+ grsec_enable_signal = 1;
77777+#endif
77778+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77779+ grsec_enable_forkfail = 1;
77780+#endif
77781+#ifdef CONFIG_GRKERNSEC_TIME
77782+ grsec_enable_time = 1;
77783+#endif
77784+#ifdef CONFIG_GRKERNSEC_RESLOG
77785+ grsec_resource_logging = 1;
77786+#endif
77787+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77788+ grsec_enable_chroot_findtask = 1;
77789+#endif
77790+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77791+ grsec_enable_chroot_unix = 1;
77792+#endif
77793+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77794+ grsec_enable_chroot_mount = 1;
77795+#endif
77796+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77797+ grsec_enable_chroot_fchdir = 1;
77798+#endif
77799+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77800+ grsec_enable_chroot_shmat = 1;
77801+#endif
77802+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77803+ grsec_enable_audit_ptrace = 1;
77804+#endif
77805+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77806+ grsec_enable_chroot_double = 1;
77807+#endif
77808+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77809+ grsec_enable_chroot_pivot = 1;
77810+#endif
77811+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77812+ grsec_enable_chroot_chdir = 1;
77813+#endif
77814+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77815+ grsec_enable_chroot_chmod = 1;
77816+#endif
77817+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77818+ grsec_enable_chroot_mknod = 1;
77819+#endif
77820+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77821+ grsec_enable_chroot_nice = 1;
77822+#endif
77823+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77824+ grsec_enable_chroot_execlog = 1;
77825+#endif
77826+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77827+ grsec_enable_chroot_caps = 1;
77828+#endif
77829+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77830+ grsec_enable_chroot_rename = 1;
77831+#endif
77832+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77833+ grsec_enable_chroot_sysctl = 1;
77834+#endif
77835+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77836+ grsec_enable_symlinkown = 1;
77837+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77838+#endif
77839+#ifdef CONFIG_GRKERNSEC_TPE
77840+ grsec_enable_tpe = 1;
77841+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77842+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77843+ grsec_enable_tpe_all = 1;
77844+#endif
77845+#endif
77846+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77847+ grsec_enable_socket_all = 1;
77848+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77849+#endif
77850+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77851+ grsec_enable_socket_client = 1;
77852+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77853+#endif
77854+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77855+ grsec_enable_socket_server = 1;
77856+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77857+#endif
77858+#endif
77859+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77860+ grsec_deny_new_usb = 1;
77861+#endif
77862+
77863+ return;
77864+}
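
A compile-time sketch of the default-enable pattern used throughout grsecurity_init() above (illustrative, not part of the patch; SYSCTL and SYSCTL_ON stand in for the CONFIG_GRKERNSEC_SYSCTL symbols): defaults are applied when no sysctl interface exists or when the configuration forces the defaults on, and without a sysctl the settings are additionally locked.

#include <stdio.h>

/* stand-ins for the Kconfig symbols; define or undefine to taste */
#define SYSCTL 1
#define SYSCTL_ON 1

int lock, enable_feature;

int main(void)
{
#if !defined(SYSCTL) || defined(SYSCTL_ON)
#ifndef SYSCTL
	lock = 1;	/* no runtime interface: state is immutable */
#endif
	enable_feature = 1;
#endif
	printf("lock=%d feature=%d\n", lock, enable_feature);
	return 0;
}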
77865diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77866new file mode 100644
77867index 0000000..1773300
77868--- /dev/null
77869+++ b/grsecurity/grsec_ipc.c
77870@@ -0,0 +1,48 @@
77871+#include <linux/kernel.h>
77872+#include <linux/mm.h>
77873+#include <linux/sched.h>
77874+#include <linux/file.h>
77875+#include <linux/ipc.h>
77876+#include <linux/ipc_namespace.h>
77877+#include <linux/grsecurity.h>
77878+#include <linux/grinternal.h>
77879+
77880+int
77881+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77882+{
77883+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77884+ int write;
77885+ int orig_granted_mode;
77886+ kuid_t euid;
77887+ kgid_t egid;
77888+
77889+ if (!grsec_enable_harden_ipc)
77890+ return 1;
77891+
77892+ euid = current_euid();
77893+ egid = current_egid();
77894+
77895+ write = requested_mode & 00002;
77896+ orig_granted_mode = ipcp->mode;
77897+
77898+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77899+ orig_granted_mode >>= 6;
77900+ else {
77901+ /* permissions are likely wrong: lock access down to the owner */
77902+ if (orig_granted_mode & 0007)
77903+ orig_granted_mode = 0;
77904+ /* otherwise do an egid-only check */
77905+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77906+ orig_granted_mode >>= 3;
77907+ /* otherwise, no access */
77908+ else
77909+ orig_granted_mode = 0;
77910+ }
77911+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77912+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77913+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77914+ return 0;
77915+ }
77916+#endif
77917+ return 1;
77918+}
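
A userspace sketch of the mode recomputation in gr_ipc_permitted() above (illustrative, not part of the patch): owners keep the owner bits, any world bits on the object lock non-owners out entirely, and otherwise group membership selects the group bits. A request that the normal IPC check granted but the recomputed mask rejects is what the hardened check denies absent CAP_IPC_OWNER.

#include <stdio.h>

static int harden_mode(int mode, int is_owner, int is_group)
{
	if (is_owner)
		return (mode >> 6) & 7;
	if (mode & 0007)	/* likely-wrong perms: lock to owner */
		return 0;
	if (is_group)
		return (mode >> 3) & 7;
	return 0;
}

int main(void)
{
	/* a non-owner, non-group writer to a 0666 object: the stock check
	   grants rw (06) via the world bits, the hardened mask grants none */
	int mode = 0666, requested = 02, granted = 06;
	int hardened = harden_mode(mode, 0, 0);

	printf("deny=%d\n",
	       !(requested & ~granted & 7) && (requested & ~hardened & 7));
	return 0;
}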
77919diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77920new file mode 100644
77921index 0000000..5e05e20
77922--- /dev/null
77923+++ b/grsecurity/grsec_link.c
77924@@ -0,0 +1,58 @@
77925+#include <linux/kernel.h>
77926+#include <linux/sched.h>
77927+#include <linux/fs.h>
77928+#include <linux/file.h>
77929+#include <linux/grinternal.h>
77930+
77931+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77932+{
77933+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77934+ const struct inode *link_inode = link->dentry->d_inode;
77935+
77936+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77937+ /* ignore root-owned links, e.g. /proc/self */
77938+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77939+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77940+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77941+ return 1;
77942+ }
77943+#endif
77944+ return 0;
77945+}
77946+
77947+int
77948+gr_handle_follow_link(const struct inode *parent,
77949+ const struct inode *inode,
77950+ const struct dentry *dentry, const struct vfsmount *mnt)
77951+{
77952+#ifdef CONFIG_GRKERNSEC_LINK
77953+ const struct cred *cred = current_cred();
77954+
77955+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77956+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77957+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77958+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77959+ return -EACCES;
77960+ }
77961+#endif
77962+ return 0;
77963+}
77964+
77965+int
77966+gr_handle_hardlink(const struct dentry *dentry,
77967+ const struct vfsmount *mnt,
77968+ struct inode *inode, const int mode, const struct filename *to)
77969+{
77970+#ifdef CONFIG_GRKERNSEC_LINK
77971+ const struct cred *cred = current_cred();
77972+
77973+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77974+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77975+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77976+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77977+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77978+ return -EPERM;
77979+ }
77980+#endif
77981+ return 0;
77982+}
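
A userspace sketch of the gr_handle_follow_link() predicate above (illustrative, not part of the patch): following a symlink in a sticky, world-writable directory is refused when the link is owned by neither the directory owner nor the process, with fsuid approximated by the effective uid.

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int follow_would_be_denied(const char *link, const char *dir)
{
	struct stat lst, dst;

	if (lstat(link, &lst) || stat(dir, &dst))
		return 0;
	return S_ISLNK(lst.st_mode) &&
	       (dst.st_mode & S_ISVTX) &&	/* sticky ... */
	       (dst.st_mode & S_IWOTH) &&	/* ... world-writable dir */
	       lst.st_uid != dst.st_uid &&
	       lst.st_uid != geteuid();
}

int main(void)
{
	printf("denied: %d\n", follow_would_be_denied("/tmp/evil", "/tmp"));
	return 0;
}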
77983diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77984new file mode 100644
77985index 0000000..dbe0a6b
77986--- /dev/null
77987+++ b/grsecurity/grsec_log.c
77988@@ -0,0 +1,341 @@
77989+#include <linux/kernel.h>
77990+#include <linux/sched.h>
77991+#include <linux/file.h>
77992+#include <linux/tty.h>
77993+#include <linux/fs.h>
77994+#include <linux/mm.h>
77995+#include <linux/grinternal.h>
77996+
77997+#ifdef CONFIG_TREE_PREEMPT_RCU
77998+#define DISABLE_PREEMPT() preempt_disable()
77999+#define ENABLE_PREEMPT() preempt_enable()
78000+#else
78001+#define DISABLE_PREEMPT()
78002+#define ENABLE_PREEMPT()
78003+#endif
78004+
78005+#define BEGIN_LOCKS(x) \
78006+ DISABLE_PREEMPT(); \
78007+ rcu_read_lock(); \
78008+ read_lock(&tasklist_lock); \
78009+ read_lock(&grsec_exec_file_lock); \
78010+ if (x != GR_DO_AUDIT) \
78011+ spin_lock(&grsec_alert_lock); \
78012+ else \
78013+ spin_lock(&grsec_audit_lock)
78014+
78015+#define END_LOCKS(x) \
78016+ if (x != GR_DO_AUDIT) \
78017+ spin_unlock(&grsec_alert_lock); \
78018+ else \
78019+ spin_unlock(&grsec_audit_lock); \
78020+ read_unlock(&grsec_exec_file_lock); \
78021+ read_unlock(&tasklist_lock); \
78022+ rcu_read_unlock(); \
78023+ ENABLE_PREEMPT(); \
78024+ if (x == GR_DONT_AUDIT) \
78025+ gr_handle_alertkill(current)
78026+
78027+enum {
78028+ FLOODING,
78029+ NO_FLOODING
78030+};
78031+
78032+extern char *gr_alert_log_fmt;
78033+extern char *gr_audit_log_fmt;
78034+extern char *gr_alert_log_buf;
78035+extern char *gr_audit_log_buf;
78036+
78037+static int gr_log_start(int audit)
78038+{
78039+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
78040+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
78041+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78042+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
78043+ unsigned long curr_secs = get_seconds();
78044+
78045+ if (audit == GR_DO_AUDIT)
78046+ goto set_fmt;
78047+
78048+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
78049+ grsec_alert_wtime = curr_secs;
78050+ grsec_alert_fyet = 0;
78051+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
78052+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
78053+ grsec_alert_fyet++;
78054+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
78055+ grsec_alert_wtime = curr_secs;
78056+ grsec_alert_fyet++;
78057+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
78058+ return FLOODING;
78059+ }
78060+ else return FLOODING;
78061+
78062+set_fmt:
78063+#endif
78064+ memset(buf, 0, PAGE_SIZE);
78065+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
78066+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
78067+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78068+ } else if (current->signal->curr_ip) {
78069+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
78070+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
78071+ } else if (gr_acl_is_enabled()) {
78072+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
78073+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78074+ } else {
78075+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
78076+ strcpy(buf, fmt);
78077+ }
78078+
78079+ return NO_FLOODING;
78080+}
78081+
78082+static void gr_log_middle(int audit, const char *msg, va_list ap)
78083+ __attribute__ ((format (printf, 2, 0)));
78084+
78085+static void gr_log_middle(int audit, const char *msg, va_list ap)
78086+{
78087+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78088+ unsigned int len = strlen(buf);
78089+
78090+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78091+
78092+ return;
78093+}
78094+
78095+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78096+ __attribute__ ((format (printf, 2, 3)));
78097+
78098+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78099+{
78100+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78101+ unsigned int len = strlen(buf);
78102+ va_list ap;
78103+
78104+ va_start(ap, msg);
78105+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78106+ va_end(ap);
78107+
78108+ return;
78109+}
78110+
78111+static void gr_log_end(int audit, int append_default)
78112+{
78113+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78114+ if (append_default) {
78115+ struct task_struct *task = current;
78116+ struct task_struct *parent = task->real_parent;
78117+ const struct cred *cred = __task_cred(task);
78118+ const struct cred *pcred = __task_cred(parent);
78119+ unsigned int len = strlen(buf);
78120+
78121+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78122+ }
78123+
78124+ printk("%s\n", buf);
78125+
78126+ return;
78127+}
78128+
78129+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
78130+{
78131+ int logtype;
78132+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
78133+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
78134+ void *voidptr = NULL;
78135+ int num1 = 0, num2 = 0;
78136+ unsigned long ulong1 = 0, ulong2 = 0;
78137+ struct dentry *dentry = NULL;
78138+ struct vfsmount *mnt = NULL;
78139+ struct file *file = NULL;
78140+ struct task_struct *task = NULL;
78141+ struct vm_area_struct *vma = NULL;
78142+ const struct cred *cred, *pcred;
78143+ va_list ap;
78144+
78145+ BEGIN_LOCKS(audit);
78146+ logtype = gr_log_start(audit);
78147+ if (logtype == FLOODING) {
78148+ END_LOCKS(audit);
78149+ return;
78150+ }
78151+ va_start(ap, argtypes);
78152+ switch (argtypes) {
78153+ case GR_TTYSNIFF:
78154+ task = va_arg(ap, struct task_struct *);
78155+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
78156+ break;
78157+ case GR_SYSCTL_HIDDEN:
78158+ str1 = va_arg(ap, char *);
78159+ gr_log_middle_varargs(audit, msg, result, str1);
78160+ break;
78161+ case GR_RBAC:
78162+ dentry = va_arg(ap, struct dentry *);
78163+ mnt = va_arg(ap, struct vfsmount *);
78164+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
78165+ break;
78166+ case GR_RBAC_STR:
78167+ dentry = va_arg(ap, struct dentry *);
78168+ mnt = va_arg(ap, struct vfsmount *);
78169+ str1 = va_arg(ap, char *);
78170+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
78171+ break;
78172+ case GR_STR_RBAC:
78173+ str1 = va_arg(ap, char *);
78174+ dentry = va_arg(ap, struct dentry *);
78175+ mnt = va_arg(ap, struct vfsmount *);
78176+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
78177+ break;
78178+ case GR_RBAC_MODE2:
78179+ dentry = va_arg(ap, struct dentry *);
78180+ mnt = va_arg(ap, struct vfsmount *);
78181+ str1 = va_arg(ap, char *);
78182+ str2 = va_arg(ap, char *);
78183+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
78184+ break;
78185+ case GR_RBAC_MODE3:
78186+ dentry = va_arg(ap, struct dentry *);
78187+ mnt = va_arg(ap, struct vfsmount *);
78188+ str1 = va_arg(ap, char *);
78189+ str2 = va_arg(ap, char *);
78190+ str3 = va_arg(ap, char *);
78191+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
78192+ break;
78193+ case GR_FILENAME:
78194+ dentry = va_arg(ap, struct dentry *);
78195+ mnt = va_arg(ap, struct vfsmount *);
78196+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
78197+ break;
78198+ case GR_STR_FILENAME:
78199+ str1 = va_arg(ap, char *);
78200+ dentry = va_arg(ap, struct dentry *);
78201+ mnt = va_arg(ap, struct vfsmount *);
78202+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
78203+ break;
78204+ case GR_FILENAME_STR:
78205+ dentry = va_arg(ap, struct dentry *);
78206+ mnt = va_arg(ap, struct vfsmount *);
78207+ str1 = va_arg(ap, char *);
78208+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
78209+ break;
78210+ case GR_FILENAME_TWO_INT:
78211+ dentry = va_arg(ap, struct dentry *);
78212+ mnt = va_arg(ap, struct vfsmount *);
78213+ num1 = va_arg(ap, int);
78214+ num2 = va_arg(ap, int);
78215+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
78216+ break;
78217+ case GR_FILENAME_TWO_INT_STR:
78218+ dentry = va_arg(ap, struct dentry *);
78219+ mnt = va_arg(ap, struct vfsmount *);
78220+ num1 = va_arg(ap, int);
78221+ num2 = va_arg(ap, int);
78222+ str1 = va_arg(ap, char *);
78223+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
78224+ break;
78225+ case GR_TEXTREL:
78226+ file = va_arg(ap, struct file *);
78227+ ulong1 = va_arg(ap, unsigned long);
78228+ ulong2 = va_arg(ap, unsigned long);
78229+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
78230+ break;
78231+ case GR_PTRACE:
78232+ task = va_arg(ap, struct task_struct *);
78233+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
78234+ break;
78235+ case GR_RESOURCE:
78236+ task = va_arg(ap, struct task_struct *);
78237+ cred = __task_cred(task);
78238+ pcred = __task_cred(task->real_parent);
78239+ ulong1 = va_arg(ap, unsigned long);
78240+ str1 = va_arg(ap, char *);
78241+ ulong2 = va_arg(ap, unsigned long);
78242+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78243+ break;
78244+ case GR_CAP:
78245+ task = va_arg(ap, struct task_struct *);
78246+ cred = __task_cred(task);
78247+ pcred = __task_cred(task->real_parent);
78248+ str1 = va_arg(ap, char *);
78249+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78250+ break;
78251+ case GR_SIG:
78252+ str1 = va_arg(ap, char *);
78253+ voidptr = va_arg(ap, void *);
78254+ gr_log_middle_varargs(audit, msg, str1, voidptr);
78255+ break;
78256+ case GR_SIG2:
78257+ task = va_arg(ap, struct task_struct *);
78258+ cred = __task_cred(task);
78259+ pcred = __task_cred(task->real_parent);
78260+ num1 = va_arg(ap, int);
78261+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78262+ break;
78263+ case GR_CRASH1:
78264+ task = va_arg(ap, struct task_struct *);
78265+ cred = __task_cred(task);
78266+ pcred = __task_cred(task->real_parent);
78267+ ulong1 = va_arg(ap, unsigned long);
78268+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
78269+ break;
78270+ case GR_CRASH2:
78271+ task = va_arg(ap, struct task_struct *);
78272+ cred = __task_cred(task);
78273+ pcred = __task_cred(task->real_parent);
78274+ ulong1 = va_arg(ap, unsigned long);
78275+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
78276+ break;
78277+ case GR_RWXMAP:
78278+ file = va_arg(ap, struct file *);
78279+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
78280+ break;
78281+ case GR_RWXMAPVMA:
78282+ vma = va_arg(ap, struct vm_area_struct *);
78283+ if (vma->vm_file)
78284+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
78285+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
78286+ str1 = "<stack>";
78287+ else if (vma->vm_start <= current->mm->brk &&
78288+ vma->vm_end >= current->mm->start_brk)
78289+ str1 = "<heap>";
78290+ else
78291+ str1 = "<anonymous mapping>";
78292+ gr_log_middle_varargs(audit, msg, str1);
78293+ break;
78294+ case GR_PSACCT:
78295+ {
78296+ unsigned int wday, cday;
78297+ __u8 whr, chr;
78298+ __u8 wmin, cmin;
78299+ __u8 wsec, csec;
78300+ char cur_tty[64] = { 0 };
78301+ char parent_tty[64] = { 0 };
78302+
78303+ task = va_arg(ap, struct task_struct *);
78304+ wday = va_arg(ap, unsigned int);
78305+ cday = va_arg(ap, unsigned int);
78306+ whr = va_arg(ap, int);
78307+ chr = va_arg(ap, int);
78308+ wmin = va_arg(ap, int);
78309+ cmin = va_arg(ap, int);
78310+ wsec = va_arg(ap, int);
78311+ csec = va_arg(ap, int);
78312+ ulong1 = va_arg(ap, unsigned long);
78313+ cred = __task_cred(task);
78314+ pcred = __task_cred(task->real_parent);
78315+
78316+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78317+ }
78318+ break;
78319+ default:
78320+ gr_log_middle(audit, msg, ap);
78321+ }
78322+ va_end(ap);
78323+ // these don't need DEFAULTSECARGS printed on the end
78324+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
78325+ gr_log_end(audit, 0);
78326+ else
78327+ gr_log_end(audit, 1);
78328+ END_LOCKS(audit);
78329+}
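
A userspace sketch of the alert flood control in gr_log_start() above (illustrative, not part of the patch; TIME and BURST stand in for the CONFIG_GRKERNSEC_FLOODTIME/FLOODBURST values, and plain comparisons stand in for time_after()): a fresh window admits a burst of alerts, the first suppressed alert restarts the window and announces the suppression, and everything else inside that window is dropped.

#include <stdio.h>
#include <time.h>

#define TIME 10
#define BURST 6

static unsigned long wtime;	/* window start */
static unsigned long fyet;	/* alerts seen in the window */

static int flooding(unsigned long now)
{
	if (!wtime || now > wtime + TIME) {
		wtime = now;	/* window expired: start a new one */
		fyet = 0;
		return 0;
	}
	if (fyet < BURST) {
		fyet++;		/* still within the allowed burst */
		return 0;
	}
	if (fyet == BURST) {	/* first suppressed alert restarts window */
		wtime = now;
		fyet++;
		printf("logging disabled for %d seconds\n", TIME);
	}
	return 1;
}

int main(void)
{
	unsigned long t = (unsigned long)time(NULL);
	int i;

	for (i = 0; i < 10; i++)
		printf("alert %d flooding=%d\n", i, flooding(t));
	return 0;
}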
78330diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
78331new file mode 100644
78332index 0000000..0e39d8c
78333--- /dev/null
78334+++ b/grsecurity/grsec_mem.c
78335@@ -0,0 +1,48 @@
78336+#include <linux/kernel.h>
78337+#include <linux/sched.h>
78338+#include <linux/mm.h>
78339+#include <linux/mman.h>
78340+#include <linux/module.h>
78341+#include <linux/grinternal.h>
78342+
78343+void gr_handle_msr_write(void)
78344+{
78345+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
78346+ return;
78347+}
78348+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
78349+
78350+void
78351+gr_handle_ioperm(void)
78352+{
78353+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
78354+ return;
78355+}
78356+
78357+void
78358+gr_handle_iopl(void)
78359+{
78360+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
78361+ return;
78362+}
78363+
78364+void
78365+gr_handle_mem_readwrite(u64 from, u64 to)
78366+{
78367+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
78368+ return;
78369+}
78370+
78371+void
78372+gr_handle_vm86(void)
78373+{
78374+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
78375+ return;
78376+}
78377+
78378+void
78379+gr_log_badprocpid(const char *entry)
78380+{
78381+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
78382+ return;
78383+}
78384diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
78385new file mode 100644
78386index 0000000..6f9eb73
78387--- /dev/null
78388+++ b/grsecurity/grsec_mount.c
78389@@ -0,0 +1,65 @@
78390+#include <linux/kernel.h>
78391+#include <linux/sched.h>
78392+#include <linux/mount.h>
78393+#include <linux/major.h>
78394+#include <linux/grsecurity.h>
78395+#include <linux/grinternal.h>
78396+
78397+void
78398+gr_log_remount(const char *devname, const int retval)
78399+{
78400+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78401+ if (grsec_enable_mount && (retval >= 0))
78402+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
78403+#endif
78404+ return;
78405+}
78406+
78407+void
78408+gr_log_unmount(const char *devname, const int retval)
78409+{
78410+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78411+ if (grsec_enable_mount && (retval >= 0))
78412+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
78413+#endif
78414+ return;
78415+}
78416+
78417+void
78418+gr_log_mount(const char *from, struct path *to, const int retval)
78419+{
78420+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78421+ if (grsec_enable_mount && (retval >= 0))
78422+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
78423+#endif
78424+ return;
78425+}
78426+
78427+int
78428+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78429+{
78430+#ifdef CONFIG_GRKERNSEC_ROFS
78431+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78432+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78433+ return -EPERM;
78434+ } else
78435+ return 0;
78436+#endif
78437+ return 0;
78438+}
78439+
78440+int
78441+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78442+{
78443+#ifdef CONFIG_GRKERNSEC_ROFS
78444+ struct inode *inode = dentry->d_inode;
78445+
78446+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78447+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78448+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78449+ return -EPERM;
78450+ } else
78451+ return 0;
78452+#endif
78453+ return 0;
78454+}
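
A userspace sketch of the gr_handle_rofs_blockwrite() test above (illustrative, not part of the patch): a write-mode open is refused for block devices and for character devices on the raw driver's major (RAW_MAJOR is 162 in linux/major.h); MAY_WRITE mirrors the kernel's 0x2 flag.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#define MAY_WRITE 0x2
#define RAW_MAJOR 162

static int rofs_blockwrite_denied(const struct stat *st, int acc_mode)
{
	return (acc_mode & MAY_WRITE) &&
	       (S_ISBLK(st->st_mode) ||
	        (S_ISCHR(st->st_mode) && major(st->st_rdev) == RAW_MAJOR));
}

int main(void)
{
	struct stat st;

	if (stat("/dev/sda", &st) == 0)
		printf("write to /dev/sda denied: %d\n",
		       rofs_blockwrite_denied(&st, MAY_WRITE));
	return 0;
}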
78455diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78456new file mode 100644
78457index 0000000..6ee9d50
78458--- /dev/null
78459+++ b/grsecurity/grsec_pax.c
78460@@ -0,0 +1,45 @@
78461+#include <linux/kernel.h>
78462+#include <linux/sched.h>
78463+#include <linux/mm.h>
78464+#include <linux/file.h>
78465+#include <linux/grinternal.h>
78466+#include <linux/grsecurity.h>
78467+
78468+void
78469+gr_log_textrel(struct vm_area_struct * vma)
78470+{
78471+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78472+ if (grsec_enable_log_rwxmaps)
78473+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78474+#endif
78475+ return;
78476+}
78477+
78478+void gr_log_ptgnustack(struct file *file)
78479+{
78480+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78481+ if (grsec_enable_log_rwxmaps)
78482+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78483+#endif
78484+ return;
78485+}
78486+
78487+void
78488+gr_log_rwxmmap(struct file *file)
78489+{
78490+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78491+ if (grsec_enable_log_rwxmaps)
78492+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78493+#endif
78494+ return;
78495+}
78496+
78497+void
78498+gr_log_rwxmprotect(struct vm_area_struct *vma)
78499+{
78500+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78501+ if (grsec_enable_log_rwxmaps)
78502+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78503+#endif
78504+ return;
78505+}
78506diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78507new file mode 100644
78508index 0000000..2005a3a
78509--- /dev/null
78510+++ b/grsecurity/grsec_proc.c
78511@@ -0,0 +1,20 @@
78512+#include <linux/kernel.h>
78513+#include <linux/sched.h>
78514+#include <linux/grsecurity.h>
78515+#include <linux/grinternal.h>
78516+
78517+int gr_proc_is_restricted(void)
78518+{
78519+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78520+ const struct cred *cred = current_cred();
78521+#endif
78522+
78523+#ifdef CONFIG_GRKERNSEC_PROC_USER
78524+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78525+ return -EACCES;
78526+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78527+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78528+ return -EACCES;
78529+#endif
78530+ return 0;
78531+}
78532diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78533new file mode 100644
78534index 0000000..f7f29aa
78535--- /dev/null
78536+++ b/grsecurity/grsec_ptrace.c
78537@@ -0,0 +1,30 @@
78538+#include <linux/kernel.h>
78539+#include <linux/sched.h>
78540+#include <linux/grinternal.h>
78541+#include <linux/security.h>
78542+
78543+void
78544+gr_audit_ptrace(struct task_struct *task)
78545+{
78546+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78547+ if (grsec_enable_audit_ptrace)
78548+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78549+#endif
78550+ return;
78551+}
78552+
78553+int
78554+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78555+{
78556+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78557+ const struct dentry *dentry = file->f_path.dentry;
78558+ const struct vfsmount *mnt = file->f_path.mnt;
78559+
78560+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78561+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78562+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78563+ return -EACCES;
78564+ }
78565+#endif
78566+ return 0;
78567+}
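
A rough userspace approximation of the gr_ptrace_readexec() idea above (illustrative, not part of the patch): an exec performed under an unsafe ptrace is refused when the tracer could not read the binary, which keeps ptrace from dumping the text of unreadable, typically setuid, programs. access(2) is a crude stand-in for the kernel's inode_permission() and ACL checks.

#include <stdio.h>
#include <unistd.h>

static int readexec_denied(const char *path, int traced)
{
	return traced && access(path, R_OK) != 0;
}

int main(void)
{
	printf("denied: %d\n", readexec_denied("/usr/bin/sudo", 1));
	return 0;
}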
78568diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78569new file mode 100644
78570index 0000000..3860c7e
78571--- /dev/null
78572+++ b/grsecurity/grsec_sig.c
78573@@ -0,0 +1,236 @@
78574+#include <linux/kernel.h>
78575+#include <linux/sched.h>
78576+#include <linux/fs.h>
78577+#include <linux/delay.h>
78578+#include <linux/grsecurity.h>
78579+#include <linux/grinternal.h>
78580+#include <linux/hardirq.h>
78581+
78582+char *signames[] = {
78583+ [SIGSEGV] = "Segmentation fault",
78584+ [SIGILL] = "Illegal instruction",
78585+ [SIGABRT] = "Abort",
78586+ [SIGBUS] = "Invalid alignment/Bus error"
78587+};
78588+
78589+void
78590+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78591+{
78592+#ifdef CONFIG_GRKERNSEC_SIGNAL
78593+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78594+ (sig == SIGABRT) || (sig == SIGBUS))) {
78595+ if (task_pid_nr(t) == task_pid_nr(current)) {
78596+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78597+ } else {
78598+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78599+ }
78600+ }
78601+#endif
78602+ return;
78603+}
78604+
78605+int
78606+gr_handle_signal(const struct task_struct *p, const int sig)
78607+{
78608+#ifdef CONFIG_GRKERNSEC
78609+ /* ignore the 0 signal for protected task checks */
78610+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78611+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78612+ return -EPERM;
78613+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78614+ return -EPERM;
78615+ }
78616+#endif
78617+ return 0;
78618+}
78619+
78620+#ifdef CONFIG_GRKERNSEC
78621+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78622+
78623+int gr_fake_force_sig(int sig, struct task_struct *t)
78624+{
78625+ unsigned long int flags;
78626+ int ret, blocked, ignored;
78627+ struct k_sigaction *action;
78628+
78629+ spin_lock_irqsave(&t->sighand->siglock, flags);
78630+ action = &t->sighand->action[sig-1];
78631+ ignored = action->sa.sa_handler == SIG_IGN;
78632+ blocked = sigismember(&t->blocked, sig);
78633+ if (blocked || ignored) {
78634+ action->sa.sa_handler = SIG_DFL;
78635+ if (blocked) {
78636+ sigdelset(&t->blocked, sig);
78637+ recalc_sigpending_and_wake(t);
78638+ }
78639+ }
78640+ if (action->sa.sa_handler == SIG_DFL)
78641+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78642+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78643+
78644+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78645+
78646+ return ret;
78647+}
78648+#endif
78649+
78650+#define GR_USER_BAN_TIME (15 * 60)
78651+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78652+
78653+void gr_handle_brute_attach(int dumpable)
78654+{
78655+#ifdef CONFIG_GRKERNSEC_BRUTE
78656+ struct task_struct *p = current;
78657+ kuid_t uid = GLOBAL_ROOT_UID;
78658+ int daemon = 0;
78659+
78660+ if (!grsec_enable_brute)
78661+ return;
78662+
78663+ rcu_read_lock();
78664+ read_lock(&tasklist_lock);
78665+ read_lock(&grsec_exec_file_lock);
78666+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78667+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78668+ p->real_parent->brute = 1;
78669+ daemon = 1;
78670+ } else {
78671+ const struct cred *cred = __task_cred(p), *cred2;
78672+ struct task_struct *tsk, *tsk2;
78673+
78674+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78675+ struct user_struct *user;
78676+
78677+ uid = cred->uid;
78678+
78679+ /* this user ref is put (released) upon execution past expiration */
78680+ user = find_user(uid);
78681+ if (user == NULL)
78682+ goto unlock;
78683+ user->suid_banned = 1;
78684+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78685+ if (user->suid_ban_expires == ~0UL)
78686+ user->suid_ban_expires--;
78687+
78688+ /* only kill other threads of the same binary, from the same user */
78689+ do_each_thread(tsk2, tsk) {
78690+ cred2 = __task_cred(tsk);
78691+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78692+ gr_fake_force_sig(SIGKILL, tsk);
78693+ } while_each_thread(tsk2, tsk);
78694+ }
78695+ }
78696+unlock:
78697+ read_unlock(&grsec_exec_file_lock);
78698+ read_unlock(&tasklist_lock);
78699+ rcu_read_unlock();
78700+
78701+ if (gr_is_global_nonroot(uid))
78702+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78703+ else if (daemon)
78704+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78705+
78706+#endif
78707+ return;
78708+}
78709+
78710+void gr_handle_brute_check(void)
78711+{
78712+#ifdef CONFIG_GRKERNSEC_BRUTE
78713+ struct task_struct *p = current;
78714+
78715+ if (unlikely(p->brute)) {
78716+ if (!grsec_enable_brute)
78717+ p->brute = 0;
78718+ else if (time_before(get_seconds(), p->brute_expires))
78719+ msleep(30 * 1000);
78720+ }
78721+#endif
78722+ return;
78723+}
78724+
78725+void gr_handle_kernel_exploit(void)
78726+{
78727+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78728+ const struct cred *cred;
78729+ struct task_struct *tsk, *tsk2;
78730+ struct user_struct *user;
78731+ kuid_t uid;
78732+
78733+ if (in_irq() || in_serving_softirq() || in_nmi())
78734+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78735+
78736+ uid = current_uid();
78737+
78738+ if (gr_is_global_root(uid))
78739+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78740+ else {
78741+ /* kill all the processes of this user, hold a reference
78742+ to their creds struct, and prevent them from creating
78743+ another process until system reset
78744+ */
78745+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78746+ GR_GLOBAL_UID(uid));
78747+ /* we intentionally leak this ref */
78748+ user = get_uid(current->cred->user);
78749+ if (user)
78750+ user->kernel_banned = 1;
78751+
78752+ /* kill all processes of this user */
78753+ read_lock(&tasklist_lock);
78754+ do_each_thread(tsk2, tsk) {
78755+ cred = __task_cred(tsk);
78756+ if (uid_eq(cred->uid, uid))
78757+ gr_fake_force_sig(SIGKILL, tsk);
78758+ } while_each_thread(tsk2, tsk);
78759+ read_unlock(&tasklist_lock);
78760+ }
78761+#endif
78762+}
78763+
78764+#ifdef CONFIG_GRKERNSEC_BRUTE
78765+static bool suid_ban_expired(struct user_struct *user)
78766+{
78767+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78768+ user->suid_banned = 0;
78769+ user->suid_ban_expires = 0;
78770+ free_uid(user);
78771+ return true;
78772+ }
78773+
78774+ return false;
78775+}
78776+#endif
78777+
78778+int gr_process_kernel_exec_ban(void)
78779+{
78780+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78781+ if (unlikely(current->cred->user->kernel_banned))
78782+ return -EPERM;
78783+#endif
78784+ return 0;
78785+}
78786+
78787+int gr_process_kernel_setuid_ban(struct user_struct *user)
78788+{
78789+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78790+ if (unlikely(user->kernel_banned))
78791+ gr_fake_force_sig(SIGKILL, current);
78792+#endif
78793+ return 0;
78794+}
78795+
78796+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78797+{
78798+#ifdef CONFIG_GRKERNSEC_BRUTE
78799+ struct user_struct *user = current->cred->user;
78800+ if (unlikely(user->suid_banned)) {
78801+ if (suid_ban_expired(user))
78802+ return 0;
78803+ /* disallow execution of suid binaries only */
78804+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78805+ return -EPERM;
78806+ }
78807+#endif
78808+ return 0;
78809+}
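
A userspace sketch of the ban bookkeeping shared by gr_handle_brute_attach() and suid_ban_expired() above (illustrative, not part of the patch): a ban records an expiry timestamp, ~0UL is reserved as a never-expires sentinel (a computed expiry landing on it is nudged back one second), and the ban clears lazily on the first check past the deadline.

#include <stdio.h>
#include <time.h>

#define BAN_TIME (15 * 60)	/* mirrors GR_USER_BAN_TIME */

struct user {
	int banned;
	unsigned long ban_expires;
};

static void ban(struct user *u, unsigned long now)
{
	u->banned = 1;
	u->ban_expires = now + BAN_TIME;
	if (u->ban_expires == ~0UL)	/* keep the "forever" sentinel free */
		u->ban_expires--;
}

static int ban_expired(struct user *u, unsigned long now)
{
	if (u->ban_expires != ~0UL && now >= u->ban_expires) {
		u->banned = 0;		/* clear lazily, on first check */
		u->ban_expires = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct user u = { 0, 0 };
	unsigned long now = (unsigned long)time(NULL);

	ban(&u, now);
	printf("still banned: %d\n", u.banned && !ban_expired(&u, now));
	printf("after %d s: expired=%d\n", BAN_TIME,
	       ban_expired(&u, now + BAN_TIME));
	return 0;
}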
78810diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78811new file mode 100644
78812index 0000000..e3650b6
78813--- /dev/null
78814+++ b/grsecurity/grsec_sock.c
78815@@ -0,0 +1,244 @@
78816+#include <linux/kernel.h>
78817+#include <linux/module.h>
78818+#include <linux/sched.h>
78819+#include <linux/file.h>
78820+#include <linux/net.h>
78821+#include <linux/in.h>
78822+#include <linux/ip.h>
78823+#include <net/sock.h>
78824+#include <net/inet_sock.h>
78825+#include <linux/grsecurity.h>
78826+#include <linux/grinternal.h>
78827+#include <linux/gracl.h>
78828+
78829+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78830+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78831+
78832+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78833+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78834+
78835+#ifdef CONFIG_UNIX_MODULE
78836+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78837+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78838+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78839+EXPORT_SYMBOL_GPL(gr_handle_create);
78840+#endif
78841+
78842+#ifdef CONFIG_GRKERNSEC
78843+#define gr_conn_table_size 32749
78844+struct conn_table_entry {
78845+ struct conn_table_entry *next;
78846+ struct signal_struct *sig;
78847+};
78848+
78849+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78850+DEFINE_SPINLOCK(gr_conn_table_lock);
78851+
78852+extern const char * gr_socktype_to_name(unsigned char type);
78853+extern const char * gr_proto_to_name(unsigned char proto);
78854+extern const char * gr_sockfamily_to_name(unsigned char family);
78855+
78856+static __inline__ int
78857+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78858+{
78859+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78860+}
78861+
78862+static __inline__ int
78863+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78864+ __u16 sport, __u16 dport)
78865+{
78866+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78867+ sig->gr_sport == sport && sig->gr_dport == dport))
78868+ return 1;
78869+ else
78870+ return 0;
78871+}
78872+
78873+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78874+{
78875+ struct conn_table_entry **match;
78876+ unsigned int index;
78877+
78878+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78879+ sig->gr_sport, sig->gr_dport,
78880+ gr_conn_table_size);
78881+
78882+ newent->sig = sig;
78883+
78884+ match = &gr_conn_table[index];
78885+ newent->next = *match;
78886+ *match = newent;
78887+
78888+ return;
78889+}
78890+
78891+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78892+{
78893+ struct conn_table_entry *match, *last = NULL;
78894+ unsigned int index;
78895+
78896+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78897+ sig->gr_sport, sig->gr_dport,
78898+ gr_conn_table_size);
78899+
78900+ match = gr_conn_table[index];
78901+ while (match && !conn_match(match->sig,
78902+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78903+ sig->gr_dport)) {
78904+ last = match;
78905+ match = match->next;
78906+ }
78907+
78908+ if (match) {
78909+ if (last)
78910+ last->next = match->next;
78911+ else
78912+ gr_conn_table[index] = match->next;
78913+ kfree(match);
78914+ }
78915+
78916+ return;
78917+}
78918+
78919+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78920+ __u16 sport, __u16 dport)
78921+{
78922+ struct conn_table_entry *match;
78923+ unsigned int index;
78924+
78925+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78926+
78927+ match = gr_conn_table[index];
78928+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78929+ match = match->next;
78930+
78931+ if (match)
78932+ return match->sig;
78933+ else
78934+ return NULL;
78935+}
78936+
78937+#endif
78938+
78939+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78940+{
78941+#ifdef CONFIG_GRKERNSEC
78942+ struct signal_struct *sig = current->signal;
78943+ struct conn_table_entry *newent;
78944+
78945+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78946+ if (newent == NULL)
78947+ return;
78948+ /* no bh lock needed since we are called with bh disabled */
78949+ spin_lock(&gr_conn_table_lock);
78950+ gr_del_task_from_ip_table_nolock(sig);
78951+ sig->gr_saddr = inet->inet_rcv_saddr;
78952+ sig->gr_daddr = inet->inet_daddr;
78953+ sig->gr_sport = inet->inet_sport;
78954+ sig->gr_dport = inet->inet_dport;
78955+ gr_add_to_task_ip_table_nolock(sig, newent);
78956+ spin_unlock(&gr_conn_table_lock);
78957+#endif
78958+ return;
78959+}
78960+
78961+void gr_del_task_from_ip_table(struct task_struct *task)
78962+{
78963+#ifdef CONFIG_GRKERNSEC
78964+ spin_lock_bh(&gr_conn_table_lock);
78965+ gr_del_task_from_ip_table_nolock(task->signal);
78966+ spin_unlock_bh(&gr_conn_table_lock);
78967+#endif
78968+ return;
78969+}
78970+
78971+void
78972+gr_attach_curr_ip(const struct sock *sk)
78973+{
78974+#ifdef CONFIG_GRKERNSEC
78975+ struct signal_struct *p, *set;
78976+ const struct inet_sock *inet = inet_sk(sk);
78977+
78978+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78979+ return;
78980+
78981+ set = current->signal;
78982+
78983+ spin_lock_bh(&gr_conn_table_lock);
78984+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78985+ inet->inet_dport, inet->inet_sport);
78986+ if (unlikely(p != NULL)) {
78987+ set->curr_ip = p->curr_ip;
78988+ set->used_accept = 1;
78989+ gr_del_task_from_ip_table_nolock(p);
78990+ spin_unlock_bh(&gr_conn_table_lock);
78991+ return;
78992+ }
78993+ spin_unlock_bh(&gr_conn_table_lock);
78994+
78995+ set->curr_ip = inet->inet_daddr;
78996+ set->used_accept = 1;
78997+#endif
78998+ return;
78999+}
79000+
79001+int
79002+gr_handle_sock_all(const int family, const int type, const int protocol)
79003+{
79004+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79005+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
79006+ (family != AF_UNIX)) {
79007+ if (family == AF_INET)
79008+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
79009+ else
79010+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
79011+ return -EACCES;
79012+ }
79013+#endif
79014+ return 0;
79015+}
79016+
79017+int
79018+gr_handle_sock_server(const struct sockaddr *sck)
79019+{
79020+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79021+ if (grsec_enable_socket_server &&
79022+ in_group_p(grsec_socket_server_gid) &&
79023+ sck && (sck->sa_family != AF_UNIX) &&
79024+ (sck->sa_family != AF_LOCAL)) {
79025+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79026+ return -EACCES;
79027+ }
79028+#endif
79029+ return 0;
79030+}
79031+
79032+int
79033+gr_handle_sock_server_other(const struct sock *sck)
79034+{
79035+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79036+ if (grsec_enable_socket_server &&
79037+ in_group_p(grsec_socket_server_gid) &&
79038+ sck && (sck->sk_family != AF_UNIX) &&
79039+ (sck->sk_family != AF_LOCAL)) {
79040+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79041+ return -EACCES;
79042+ }
79043+#endif
79044+ return 0;
79045+}
79046+
79047+int
79048+gr_handle_sock_client(const struct sockaddr *sck)
79049+{
79050+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79051+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
79052+ sck && (sck->sa_family != AF_UNIX) &&
79053+ (sck->sa_family != AF_LOCAL)) {
79054+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
79055+ return -EACCES;
79056+ }
79057+#endif
79058+ return 0;
79059+}
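
A userspace sketch of the connection table kept in grsec_sock.c above (illustrative, not part of the patch): entries keyed by the (saddr, daddr, sport, dport) 4-tuple hash into a fixed, prime-sized array of singly linked chains, mirroring conn_hash() and gr_conn_table_size; the casts avoid the signed-shift promotion the kernel code tolerates.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749	/* prime, as in the patch */

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void add(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
{
	struct entry *e = malloc(sizeof(*e));
	unsigned int i = conn_hash(saddr, daddr, sport, dport);

	if (!e)
		return;
	e->saddr = saddr; e->daddr = daddr; e->sport = sport; e->dport = dport;
	e->next = table[i];	/* push onto the chain head */
	table[i] = e;
}

static struct entry *lookup(uint32_t saddr, uint32_t daddr,
			    uint16_t sport, uint16_t dport)
{
	struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

	while (e && !(e->saddr == saddr && e->daddr == daddr &&
		      e->sport == sport && e->dport == dport))
		e = e->next;
	return e;
}

int main(void)
{
	add(0x0100007f, 0x0101a8c0, 4444, 80);	/* sample tuple */
	printf("found: %d\n",
	       lookup(0x0100007f, 0x0101a8c0, 4444, 80) != NULL);
	return 0;
}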
79060diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
79061new file mode 100644
79062index 0000000..cce889e
79063--- /dev/null
79064+++ b/grsecurity/grsec_sysctl.c
79065@@ -0,0 +1,488 @@
79066+#include <linux/kernel.h>
79067+#include <linux/sched.h>
79068+#include <linux/sysctl.h>
79069+#include <linux/grsecurity.h>
79070+#include <linux/grinternal.h>
79071+
79072+int
79073+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
79074+{
79075+#ifdef CONFIG_GRKERNSEC_SYSCTL
79076+ if (dirname == NULL || name == NULL)
79077+ return 0;
79078+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
79079+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
79080+ return -EACCES;
79081+ }
79082+#endif
79083+ return 0;
79084+}
79085+
79086+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
79087+static int __maybe_unused __read_only one = 1;
79088+#endif
79089+
79090+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
79091+ defined(CONFIG_GRKERNSEC_DENYUSB)
79092+struct ctl_table grsecurity_table[] = {
79093+#ifdef CONFIG_GRKERNSEC_SYSCTL
79094+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
79095+#ifdef CONFIG_GRKERNSEC_IO
79096+ {
79097+ .procname = "disable_priv_io",
79098+ .data = &grsec_disable_privio,
79099+ .maxlen = sizeof(int),
79100+ .mode = 0600,
79101+ .proc_handler = &proc_dointvec,
79102+ },
79103+#endif
79104+#endif
79105+#ifdef CONFIG_GRKERNSEC_LINK
79106+ {
79107+ .procname = "linking_restrictions",
79108+ .data = &grsec_enable_link,
79109+ .maxlen = sizeof(int),
79110+ .mode = 0600,
79111+ .proc_handler = &proc_dointvec,
79112+ },
79113+#endif
79114+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
79115+ {
79116+ .procname = "enforce_symlinksifowner",
79117+ .data = &grsec_enable_symlinkown,
79118+ .maxlen = sizeof(int),
79119+ .mode = 0600,
79120+ .proc_handler = &proc_dointvec,
79121+ },
79122+ {
79123+ .procname = "symlinkown_gid",
79124+ .data = &grsec_symlinkown_gid,
79125+ .maxlen = sizeof(int),
79126+ .mode = 0600,
79127+ .proc_handler = &proc_dointvec,
79128+ },
79129+#endif
79130+#ifdef CONFIG_GRKERNSEC_BRUTE
79131+ {
79132+ .procname = "deter_bruteforce",
79133+ .data = &grsec_enable_brute,
79134+ .maxlen = sizeof(int),
79135+ .mode = 0600,
79136+ .proc_handler = &proc_dointvec,
79137+ },
79138+#endif
79139+#ifdef CONFIG_GRKERNSEC_FIFO
79140+ {
79141+ .procname = "fifo_restrictions",
79142+ .data = &grsec_enable_fifo,
79143+ .maxlen = sizeof(int),
79144+ .mode = 0600,
79145+ .proc_handler = &proc_dointvec,
79146+ },
79147+#endif
79148+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79149+ {
79150+ .procname = "ptrace_readexec",
79151+ .data = &grsec_enable_ptrace_readexec,
79152+ .maxlen = sizeof(int),
79153+ .mode = 0600,
79154+ .proc_handler = &proc_dointvec,
79155+ },
79156+#endif
79157+#ifdef CONFIG_GRKERNSEC_SETXID
79158+ {
79159+ .procname = "consistent_setxid",
79160+ .data = &grsec_enable_setxid,
79161+ .maxlen = sizeof(int),
79162+ .mode = 0600,
79163+ .proc_handler = &proc_dointvec,
79164+ },
79165+#endif
79166+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79167+ {
79168+ .procname = "ip_blackhole",
79169+ .data = &grsec_enable_blackhole,
79170+ .maxlen = sizeof(int),
79171+ .mode = 0600,
79172+ .proc_handler = &proc_dointvec,
79173+ },
79174+ {
79175+ .procname = "lastack_retries",
79176+ .data = &grsec_lastack_retries,
79177+ .maxlen = sizeof(int),
79178+ .mode = 0600,
79179+ .proc_handler = &proc_dointvec,
79180+ },
79181+#endif
79182+#ifdef CONFIG_GRKERNSEC_EXECLOG
79183+ {
79184+ .procname = "exec_logging",
79185+ .data = &grsec_enable_execlog,
79186+ .maxlen = sizeof(int),
79187+ .mode = 0600,
79188+ .proc_handler = &proc_dointvec,
79189+ },
79190+#endif
79191+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79192+ {
79193+ .procname = "rwxmap_logging",
79194+ .data = &grsec_enable_log_rwxmaps,
79195+ .maxlen = sizeof(int),
79196+ .mode = 0600,
79197+ .proc_handler = &proc_dointvec,
79198+ },
79199+#endif
79200+#ifdef CONFIG_GRKERNSEC_SIGNAL
79201+ {
79202+ .procname = "signal_logging",
79203+ .data = &grsec_enable_signal,
79204+ .maxlen = sizeof(int),
79205+ .mode = 0600,
79206+ .proc_handler = &proc_dointvec,
79207+ },
79208+#endif
79209+#ifdef CONFIG_GRKERNSEC_FORKFAIL
79210+ {
79211+ .procname = "forkfail_logging",
79212+ .data = &grsec_enable_forkfail,
79213+ .maxlen = sizeof(int),
79214+ .mode = 0600,
79215+ .proc_handler = &proc_dointvec,
79216+ },
79217+#endif
79218+#ifdef CONFIG_GRKERNSEC_TIME
79219+ {
79220+ .procname = "timechange_logging",
79221+ .data = &grsec_enable_time,
79222+ .maxlen = sizeof(int),
79223+ .mode = 0600,
79224+ .proc_handler = &proc_dointvec,
79225+ },
79226+#endif
79227+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
79228+ {
79229+ .procname = "chroot_deny_shmat",
79230+ .data = &grsec_enable_chroot_shmat,
79231+ .maxlen = sizeof(int),
79232+ .mode = 0600,
79233+ .proc_handler = &proc_dointvec,
79234+ },
79235+#endif
79236+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
79237+ {
79238+ .procname = "chroot_deny_unix",
79239+ .data = &grsec_enable_chroot_unix,
79240+ .maxlen = sizeof(int),
79241+ .mode = 0600,
79242+ .proc_handler = &proc_dointvec,
79243+ },
79244+#endif
79245+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
79246+ {
79247+ .procname = "chroot_deny_mount",
79248+ .data = &grsec_enable_chroot_mount,
79249+ .maxlen = sizeof(int),
79250+ .mode = 0600,
79251+ .proc_handler = &proc_dointvec,
79252+ },
79253+#endif
79254+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
79255+ {
79256+ .procname = "chroot_deny_fchdir",
79257+ .data = &grsec_enable_chroot_fchdir,
79258+ .maxlen = sizeof(int),
79259+ .mode = 0600,
79260+ .proc_handler = &proc_dointvec,
79261+ },
79262+#endif
79263+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
79264+ {
79265+ .procname = "chroot_deny_chroot",
79266+ .data = &grsec_enable_chroot_double,
79267+ .maxlen = sizeof(int),
79268+ .mode = 0600,
79269+ .proc_handler = &proc_dointvec,
79270+ },
79271+#endif
79272+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
79273+ {
79274+ .procname = "chroot_deny_pivot",
79275+ .data = &grsec_enable_chroot_pivot,
79276+ .maxlen = sizeof(int),
79277+ .mode = 0600,
79278+ .proc_handler = &proc_dointvec,
79279+ },
79280+#endif
79281+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
79282+ {
79283+ .procname = "chroot_enforce_chdir",
79284+ .data = &grsec_enable_chroot_chdir,
79285+ .maxlen = sizeof(int),
79286+ .mode = 0600,
79287+ .proc_handler = &proc_dointvec,
79288+ },
79289+#endif
79290+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
79291+ {
79292+ .procname = "chroot_deny_chmod",
79293+ .data = &grsec_enable_chroot_chmod,
79294+ .maxlen = sizeof(int),
79295+ .mode = 0600,
79296+ .proc_handler = &proc_dointvec,
79297+ },
79298+#endif
79299+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
79300+ {
79301+ .procname = "chroot_deny_mknod",
79302+ .data = &grsec_enable_chroot_mknod,
79303+ .maxlen = sizeof(int),
79304+ .mode = 0600,
79305+ .proc_handler = &proc_dointvec,
79306+ },
79307+#endif
79308+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
79309+ {
79310+ .procname = "chroot_restrict_nice",
79311+ .data = &grsec_enable_chroot_nice,
79312+ .maxlen = sizeof(int),
79313+ .mode = 0600,
79314+ .proc_handler = &proc_dointvec,
79315+ },
79316+#endif
79317+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
79318+ {
79319+ .procname = "chroot_execlog",
79320+ .data = &grsec_enable_chroot_execlog,
79321+ .maxlen = sizeof(int),
79322+ .mode = 0600,
79323+ .proc_handler = &proc_dointvec,
79324+ },
79325+#endif
79326+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
79327+ {
79328+ .procname = "chroot_caps",
79329+ .data = &grsec_enable_chroot_caps,
79330+ .maxlen = sizeof(int),
79331+ .mode = 0600,
79332+ .proc_handler = &proc_dointvec,
79333+ },
79334+#endif
79335+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
79336+ {
79337+ .procname = "chroot_deny_bad_rename",
79338+ .data = &grsec_enable_chroot_rename,
79339+ .maxlen = sizeof(int),
79340+ .mode = 0600,
79341+ .proc_handler = &proc_dointvec,
79342+ },
79343+#endif
79344+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
79345+ {
79346+ .procname = "chroot_deny_sysctl",
79347+ .data = &grsec_enable_chroot_sysctl,
79348+ .maxlen = sizeof(int),
79349+ .mode = 0600,
79350+ .proc_handler = &proc_dointvec,
79351+ },
79352+#endif
79353+#ifdef CONFIG_GRKERNSEC_TPE
79354+ {
79355+ .procname = "tpe",
79356+ .data = &grsec_enable_tpe,
79357+ .maxlen = sizeof(int),
79358+ .mode = 0600,
79359+ .proc_handler = &proc_dointvec,
79360+ },
79361+ {
79362+ .procname = "tpe_gid",
79363+ .data = &grsec_tpe_gid,
79364+ .maxlen = sizeof(int),
79365+ .mode = 0600,
79366+ .proc_handler = &proc_dointvec,
79367+ },
79368+#endif
79369+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79370+ {
79371+ .procname = "tpe_invert",
79372+ .data = &grsec_enable_tpe_invert,
79373+ .maxlen = sizeof(int),
79374+ .mode = 0600,
79375+ .proc_handler = &proc_dointvec,
79376+ },
79377+#endif
79378+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79379+ {
79380+ .procname = "tpe_restrict_all",
79381+ .data = &grsec_enable_tpe_all,
79382+ .maxlen = sizeof(int),
79383+ .mode = 0600,
79384+ .proc_handler = &proc_dointvec,
79385+ },
79386+#endif
79387+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79388+ {
79389+ .procname = "socket_all",
79390+ .data = &grsec_enable_socket_all,
79391+ .maxlen = sizeof(int),
79392+ .mode = 0600,
79393+ .proc_handler = &proc_dointvec,
79394+ },
79395+ {
79396+ .procname = "socket_all_gid",
79397+ .data = &grsec_socket_all_gid,
79398+ .maxlen = sizeof(int),
79399+ .mode = 0600,
79400+ .proc_handler = &proc_dointvec,
79401+ },
79402+#endif
79403+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79404+ {
79405+ .procname = "socket_client",
79406+ .data = &grsec_enable_socket_client,
79407+ .maxlen = sizeof(int),
79408+ .mode = 0600,
79409+ .proc_handler = &proc_dointvec,
79410+ },
79411+ {
79412+ .procname = "socket_client_gid",
79413+ .data = &grsec_socket_client_gid,
79414+ .maxlen = sizeof(int),
79415+ .mode = 0600,
79416+ .proc_handler = &proc_dointvec,
79417+ },
79418+#endif
79419+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79420+ {
79421+ .procname = "socket_server",
79422+ .data = &grsec_enable_socket_server,
79423+ .maxlen = sizeof(int),
79424+ .mode = 0600,
79425+ .proc_handler = &proc_dointvec,
79426+ },
79427+ {
79428+ .procname = "socket_server_gid",
79429+ .data = &grsec_socket_server_gid,
79430+ .maxlen = sizeof(int),
79431+ .mode = 0600,
79432+ .proc_handler = &proc_dointvec,
79433+ },
79434+#endif
79435+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79436+ {
79437+ .procname = "audit_group",
79438+ .data = &grsec_enable_group,
79439+ .maxlen = sizeof(int),
79440+ .mode = 0600,
79441+ .proc_handler = &proc_dointvec,
79442+ },
79443+ {
79444+ .procname = "audit_gid",
79445+ .data = &grsec_audit_gid,
79446+ .maxlen = sizeof(int),
79447+ .mode = 0600,
79448+ .proc_handler = &proc_dointvec,
79449+ },
79450+#endif
79451+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79452+ {
79453+ .procname = "audit_chdir",
79454+ .data = &grsec_enable_chdir,
79455+ .maxlen = sizeof(int),
79456+ .mode = 0600,
79457+ .proc_handler = &proc_dointvec,
79458+ },
79459+#endif
79460+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79461+ {
79462+ .procname = "audit_mount",
79463+ .data = &grsec_enable_mount,
79464+ .maxlen = sizeof(int),
79465+ .mode = 0600,
79466+ .proc_handler = &proc_dointvec,
79467+ },
79468+#endif
79469+#ifdef CONFIG_GRKERNSEC_DMESG
79470+ {
79471+ .procname = "dmesg",
79472+ .data = &grsec_enable_dmesg,
79473+ .maxlen = sizeof(int),
79474+ .mode = 0600,
79475+ .proc_handler = &proc_dointvec,
79476+ },
79477+#endif
79478+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79479+ {
79480+ .procname = "chroot_findtask",
79481+ .data = &grsec_enable_chroot_findtask,
79482+ .maxlen = sizeof(int),
79483+ .mode = 0600,
79484+ .proc_handler = &proc_dointvec,
79485+ },
79486+#endif
79487+#ifdef CONFIG_GRKERNSEC_RESLOG
79488+ {
79489+ .procname = "resource_logging",
79490+ .data = &grsec_resource_logging,
79491+ .maxlen = sizeof(int),
79492+ .mode = 0600,
79493+ .proc_handler = &proc_dointvec,
79494+ },
79495+#endif
79496+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79497+ {
79498+ .procname = "audit_ptrace",
79499+ .data = &grsec_enable_audit_ptrace,
79500+ .maxlen = sizeof(int),
79501+ .mode = 0600,
79502+ .proc_handler = &proc_dointvec,
79503+ },
79504+#endif
79505+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79506+ {
79507+ .procname = "harden_ptrace",
79508+ .data = &grsec_enable_harden_ptrace,
79509+ .maxlen = sizeof(int),
79510+ .mode = 0600,
79511+ .proc_handler = &proc_dointvec,
79512+ },
79513+#endif
79514+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79515+ {
79516+ .procname = "harden_ipc",
79517+ .data = &grsec_enable_harden_ipc,
79518+ .maxlen = sizeof(int),
79519+ .mode = 0600,
79520+ .proc_handler = &proc_dointvec,
79521+ },
79522+#endif
79523+ {
79524+ .procname = "grsec_lock",
79525+ .data = &grsec_lock,
79526+ .maxlen = sizeof(int),
79527+ .mode = 0600,
79528+ .proc_handler = &proc_dointvec,
79529+ },
79530+#endif
79531+#ifdef CONFIG_GRKERNSEC_ROFS
79532+ {
79533+ .procname = "romount_protect",
79534+ .data = &grsec_enable_rofs,
79535+ .maxlen = sizeof(int),
79536+ .mode = 0600,
79537+ .proc_handler = &proc_dointvec_minmax,
79538+ .extra1 = &one,
79539+ .extra2 = &one,
79540+ },
79541+#endif
79542+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79543+ {
79544+ .procname = "deny_new_usb",
79545+ .data = &grsec_deny_new_usb,
79546+ .maxlen = sizeof(int),
79547+ .mode = 0600,
79548+ .proc_handler = &proc_dointvec,
79549+ },
79550+#endif
79551+ { }
79552+};
79553+#endif
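
Every entry in grsecurity_table above follows one template: an int backing variable, mode 0600 so only root may read or write it, and proc_dointvec as the handler; gr_handle_sysctl_mod() at the top of the file then vetoes any further write under the grsecurity directory once grsec_lock is set. The sketch below shows how such a table could be registered as /proc/sys/kernel/grsecurity with the 4.0-era sysctl API; the actual patch wires the table into the kernel's existing sysctl tree elsewhere, so this standalone registration is an assumption for illustration only.

/* Sketch: registering a ctl_table like grsecurity_table under
 * /proc/sys/kernel/grsecurity. Illustrative; the patch itself hooks
 * the table into the existing kernel sysctl tree instead. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

extern struct ctl_table grsecurity_table[];

static struct ctl_table grsec_dir[] = {
        {
                .procname = "grsecurity",
                .mode     = 0500,
                .child    = grsecurity_table,
        },
        { }
};

static struct ctl_table kernel_dir[] = {
        {
                .procname = "kernel",
                .mode     = 0555,
                .child    = grsec_dir,
        },
        { }
};

static struct ctl_table_header *grsec_sysctl_header;

static int __init grsec_sysctl_init(void)
{
        grsec_sysctl_header = register_sysctl_table(kernel_dir);
        return grsec_sysctl_header ? 0 : -ENOMEM;
}
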
79554diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79555new file mode 100644
79556index 0000000..61b514e
79557--- /dev/null
79558+++ b/grsecurity/grsec_time.c
79559@@ -0,0 +1,16 @@
79560+#include <linux/kernel.h>
79561+#include <linux/sched.h>
79562+#include <linux/grinternal.h>
79563+#include <linux/module.h>
79564+
79565+void
79566+gr_log_timechange(void)
79567+{
79568+#ifdef CONFIG_GRKERNSEC_TIME
79569+ if (grsec_enable_time)
79570+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79571+#endif
79572+ return;
79573+}
79574+
79575+EXPORT_SYMBOL_GPL(gr_log_timechange);
79576diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79577new file mode 100644
79578index 0000000..d1953de
79579--- /dev/null
79580+++ b/grsecurity/grsec_tpe.c
79581@@ -0,0 +1,78 @@
79582+#include <linux/kernel.h>
79583+#include <linux/sched.h>
79584+#include <linux/file.h>
79585+#include <linux/fs.h>
79586+#include <linux/grinternal.h>
79587+
79588+extern int gr_acl_tpe_check(void);
79589+
79590+int
79591+gr_tpe_allow(const struct file *file)
79592+{
79593+#ifdef CONFIG_GRKERNSEC
79594+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79595+ struct inode *file_inode = file->f_path.dentry->d_inode;
79596+ const struct cred *cred = current_cred();
79597+ char *msg = NULL;
79598+ char *msg2 = NULL;
79599+
79600+ // never restrict root
79601+ if (gr_is_global_root(cred->uid))
79602+ return 1;
79603+
79604+ if (grsec_enable_tpe) {
79605+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79606+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79607+ msg = "not being in trusted group";
79608+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79609+ msg = "being in untrusted group";
79610+#else
79611+ if (in_group_p(grsec_tpe_gid))
79612+ msg = "being in untrusted group";
79613+#endif
79614+ }
79615+ if (!msg && gr_acl_tpe_check())
79616+ msg = "being in untrusted role";
79617+
79618+ // not in any affected group/role
79619+ if (!msg)
79620+ goto next_check;
79621+
79622+ if (gr_is_global_nonroot(inode->i_uid))
79623+ msg2 = "file in non-root-owned directory";
79624+ else if (inode->i_mode & S_IWOTH)
79625+ msg2 = "file in world-writable directory";
79626+ else if (inode->i_mode & S_IWGRP)
79627+ msg2 = "file in group-writable directory";
79628+ else if (file_inode->i_mode & S_IWOTH)
79629+ msg2 = "file is world-writable";
79630+
79631+ if (msg && msg2) {
79632+ char fullmsg[70] = {0};
79633+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79634+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79635+ return 0;
79636+ }
79637+ msg = NULL;
79638+next_check:
79639+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79640+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79641+ return 1;
79642+
79643+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79644+ msg = "directory not owned by user";
79645+ else if (inode->i_mode & S_IWOTH)
79646+ msg = "file in world-writable directory";
79647+ else if (inode->i_mode & S_IWGRP)
79648+ msg = "file in group-writable directory";
79649+ else if (file_inode->i_mode & S_IWOTH)
79650+ msg = "file is world-writable";
79651+
79652+ if (msg) {
79653+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79654+ return 0;
79655+ }
79656+#endif
79657+#endif
79658+ return 1;
79659+}
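
gr_tpe_allow() decides in two stages: first whether the caller counts as untrusted (membership in grsec_tpe_gid, optionally inverted so the group marks trusted users instead, or an untrusted ACL role), then whether the binary sits in an unsafe location (parent directory not root-owned or group/world-writable, or the file itself world-writable). CONFIG_GRKERNSEC_TPE_ALL then repeats the location test for every non-root user against their own ownership. A condensed, compilable sketch of the first pass, with stand-in booleans replacing the kernel's cred and inode lookups (the TPE_ALL second pass is omitted):

/* Condensed sketch of the first-pass decision in gr_tpe_allow().
 * Stand-in booleans replace the kernel's cred/inode queries. */
#include <stdbool.h>

struct tpe_ctx {
        bool is_root;       /* gr_is_global_root(cred->uid) */
        bool in_tpe_group;  /* in_group_p(grsec_tpe_gid) */
        bool invert;        /* TPE_INVERT: group marks trusted users */
        bool dir_nonroot;   /* parent directory not owned by root */
        bool dir_writable;  /* parent directory group- or world-writable */
        bool file_ww;       /* the file itself is world-writable */
};

/* Returns true when execution is allowed. */
static bool tpe_allow(const struct tpe_ctx *c)
{
        bool untrusted;

        if (c->is_root)         /* root is never restricted */
                return true;

        untrusted = c->invert ? !c->in_tpe_group : c->in_tpe_group;
        if (!untrusted)
                return true;

        /* untrusted callers may only exec from safe locations */
        return !(c->dir_nonroot || c->dir_writable || c->file_ww);
}
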
79660diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79661new file mode 100644
79662index 0000000..ae02d8e
79663--- /dev/null
79664+++ b/grsecurity/grsec_usb.c
79665@@ -0,0 +1,15 @@
79666+#include <linux/kernel.h>
79667+#include <linux/grinternal.h>
79668+#include <linux/module.h>
79669+
79670+int gr_handle_new_usb(void)
79671+{
79672+#ifdef CONFIG_GRKERNSEC_DENYUSB
79673+ if (grsec_deny_new_usb) {
79674+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79675+ return 1;
79676+ }
79677+#endif
79678+ return 0;
79679+}
79680+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
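
gr_handle_new_usb() is the entire policy: it returns 1 to tell the caller to reject the device while the deny_new_usb sysctl is set, and the EXPORT_SYMBOL_GPL makes the hook reachable from the USB core, which consults it elsewhere in this patch before a newly attached device is configured. A sketch of the call-site pattern; the enumeration function below is hypothetical:

/* Sketch of a caller honoring gr_handle_new_usb(); this function is
 * hypothetical -- the real hook sits in the USB core's enumeration
 * path elsewhere in the patch. */
#include <linux/errno.h>

extern int gr_handle_new_usb(void);

static int example_enumerate_device(void)
{
        if (gr_handle_new_usb())
                return -EPERM;  /* new USB devices currently denied */

        /* ... proceed with normal enumeration ... */
        return 0;
}

Toggling is a single write, e.g. echo 1 > /proc/sys/kernel/grsecurity/deny_new_usb, unless GRKERNSEC_DENYUSB_FORCE compiled the setting in permanently.
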
79681diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79682new file mode 100644
79683index 0000000..158b330
79684--- /dev/null
79685+++ b/grsecurity/grsum.c
79686@@ -0,0 +1,64 @@
79687+#include <linux/err.h>
79688+#include <linux/kernel.h>
79689+#include <linux/sched.h>
79690+#include <linux/mm.h>
79691+#include <linux/scatterlist.h>
79692+#include <linux/crypto.h>
79693+#include <linux/gracl.h>
79694+
79695+
79696+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79697+#error "crypto and sha256 must be built into the kernel"
79698+#endif
79699+
79700+int
79701+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79702+{
79703+ struct crypto_hash *tfm;
79704+ struct hash_desc desc;
79705+ struct scatterlist sg[2];
79706+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79707+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79708+ unsigned long *sumptr = (unsigned long *)sum;
79709+ int cryptres;
79710+ int retval = 1;
79711+ volatile int mismatched = 0;
79712+ volatile int dummy = 0;
79713+ unsigned int i;
79714+
79715+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79716+ if (IS_ERR(tfm)) {
79717+ /* should never happen, since sha256 should be built in */
79718+ memset(entry->pw, 0, GR_PW_LEN);
79719+ return 1;
79720+ }
79721+
79722+ sg_init_table(sg, 2);
79723+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79724+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79725+
79726+ desc.tfm = tfm;
79727+ desc.flags = 0;
79728+
79729+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79730+ temp_sum);
79731+
79732+ memset(entry->pw, 0, GR_PW_LEN);
79733+
79734+ if (cryptres)
79735+ goto out;
79736+
79737+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79738+ if (sumptr[i] != tmpsumptr[i])
79739+ mismatched = 1;
79740+ else
79741+ dummy = 1; // waste a cycle
79742+
79743+ if (!mismatched)
79744+ retval = dummy - 1;
79745+
79746+out:
79747+ crypto_free_hash(tfm);
79748+
79749+ return retval;
79750+}
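
The comparison loop in chkpw() is deliberately not an early-exit memcmp(): the volatile mismatched and dummy variables force every word of the two digests to be examined whether or not a difference was already found, so the runtime does not reveal how much of a guessed password's hash was correct, and retval can only reach 0 (match) through dummy - 1 once no word differed. The same idea in a freestanding form, accumulating differences instead of using volatile (a common equivalent, not the patch's exact construction):

/* Freestanding sketch of a constant-time comparison in the spirit of
 * chkpw(): accumulate differences instead of returning at the first
 * mismatch, so runtime depends only on len. */
#include <stddef.h>

/* Returns 0 when the buffers are equal, nonzero otherwise. */
static int ct_memcmp(const unsigned char *a, const unsigned char *b,
                     size_t len)
{
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < len; i++)
                diff |= a[i] ^ b[i];

        return diff;
}
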
79751diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79752index 5bdab6b..9ae82fe 100644
79753--- a/include/asm-generic/4level-fixup.h
79754+++ b/include/asm-generic/4level-fixup.h
79755@@ -14,8 +14,10 @@
79756 #define pmd_alloc(mm, pud, address) \
79757 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79758 NULL: pmd_offset(pud, address))
79759+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79760
79761 #define pud_alloc(mm, pgd, address) (pgd)
79762+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79763 #define pud_offset(pgd, start) (pgd)
79764 #define pud_none(pud) 0
79765 #define pud_bad(pud) 0
79766diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79767index b7babf0..1e4b4f1 100644
79768--- a/include/asm-generic/atomic-long.h
79769+++ b/include/asm-generic/atomic-long.h
79770@@ -22,6 +22,12 @@
79771
79772 typedef atomic64_t atomic_long_t;
79773
79774+#ifdef CONFIG_PAX_REFCOUNT
79775+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79776+#else
79777+typedef atomic64_t atomic_long_unchecked_t;
79778+#endif
79779+
79780 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79781
79782 static inline long atomic_long_read(atomic_long_t *l)
79783@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79784 return (long)atomic64_read(v);
79785 }
79786
79787+#ifdef CONFIG_PAX_REFCOUNT
79788+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79789+{
79790+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79791+
79792+ return (long)atomic64_read_unchecked(v);
79793+}
79794+#endif
79795+
79796 static inline void atomic_long_set(atomic_long_t *l, long i)
79797 {
79798 atomic64_t *v = (atomic64_t *)l;
79799@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79800 atomic64_set(v, i);
79801 }
79802
79803+#ifdef CONFIG_PAX_REFCOUNT
79804+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79805+{
79806+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79807+
79808+ atomic64_set_unchecked(v, i);
79809+}
79810+#endif
79811+
79812 static inline void atomic_long_inc(atomic_long_t *l)
79813 {
79814 atomic64_t *v = (atomic64_t *)l;
79815@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79816 atomic64_inc(v);
79817 }
79818
79819+#ifdef CONFIG_PAX_REFCOUNT
79820+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79821+{
79822+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79823+
79824+ atomic64_inc_unchecked(v);
79825+}
79826+#endif
79827+
79828 static inline void atomic_long_dec(atomic_long_t *l)
79829 {
79830 atomic64_t *v = (atomic64_t *)l;
79831@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79832 atomic64_dec(v);
79833 }
79834
79835+#ifdef CONFIG_PAX_REFCOUNT
79836+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79837+{
79838+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79839+
79840+ atomic64_dec_unchecked(v);
79841+}
79842+#endif
79843+
79844 static inline void atomic_long_add(long i, atomic_long_t *l)
79845 {
79846 atomic64_t *v = (atomic64_t *)l;
79847@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79848 atomic64_add(i, v);
79849 }
79850
79851+#ifdef CONFIG_PAX_REFCOUNT
79852+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79853+{
79854+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79855+
79856+ atomic64_add_unchecked(i, v);
79857+}
79858+#endif
79859+
79860 static inline void atomic_long_sub(long i, atomic_long_t *l)
79861 {
79862 atomic64_t *v = (atomic64_t *)l;
79863@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79864 atomic64_sub(i, v);
79865 }
79866
79867+#ifdef CONFIG_PAX_REFCOUNT
79868+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79869+{
79870+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79871+
79872+ atomic64_sub_unchecked(i, v);
79873+}
79874+#endif
79875+
79876 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79877 {
79878 atomic64_t *v = (atomic64_t *)l;
79879@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79880 return atomic64_add_negative(i, v);
79881 }
79882
79883-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79884+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79885 {
79886 atomic64_t *v = (atomic64_t *)l;
79887
79888 return (long)atomic64_add_return(i, v);
79889 }
79890
79891+#ifdef CONFIG_PAX_REFCOUNT
79892+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79893+{
79894+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79895+
79896+ return (long)atomic64_add_return_unchecked(i, v);
79897+}
79898+#endif
79899+
79900 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79901 {
79902 atomic64_t *v = (atomic64_t *)l;
79903@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79904 return (long)atomic64_inc_return(v);
79905 }
79906
79907+#ifdef CONFIG_PAX_REFCOUNT
79908+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79909+{
79910+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79911+
79912+ return (long)atomic64_inc_return_unchecked(v);
79913+}
79914+#endif
79915+
79916 static inline long atomic_long_dec_return(atomic_long_t *l)
79917 {
79918 atomic64_t *v = (atomic64_t *)l;
79919@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79920
79921 typedef atomic_t atomic_long_t;
79922
79923+#ifdef CONFIG_PAX_REFCOUNT
79924+typedef atomic_unchecked_t atomic_long_unchecked_t;
79925+#else
79926+typedef atomic_t atomic_long_unchecked_t;
79927+#endif
79928+
79929 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79930 static inline long atomic_long_read(atomic_long_t *l)
79931 {
79932@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79933 return (long)atomic_read(v);
79934 }
79935
79936+#ifdef CONFIG_PAX_REFCOUNT
79937+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79938+{
79939+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79940+
79941+ return (long)atomic_read_unchecked(v);
79942+}
79943+#endif
79944+
79945 static inline void atomic_long_set(atomic_long_t *l, long i)
79946 {
79947 atomic_t *v = (atomic_t *)l;
79948@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79949 atomic_set(v, i);
79950 }
79951
79952+#ifdef CONFIG_PAX_REFCOUNT
79953+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79954+{
79955+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79956+
79957+ atomic_set_unchecked(v, i);
79958+}
79959+#endif
79960+
79961 static inline void atomic_long_inc(atomic_long_t *l)
79962 {
79963 atomic_t *v = (atomic_t *)l;
79964@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79965 atomic_inc(v);
79966 }
79967
79968+#ifdef CONFIG_PAX_REFCOUNT
79969+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79970+{
79971+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79972+
79973+ atomic_inc_unchecked(v);
79974+}
79975+#endif
79976+
79977 static inline void atomic_long_dec(atomic_long_t *l)
79978 {
79979 atomic_t *v = (atomic_t *)l;
79980@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79981 atomic_dec(v);
79982 }
79983
79984+#ifdef CONFIG_PAX_REFCOUNT
79985+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79986+{
79987+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79988+
79989+ atomic_dec_unchecked(v);
79990+}
79991+#endif
79992+
79993 static inline void atomic_long_add(long i, atomic_long_t *l)
79994 {
79995 atomic_t *v = (atomic_t *)l;
79996@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79997 atomic_add(i, v);
79998 }
79999
80000+#ifdef CONFIG_PAX_REFCOUNT
80001+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80002+{
80003+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80004+
80005+ atomic_add_unchecked(i, v);
80006+}
80007+#endif
80008+
80009 static inline void atomic_long_sub(long i, atomic_long_t *l)
80010 {
80011 atomic_t *v = (atomic_t *)l;
80012@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80013 atomic_sub(i, v);
80014 }
80015
80016+#ifdef CONFIG_PAX_REFCOUNT
80017+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80018+{
80019+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80020+
80021+ atomic_sub_unchecked(i, v);
80022+}
80023+#endif
80024+
80025 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80026 {
80027 atomic_t *v = (atomic_t *)l;
80028@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80029 return atomic_add_negative(i, v);
80030 }
80031
80032-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80033+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80034 {
80035 atomic_t *v = (atomic_t *)l;
80036
80037 return (long)atomic_add_return(i, v);
80038 }
80039
80040+#ifdef CONFIG_PAX_REFCOUNT
80041+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80042+{
80043+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80044+
80045+ return (long)atomic_add_return_unchecked(i, v);
80046+}
80047+
80048+#endif
80049+
80050 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80051 {
80052 atomic_t *v = (atomic_t *)l;
80053@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80054 return (long)atomic_inc_return(v);
80055 }
80056
80057+#ifdef CONFIG_PAX_REFCOUNT
80058+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80059+{
80060+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80061+
80062+ return (long)atomic_inc_return_unchecked(v);
80063+}
80064+#endif
80065+
80066 static inline long atomic_long_dec_return(atomic_long_t *l)
80067 {
80068 atomic_t *v = (atomic_t *)l;
80069@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80070
80071 #endif /* BITS_PER_LONG == 64 */
80072
80073+#ifdef CONFIG_PAX_REFCOUNT
80074+static inline void pax_refcount_needs_these_functions(void)
80075+{
80076+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
80077+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
80078+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
80079+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
80080+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
80081+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
80082+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
80083+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
80084+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
80085+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
80086+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
80087+#ifdef CONFIG_X86
80088+ atomic_clear_mask_unchecked(0, NULL);
80089+ atomic_set_mask_unchecked(0, NULL);
80090+#endif
80091+
80092+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
80093+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
80094+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
80095+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
80096+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
80097+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
80098+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
80099+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
80100+}
80101+#else
80102+#define atomic_read_unchecked(v) atomic_read(v)
80103+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
80104+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
80105+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
80106+#define atomic_inc_unchecked(v) atomic_inc(v)
80107+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
80108+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
80109+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
80110+#define atomic_dec_unchecked(v) atomic_dec(v)
80111+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
80112+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
80113+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
80114+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
80115+
80116+#define atomic_long_read_unchecked(v) atomic_long_read(v)
80117+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
80118+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
80119+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
80120+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
80121+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
80122+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
80123+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
80124+#endif
80125+
80126 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
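
The _unchecked half of this header exists because PAX_REFCOUNT makes the ordinary atomic operations trap on overflow, treating every atomic_t and atomic_long_t as a reference count; counters that may legitimately wrap, such as statistics, are declared with the *_unchecked_t types to opt out. When PAX_REFCOUNT is disabled, the #define block at the bottom collapses both spellings onto the same functions, and the never-called pax_refcount_needs_these_functions() simply forces every architecture to declare the full set of unchecked primitives. A sketch of the intended division of labor (the struct and helpers are illustrative):

/* Sketch: checked vs. unchecked atomics under CONFIG_PAX_REFCOUNT.
 * Illustrative kernel-context code, assuming this patch is applied. */
#include <linux/atomic.h>

struct example_obj {
        atomic_t refcount;              /* overflow is a bug: checked */
        atomic_unchecked_t stat_drops;  /* free-running counter: may wrap */
};

static void example_get(struct example_obj *obj)
{
        atomic_inc(&obj->refcount);             /* traps on overflow */
}

static void example_account_drop(struct example_obj *obj)
{
        atomic_inc_unchecked(&obj->stat_drops); /* wrapping tolerated */
}
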
80127diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
80128index 30ad9c8..c70c170 100644
80129--- a/include/asm-generic/atomic64.h
80130+++ b/include/asm-generic/atomic64.h
80131@@ -16,6 +16,8 @@ typedef struct {
80132 long long counter;
80133 } atomic64_t;
80134
80135+typedef atomic64_t atomic64_unchecked_t;
80136+
80137 #define ATOMIC64_INIT(i) { (i) }
80138
80139 extern long long atomic64_read(const atomic64_t *v);
80140@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
80141 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
80142 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
80143
80144+#define atomic64_read_unchecked(v) atomic64_read(v)
80145+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
80146+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
80147+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
80148+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
80149+#define atomic64_inc_unchecked(v) atomic64_inc(v)
80150+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
80151+#define atomic64_dec_unchecked(v) atomic64_dec(v)
80152+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
80153+
80154 #endif /* _ASM_GENERIC_ATOMIC64_H */
80155diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
80156index f5c40b0..e902f9d 100644
80157--- a/include/asm-generic/barrier.h
80158+++ b/include/asm-generic/barrier.h
80159@@ -82,7 +82,7 @@
80160 do { \
80161 compiletime_assert_atomic_type(*p); \
80162 smp_mb(); \
80163- ACCESS_ONCE(*p) = (v); \
80164+ ACCESS_ONCE_RW(*p) = (v); \
80165 } while (0)
80166
80167 #define smp_load_acquire(p) \
80168diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
80169index a60a7cc..0fe12f2 100644
80170--- a/include/asm-generic/bitops/__fls.h
80171+++ b/include/asm-generic/bitops/__fls.h
80172@@ -9,7 +9,7 @@
80173 *
80174 * Undefined if no set bit exists, so code should check against 0 first.
80175 */
80176-static __always_inline unsigned long __fls(unsigned long word)
80177+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
80178 {
80179 int num = BITS_PER_LONG - 1;
80180
80181diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
80182index 0576d1f..dad6c71 100644
80183--- a/include/asm-generic/bitops/fls.h
80184+++ b/include/asm-generic/bitops/fls.h
80185@@ -9,7 +9,7 @@
80186 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
80187 */
80188
80189-static __always_inline int fls(int x)
80190+static __always_inline int __intentional_overflow(-1) fls(int x)
80191 {
80192 int r = 32;
80193
80194diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
80195index b097cf8..3d40e14 100644
80196--- a/include/asm-generic/bitops/fls64.h
80197+++ b/include/asm-generic/bitops/fls64.h
80198@@ -15,7 +15,7 @@
80199 * at position 64.
80200 */
80201 #if BITS_PER_LONG == 32
80202-static __always_inline int fls64(__u64 x)
80203+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80204 {
80205 __u32 h = x >> 32;
80206 if (h)
80207@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
80208 return fls(x);
80209 }
80210 #elif BITS_PER_LONG == 64
80211-static __always_inline int fls64(__u64 x)
80212+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80213 {
80214 if (x == 0)
80215 return 0;
80216diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
80217index 1bfcfe5..e04c5c9 100644
80218--- a/include/asm-generic/cache.h
80219+++ b/include/asm-generic/cache.h
80220@@ -6,7 +6,7 @@
80221 * cache lines need to provide their own cache.h.
80222 */
80223
80224-#define L1_CACHE_SHIFT 5
80225-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
80226+#define L1_CACHE_SHIFT 5UL
80227+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
80228
80229 #endif /* __ASM_GENERIC_CACHE_H */
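
The switch from 5 to 5UL looks cosmetic, since a shift by 5 cannot overflow, but it changes the type of L1_CACHE_BYTES and of every expression built from it from int to unsigned long, avoiding sign extension and int overflow when those expressions are later combined with sizes and addresses. The userspace demo below shows the failure mode the wider type rules out at larger shift counts:

/* Why (1UL << shift) instead of (1 << shift): the int form shifts into
 * the sign bit (undefined behavior; sign-extends in practice on LP64)
 * before any conversion to a wider type. Illustrative userspace demo. */
#include <stdio.h>

int main(void)
{
        unsigned int shift = 31;
        unsigned long bad  = 1 << shift;   /* int shift, then widened */
        unsigned long good = 1UL << shift; /* done in unsigned long */

        printf("bad  = %#lx\n", bad);  /* 0xffffffff80000000 on LP64 */
        printf("good = %#lx\n", good); /* 0x80000000 */
        return 0;
}
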
80230diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
80231index 0d68a1e..b74a761 100644
80232--- a/include/asm-generic/emergency-restart.h
80233+++ b/include/asm-generic/emergency-restart.h
80234@@ -1,7 +1,7 @@
80235 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
80236 #define _ASM_GENERIC_EMERGENCY_RESTART_H
80237
80238-static inline void machine_emergency_restart(void)
80239+static inline __noreturn void machine_emergency_restart(void)
80240 {
80241 machine_restart(NULL);
80242 }
80243diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
80244index 90f99c7..00ce236 100644
80245--- a/include/asm-generic/kmap_types.h
80246+++ b/include/asm-generic/kmap_types.h
80247@@ -2,9 +2,9 @@
80248 #define _ASM_GENERIC_KMAP_TYPES_H
80249
80250 #ifdef __WITH_KM_FENCE
80251-# define KM_TYPE_NR 41
80252+# define KM_TYPE_NR 42
80253 #else
80254-# define KM_TYPE_NR 20
80255+# define KM_TYPE_NR 21
80256 #endif
80257
80258 #endif
80259diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
80260index 9ceb03b..62b0b8f 100644
80261--- a/include/asm-generic/local.h
80262+++ b/include/asm-generic/local.h
80263@@ -23,24 +23,37 @@ typedef struct
80264 atomic_long_t a;
80265 } local_t;
80266
80267+typedef struct {
80268+ atomic_long_unchecked_t a;
80269+} local_unchecked_t;
80270+
80271 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
80272
80273 #define local_read(l) atomic_long_read(&(l)->a)
80274+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
80275 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
80276+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
80277 #define local_inc(l) atomic_long_inc(&(l)->a)
80278+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
80279 #define local_dec(l) atomic_long_dec(&(l)->a)
80280+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
80281 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
80282+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
80283 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
80284+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
80285
80286 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
80287 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
80288 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
80289 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
80290 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
80291+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
80292 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
80293 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
80294+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
80295
80296 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80297+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80298 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
80299 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
80300 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
80301diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
80302index 725612b..9cc513a 100644
80303--- a/include/asm-generic/pgtable-nopmd.h
80304+++ b/include/asm-generic/pgtable-nopmd.h
80305@@ -1,14 +1,19 @@
80306 #ifndef _PGTABLE_NOPMD_H
80307 #define _PGTABLE_NOPMD_H
80308
80309-#ifndef __ASSEMBLY__
80310-
80311 #include <asm-generic/pgtable-nopud.h>
80312
80313-struct mm_struct;
80314-
80315 #define __PAGETABLE_PMD_FOLDED
80316
80317+#define PMD_SHIFT PUD_SHIFT
80318+#define PTRS_PER_PMD 1
80319+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
80320+#define PMD_MASK (~(PMD_SIZE-1))
80321+
80322+#ifndef __ASSEMBLY__
80323+
80324+struct mm_struct;
80325+
80326 /*
80327 * Having the pmd type consist of a pud gets the size right, and allows
80328 * us to conceptually access the pud entry that this pmd is folded into
80329@@ -16,11 +21,6 @@ struct mm_struct;
80330 */
80331 typedef struct { pud_t pud; } pmd_t;
80332
80333-#define PMD_SHIFT PUD_SHIFT
80334-#define PTRS_PER_PMD 1
80335-#define PMD_SIZE (1UL << PMD_SHIFT)
80336-#define PMD_MASK (~(PMD_SIZE-1))
80337-
80338 /*
80339 * The "pud_xxx()" functions here are trivial for a folded two-level
80340 * setup: the pmd is never bad, and a pmd always exists (as it's folded
80341diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
80342index 810431d..0ec4804f 100644
80343--- a/include/asm-generic/pgtable-nopud.h
80344+++ b/include/asm-generic/pgtable-nopud.h
80345@@ -1,10 +1,15 @@
80346 #ifndef _PGTABLE_NOPUD_H
80347 #define _PGTABLE_NOPUD_H
80348
80349-#ifndef __ASSEMBLY__
80350-
80351 #define __PAGETABLE_PUD_FOLDED
80352
80353+#define PUD_SHIFT PGDIR_SHIFT
80354+#define PTRS_PER_PUD 1
80355+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
80356+#define PUD_MASK (~(PUD_SIZE-1))
80357+
80358+#ifndef __ASSEMBLY__
80359+
80360 /*
80361 * Having the pud type consist of a pgd gets the size right, and allows
80362 * us to conceptually access the pgd entry that this pud is folded into
80363@@ -12,11 +17,6 @@
80364 */
80365 typedef struct { pgd_t pgd; } pud_t;
80366
80367-#define PUD_SHIFT PGDIR_SHIFT
80368-#define PTRS_PER_PUD 1
80369-#define PUD_SIZE (1UL << PUD_SHIFT)
80370-#define PUD_MASK (~(PUD_SIZE-1))
80371-
80372 /*
80373 * The "pgd_xxx()" functions here are trivial for a folded two-level
80374 * setup: the pud is never bad, and a pud always exists (as it's folded
80375@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
80376 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
80377
80378 #define pgd_populate(mm, pgd, pud) do { } while (0)
80379+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
80380 /*
80381 * (puds are folded into pgds so this doesn't get actually called,
80382 * but the define is needed for a generic inline function.)
80383diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
80384index 4d46085..f4e92ef 100644
80385--- a/include/asm-generic/pgtable.h
80386+++ b/include/asm-generic/pgtable.h
80387@@ -689,6 +689,22 @@ static inline int pmd_protnone(pmd_t pmd)
80388 }
80389 #endif /* CONFIG_NUMA_BALANCING */
80390
80391+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
80392+#ifdef CONFIG_PAX_KERNEXEC
80393+#error KERNEXEC requires pax_open_kernel
80394+#else
80395+static inline unsigned long pax_open_kernel(void) { return 0; }
80396+#endif
80397+#endif
80398+
80399+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
80400+#ifdef CONFIG_PAX_KERNEXEC
80401+#error KERNEXEC requires pax_close_kernel
80402+#else
80403+static inline unsigned long pax_close_kernel(void) { return 0; }
80404+#endif
80405+#endif
80406+
80407 #endif /* CONFIG_MMU */
80408
80409 #endif /* !__ASSEMBLY__ */
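
These stubs pin down the KERNEXEC contract: an architecture that enables CONFIG_PAX_KERNEXEC must supply real pax_open_kernel()/pax_close_kernel() implementations (the #error fires otherwise), while everywhere else the calls compile to no-ops, so common code can bracket writes to normally read-only data without ifdefs. The usual call pattern, assuming this patch's __read_only attribute (the variable is illustrative):

/* Sketch of the pax_open_kernel()/pax_close_kernel() bracket around a
 * write to __read_only data; illustrative, assumes the patch applied. */
static int example_setting __read_only = 0;

static void example_update(int v)
{
        pax_open_kernel();      /* temporarily permit kernel writes */
        example_setting = v;
        pax_close_kernel();     /* restore write protection */
}
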
80410diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80411index 72d8803..cb9749c 100644
80412--- a/include/asm-generic/uaccess.h
80413+++ b/include/asm-generic/uaccess.h
80414@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80415 return __clear_user(to, n);
80416 }
80417
80418+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80419+#ifdef CONFIG_PAX_MEMORY_UDEREF
80420+#error UDEREF requires pax_open_userland
80421+#else
80422+static inline unsigned long pax_open_userland(void) { return 0; }
80423+#endif
80424+#endif
80425+
80426+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80427+#ifdef CONFIG_PAX_MEMORY_UDEREF
80428+#error UDEREF requires pax_close_userland
80429+#else
80430+static inline unsigned long pax_close_userland(void) { return 0; }
80431+#endif
80432+#endif
80433+
80434 #endif /* __ASM_GENERIC_UACCESS_H */
80435diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80436index ac78910..775a306 100644
80437--- a/include/asm-generic/vmlinux.lds.h
80438+++ b/include/asm-generic/vmlinux.lds.h
80439@@ -234,6 +234,7 @@
80440 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80441 VMLINUX_SYMBOL(__start_rodata) = .; \
80442 *(.rodata) *(.rodata.*) \
80443+ *(.data..read_only) \
80444 *(__vermagic) /* Kernel version magic */ \
80445 . = ALIGN(8); \
80446 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80447@@ -727,17 +728,18 @@
80448 * section in the linker script will go there too. @phdr should have
80449 * a leading colon.
80450 *
80451- * Note that this macros defines __per_cpu_load as an absolute symbol.
 80452+ * Note that this macro defines per_cpu_load as an absolute symbol.
80453 * If there is no need to put the percpu section at a predetermined
80454 * address, use PERCPU_SECTION.
80455 */
80456 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80457- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80458- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80459+ per_cpu_load = .; \
80460+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80461 - LOAD_OFFSET) { \
80462+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80463 PERCPU_INPUT(cacheline) \
80464 } phdr \
80465- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80466+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80467
80468 /**
80469 * PERCPU_SECTION - define output section for percpu area, simple version
80470diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80471index 623a59c..1e79ab9 100644
80472--- a/include/crypto/algapi.h
80473+++ b/include/crypto/algapi.h
80474@@ -34,7 +34,7 @@ struct crypto_type {
80475 unsigned int maskclear;
80476 unsigned int maskset;
80477 unsigned int tfmsize;
80478-};
80479+} __do_const;
80480
80481 struct crypto_instance {
80482 struct crypto_alg alg;
80483diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80484index e928625..78c2c05 100644
80485--- a/include/drm/drmP.h
80486+++ b/include/drm/drmP.h
80487@@ -59,6 +59,7 @@
80488
80489 #include <asm/mman.h>
80490 #include <asm/pgalloc.h>
80491+#include <asm/local.h>
80492 #include <asm/uaccess.h>
80493
80494 #include <uapi/drm/drm.h>
80495@@ -224,10 +225,12 @@ void drm_err(const char *format, ...);
80496 * \param cmd command.
80497 * \param arg argument.
80498 */
80499-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80500+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80501+ struct drm_file *file_priv);
80502+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80503 struct drm_file *file_priv);
80504
80505-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80506+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80507 unsigned long arg);
80508
80509 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80510@@ -243,10 +246,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80511 struct drm_ioctl_desc {
80512 unsigned int cmd;
80513 int flags;
80514- drm_ioctl_t *func;
80515+ drm_ioctl_t func;
80516 unsigned int cmd_drv;
80517 const char *name;
80518-};
80519+} __do_const;
80520
80521 /**
80522 * Creates a driver or general drm_ioctl_desc array entry for the given
80523@@ -632,7 +635,8 @@ struct drm_info_list {
80524 int (*show)(struct seq_file*, void*); /** show callback */
80525 u32 driver_features; /**< Required driver features for this entry */
80526 void *data;
80527-};
80528+} __do_const;
80529+typedef struct drm_info_list __no_const drm_info_list_no_const;
80530
80531 /**
80532 * debugfs node structure. This structure represents a debugfs file.
80533@@ -716,7 +720,7 @@ struct drm_device {
80534
80535 /** \name Usage Counters */
80536 /*@{ */
80537- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80538+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80539 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80540 int buf_use; /**< Buffers in use -- cannot alloc */
80541 atomic_t buf_alloc; /**< Buffer allocation in progress */
80542diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80543index c250a22..59d2094 100644
80544--- a/include/drm/drm_crtc_helper.h
80545+++ b/include/drm/drm_crtc_helper.h
80546@@ -160,7 +160,7 @@ struct drm_encoder_helper_funcs {
80547 int (*atomic_check)(struct drm_encoder *encoder,
80548 struct drm_crtc_state *crtc_state,
80549 struct drm_connector_state *conn_state);
80550-};
80551+} __no_const;
80552
80553 /**
80554 * struct drm_connector_helper_funcs - helper operations for connectors
80555diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80556index d016dc5..3951fe0 100644
80557--- a/include/drm/i915_pciids.h
80558+++ b/include/drm/i915_pciids.h
80559@@ -37,7 +37,7 @@
80560 */
80561 #define INTEL_VGA_DEVICE(id, info) { \
80562 0x8086, id, \
80563- ~0, ~0, \
80564+ PCI_ANY_ID, PCI_ANY_ID, \
80565 0x030000, 0xff0000, \
80566 (unsigned long) info }
80567
80568diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80569index 72dcbe8..8db58d7 100644
80570--- a/include/drm/ttm/ttm_memory.h
80571+++ b/include/drm/ttm/ttm_memory.h
80572@@ -48,7 +48,7 @@
80573
80574 struct ttm_mem_shrink {
80575 int (*do_shrink) (struct ttm_mem_shrink *);
80576-};
80577+} __no_const;
80578
80579 /**
80580 * struct ttm_mem_global - Global memory accounting structure.
80581diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80582index 49a8284..9643967 100644
80583--- a/include/drm/ttm/ttm_page_alloc.h
80584+++ b/include/drm/ttm/ttm_page_alloc.h
80585@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80586 */
80587 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80588
80589+struct device;
80590 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80591 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80592
80593diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80594index 4b840e8..155d235 100644
80595--- a/include/keys/asymmetric-subtype.h
80596+++ b/include/keys/asymmetric-subtype.h
80597@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80598 /* Verify the signature on a key of this subtype (optional) */
80599 int (*verify_signature)(const struct key *key,
80600 const struct public_key_signature *sig);
80601-};
80602+} __do_const;
80603
80604 /**
80605 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80606diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80607index c1da539..1dcec55 100644
80608--- a/include/linux/atmdev.h
80609+++ b/include/linux/atmdev.h
80610@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80611 #endif
80612
80613 struct k_atm_aal_stats {
80614-#define __HANDLE_ITEM(i) atomic_t i
80615+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80616 __AAL_STAT_ITEMS
80617 #undef __HANDLE_ITEM
80618 };
80619@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80620 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80621 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80622 struct module *owner;
80623-};
 80624+} __do_const;
80625
80626 struct atmphy_ops {
80627 int (*start)(struct atm_dev *dev);
80628diff --git a/include/linux/atomic.h b/include/linux/atomic.h
80629index 5b08a85..60922fb 100644
80630--- a/include/linux/atomic.h
80631+++ b/include/linux/atomic.h
80632@@ -12,7 +12,7 @@
80633 * Atomically adds @a to @v, so long as @v was not already @u.
80634 * Returns non-zero if @v was not @u, and zero otherwise.
80635 */
80636-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80637+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80638 {
80639 return __atomic_add_unless(v, a, u) != u;
80640 }
80641diff --git a/include/linux/audit.h b/include/linux/audit.h
80642index c2e7e3a..8bfc0e1 100644
80643--- a/include/linux/audit.h
80644+++ b/include/linux/audit.h
80645@@ -223,7 +223,7 @@ static inline void audit_ptrace(struct task_struct *t)
80646 extern unsigned int audit_serial(void);
80647 extern int auditsc_get_stamp(struct audit_context *ctx,
80648 struct timespec *t, unsigned int *serial);
80649-extern int audit_set_loginuid(kuid_t loginuid);
80650+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80651
80652 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80653 {
80654diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80655index 576e463..28fd926 100644
80656--- a/include/linux/binfmts.h
80657+++ b/include/linux/binfmts.h
80658@@ -44,7 +44,7 @@ struct linux_binprm {
80659 unsigned interp_flags;
80660 unsigned interp_data;
80661 unsigned long loader, exec;
80662-};
80663+} __randomize_layout;
80664
80665 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80666 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80667@@ -77,8 +77,10 @@ struct linux_binfmt {
80668 int (*load_binary)(struct linux_binprm *);
80669 int (*load_shlib)(struct file *);
80670 int (*core_dump)(struct coredump_params *cprm);
80671+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80672+ void (*handle_mmap)(struct file *);
80673 unsigned long min_coredump; /* minimal dump size */
80674-};
80675+} __do_const __randomize_layout;
80676
80677 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80678
80679diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80680index dbfbf49..10be372 100644
80681--- a/include/linux/bitmap.h
80682+++ b/include/linux/bitmap.h
80683@@ -299,7 +299,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80684 return __bitmap_full(src, nbits);
80685 }
80686
80687-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80688+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80689 {
80690 if (small_const_nbits(nbits))
80691 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80692diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80693index 5d858e0..336c1d9 100644
80694--- a/include/linux/bitops.h
80695+++ b/include/linux/bitops.h
80696@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80697 * @word: value to rotate
80698 * @shift: bits to roll
80699 */
80700-static inline __u32 rol32(__u32 word, unsigned int shift)
80701+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80702 {
80703 return (word << shift) | (word >> (32 - shift));
80704 }
80705@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80706 * @word: value to rotate
80707 * @shift: bits to roll
80708 */
80709-static inline __u32 ror32(__u32 word, unsigned int shift)
80710+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80711 {
80712 return (word >> shift) | (word << (32 - shift));
80713 }
80714@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80715 return (__s32)(value << shift) >> shift;
80716 }
80717
80718-static inline unsigned fls_long(unsigned long l)
80719+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80720 {
80721 if (sizeof(l) == 4)
80722 return fls(l);
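
rol32() and ror32() gain __intentional_overflow(-1) because a rotate discards the bits shifted out of one end by design, and the size_overflow gcc plugin would otherwise flag the word << shift subexpression. A userspace check of the semantics the annotation preserves:

/* Userspace check of rol32() semantics: bits rotated out the top
 * reappear at the bottom (shift must be 1..31, as in the kernel). */
#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t word, unsigned int shift)
{
        return (word << shift) | (word >> (32 - shift));
}

int main(void)
{
        printf("%#x\n", rol32(0x80000001u, 1)); /* prints 0x3 */
        return 0;
}
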
80723diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80724index 7f9a516..8889453 100644
80725--- a/include/linux/blkdev.h
80726+++ b/include/linux/blkdev.h
80727@@ -1616,7 +1616,7 @@ struct block_device_operations {
80728 /* this callback is with swap_lock and sometimes page table lock held */
80729 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80730 struct module *owner;
80731-};
80732+} __do_const;
80733
80734 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80735 unsigned long);
80736diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80737index afc1343..9735539 100644
80738--- a/include/linux/blktrace_api.h
80739+++ b/include/linux/blktrace_api.h
80740@@ -25,7 +25,7 @@ struct blk_trace {
80741 struct dentry *dropped_file;
80742 struct dentry *msg_file;
80743 struct list_head running_list;
80744- atomic_t dropped;
80745+ atomic_unchecked_t dropped;
80746 };
80747
80748 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80749diff --git a/include/linux/cache.h b/include/linux/cache.h
80750index 17e7e82..1d7da26 100644
80751--- a/include/linux/cache.h
80752+++ b/include/linux/cache.h
80753@@ -16,6 +16,14 @@
80754 #define __read_mostly
80755 #endif
80756
80757+#ifndef __read_only
80758+#ifdef CONFIG_PAX_KERNEXEC
80759+#error KERNEXEC requires __read_only
80760+#else
80761+#define __read_only __read_mostly
80762+#endif
80763+#endif
80764+
80765 #ifndef ____cacheline_aligned
80766 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80767 #endif
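`__read_only` marks data that is initialized once and then lives in a write-protected section under PAX_KERNEXEC; the `#error` guard forces any KERNEXEC-capable architecture to supply a real definition rather than silently degrading to `__read_mostly`. A hedged sketch of the intended use, assuming PaX's `pax_open_kernel()`/`pax_close_kernel()` pair for a rare legitimate late write (the variable is hypothetical):

    static int example_limit __read_only = 64;

    static void set_example_limit(int v)
    {
            pax_open_kernel();      /* briefly lift the write protection */
            example_limit = v;
            pax_close_kernel();
    }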
80768diff --git a/include/linux/capability.h b/include/linux/capability.h
80769index aa93e5e..985a1b0 100644
80770--- a/include/linux/capability.h
80771+++ b/include/linux/capability.h
80772@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80773 extern bool capable(int cap);
80774 extern bool ns_capable(struct user_namespace *ns, int cap);
80775 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80776+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80777 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80778+extern bool capable_nolog(int cap);
80779+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80780
80781 /* audit system wants to get cap info from files as well */
80782 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80783
80784+extern int is_privileged_binary(const struct dentry *dentry);
80785+
80786 #endif /* !_LINUX_CAPABILITY_H */
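The new `_nolog` variants behave like `capable()`/`ns_capable()` but skip grsecurity's capability-audit logging; they are for probes where a denial is routine rather than suspicious, so the log is not flooded. A hedged usage sketch (the call site is hypothetical):

    static bool may_override_owner(const struct inode *inode)
    {
            /* Expected-to-fail probe: do not log the denial. */
            return capable_wrt_inode_uidgid_nolog(inode, CAP_FOWNER);
    }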
80787diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80788index 8609d57..86e4d79 100644
80789--- a/include/linux/cdrom.h
80790+++ b/include/linux/cdrom.h
80791@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80792
80793 /* driver specifications */
80794 const int capability; /* capability flags */
80795- int n_minors; /* number of active minor devices */
80796 /* handle uniform packets for scsi type devices (scsi,atapi) */
80797 int (*generic_packet) (struct cdrom_device_info *,
80798 struct packet_command *);
80799diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80800index 4ce9056..86caac6 100644
80801--- a/include/linux/cleancache.h
80802+++ b/include/linux/cleancache.h
80803@@ -31,7 +31,7 @@ struct cleancache_ops {
80804 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80805 void (*invalidate_inode)(int, struct cleancache_filekey);
80806 void (*invalidate_fs)(int);
80807-};
80808+} __no_const;
80809
80810 extern struct cleancache_ops *
80811 cleancache_register_ops(struct cleancache_ops *ops);
80812diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80813index 5591ea7..61b77ce 100644
80814--- a/include/linux/clk-provider.h
80815+++ b/include/linux/clk-provider.h
80816@@ -195,6 +195,7 @@ struct clk_ops {
80817 void (*init)(struct clk_hw *hw);
80818 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80819 };
80820+typedef struct clk_ops __no_const clk_ops_no_const;
80821
80822 /**
80823 * struct clk_init_data - holds init data that's common to all clocks and is
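The `clk_ops_no_const` typedef is the standard opt-out for the constify plugin: the plugin turns every `struct clk_ops` instance const, so the few drivers that must assemble an ops table at runtime declare theirs through the `*_no_const` alias instead. A sketch with a hypothetical driver (`template_clk_ops` and `my_clk_enable` are illustrative names):

    static clk_ops_no_const my_ops;         /* stays writable under constify */

    static int my_clk_probe(void)
    {
            my_ops = template_clk_ops;      /* copy a template... */
            my_ops.enable = my_clk_enable;  /* ...then specialize it */
            return 0;
    }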
80824diff --git a/include/linux/compat.h b/include/linux/compat.h
80825index ab25814..9026bca 100644
80826--- a/include/linux/compat.h
80827+++ b/include/linux/compat.h
80828@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80829 compat_size_t __user *len_ptr);
80830
80831 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80832-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80833+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80834 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80835 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80836 compat_ssize_t msgsz, int msgflg);
80837@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80838 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80839 compat_ulong_t addr, compat_ulong_t data);
80840 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80841- compat_long_t addr, compat_long_t data);
80842+ compat_ulong_t addr, compat_ulong_t data);
80843
80844 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80845 /*
80846diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80847index 769e198..f670585 100644
80848--- a/include/linux/compiler-gcc4.h
80849+++ b/include/linux/compiler-gcc4.h
80850@@ -39,9 +39,34 @@
80851 # define __compiletime_warning(message) __attribute__((warning(message)))
80852 # define __compiletime_error(message) __attribute__((error(message)))
80853 #endif /* __CHECKER__ */
80854+
80855+#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
80856+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80857+#define __bos0(ptr) __bos((ptr), 0)
80858+#define __bos1(ptr) __bos((ptr), 1)
80859 #endif /* GCC_VERSION >= 40300 */
80860
80861 #if GCC_VERSION >= 40500
80862+
80863+#ifdef RANDSTRUCT_PLUGIN
80864+#define __randomize_layout __attribute__((randomize_layout))
80865+#define __no_randomize_layout __attribute__((no_randomize_layout))
80866+#endif
80867+
80868+#ifdef CONSTIFY_PLUGIN
80869+#define __no_const __attribute__((no_const))
80870+#define __do_const __attribute__((do_const))
80871+#endif
80872+
80873+#ifdef SIZE_OVERFLOW_PLUGIN
80874+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80875+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80876+#endif
80877+
80878+#ifdef LATENT_ENTROPY_PLUGIN
80879+#define __latent_entropy __attribute__((latent_entropy))
80880+#endif
80881+
80882 /*
80883 * Mark a position in code as unreachable. This can be used to
80884 * suppress control flow warnings after asm blocks that transfer
80885diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80886index efee493..c388661 100644
80887--- a/include/linux/compiler-gcc5.h
80888+++ b/include/linux/compiler-gcc5.h
80889@@ -28,6 +28,25 @@
80890 # define __compiletime_error(message) __attribute__((error(message)))
80891 #endif /* __CHECKER__ */
80892
80893+#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
80894+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80895+#define __bos0(ptr) __bos((ptr), 0)
80896+#define __bos1(ptr) __bos((ptr), 1)
80897+
80898+#ifdef CONSTIFY_PLUGIN
80899+#define __no_const __attribute__((no_const))
80900+#define __do_const __attribute__((do_const))
80901+#endif
80902+
80903+#ifdef SIZE_OVERFLOW_PLUGIN
80904+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80905+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80906+#endif
80907+
80908+#ifdef LATENT_ENTROPY_PLUGIN
80909+#define __latent_entropy __attribute__((latent_entropy))
80910+#endif
80911+
80912 /*
80913 * Mark a position in code as unreachable. This can be used to
80914 * suppress control flow warnings after asm blocks that transfer
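`__alloc_size` and the `__bos*` helpers defined in these two headers wrap stock GCC facilities, the `alloc_size` attribute and `__builtin_object_size()`, which the PAX_USERCOPY bounds checks build on: the compiler tracks object sizes so copy routines can compare a requested length against them. A runnable userspace sketch using only the plain builtins (build with -O2 so object sizes are computed):

    #include <stdio.h>
    #include <string.h>

    #define __bos0(ptr) __builtin_object_size((ptr), 0)

    int main(void)
    {
            char buf[8];
            size_t room = __bos0(buf);      /* 8 here; (size_t)-1 if unknown */
            const char *src = "0123456789"; /* 10 chars + NUL: too big */

            if (room != (size_t)-1 && strlen(src) + 1 > room) {
                    puts("copy rejected");  /* this branch is taken */
                    return 1;
            }
            memcpy(buf, src, strlen(src) + 1);
            return 0;
    }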
80915diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80916index 1b45e4a..33028cd 100644
80917--- a/include/linux/compiler.h
80918+++ b/include/linux/compiler.h
80919@@ -5,11 +5,14 @@
80920
80921 #ifdef __CHECKER__
80922 # define __user __attribute__((noderef, address_space(1)))
80923+# define __force_user __force __user
80924 # define __kernel __attribute__((address_space(0)))
80925+# define __force_kernel __force __kernel
80926 # define __safe __attribute__((safe))
80927 # define __force __attribute__((force))
80928 # define __nocast __attribute__((nocast))
80929 # define __iomem __attribute__((noderef, address_space(2)))
80930+# define __force_iomem __force __iomem
80931 # define __must_hold(x) __attribute__((context(x,1,1)))
80932 # define __acquires(x) __attribute__((context(x,0,1)))
80933 # define __releases(x) __attribute__((context(x,1,0)))
80934@@ -17,20 +20,37 @@
80935 # define __release(x) __context__(x,-1)
80936 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80937 # define __percpu __attribute__((noderef, address_space(3)))
80938+# define __force_percpu __force __percpu
80939 #ifdef CONFIG_SPARSE_RCU_POINTER
80940 # define __rcu __attribute__((noderef, address_space(4)))
80941+# define __force_rcu __force __rcu
80942 #else
80943 # define __rcu
80944+# define __force_rcu
80945 #endif
80946 extern void __chk_user_ptr(const volatile void __user *);
80947 extern void __chk_io_ptr(const volatile void __iomem *);
80948 #else
80949-# define __user
80950-# define __kernel
80951+# ifdef CHECKER_PLUGIN
80952+//# define __user
80953+//# define __force_user
80954+//# define __kernel
80955+//# define __force_kernel
80956+# else
80957+# ifdef STRUCTLEAK_PLUGIN
80958+# define __user __attribute__((user))
80959+# else
80960+# define __user
80961+# endif
80962+# define __force_user
80963+# define __kernel
80964+# define __force_kernel
80965+# endif
80966 # define __safe
80967 # define __force
80968 # define __nocast
80969 # define __iomem
80970+# define __force_iomem
80971 # define __chk_user_ptr(x) (void)0
80972 # define __chk_io_ptr(x) (void)0
80973 # define __builtin_warning(x, y...) (1)
80974@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80975 # define __release(x) (void)0
80976 # define __cond_lock(x,c) (c)
80977 # define __percpu
80978+# define __force_percpu
80979 # define __rcu
80980+# define __force_rcu
80981 #endif
80982
80983 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80984@@ -205,32 +227,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80985 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80986 {
80987 switch (size) {
80988- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80989- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80990- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80991+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80992+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80993+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80994 #ifdef CONFIG_64BIT
80995- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80996+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80997 #endif
80998 default:
80999 barrier();
81000- __builtin_memcpy((void *)res, (const void *)p, size);
81001+ __builtin_memcpy(res, (const void *)p, size);
81002 data_access_exceeds_word_size();
81003 barrier();
81004 }
81005 }
81006
81007-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
81008+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
81009 {
81010 switch (size) {
81011- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
81012- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
81013- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
81014+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
81015+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
81016+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
81017 #ifdef CONFIG_64BIT
81018- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
81019+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
81020 #endif
81021 default:
81022 barrier();
81023- __builtin_memcpy((void *)p, (const void *)res, size);
81024+ __builtin_memcpy((void *)p, res, size);
81025 data_access_exceeds_word_size();
81026 barrier();
81027 }
81028@@ -364,6 +386,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81029 # define __attribute_const__ /* unimplemented */
81030 #endif
81031
81032+#ifndef __randomize_layout
81033+# define __randomize_layout
81034+#endif
81035+
81036+#ifndef __no_randomize_layout
81037+# define __no_randomize_layout
81038+#endif
81039+
81040+#ifndef __no_const
81041+# define __no_const
81042+#endif
81043+
81044+#ifndef __do_const
81045+# define __do_const
81046+#endif
81047+
81048+#ifndef __size_overflow
81049+# define __size_overflow(...)
81050+#endif
81051+
81052+#ifndef __intentional_overflow
81053+# define __intentional_overflow(...)
81054+#endif
81055+
81056+#ifndef __latent_entropy
81057+# define __latent_entropy
81058+#endif
81059+
81060 /*
81061 * Tell gcc if a function is cold. The compiler will assume any path
81062 * directly leading to the call is unlikely.
81063@@ -373,6 +423,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81064 #define __cold
81065 #endif
81066
81067+#ifndef __alloc_size
81068+#define __alloc_size(...)
81069+#endif
81070+
81071+#ifndef __bos
81072+#define __bos(ptr, arg)
81073+#endif
81074+
81075+#ifndef __bos0
81076+#define __bos0(ptr)
81077+#endif
81078+
81079+#ifndef __bos1
81080+#define __bos1(ptr)
81081+#endif
81082+
81083 /* Simple shorthand for a section definition */
81084 #ifndef __section
81085 # define __section(S) __attribute__ ((__section__(#S)))
81086@@ -387,6 +453,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81087 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
81088 #endif
81089
81090+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
81091+
81092 /* Is this type a native word size -- useful for atomic operations */
81093 #ifndef __native_word
81094 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
81095@@ -466,8 +534,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
81096 */
81097 #define __ACCESS_ONCE(x) ({ \
81098 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
81099- (volatile typeof(x) *)&(x); })
81100+ (volatile const typeof(x) *)&(x); })
81101 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
81102+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
81103
81104 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
81105 #ifdef CONFIG_KPROBES
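Note the `ACCESS_ONCE()` hardening just above: the casted lvalue is now `volatile const`, so an accidental write through `ACCESS_ONCE()` becomes a compile-time error, and deliberate lock-free writes must spell out `ACCESS_ONCE_RW()`. A minimal sketch (GNU C `typeof`, as in the kernel):

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    static int shared_flag;

    static int  poll_flag(void)  { return ACCESS_ONCE(shared_flag); }
    static void raise_flag(void) { ACCESS_ONCE_RW(shared_flag) = 1; }
    /* ACCESS_ONCE(shared_flag) = 1;  -- now rejected: assignment of
     * read-only location */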
81106diff --git a/include/linux/completion.h b/include/linux/completion.h
81107index 5d5aaae..0ea9b84 100644
81108--- a/include/linux/completion.h
81109+++ b/include/linux/completion.h
81110@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
81111
81112 extern void wait_for_completion(struct completion *);
81113 extern void wait_for_completion_io(struct completion *);
81114-extern int wait_for_completion_interruptible(struct completion *x);
81115-extern int wait_for_completion_killable(struct completion *x);
81116+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
81117+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
81118 extern unsigned long wait_for_completion_timeout(struct completion *x,
81119- unsigned long timeout);
81120+ unsigned long timeout) __intentional_overflow(-1);
81121 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
81122- unsigned long timeout);
81123+ unsigned long timeout) __intentional_overflow(-1);
81124 extern long wait_for_completion_interruptible_timeout(
81125- struct completion *x, unsigned long timeout);
81126+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81127 extern long wait_for_completion_killable_timeout(
81128- struct completion *x, unsigned long timeout);
81129+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81130 extern bool try_wait_for_completion(struct completion *x);
81131 extern bool completion_done(struct completion *x);
81132
81133diff --git a/include/linux/configfs.h b/include/linux/configfs.h
81134index 34025df..d94bbbc 100644
81135--- a/include/linux/configfs.h
81136+++ b/include/linux/configfs.h
81137@@ -125,7 +125,7 @@ struct configfs_attribute {
81138 const char *ca_name;
81139 struct module *ca_owner;
81140 umode_t ca_mode;
81141-};
81142+} __do_const;
81143
81144 /*
81145 * Users often need to create attribute structures for their configurable
81146diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
81147index 2ee4888..0451f5e 100644
81148--- a/include/linux/cpufreq.h
81149+++ b/include/linux/cpufreq.h
81150@@ -207,6 +207,7 @@ struct global_attr {
81151 ssize_t (*store)(struct kobject *a, struct attribute *b,
81152 const char *c, size_t count);
81153 };
81154+typedef struct global_attr __no_const global_attr_no_const;
81155
81156 #define define_one_global_ro(_name) \
81157 static struct global_attr _name = \
81158@@ -278,7 +279,7 @@ struct cpufreq_driver {
81159 bool boost_supported;
81160 bool boost_enabled;
81161 int (*set_boost)(int state);
81162-};
81163+} __do_const;
81164
81165 /* flags */
81166 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
81167diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
81168index 9c5e892..feb34e0 100644
81169--- a/include/linux/cpuidle.h
81170+++ b/include/linux/cpuidle.h
81171@@ -59,7 +59,8 @@ struct cpuidle_state {
81172 void (*enter_freeze) (struct cpuidle_device *dev,
81173 struct cpuidle_driver *drv,
81174 int index);
81175-};
81176+} __do_const;
81177+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
81178
81179 /* Idle State Flags */
81180 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
81181@@ -227,7 +228,7 @@ struct cpuidle_governor {
81182 void (*reflect) (struct cpuidle_device *dev, int index);
81183
81184 struct module *owner;
81185-};
81186+} __do_const;
81187
81188 #ifdef CONFIG_CPU_IDLE
81189 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
81190diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
81191index 086549a..a572d94 100644
81192--- a/include/linux/cpumask.h
81193+++ b/include/linux/cpumask.h
81194@@ -126,17 +126,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81195 }
81196
81197 /* Valid inputs for n are -1 and 0. */
81198-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81199+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81200 {
81201 return n+1;
81202 }
81203
81204-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81205+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81206 {
81207 return n+1;
81208 }
81209
81210-static inline unsigned int cpumask_next_and(int n,
81211+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
81212 const struct cpumask *srcp,
81213 const struct cpumask *andp)
81214 {
81215@@ -182,7 +182,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81216 *
81217 * Returns >= nr_cpu_ids if no further cpus set.
81218 */
81219-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81220+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81221 {
81222 /* -1 is a legal arg here. */
81223 if (n != -1)
81224@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81225 *
81226 * Returns >= nr_cpu_ids if no further cpus unset.
81227 */
81228-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81229+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81230 {
81231 /* -1 is a legal arg here. */
81232 if (n != -1)
81233@@ -205,7 +205,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81234 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
81235 }
81236
81237-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
81238+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
81239 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
81240 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
81241
81242@@ -472,7 +472,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
81243 * cpumask_weight - Count of bits in *srcp
81244 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
81245 */
81246-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
81247+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
81248 {
81249 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
81250 }
81251diff --git a/include/linux/cred.h b/include/linux/cred.h
81252index 2fb2ca2..d6a3340 100644
81253--- a/include/linux/cred.h
81254+++ b/include/linux/cred.h
81255@@ -35,7 +35,7 @@ struct group_info {
81256 int nblocks;
81257 kgid_t small_block[NGROUPS_SMALL];
81258 kgid_t *blocks[0];
81259-};
81260+} __randomize_layout;
81261
81262 /**
81263 * get_group_info - Get a reference to a group info structure
81264@@ -137,7 +137,7 @@ struct cred {
81265 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
81266 struct group_info *group_info; /* supplementary groups for euid/fsgid */
81267 struct rcu_head rcu; /* RCU deletion hook */
81268-};
81269+} __randomize_layout;
81270
81271 extern void __put_cred(struct cred *);
81272 extern void exit_creds(struct task_struct *);
81273@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
81274 static inline void validate_process_creds(void)
81275 {
81276 }
81277+static inline void validate_task_creds(struct task_struct *task)
81278+{
81279+}
81280 #endif
81281
81282 /**
81283@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
81284
81285 #define task_uid(task) (task_cred_xxx((task), uid))
81286 #define task_euid(task) (task_cred_xxx((task), euid))
81287+#define task_securebits(task) (task_cred_xxx((task), securebits))
81288
81289 #define current_cred_xxx(xxx) \
81290 ({ \
81291diff --git a/include/linux/crypto.h b/include/linux/crypto.h
81292index fb5ef16..05d1e59 100644
81293--- a/include/linux/crypto.h
81294+++ b/include/linux/crypto.h
81295@@ -626,7 +626,7 @@ struct cipher_tfm {
81296 const u8 *key, unsigned int keylen);
81297 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81298 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81299-};
81300+} __no_const;
81301
81302 struct hash_tfm {
81303 int (*init)(struct hash_desc *desc);
81304@@ -647,13 +647,13 @@ struct compress_tfm {
81305 int (*cot_decompress)(struct crypto_tfm *tfm,
81306 const u8 *src, unsigned int slen,
81307 u8 *dst, unsigned int *dlen);
81308-};
81309+} __no_const;
81310
81311 struct rng_tfm {
81312 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
81313 unsigned int dlen);
81314 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
81315-};
81316+} __no_const;
81317
81318 #define crt_ablkcipher crt_u.ablkcipher
81319 #define crt_aead crt_u.aead
81320diff --git a/include/linux/ctype.h b/include/linux/ctype.h
81321index 653589e..4ef254a 100644
81322--- a/include/linux/ctype.h
81323+++ b/include/linux/ctype.h
81324@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
81325 * Fast implementation of tolower() for internal usage. Do not use in your
81326 * code.
81327 */
81328-static inline char _tolower(const char c)
81329+static inline unsigned char _tolower(const unsigned char c)
81330 {
81331 return c | 0x20;
81332 }
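The `_tolower()` signature change closes a signedness trap: with plain `char` (signed on most kernel targets), any byte >= 0x80 promotes to a negative `int`, and `c | 0x20` stays negative, which misbehaves if the result feeds, say, a table index. A runnable before/after sketch:

    #include <stdio.h>

    static char          old_tolower(const char c)          { return c | 0x20; }
    static unsigned char new_tolower(const unsigned char c) { return c | 0x20; }

    int main(void)
    {
            char high = (char)0xC4;   /* negative where char is signed */
            printf("old=%d new=%u\n",
                   (int)old_tolower(high), (unsigned)new_tolower(high));
            /* on signed-char targets: old=-28 new=228 */
            return 0;
    }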
81333diff --git a/include/linux/dcache.h b/include/linux/dcache.h
81334index d835879..c8e5b92 100644
81335--- a/include/linux/dcache.h
81336+++ b/include/linux/dcache.h
81337@@ -123,6 +123,9 @@ struct dentry {
81338 unsigned long d_time; /* used by d_revalidate */
81339 void *d_fsdata; /* fs-specific data */
81340
81341+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
81342+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
81343+#endif
81344 struct list_head d_lru; /* LRU list */
81345 struct list_head d_child; /* child of parent list */
81346 struct list_head d_subdirs; /* our children */
81347@@ -133,7 +136,7 @@ struct dentry {
81348 struct hlist_node d_alias; /* inode alias list */
81349 struct rcu_head d_rcu;
81350 } d_u;
81351-};
81352+} __randomize_layout;
81353
81354 /*
81355 * dentry->d_lock spinlock nesting subclasses:
81356@@ -319,7 +322,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
81357
81358 static inline unsigned d_count(const struct dentry *dentry)
81359 {
81360- return dentry->d_lockref.count;
81361+ return __lockref_read(&dentry->d_lockref);
81362 }
81363
81364 /*
81365@@ -347,7 +350,7 @@ extern char *dentry_path(struct dentry *, char *, int);
81366 static inline struct dentry *dget_dlock(struct dentry *dentry)
81367 {
81368 if (dentry)
81369- dentry->d_lockref.count++;
81370+ __lockref_inc(&dentry->d_lockref);
81371 return dentry;
81372 }
81373
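Routing `d_lockref.count` through `__lockref_read()`/`__lockref_inc()` continues the PAX_REFCOUNT theme: a raw `count++` would bypass overflow detection, so the count is only touched via wrappers that can be backed by a checked atomic. A hedged sketch of plausible wrapper semantics (the actual definitions live in the lockref changes elsewhere in this patch):

    static inline unsigned int __lockref_read(struct lockref *l)
    {
            return atomic_read(&l->count);  /* assumed: count kept in an atomic */
    }

    static inline void __lockref_inc(struct lockref *l)
    {
            atomic_inc(&l->count);          /* overflow would trip the check */
    }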
81374diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
81375index 7925bf0..d5143d2 100644
81376--- a/include/linux/decompress/mm.h
81377+++ b/include/linux/decompress/mm.h
81378@@ -77,7 +77,7 @@ static void free(void *where)
81379 * warnings when not needed (indeed large_malloc / large_free are not
81380 * needed by inflate */
81381
81382-#define malloc(a) kmalloc(a, GFP_KERNEL)
81383+#define malloc(a) kmalloc((a), GFP_KERNEL)
81384 #define free(a) kfree(a)
81385
81386 #define large_malloc(a) vmalloc(a)
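Wrapping the `malloc(a)` argument in parentheses is plain macro hygiene: an argument containing a lower-precedence operator must not rebind against the expansion. A runnable sketch of the failure mode with a simpler macro:

    #include <stdio.h>

    #define DOUBLE_BAD(a)  a * 2        /* argument can rebind */
    #define DOUBLE_GOOD(a) ((a) * 2)

    int main(void)
    {
            printf("%d %d\n", DOUBLE_BAD(1 + 3), DOUBLE_GOOD(1 + 3));
            /* prints "7 8": 1 + 3*2 versus (1 + 3)*2 */
            return 0;
    }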
81387diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
81388index ce447f0..83c66bd 100644
81389--- a/include/linux/devfreq.h
81390+++ b/include/linux/devfreq.h
81391@@ -114,7 +114,7 @@ struct devfreq_governor {
81392 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
81393 int (*event_handler)(struct devfreq *devfreq,
81394 unsigned int event, void *data);
81395-};
81396+} __do_const;
81397
81398 /**
81399 * struct devfreq - Device devfreq structure
81400diff --git a/include/linux/device.h b/include/linux/device.h
81401index 0eb8ee2..c603b6a 100644
81402--- a/include/linux/device.h
81403+++ b/include/linux/device.h
81404@@ -311,7 +311,7 @@ struct subsys_interface {
81405 struct list_head node;
81406 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
81407 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
81408-};
81409+} __do_const;
81410
81411 int subsys_interface_register(struct subsys_interface *sif);
81412 void subsys_interface_unregister(struct subsys_interface *sif);
81413@@ -507,7 +507,7 @@ struct device_type {
81414 void (*release)(struct device *dev);
81415
81416 const struct dev_pm_ops *pm;
81417-};
81418+} __do_const;
81419
81420 /* interface for exporting device attributes */
81421 struct device_attribute {
81422@@ -517,11 +517,12 @@ struct device_attribute {
81423 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
81424 const char *buf, size_t count);
81425 };
81426+typedef struct device_attribute __no_const device_attribute_no_const;
81427
81428 struct dev_ext_attribute {
81429 struct device_attribute attr;
81430 void *var;
81431-};
81432+} __do_const;
81433
81434 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
81435 char *buf);
81436diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
81437index c3007cb..43efc8c 100644
81438--- a/include/linux/dma-mapping.h
81439+++ b/include/linux/dma-mapping.h
81440@@ -60,7 +60,7 @@ struct dma_map_ops {
81441 u64 (*get_required_mask)(struct device *dev);
81442 #endif
81443 int is_phys;
81444-};
81445+} __do_const;
81446
81447 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
81448
81449diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
81450index b6997a0..108be6c 100644
81451--- a/include/linux/dmaengine.h
81452+++ b/include/linux/dmaengine.h
81453@@ -1133,9 +1133,9 @@ struct dma_pinned_list {
81454 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
81455 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
81456
81457-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81458+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81459 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
81460-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81461+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81462 struct dma_pinned_list *pinned_list, struct page *page,
81463 unsigned int offset, size_t len);
81464
81465diff --git a/include/linux/efi.h b/include/linux/efi.h
81466index cf7e431..d239dce 100644
81467--- a/include/linux/efi.h
81468+++ b/include/linux/efi.h
81469@@ -1056,6 +1056,7 @@ struct efivar_operations {
81470 efi_set_variable_nonblocking_t *set_variable_nonblocking;
81471 efi_query_variable_store_t *query_variable_store;
81472 };
81473+typedef struct efivar_operations __no_const efivar_operations_no_const;
81474
81475 struct efivars {
81476 /*
81477diff --git a/include/linux/elf.h b/include/linux/elf.h
81478index 20fa8d8..3d0dd18 100644
81479--- a/include/linux/elf.h
81480+++ b/include/linux/elf.h
81481@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
81482 #define elf_note elf32_note
81483 #define elf_addr_t Elf32_Off
81484 #define Elf_Half Elf32_Half
81485+#define elf_dyn Elf32_Dyn
81486
81487 #else
81488
81489@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
81490 #define elf_note elf64_note
81491 #define elf_addr_t Elf64_Off
81492 #define Elf_Half Elf64_Half
81493+#define elf_dyn Elf64_Dyn
81494
81495 #endif
81496
81497diff --git a/include/linux/err.h b/include/linux/err.h
81498index a729120..6ede2c9 100644
81499--- a/include/linux/err.h
81500+++ b/include/linux/err.h
81501@@ -20,12 +20,12 @@
81502
81503 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
81504
81505-static inline void * __must_check ERR_PTR(long error)
81506+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
81507 {
81508 return (void *) error;
81509 }
81510
81511-static inline long __must_check PTR_ERR(__force const void *ptr)
81512+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
81513 {
81514 return (long) ptr;
81515 }
81516diff --git a/include/linux/extcon.h b/include/linux/extcon.h
81517index 36f49c4..a2a1f4c 100644
81518--- a/include/linux/extcon.h
81519+++ b/include/linux/extcon.h
81520@@ -135,7 +135,7 @@ struct extcon_dev {
81521 /* /sys/class/extcon/.../mutually_exclusive/... */
81522 struct attribute_group attr_g_muex;
81523 struct attribute **attrs_muex;
81524- struct device_attribute *d_attrs_muex;
81525+ device_attribute_no_const *d_attrs_muex;
81526 };
81527
81528 /**
81529diff --git a/include/linux/fb.h b/include/linux/fb.h
81530index 043f328..180ccbf 100644
81531--- a/include/linux/fb.h
81532+++ b/include/linux/fb.h
81533@@ -305,7 +305,8 @@ struct fb_ops {
81534 /* called at KDB enter and leave time to prepare the console */
81535 int (*fb_debug_enter)(struct fb_info *info);
81536 int (*fb_debug_leave)(struct fb_info *info);
81537-};
81538+} __do_const;
81539+typedef struct fb_ops __no_const fb_ops_no_const;
81540
81541 #ifdef CONFIG_FB_TILEBLITTING
81542 #define FB_TILE_CURSOR_NONE 0
81543diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81544index 230f87b..1fd0485 100644
81545--- a/include/linux/fdtable.h
81546+++ b/include/linux/fdtable.h
81547@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81548 void put_files_struct(struct files_struct *fs);
81549 void reset_files_struct(struct files_struct *);
81550 int unshare_files(struct files_struct **);
81551-struct files_struct *dup_fd(struct files_struct *, int *);
81552+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81553 void do_close_on_exec(struct files_struct *);
81554 int iterate_fd(struct files_struct *, unsigned,
81555 int (*)(const void *, struct file *, unsigned),
81556diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81557index 8293262..2b3b8bd 100644
81558--- a/include/linux/frontswap.h
81559+++ b/include/linux/frontswap.h
81560@@ -11,7 +11,7 @@ struct frontswap_ops {
81561 int (*load)(unsigned, pgoff_t, struct page *);
81562 void (*invalidate_page)(unsigned, pgoff_t);
81563 void (*invalidate_area)(unsigned);
81564-};
81565+} __no_const;
81566
81567 extern bool frontswap_enabled;
81568 extern struct frontswap_ops *
81569diff --git a/include/linux/fs.h b/include/linux/fs.h
81570index 52cc449..58b25c9 100644
81571--- a/include/linux/fs.h
81572+++ b/include/linux/fs.h
81573@@ -410,7 +410,7 @@ struct address_space {
81574 spinlock_t private_lock; /* for use by the address_space */
81575 struct list_head private_list; /* ditto */
81576 void *private_data; /* ditto */
81577-} __attribute__((aligned(sizeof(long))));
81578+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81579 /*
81580 * On most architectures that alignment is already the case; but
81581 * must be enforced here for CRIS, to let the least significant bit
81582@@ -453,7 +453,7 @@ struct block_device {
81583 int bd_fsfreeze_count;
81584 /* Mutex for freeze */
81585 struct mutex bd_fsfreeze_mutex;
81586-};
81587+} __randomize_layout;
81588
81589 /*
81590 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81591@@ -639,7 +639,7 @@ struct inode {
81592 #endif
81593
81594 void *i_private; /* fs or device private pointer */
81595-};
81596+} __randomize_layout;
81597
81598 static inline int inode_unhashed(struct inode *inode)
81599 {
81600@@ -834,7 +834,7 @@ struct file {
81601 struct list_head f_tfile_llink;
81602 #endif /* #ifdef CONFIG_EPOLL */
81603 struct address_space *f_mapping;
81604-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81605+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81606
81607 struct file_handle {
81608 __u32 handle_bytes;
81609@@ -962,7 +962,7 @@ struct file_lock {
81610 int state; /* state of grant or error if -ve */
81611 } afs;
81612 } fl_u;
81613-};
81614+} __randomize_layout;
81615
81616 struct file_lock_context {
81617 spinlock_t flc_lock;
81618@@ -1316,7 +1316,7 @@ struct super_block {
81619 * Indicates how deep in a filesystem stack this SB is
81620 */
81621 int s_stack_depth;
81622-};
81623+} __randomize_layout;
81624
81625 extern struct timespec current_fs_time(struct super_block *sb);
81626
81627@@ -1570,7 +1570,8 @@ struct file_operations {
81628 #ifndef CONFIG_MMU
81629 unsigned (*mmap_capabilities)(struct file *);
81630 #endif
81631-};
81632+} __do_const __randomize_layout;
81633+typedef struct file_operations __no_const file_operations_no_const;
81634
81635 struct inode_operations {
81636 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81637@@ -2918,4 +2919,14 @@ static inline bool dir_relax(struct inode *inode)
81638 return !IS_DEADDIR(inode);
81639 }
81640
81641+static inline bool is_sidechannel_device(const struct inode *inode)
81642+{
81643+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81644+ umode_t mode = inode->i_mode;
81645+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81646+#else
81647+ return false;
81648+#endif
81649+}
81650+
81651 #endif /* _LINUX_FS_H */
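`is_sidechannel_device()` backs GRKERNSEC_DEVICE_SIDECHANNEL: a character or block device node that is world-readable or world-writable (a pty, for example) can leak timing information about another user's session, so the fsnotify hooks a few hunks below suppress access/modify events on such nodes. The predicate is plain mode arithmetic and can be tried from userspace:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Userspace restatement of the same mode test. */
    static int is_sidechannel_device(mode_t mode)
    {
            return (S_ISCHR(mode) || S_ISBLK(mode)) &&
                   (mode & (S_IROTH | S_IWOTH));
    }

    int main(void)
    {
            struct stat st;
            if (stat("/dev/tty", &st) == 0)
                    printf("/dev/tty sidechannel: %d\n",
                           is_sidechannel_device(st.st_mode));
            return 0;
    }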
81652diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81653index 0efc3e6..fd23610 100644
81654--- a/include/linux/fs_struct.h
81655+++ b/include/linux/fs_struct.h
81656@@ -6,13 +6,13 @@
81657 #include <linux/seqlock.h>
81658
81659 struct fs_struct {
81660- int users;
81661+ atomic_t users;
81662 spinlock_t lock;
81663 seqcount_t seq;
81664 int umask;
81665 int in_exec;
81666 struct path root, pwd;
81667-};
81668+} __randomize_layout;
81669
81670 extern struct kmem_cache *fs_cachep;
81671
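Converting `fs_struct.users` from `int` to `atomic_t` lets reference bumps proceed without taking `fs->lock`, turning the lifetime handling into the usual get/put pattern. A sketch of the assumed call-site shape:

    atomic_inc(&fs->users);               /* was: spin_lock; fs->users++; spin_unlock */

    if (atomic_dec_and_test(&fs->users))  /* last reference dropped */
            free_fs_struct(fs);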
81672diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81673index 7714849..a4a5c7a 100644
81674--- a/include/linux/fscache-cache.h
81675+++ b/include/linux/fscache-cache.h
81676@@ -113,7 +113,7 @@ struct fscache_operation {
81677 fscache_operation_release_t release;
81678 };
81679
81680-extern atomic_t fscache_op_debug_id;
81681+extern atomic_unchecked_t fscache_op_debug_id;
81682 extern void fscache_op_work_func(struct work_struct *work);
81683
81684 extern void fscache_enqueue_operation(struct fscache_operation *);
81685@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81686 INIT_WORK(&op->work, fscache_op_work_func);
81687 atomic_set(&op->usage, 1);
81688 op->state = FSCACHE_OP_ST_INITIALISED;
81689- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81690+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81691 op->processor = processor;
81692 op->release = release;
81693 INIT_LIST_HEAD(&op->pend_link);
81694diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81695index 115bb81..e7b812b 100644
81696--- a/include/linux/fscache.h
81697+++ b/include/linux/fscache.h
81698@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81699 * - this is mandatory for any object that may have data
81700 */
81701 void (*now_uncached)(void *cookie_netfs_data);
81702-};
81703+} __do_const;
81704
81705 /*
81706 * fscache cached network filesystem type
81707diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81708index 7ee1774..72505b8 100644
81709--- a/include/linux/fsnotify.h
81710+++ b/include/linux/fsnotify.h
81711@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81712 struct inode *inode = file_inode(file);
81713 __u32 mask = FS_ACCESS;
81714
81715+ if (is_sidechannel_device(inode))
81716+ return;
81717+
81718 if (S_ISDIR(inode->i_mode))
81719 mask |= FS_ISDIR;
81720
81721@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81722 struct inode *inode = file_inode(file);
81723 __u32 mask = FS_MODIFY;
81724
81725+ if (is_sidechannel_device(inode))
81726+ return;
81727+
81728 if (S_ISDIR(inode->i_mode))
81729 mask |= FS_ISDIR;
81730
81731@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81732 */
81733 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81734 {
81735- return kstrdup(name, GFP_KERNEL);
81736+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81737 }
81738
81739 /*
81740diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81741index ec274e0..e678159 100644
81742--- a/include/linux/genhd.h
81743+++ b/include/linux/genhd.h
81744@@ -194,7 +194,7 @@ struct gendisk {
81745 struct kobject *slave_dir;
81746
81747 struct timer_rand_state *random;
81748- atomic_t sync_io; /* RAID */
81749+ atomic_unchecked_t sync_io; /* RAID */
81750 struct disk_events *ev;
81751 #ifdef CONFIG_BLK_DEV_INTEGRITY
81752 struct blk_integrity *integrity;
81753@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81754 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81755
81756 /* drivers/char/random.c */
81757-extern void add_disk_randomness(struct gendisk *disk);
81758+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81759 extern void rand_initialize_disk(struct gendisk *disk);
81760
81761 static inline sector_t get_start_sect(struct block_device *bdev)
81762diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81763index 667c311..abac2a7 100644
81764--- a/include/linux/genl_magic_func.h
81765+++ b/include/linux/genl_magic_func.h
81766@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81767 },
81768
81769 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81770-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81771+static struct genl_ops ZZZ_genl_ops[] = {
81772 #include GENL_MAGIC_INCLUDE_FILE
81773 };
81774
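Dropping `__read_mostly` from `ZZZ_genl_ops` is a knock-on effect of constify: the plugin makes the ops array const, and GCC refuses to place const and non-const objects in one named section, so keeping the `.data..read_mostly` section attribute would break the build. A minimal reproduction of the compile error (assumed rationale):

    #define __read_mostly __attribute__((__section__(".data..read_mostly")))

    static int counter __read_mostly;            /* writable object in the section */
    static const int limit __read_mostly = 16;   /* gcc: causes a section type conflict */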
81775diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81776index 51bd1e7..0486343 100644
81777--- a/include/linux/gfp.h
81778+++ b/include/linux/gfp.h
81779@@ -34,6 +34,13 @@ struct vm_area_struct;
81780 #define ___GFP_NO_KSWAPD 0x400000u
81781 #define ___GFP_OTHER_NODE 0x800000u
81782 #define ___GFP_WRITE 0x1000000u
81783+
81784+#ifdef CONFIG_PAX_USERCOPY_SLABS
81785+#define ___GFP_USERCOPY 0x2000000u
81786+#else
81787+#define ___GFP_USERCOPY 0
81788+#endif
81789+
81790 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81791
81792 /*
81793@@ -90,6 +97,7 @@ struct vm_area_struct;
81794 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81795 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81796 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81797+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81798
81799 /*
81800 * This may seem redundant, but it's a way of annotating false positives vs.
81801@@ -97,7 +105,7 @@ struct vm_area_struct;
81802 */
81803 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81804
81805-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81806+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81807 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81808
81809 /* This equals 0, but use constants in case they ever change */
81810@@ -152,6 +160,8 @@ struct vm_area_struct;
81811 /* 4GB DMA on some platforms */
81812 #define GFP_DMA32 __GFP_DMA32
81813
81814+#define GFP_USERCOPY __GFP_USERCOPY
81815+
81816 /* Convert GFP flags to their corresponding migrate type */
81817 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81818 {
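`__GFP_USERCOPY` (and the `__GFP_BITS_SHIFT` bump that makes room for it) supports PAX_USERCOPY_SLABS: allocations destined to cross the user/kernel boundary are steered into dedicated slabs so the usercopy checker can bounds-check them precisely. A hedged kernel-context sketch:

    /* Buffer that will be filled from userland: place it in a
     * usercopy-capable slab so copy_from_user() can be range-checked. */
    void *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);

    if (buf && copy_from_user(buf, ubuf, len)) {
            kfree(buf);
            buf = NULL;     /* a real caller would return -EFAULT here */
    }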
81819diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81820new file mode 100644
81821index 0000000..91858e4
81822--- /dev/null
81823+++ b/include/linux/gracl.h
81824@@ -0,0 +1,342 @@
81825+#ifndef GR_ACL_H
81826+#define GR_ACL_H
81827+
81828+#include <linux/grdefs.h>
81829+#include <linux/resource.h>
81830+#include <linux/capability.h>
81831+#include <linux/dcache.h>
81832+#include <asm/resource.h>
81833+
81834+/* Major status information */
81835+
81836+#define GR_VERSION "grsecurity 3.1"
81837+#define GRSECURITY_VERSION 0x3100
81838+
81839+enum {
81840+ GR_SHUTDOWN = 0,
81841+ GR_ENABLE = 1,
81842+ GR_SPROLE = 2,
81843+ GR_OLDRELOAD = 3,
81844+ GR_SEGVMOD = 4,
81845+ GR_STATUS = 5,
81846+ GR_UNSPROLE = 6,
81847+ GR_PASSSET = 7,
81848+ GR_SPROLEPAM = 8,
81849+ GR_RELOAD = 9,
81850+};
81851+
81852+/* Password setup definitions
81853+ * kernel/grhash.c */
81854+enum {
81855+ GR_PW_LEN = 128,
81856+ GR_SALT_LEN = 16,
81857+ GR_SHA_LEN = 32,
81858+};
81859+
81860+enum {
81861+ GR_SPROLE_LEN = 64,
81862+};
81863+
81864+enum {
81865+ GR_NO_GLOB = 0,
81866+ GR_REG_GLOB,
81867+ GR_CREATE_GLOB
81868+};
81869+
81870+#define GR_NLIMITS 32
81871+
81872+/* Begin Data Structures */
81873+
81874+struct sprole_pw {
81875+ unsigned char *rolename;
81876+ unsigned char salt[GR_SALT_LEN];
81877+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81878+};
81879+
81880+struct name_entry {
81881+ __u32 key;
81882+ u64 inode;
81883+ dev_t device;
81884+ char *name;
81885+ __u16 len;
81886+ __u8 deleted;
81887+ struct name_entry *prev;
81888+ struct name_entry *next;
81889+};
81890+
81891+struct inodev_entry {
81892+ struct name_entry *nentry;
81893+ struct inodev_entry *prev;
81894+ struct inodev_entry *next;
81895+};
81896+
81897+struct acl_role_db {
81898+ struct acl_role_label **r_hash;
81899+ __u32 r_size;
81900+};
81901+
81902+struct inodev_db {
81903+ struct inodev_entry **i_hash;
81904+ __u32 i_size;
81905+};
81906+
81907+struct name_db {
81908+ struct name_entry **n_hash;
81909+ __u32 n_size;
81910+};
81911+
81912+struct crash_uid {
81913+ uid_t uid;
81914+ unsigned long expires;
81915+};
81916+
81917+struct gr_hash_struct {
81918+ void **table;
81919+ void **nametable;
81920+ void *first;
81921+ __u32 table_size;
81922+ __u32 used_size;
81923+ int type;
81924+};
81925+
81926+/* Userspace Grsecurity ACL data structures */
81927+
81928+struct acl_subject_label {
81929+ char *filename;
81930+ u64 inode;
81931+ dev_t device;
81932+ __u32 mode;
81933+ kernel_cap_t cap_mask;
81934+ kernel_cap_t cap_lower;
81935+ kernel_cap_t cap_invert_audit;
81936+
81937+ struct rlimit res[GR_NLIMITS];
81938+ __u32 resmask;
81939+
81940+ __u8 user_trans_type;
81941+ __u8 group_trans_type;
81942+ uid_t *user_transitions;
81943+ gid_t *group_transitions;
81944+ __u16 user_trans_num;
81945+ __u16 group_trans_num;
81946+
81947+ __u32 sock_families[2];
81948+ __u32 ip_proto[8];
81949+ __u32 ip_type;
81950+ struct acl_ip_label **ips;
81951+ __u32 ip_num;
81952+ __u32 inaddr_any_override;
81953+
81954+ __u32 crashes;
81955+ unsigned long expires;
81956+
81957+ struct acl_subject_label *parent_subject;
81958+ struct gr_hash_struct *hash;
81959+ struct acl_subject_label *prev;
81960+ struct acl_subject_label *next;
81961+
81962+ struct acl_object_label **obj_hash;
81963+ __u32 obj_hash_size;
81964+ __u16 pax_flags;
81965+};
81966+
81967+struct role_allowed_ip {
81968+ __u32 addr;
81969+ __u32 netmask;
81970+
81971+ struct role_allowed_ip *prev;
81972+ struct role_allowed_ip *next;
81973+};
81974+
81975+struct role_transition {
81976+ char *rolename;
81977+
81978+ struct role_transition *prev;
81979+ struct role_transition *next;
81980+};
81981+
81982+struct acl_role_label {
81983+ char *rolename;
81984+ uid_t uidgid;
81985+ __u16 roletype;
81986+
81987+ __u16 auth_attempts;
81988+ unsigned long expires;
81989+
81990+ struct acl_subject_label *root_label;
81991+ struct gr_hash_struct *hash;
81992+
81993+ struct acl_role_label *prev;
81994+ struct acl_role_label *next;
81995+
81996+ struct role_transition *transitions;
81997+ struct role_allowed_ip *allowed_ips;
81998+ uid_t *domain_children;
81999+ __u16 domain_child_num;
82000+
82001+ umode_t umask;
82002+
82003+ struct acl_subject_label **subj_hash;
82004+ __u32 subj_hash_size;
82005+};
82006+
82007+struct user_acl_role_db {
82008+ struct acl_role_label **r_table;
82009+ __u32 num_pointers; /* Number of allocations to track */
82010+ __u32 num_roles; /* Number of roles */
82011+ __u32 num_domain_children; /* Number of domain children */
82012+ __u32 num_subjects; /* Number of subjects */
82013+ __u32 num_objects; /* Number of objects */
82014+};
82015+
82016+struct acl_object_label {
82017+ char *filename;
82018+ u64 inode;
82019+ dev_t device;
82020+ __u32 mode;
82021+
82022+ struct acl_subject_label *nested;
82023+ struct acl_object_label *globbed;
82024+
82025+ /* the next two pointers are currently unused */
82026+
82027+ struct acl_object_label *prev;
82028+ struct acl_object_label *next;
82029+};
82030+
82031+struct acl_ip_label {
82032+ char *iface;
82033+ __u32 addr;
82034+ __u32 netmask;
82035+ __u16 low, high;
82036+ __u8 mode;
82037+ __u32 type;
82038+ __u32 proto[8];
82039+
82040+ /* the next two pointers are currently unused */
82041+
82042+ struct acl_ip_label *prev;
82043+ struct acl_ip_label *next;
82044+};
82045+
82046+struct gr_arg {
82047+ struct user_acl_role_db role_db;
82048+ unsigned char pw[GR_PW_LEN];
82049+ unsigned char salt[GR_SALT_LEN];
82050+ unsigned char sum[GR_SHA_LEN];
82051+ unsigned char sp_role[GR_SPROLE_LEN];
82052+ struct sprole_pw *sprole_pws;
82053+ dev_t segv_device;
82054+ u64 segv_inode;
82055+ uid_t segv_uid;
82056+ __u16 num_sprole_pws;
82057+ __u16 mode;
82058+};
82059+
82060+struct gr_arg_wrapper {
82061+ struct gr_arg *arg;
82062+ __u32 version;
82063+ __u32 size;
82064+};
82065+
82066+struct subject_map {
82067+ struct acl_subject_label *user;
82068+ struct acl_subject_label *kernel;
82069+ struct subject_map *prev;
82070+ struct subject_map *next;
82071+};
82072+
82073+struct acl_subj_map_db {
82074+ struct subject_map **s_hash;
82075+ __u32 s_size;
82076+};
82077+
82078+struct gr_policy_state {
82079+ struct sprole_pw **acl_special_roles;
82080+ __u16 num_sprole_pws;
82081+ struct acl_role_label *kernel_role;
82082+ struct acl_role_label *role_list;
82083+ struct acl_role_label *default_role;
82084+ struct acl_role_db acl_role_set;
82085+ struct acl_subj_map_db subj_map_set;
82086+ struct name_db name_set;
82087+ struct inodev_db inodev_set;
82088+};
82089+
82090+struct gr_alloc_state {
82091+ unsigned long alloc_stack_next;
82092+ unsigned long alloc_stack_size;
82093+ void **alloc_stack;
82094+};
82095+
82096+struct gr_reload_state {
82097+ struct gr_policy_state oldpolicy;
82098+ struct gr_alloc_state oldalloc;
82099+ struct gr_policy_state newpolicy;
82100+ struct gr_alloc_state newalloc;
82101+ struct gr_policy_state *oldpolicy_ptr;
82102+ struct gr_alloc_state *oldalloc_ptr;
82103+ unsigned char oldmode;
82104+};
82105+
82106+/* End Data Structures Section */
82107+
82108+/* Hash functions generated by empirical testing by Brad Spengler.
82109+   They make good use of the low bits of the inode: the lookup loop
82110+   typically runs 0-1 iterations for a successful match and 0-3 for
82111+   an unsuccessful one.  Shift/add algorithm, modulus of table size, and an XOR. */
82112+
82113+static __inline__ unsigned int
82114+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
82115+{
82116+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
82117+}
82118+
82119+static __inline__ unsigned int
82120+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
82121+{
82122+ return ((const unsigned long)userp % sz);
82123+}
82124+
82125+static __inline__ unsigned int
82126+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
82127+{
82128+ unsigned int rem;
82129+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
82130+ return rem;
82131+}
82132+
82133+static __inline__ unsigned int
82134+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
82135+{
82136+ return full_name_hash((const unsigned char *)name, len) % sz;
82137+}
82138+
82139+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
82140+ subj = NULL; \
82141+ iter = 0; \
82142+ while (iter < role->subj_hash_size) { \
82143+ if (subj == NULL) \
82144+ subj = role->subj_hash[iter]; \
82145+ if (subj == NULL) { \
82146+ iter++; \
82147+ continue; \
82148+ }
82149+
82150+#define FOR_EACH_SUBJECT_END(subj,iter) \
82151+ subj = subj->next; \
82152+ if (subj == NULL) \
82153+ iter++; \
82154+ }
82155+
82156+
82157+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
82158+ subj = role->hash->first; \
82159+ while (subj != NULL) {
82160+
82161+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
82162+ subj = subj->next; \
82163+ }
82164+
82165+#endif
82166+
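The inline hash helpers above are small enough to exercise directly; `gr_rhash()`, for instance, buckets roles by uid/gid and role type. A runnable userspace transcription (the table size is a hypothetical `r_size`):

    #include <stdio.h>

    static unsigned int gr_rhash(unsigned int uid, unsigned short type,
                                 unsigned int sz)
    {
            /* shift/add with an XOR, reduced mod the table size */
            return (((uid + type) << (16 + type)) ^ uid) % sz;
    }

    int main(void)
    {
            unsigned int sz = 256;  /* hypothetical r_size */
            for (unsigned int uid = 1000; uid < 1004; uid++)
                    printf("uid %u -> bucket %u\n", uid, gr_rhash(uid, 1, sz));
            return 0;
    }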
82167diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
82168new file mode 100644
82169index 0000000..af64092
82170--- /dev/null
82171+++ b/include/linux/gracl_compat.h
82172@@ -0,0 +1,156 @@
82173+#ifndef GR_ACL_COMPAT_H
82174+#define GR_ACL_COMPAT_H
82175+
82176+#include <linux/resource.h>
82177+#include <asm/resource.h>
82178+
82179+struct sprole_pw_compat {
82180+ compat_uptr_t rolename;
82181+ unsigned char salt[GR_SALT_LEN];
82182+ unsigned char sum[GR_SHA_LEN];
82183+};
82184+
82185+struct gr_hash_struct_compat {
82186+ compat_uptr_t table;
82187+ compat_uptr_t nametable;
82188+ compat_uptr_t first;
82189+ __u32 table_size;
82190+ __u32 used_size;
82191+ int type;
82192+};
82193+
82194+struct acl_subject_label_compat {
82195+ compat_uptr_t filename;
82196+ compat_u64 inode;
82197+ __u32 device;
82198+ __u32 mode;
82199+ kernel_cap_t cap_mask;
82200+ kernel_cap_t cap_lower;
82201+ kernel_cap_t cap_invert_audit;
82202+
82203+ struct compat_rlimit res[GR_NLIMITS];
82204+ __u32 resmask;
82205+
82206+ __u8 user_trans_type;
82207+ __u8 group_trans_type;
82208+ compat_uptr_t user_transitions;
82209+ compat_uptr_t group_transitions;
82210+ __u16 user_trans_num;
82211+ __u16 group_trans_num;
82212+
82213+ __u32 sock_families[2];
82214+ __u32 ip_proto[8];
82215+ __u32 ip_type;
82216+ compat_uptr_t ips;
82217+ __u32 ip_num;
82218+ __u32 inaddr_any_override;
82219+
82220+ __u32 crashes;
82221+ compat_ulong_t expires;
82222+
82223+ compat_uptr_t parent_subject;
82224+ compat_uptr_t hash;
82225+ compat_uptr_t prev;
82226+ compat_uptr_t next;
82227+
82228+ compat_uptr_t obj_hash;
82229+ __u32 obj_hash_size;
82230+ __u16 pax_flags;
82231+};
82232+
82233+struct role_allowed_ip_compat {
82234+ __u32 addr;
82235+ __u32 netmask;
82236+
82237+ compat_uptr_t prev;
82238+ compat_uptr_t next;
82239+};
82240+
82241+struct role_transition_compat {
82242+ compat_uptr_t rolename;
82243+
82244+ compat_uptr_t prev;
82245+ compat_uptr_t next;
82246+};
82247+
82248+struct acl_role_label_compat {
82249+ compat_uptr_t rolename;
82250+ uid_t uidgid;
82251+ __u16 roletype;
82252+
82253+ __u16 auth_attempts;
82254+ compat_ulong_t expires;
82255+
82256+ compat_uptr_t root_label;
82257+ compat_uptr_t hash;
82258+
82259+ compat_uptr_t prev;
82260+ compat_uptr_t next;
82261+
82262+ compat_uptr_t transitions;
82263+ compat_uptr_t allowed_ips;
82264+ compat_uptr_t domain_children;
82265+ __u16 domain_child_num;
82266+
82267+ umode_t umask;
82268+
82269+ compat_uptr_t subj_hash;
82270+ __u32 subj_hash_size;
82271+};
82272+
82273+struct user_acl_role_db_compat {
82274+ compat_uptr_t r_table;
82275+ __u32 num_pointers;
82276+ __u32 num_roles;
82277+ __u32 num_domain_children;
82278+ __u32 num_subjects;
82279+ __u32 num_objects;
82280+};
82281+
82282+struct acl_object_label_compat {
82283+ compat_uptr_t filename;
82284+ compat_u64 inode;
82285+ __u32 device;
82286+ __u32 mode;
82287+
82288+ compat_uptr_t nested;
82289+ compat_uptr_t globbed;
82290+
82291+ compat_uptr_t prev;
82292+ compat_uptr_t next;
82293+};
82294+
82295+struct acl_ip_label_compat {
82296+ compat_uptr_t iface;
82297+ __u32 addr;
82298+ __u32 netmask;
82299+ __u16 low, high;
82300+ __u8 mode;
82301+ __u32 type;
82302+ __u32 proto[8];
82303+
82304+ compat_uptr_t prev;
82305+ compat_uptr_t next;
82306+};
82307+
82308+struct gr_arg_compat {
82309+ struct user_acl_role_db_compat role_db;
82310+ unsigned char pw[GR_PW_LEN];
82311+ unsigned char salt[GR_SALT_LEN];
82312+ unsigned char sum[GR_SHA_LEN];
82313+ unsigned char sp_role[GR_SPROLE_LEN];
82314+ compat_uptr_t sprole_pws;
82315+ __u32 segv_device;
82316+ compat_u64 segv_inode;
82317+ uid_t segv_uid;
82318+ __u16 num_sprole_pws;
82319+ __u16 mode;
82320+};
82321+
82322+struct gr_arg_wrapper_compat {
82323+ compat_uptr_t arg;
82324+ __u32 version;
82325+ __u32 size;
82326+};
82327+
82328+#endif
82329diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
82330new file mode 100644
82331index 0000000..323ecf2
82332--- /dev/null
82333+++ b/include/linux/gralloc.h
82334@@ -0,0 +1,9 @@
82335+#ifndef __GRALLOC_H
82336+#define __GRALLOC_H
82337+
82338+void acl_free_all(void);
82339+int acl_alloc_stack_init(unsigned long size);
82340+void *acl_alloc(unsigned long len);
82341+void *acl_alloc_num(unsigned long num, unsigned long len);
82342+
82343+#endif
82344diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
82345new file mode 100644
82346index 0000000..be66033
82347--- /dev/null
82348+++ b/include/linux/grdefs.h
82349@@ -0,0 +1,140 @@
82350+#ifndef GRDEFS_H
82351+#define GRDEFS_H
82352+
82353+/* Begin grsecurity status declarations */
82354+
82355+enum {
82356+ GR_READY = 0x01,
82357+ GR_STATUS_INIT = 0x00 // disabled state
82358+};
82359+
82360+/* Begin ACL declarations */
82361+
82362+/* Role flags */
82363+
82364+enum {
82365+ GR_ROLE_USER = 0x0001,
82366+ GR_ROLE_GROUP = 0x0002,
82367+ GR_ROLE_DEFAULT = 0x0004,
82368+ GR_ROLE_SPECIAL = 0x0008,
82369+ GR_ROLE_AUTH = 0x0010,
82370+ GR_ROLE_NOPW = 0x0020,
82371+ GR_ROLE_GOD = 0x0040,
82372+ GR_ROLE_LEARN = 0x0080,
82373+ GR_ROLE_TPE = 0x0100,
82374+ GR_ROLE_DOMAIN = 0x0200,
82375+ GR_ROLE_PAM = 0x0400,
82376+ GR_ROLE_PERSIST = 0x0800
82377+};
82378+
82379+/* ACL Subject and Object mode flags */
82380+enum {
82381+ GR_DELETED = 0x80000000
82382+};
82383+
82384+/* ACL Object-only mode flags */
82385+enum {
82386+ GR_READ = 0x00000001,
82387+ GR_APPEND = 0x00000002,
82388+ GR_WRITE = 0x00000004,
82389+ GR_EXEC = 0x00000008,
82390+ GR_FIND = 0x00000010,
82391+ GR_INHERIT = 0x00000020,
82392+ GR_SETID = 0x00000040,
82393+ GR_CREATE = 0x00000080,
82394+ GR_DELETE = 0x00000100,
82395+ GR_LINK = 0x00000200,
82396+ GR_AUDIT_READ = 0x00000400,
82397+ GR_AUDIT_APPEND = 0x00000800,
82398+ GR_AUDIT_WRITE = 0x00001000,
82399+ GR_AUDIT_EXEC = 0x00002000,
82400+ GR_AUDIT_FIND = 0x00004000,
82401+ GR_AUDIT_INHERIT= 0x00008000,
82402+ GR_AUDIT_SETID = 0x00010000,
82403+ GR_AUDIT_CREATE = 0x00020000,
82404+ GR_AUDIT_DELETE = 0x00040000,
82405+ GR_AUDIT_LINK = 0x00080000,
82406+ GR_PTRACERD = 0x00100000,
82407+ GR_NOPTRACE = 0x00200000,
82408+ GR_SUPPRESS = 0x00400000,
82409+ GR_NOLEARN = 0x00800000,
82410+ GR_INIT_TRANSFER= 0x01000000
82411+};
82412+
82413+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
82414+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
82415+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
82416+
82417+/* ACL subject-only mode flags */
82418+enum {
82419+ GR_KILL = 0x00000001,
82420+ GR_VIEW = 0x00000002,
82421+ GR_PROTECTED = 0x00000004,
82422+ GR_LEARN = 0x00000008,
82423+ GR_OVERRIDE = 0x00000010,
82424+ /* just a placeholder, this mode is only used in userspace */
82425+ GR_DUMMY = 0x00000020,
82426+ GR_PROTSHM = 0x00000040,
82427+ GR_KILLPROC = 0x00000080,
82428+ GR_KILLIPPROC = 0x00000100,
82429+ /* just a placeholder, this mode is only used in userspace */
82430+ GR_NOTROJAN = 0x00000200,
82431+ GR_PROTPROCFD = 0x00000400,
82432+ GR_PROCACCT = 0x00000800,
82433+ GR_RELAXPTRACE = 0x00001000,
82434+ //GR_NESTED = 0x00002000,
82435+ GR_INHERITLEARN = 0x00004000,
82436+ GR_PROCFIND = 0x00008000,
82437+ GR_POVERRIDE = 0x00010000,
82438+ GR_KERNELAUTH = 0x00020000,
82439+ GR_ATSECURE = 0x00040000,
82440+ GR_SHMEXEC = 0x00080000
82441+};
82442+
82443+enum {
82444+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
82445+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
82446+ GR_PAX_ENABLE_MPROTECT = 0x0004,
82447+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
82448+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
82449+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
82450+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
82451+ GR_PAX_DISABLE_MPROTECT = 0x0400,
82452+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
82453+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
82454+};
82455+
82456+enum {
82457+ GR_ID_USER = 0x01,
82458+ GR_ID_GROUP = 0x02,
82459+};
82460+
82461+enum {
82462+ GR_ID_ALLOW = 0x01,
82463+ GR_ID_DENY = 0x02,
82464+};
82465+
82466+#define GR_CRASH_RES 31
82467+#define GR_UIDTABLE_MAX 500
82468+
82469+/* begin resource learning section */
82470+enum {
82471+ GR_RLIM_CPU_BUMP = 60,
82472+ GR_RLIM_FSIZE_BUMP = 50000,
82473+ GR_RLIM_DATA_BUMP = 10000,
82474+ GR_RLIM_STACK_BUMP = 1000,
82475+ GR_RLIM_CORE_BUMP = 10000,
82476+ GR_RLIM_RSS_BUMP = 500000,
82477+ GR_RLIM_NPROC_BUMP = 1,
82478+ GR_RLIM_NOFILE_BUMP = 5,
82479+ GR_RLIM_MEMLOCK_BUMP = 50000,
82480+ GR_RLIM_AS_BUMP = 500000,
82481+ GR_RLIM_LOCKS_BUMP = 2,
82482+ GR_RLIM_SIGPENDING_BUMP = 5,
82483+ GR_RLIM_MSGQUEUE_BUMP = 10000,
82484+ GR_RLIM_NICE_BUMP = 1,
82485+ GR_RLIM_RTPRIO_BUMP = 1,
82486+ GR_RLIM_RTTIME_BUMP = 1000000
82487+};
82488+
82489+#endif
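
Note the arrangement of the object-mode values above: each GR_AUDIT_* bit is exactly the matching base bit shifted left by ten (GR_READ 0x1 becomes GR_AUDIT_READ 0x400, through GR_LINK 0x200 becoming GR_AUDIT_LINK 0x80000). That invariant is what makes a to_gr_audit()-style conversion a shift-and-mask instead of a lookup table. The snippet below checks the relationship; the to_audit() body is a plausible reduction inferred from the values, not the in-kernel implementation:

/* Pure arithmetic on the published constants. */
#include <stdio.h>

enum {
        GR_READ       = 0x00000001,
        GR_LINK       = 0x00000200,
        GR_AUDIT_READ = 0x00000400,
        GR_AUDIT_LINK = 0x00080000,
};

static unsigned int to_audit(unsigned int reqmode)
{
        return (reqmode << 10) & (GR_AUDIT_READ | GR_AUDIT_LINK);
}

int main(void)
{
        printf("%d %d\n",
               to_audit(GR_READ) == GR_AUDIT_READ,      /* prints 1 */
               to_audit(GR_LINK) == GR_AUDIT_LINK);     /* prints 1 */
        return 0;
}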
82490diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82491new file mode 100644
82492index 0000000..fb1de5d
82493--- /dev/null
82494+++ b/include/linux/grinternal.h
82495@@ -0,0 +1,230 @@
82496+#ifndef __GRINTERNAL_H
82497+#define __GRINTERNAL_H
82498+
82499+#ifdef CONFIG_GRKERNSEC
82500+
82501+#include <linux/fs.h>
82502+#include <linux/mnt_namespace.h>
82503+#include <linux/nsproxy.h>
82504+#include <linux/gracl.h>
82505+#include <linux/grdefs.h>
82506+#include <linux/grmsg.h>
82507+
82508+void gr_add_learn_entry(const char *fmt, ...)
82509+ __attribute__ ((format (printf, 1, 2)));
82510+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82511+ const struct vfsmount *mnt);
82512+__u32 gr_check_create(const struct dentry *new_dentry,
82513+ const struct dentry *parent,
82514+ const struct vfsmount *mnt, const __u32 mode);
82515+int gr_check_protected_task(const struct task_struct *task);
82516+__u32 to_gr_audit(const __u32 reqmode);
82517+int gr_set_acls(const int type);
82518+int gr_acl_is_enabled(void);
82519+char gr_roletype_to_char(void);
82520+
82521+void gr_handle_alertkill(struct task_struct *task);
82522+char *gr_to_filename(const struct dentry *dentry,
82523+ const struct vfsmount *mnt);
82524+char *gr_to_filename1(const struct dentry *dentry,
82525+ const struct vfsmount *mnt);
82526+char *gr_to_filename2(const struct dentry *dentry,
82527+ const struct vfsmount *mnt);
82528+char *gr_to_filename3(const struct dentry *dentry,
82529+ const struct vfsmount *mnt);
82530+
82531+extern int grsec_enable_ptrace_readexec;
82532+extern int grsec_enable_harden_ptrace;
82533+extern int grsec_enable_link;
82534+extern int grsec_enable_fifo;
82535+extern int grsec_enable_execve;
82536+extern int grsec_enable_shm;
82537+extern int grsec_enable_execlog;
82538+extern int grsec_enable_signal;
82539+extern int grsec_enable_audit_ptrace;
82540+extern int grsec_enable_forkfail;
82541+extern int grsec_enable_time;
82542+extern int grsec_enable_rofs;
82543+extern int grsec_deny_new_usb;
82544+extern int grsec_enable_chroot_shmat;
82545+extern int grsec_enable_chroot_mount;
82546+extern int grsec_enable_chroot_double;
82547+extern int grsec_enable_chroot_pivot;
82548+extern int grsec_enable_chroot_chdir;
82549+extern int grsec_enable_chroot_chmod;
82550+extern int grsec_enable_chroot_mknod;
82551+extern int grsec_enable_chroot_fchdir;
82552+extern int grsec_enable_chroot_nice;
82553+extern int grsec_enable_chroot_execlog;
82554+extern int grsec_enable_chroot_caps;
82555+extern int grsec_enable_chroot_rename;
82556+extern int grsec_enable_chroot_sysctl;
82557+extern int grsec_enable_chroot_unix;
82558+extern int grsec_enable_symlinkown;
82559+extern kgid_t grsec_symlinkown_gid;
82560+extern int grsec_enable_tpe;
82561+extern kgid_t grsec_tpe_gid;
82562+extern int grsec_enable_tpe_all;
82563+extern int grsec_enable_tpe_invert;
82564+extern int grsec_enable_socket_all;
82565+extern kgid_t grsec_socket_all_gid;
82566+extern int grsec_enable_socket_client;
82567+extern kgid_t grsec_socket_client_gid;
82568+extern int grsec_enable_socket_server;
82569+extern kgid_t grsec_socket_server_gid;
82570+extern kgid_t grsec_audit_gid;
82571+extern int grsec_enable_group;
82572+extern int grsec_enable_log_rwxmaps;
82573+extern int grsec_enable_mount;
82574+extern int grsec_enable_chdir;
82575+extern int grsec_resource_logging;
82576+extern int grsec_enable_blackhole;
82577+extern int grsec_lastack_retries;
82578+extern int grsec_enable_brute;
82579+extern int grsec_enable_harden_ipc;
82580+extern int grsec_lock;
82581+
82582+extern spinlock_t grsec_alert_lock;
82583+extern unsigned long grsec_alert_wtime;
82584+extern unsigned long grsec_alert_fyet;
82585+
82586+extern spinlock_t grsec_audit_lock;
82587+
82588+extern rwlock_t grsec_exec_file_lock;
82589+
82590+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82591+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82592+ (tsk)->exec_file->f_path.mnt) : "/")
82593+
82594+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82595+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82596+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82597+
82598+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82599+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82600+ (tsk)->exec_file->f_path.mnt) : "/")
82601+
82602+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82603+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82604+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82605+
82606+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82607+
82608+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82609+
82610+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82611+{
82612+ if (file1 && file2) {
82613+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82614+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82615+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82616+ return true;
82617+ }
82618+
82619+ return false;
82620+}
82621+
82622+#define GR_CHROOT_CAPS {{ \
82623+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82624+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82625+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82626+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82627+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82628+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82629+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82630+
82631+#define security_learn(normal_msg,args...) \
82632+({ \
82633+ read_lock(&grsec_exec_file_lock); \
82634+ gr_add_learn_entry(normal_msg "\n", ## args); \
82635+ read_unlock(&grsec_exec_file_lock); \
82636+})
82637+
82638+enum {
82639+ GR_DO_AUDIT,
82640+ GR_DONT_AUDIT,
82641+ /* used for non-audit messages that we shouldn't kill the task on */
82642+ GR_DONT_AUDIT_GOOD
82643+};
82644+
82645+enum {
82646+ GR_TTYSNIFF,
82647+ GR_RBAC,
82648+ GR_RBAC_STR,
82649+ GR_STR_RBAC,
82650+ GR_RBAC_MODE2,
82651+ GR_RBAC_MODE3,
82652+ GR_FILENAME,
82653+ GR_SYSCTL_HIDDEN,
82654+ GR_NOARGS,
82655+ GR_ONE_INT,
82656+ GR_ONE_INT_TWO_STR,
82657+ GR_ONE_STR,
82658+ GR_STR_INT,
82659+ GR_TWO_STR_INT,
82660+ GR_TWO_INT,
82661+ GR_TWO_U64,
82662+ GR_THREE_INT,
82663+ GR_FIVE_INT_TWO_STR,
82664+ GR_TWO_STR,
82665+ GR_THREE_STR,
82666+ GR_FOUR_STR,
82667+ GR_STR_FILENAME,
82668+ GR_FILENAME_STR,
82669+ GR_FILENAME_TWO_INT,
82670+ GR_FILENAME_TWO_INT_STR,
82671+ GR_TEXTREL,
82672+ GR_PTRACE,
82673+ GR_RESOURCE,
82674+ GR_CAP,
82675+ GR_SIG,
82676+ GR_SIG2,
82677+ GR_CRASH1,
82678+ GR_CRASH2,
82679+ GR_PSACCT,
82680+ GR_RWXMAP,
82681+ GR_RWXMAPVMA
82682+};
82683+
82684+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82685+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82686+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82687+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82688+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82689+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82690+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82691+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82692+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82693+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82694+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82695+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82696+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82697+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82698+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82699+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82700+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82701+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82702+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82703+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82704+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82705+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82706+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82707+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82708+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82709+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82710+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82711+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82712+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82713+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82714+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82715+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82716+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82717+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82718+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82719+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82720+
82721+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82722+
82723+#endif
82724+
82725+#endif
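
All of the gr_log_* wrappers above funnel into gr_log_varargs(), with the argtypes enum telling the single variadic consumer how to pull its arguments; each macro pins its call sites to exactly one shape. A hedged sketch of that dispatch pattern — the enum names match the header, but the body is illustrative, not the implementation found elsewhere in the patch:

#include <stdarg.h>
#include <stdio.h>

enum { GR_NOARGS, GR_ONE_INT, GR_ONE_STR, GR_STR_INT };

static void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
{
        va_list ap;

        va_start(ap, argtypes);
        fprintf(stderr, "grsec: %s: ", audit ? "alert" : "audit");
        switch (argtypes) {
        case GR_NOARGS:
                fputs(msg, stderr);
                break;
        case GR_ONE_INT:
                fprintf(stderr, msg, va_arg(ap, int));
                break;
        case GR_ONE_STR:
                fprintf(stderr, msg, va_arg(ap, const char *));
                break;
        case GR_STR_INT: {
                const char *s = va_arg(ap, const char *);   /* fetch in order */
                fprintf(stderr, msg, s, va_arg(ap, int));
                break;
        }
        }
        fputc('\n', stderr);
        va_end(ap);
}

#define gr_log_str_int(audit, msg, str, num) \
        gr_log_varargs(audit, msg, GR_STR_INT, str, num)

int main(void)
{
        gr_log_str_int(1, "denied %s for pid %d", "ptrace", 1234);
        return 0;
}

Routing every call site through a typed macro keeps the format string and the argument list from drifting apart, which is the usual failure mode of hand-rolled variadic loggers.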
82726diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82727new file mode 100644
82728index 0000000..26ef560
82729--- /dev/null
82730+++ b/include/linux/grmsg.h
82731@@ -0,0 +1,118 @@
82732+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82733+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82734+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82735+#define GR_STOPMOD_MSG "denied modification of module state by "
82736+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82737+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82738+#define GR_IOPERM_MSG "denied use of ioperm() by "
82739+#define GR_IOPL_MSG "denied use of iopl() by "
82740+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82741+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82742+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82743+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82744+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82745+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82746+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82747+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82748+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82749+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82750+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82751+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82752+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82753+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82754+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82755+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82756+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82757+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82758+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82759+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82760+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82761+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82762+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82763+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82764+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82765+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82766+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82767+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82768+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82769+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82770+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82771+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82772+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82773+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82774+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82775+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82776+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82777+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82778+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82779+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82780+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82781+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82782+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82783+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82784+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82785+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82786+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82787+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82788+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82789+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82790+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82791+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82792+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82793+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82794+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82795+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82796+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82797+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82798+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82799+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82800+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82801+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82802+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82803+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82804+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82805+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82806+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82807+#define GR_NICE_CHROOT_MSG "denied priority change by "
82808+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82809+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82810+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82811+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82812+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82813+#define GR_TIME_MSG "time set by "
82814+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82815+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82816+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82817+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82818+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82819+#define GR_BIND_MSG "denied bind() by "
82820+#define GR_CONNECT_MSG "denied connect() by "
82821+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82822+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82823+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82824+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82825+#define GR_CAP_ACL_MSG "use of %s denied for "
82826+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82827+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82828+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82829+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82830+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82831+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82832+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82833+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82834+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82835+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82836+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82837+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82838+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82839+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82840+#define GR_VM86_MSG "denied use of vm86 by "
82841+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82842+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82843+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82844+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82845+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82846+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82847+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82848+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82849+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
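
Two idioms run through every message above: each %s carries an explicit precision (%.950s, %.256s, ...) so no single conversion can blow past the logging buffer no matter how long an attacker-chosen path is, and messages ending in "by " are completed at log time with the acting task's identity, spliced in via DEFAULTSECMSG-style fragments using literal concatenation. Both are plain C printf behavior, demonstrable in isolation (the fragment below is shortened for the demo):

#include <stdio.h>
#include <string.h>

#define DEFAULTSECMSG "%.16s[%d]"
#define SIG_MSG "signal %d sent to " DEFAULTSECMSG " by "

int main(void)
{
        char longname[2000];

        memset(longname, 'A', sizeof(longname) - 1);
        longname[sizeof(longname) - 1] = '\0';

        /* %.16s stops after 16 bytes even though longname is ~2000 bytes */
        printf("truncated: %.16s\n", longname);

        /* the concatenated format carries three conversions: %d, %.16s, %d */
        printf(SIG_MSG, 9, longname, 4321);
        printf("\n");
        return 0;
}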
82850diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82851new file mode 100644
82852index 0000000..63c1850
82853--- /dev/null
82854+++ b/include/linux/grsecurity.h
82855@@ -0,0 +1,250 @@
82856+#ifndef GR_SECURITY_H
82857+#define GR_SECURITY_H
82858+#include <linux/fs.h>
82859+#include <linux/fs_struct.h>
82860+#include <linux/binfmts.h>
82861+#include <linux/gracl.h>
82862+
82863+/* notify of brain-dead configs */
82864+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82865+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82866+#endif
82867+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82868+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82869+#endif
82870+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82871+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82872+#endif
82873+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82874+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82875+#endif
82876+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82877+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82878+#endif
82879+
82880+int gr_handle_new_usb(void);
82881+
82882+void gr_handle_brute_attach(int dumpable);
82883+void gr_handle_brute_check(void);
82884+void gr_handle_kernel_exploit(void);
82885+
82886+char gr_roletype_to_char(void);
82887+
82888+int gr_proc_is_restricted(void);
82889+
82890+int gr_acl_enable_at_secure(void);
82891+
82892+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82893+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82894+
82895+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82896+
82897+void gr_del_task_from_ip_table(struct task_struct *p);
82898+
82899+int gr_pid_is_chrooted(struct task_struct *p);
82900+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82901+int gr_handle_chroot_nice(void);
82902+int gr_handle_chroot_sysctl(const int op);
82903+int gr_handle_chroot_setpriority(struct task_struct *p,
82904+ const int niceval);
82905+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82906+int gr_chroot_fhandle(void);
82907+int gr_handle_chroot_chroot(const struct dentry *dentry,
82908+ const struct vfsmount *mnt);
82909+void gr_handle_chroot_chdir(const struct path *path);
82910+int gr_handle_chroot_chmod(const struct dentry *dentry,
82911+ const struct vfsmount *mnt, const int mode);
82912+int gr_handle_chroot_mknod(const struct dentry *dentry,
82913+ const struct vfsmount *mnt, const int mode);
82914+int gr_handle_chroot_mount(const struct dentry *dentry,
82915+ const struct vfsmount *mnt,
82916+ const char *dev_name);
82917+int gr_handle_chroot_pivot(void);
82918+int gr_handle_chroot_unix(const pid_t pid);
82919+
82920+int gr_handle_rawio(const struct inode *inode);
82921+
82922+void gr_handle_ioperm(void);
82923+void gr_handle_iopl(void);
82924+void gr_handle_msr_write(void);
82925+
82926+umode_t gr_acl_umask(void);
82927+
82928+int gr_tpe_allow(const struct file *file);
82929+
82930+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82931+void gr_clear_chroot_entries(struct task_struct *task);
82932+
82933+void gr_log_forkfail(const int retval);
82934+void gr_log_timechange(void);
82935+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82936+void gr_log_chdir(const struct dentry *dentry,
82937+ const struct vfsmount *mnt);
82938+void gr_log_chroot_exec(const struct dentry *dentry,
82939+ const struct vfsmount *mnt);
82940+void gr_log_remount(const char *devname, const int retval);
82941+void gr_log_unmount(const char *devname, const int retval);
82942+void gr_log_mount(const char *from, struct path *to, const int retval);
82943+void gr_log_textrel(struct vm_area_struct *vma);
82944+void gr_log_ptgnustack(struct file *file);
82945+void gr_log_rwxmmap(struct file *file);
82946+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82947+
82948+int gr_handle_follow_link(const struct inode *parent,
82949+ const struct inode *inode,
82950+ const struct dentry *dentry,
82951+ const struct vfsmount *mnt);
82952+int gr_handle_fifo(const struct dentry *dentry,
82953+ const struct vfsmount *mnt,
82954+ const struct dentry *dir, const int flag,
82955+ const int acc_mode);
82956+int gr_handle_hardlink(const struct dentry *dentry,
82957+ const struct vfsmount *mnt,
82958+ struct inode *inode,
82959+ const int mode, const struct filename *to);
82960+
82961+int gr_is_capable(const int cap);
82962+int gr_is_capable_nolog(const int cap);
82963+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82964+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82965+
82966+void gr_copy_label(struct task_struct *tsk);
82967+void gr_handle_crash(struct task_struct *task, const int sig);
82968+int gr_handle_signal(const struct task_struct *p, const int sig);
82969+int gr_check_crash_uid(const kuid_t uid);
82970+int gr_check_protected_task(const struct task_struct *task);
82971+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82972+int gr_acl_handle_mmap(const struct file *file,
82973+ const unsigned long prot);
82974+int gr_acl_handle_mprotect(const struct file *file,
82975+ const unsigned long prot);
82976+int gr_check_hidden_task(const struct task_struct *tsk);
82977+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82978+ const struct vfsmount *mnt);
82979+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82980+ const struct vfsmount *mnt);
82981+__u32 gr_acl_handle_access(const struct dentry *dentry,
82982+ const struct vfsmount *mnt, const int fmode);
82983+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82984+ const struct vfsmount *mnt, umode_t *mode);
82985+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82986+ const struct vfsmount *mnt);
82987+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82988+ const struct vfsmount *mnt);
82989+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82990+ const struct vfsmount *mnt);
82991+int gr_handle_ptrace(struct task_struct *task, const long request);
82992+int gr_handle_proc_ptrace(struct task_struct *task);
82993+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82994+ const struct vfsmount *mnt);
82995+int gr_check_crash_exec(const struct file *filp);
82996+int gr_acl_is_enabled(void);
82997+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82998+ const kgid_t gid);
82999+int gr_set_proc_label(const struct dentry *dentry,
83000+ const struct vfsmount *mnt,
83001+ const int unsafe_flags);
83002+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
83003+ const struct vfsmount *mnt);
83004+__u32 gr_acl_handle_open(const struct dentry *dentry,
83005+ const struct vfsmount *mnt, int acc_mode);
83006+__u32 gr_acl_handle_creat(const struct dentry *dentry,
83007+ const struct dentry *p_dentry,
83008+ const struct vfsmount *p_mnt,
83009+ int open_flags, int acc_mode, const int imode);
83010+void gr_handle_create(const struct dentry *dentry,
83011+ const struct vfsmount *mnt);
83012+void gr_handle_proc_create(const struct dentry *dentry,
83013+ const struct inode *inode);
83014+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
83015+ const struct dentry *parent_dentry,
83016+ const struct vfsmount *parent_mnt,
83017+ const int mode);
83018+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
83019+ const struct dentry *parent_dentry,
83020+ const struct vfsmount *parent_mnt);
83021+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
83022+ const struct vfsmount *mnt);
83023+void gr_handle_delete(const u64 ino, const dev_t dev);
83024+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
83025+ const struct vfsmount *mnt);
83026+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
83027+ const struct dentry *parent_dentry,
83028+ const struct vfsmount *parent_mnt,
83029+ const struct filename *from);
83030+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
83031+ const struct dentry *parent_dentry,
83032+ const struct vfsmount *parent_mnt,
83033+ const struct dentry *old_dentry,
83034+ const struct vfsmount *old_mnt, const struct filename *to);
83035+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
83036+int gr_acl_handle_rename(struct dentry *new_dentry,
83037+ struct dentry *parent_dentry,
83038+ const struct vfsmount *parent_mnt,
83039+ struct dentry *old_dentry,
83040+ struct inode *old_parent_inode,
83041+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
83042+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
83043+ struct dentry *old_dentry,
83044+ struct dentry *new_dentry,
83045+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
83046+__u32 gr_check_link(const struct dentry *new_dentry,
83047+ const struct dentry *parent_dentry,
83048+ const struct vfsmount *parent_mnt,
83049+ const struct dentry *old_dentry,
83050+ const struct vfsmount *old_mnt);
83051+int gr_acl_handle_filldir(const struct file *file, const char *name,
83052+ const unsigned int namelen, const u64 ino);
83053+
83054+__u32 gr_acl_handle_unix(const struct dentry *dentry,
83055+ const struct vfsmount *mnt);
83056+void gr_acl_handle_exit(void);
83057+void gr_acl_handle_psacct(struct task_struct *task, const long code);
83058+int gr_acl_handle_procpidmem(const struct task_struct *task);
83059+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
83060+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
83061+void gr_audit_ptrace(struct task_struct *task);
83062+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
83063+u64 gr_get_ino_from_dentry(struct dentry *dentry);
83064+void gr_put_exec_file(struct task_struct *task);
83065+
83066+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
83067+
83068+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
83069+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
83070+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
83071+ struct dentry *newdentry, struct vfsmount *newmnt);
83072+
83073+#ifdef CONFIG_GRKERNSEC_RESLOG
83074+extern void gr_log_resource(const struct task_struct *task, const int res,
83075+ const unsigned long wanted, const int gt);
83076+#else
83077+static inline void gr_log_resource(const struct task_struct *task, const int res,
83078+ const unsigned long wanted, const int gt)
83079+{
83080+}
83081+#endif
83082+
83083+#ifdef CONFIG_GRKERNSEC
83084+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
83085+void gr_handle_vm86(void);
83086+void gr_handle_mem_readwrite(u64 from, u64 to);
83087+
83088+void gr_log_badprocpid(const char *entry);
83089+
83090+extern int grsec_enable_dmesg;
83091+extern int grsec_disable_privio;
83092+
83093+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83094+extern kgid_t grsec_proc_gid;
83095+#endif
83096+
83097+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83098+extern int grsec_enable_chroot_findtask;
83099+#endif
83100+#ifdef CONFIG_GRKERNSEC_SETXID
83101+extern int grsec_enable_setxid;
83102+#endif
83103+#endif
83104+
83105+#endif
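
Beyond the prototype list, two patterns here are worth calling out: the #error blocks turn contradictory Kconfig selections into build-time failures rather than silent misconfiguration, and gr_log_resource() uses the standard conditional-stub idiom — a real extern under CONFIG_GRKERNSEC_RESLOG, an empty static inline otherwise — so call sites never grow #ifdefs. The idiom, reduced to a compilable toy (generic names, not from the patch):

/* Compile with -DFEATURE_LOG for the real function; without it, the
 * call compiles away to nothing and callers stay #ifdef-free. */
#include <stdio.h>

#ifdef FEATURE_LOG
void feature_log(const char *what, unsigned long wanted)
{
        fprintf(stderr, "overstep: %s wanted %lu\n", what, wanted);
}
#else
static inline void feature_log(const char *what, unsigned long wanted)
{
        (void)what;
        (void)wanted;   /* no-op; the optimizer removes the call entirely */
}
#endif

int main(void)
{
        feature_log("RLIMIT_NOFILE", 4096);
        return 0;
}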
83106diff --git a/include/linux/grsock.h b/include/linux/grsock.h
83107new file mode 100644
83108index 0000000..e7ffaaf
83109--- /dev/null
83110+++ b/include/linux/grsock.h
83111@@ -0,0 +1,19 @@
83112+#ifndef __GRSOCK_H
83113+#define __GRSOCK_H
83114+
83115+extern void gr_attach_curr_ip(const struct sock *sk);
83116+extern int gr_handle_sock_all(const int family, const int type,
83117+ const int protocol);
83118+extern int gr_handle_sock_server(const struct sockaddr *sck);
83119+extern int gr_handle_sock_server_other(const struct sock *sck);
83120+extern int gr_handle_sock_client(const struct sockaddr *sck);
83121+extern int gr_search_connect(struct socket * sock,
83122+ struct sockaddr_in * addr);
83123+extern int gr_search_bind(struct socket * sock,
83124+ struct sockaddr_in * addr);
83125+extern int gr_search_listen(struct socket * sock);
83126+extern int gr_search_accept(struct socket * sock);
83127+extern int gr_search_socket(const int domain, const int type,
83128+ const int protocol);
83129+
83130+#endif
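
These hooks bracket socket setup: gr_search_socket()/gr_handle_sock_all() gate creation on the (domain, type, protocol) triple, while the bind/connect/listen/accept hooks vet addresses and ports against per-role ACLs. A toy gate in the same shape — both the return convention (nonzero = permitted) and the policy table are assumptions made for illustration; the real check walks the current subject's IP labels:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct sock_rule { int family, type, protocol; };

static const struct sock_rule allowed[] = {
        { AF_INET, SOCK_STREAM, IPPROTO_TCP },
        { AF_INET, SOCK_DGRAM,  IPPROTO_UDP },
        { AF_UNIX, SOCK_STREAM, 0 },
};

static int gr_search_socket(int domain, int type, int protocol)
{
        size_t i;

        for (i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++)
                if (allowed[i].family == domain &&
                    allowed[i].type == type &&
                    allowed[i].protocol == protocol)
                        return 1;       /* permitted */
        return 0;                       /* denied; a logger would fire here */
}

int main(void)
{
        printf("tcp: %d raw: %d\n",
               gr_search_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP),
               gr_search_socket(AF_INET, SOCK_RAW, IPPROTO_ICMP));
        return 0;
}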
83131diff --git a/include/linux/highmem.h b/include/linux/highmem.h
83132index 9286a46..373f27f 100644
83133--- a/include/linux/highmem.h
83134+++ b/include/linux/highmem.h
83135@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
83136 kunmap_atomic(kaddr);
83137 }
83138
83139+static inline void sanitize_highpage(struct page *page)
83140+{
83141+ void *kaddr;
83142+ unsigned long flags;
83143+
83144+ local_irq_save(flags);
83145+ kaddr = kmap_atomic(page);
83146+ clear_page(kaddr);
83147+ kunmap_atomic(kaddr);
83148+ local_irq_restore(flags);
83149+}
83150+
83151 static inline void zero_user_segments(struct page *page,
83152 unsigned start1, unsigned end1,
83153 unsigned start2, unsigned end2)
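
sanitize_highpage() is the free-path counterpart to clear_highpage(): it scrubs a page as it is released (under PaX's memory sanitization) so stale contents cannot leak to the page's next user, and the local_irq_save()/restore() pair keeps the kmap_atomic() window safe from the contexts pages are freed in. The same discipline expressed in userspace terms — scrub before release — looks like the analogue below; it is an illustration of the idea, not kernel code:

#include <stdlib.h>
#include <string.h>

static void sanitize_free(void *p, size_t len)
{
        if (!p)
                return;
        /* C23's memset_explicit (or explicit_bzero) would resist
         * dead-store elimination; plain memset shown for portability. */
        memset(p, 0, len);
        free(p);
}

int main(void)
{
        char *secret = malloc(64);

        if (!secret)
                return 1;
        strcpy(secret, "hunter2");
        sanitize_free(secret, 64);      /* zeroed before the allocator reuses it */
        return 0;
}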
83154diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
83155index 1c7b89a..7dda400 100644
83156--- a/include/linux/hwmon-sysfs.h
83157+++ b/include/linux/hwmon-sysfs.h
83158@@ -25,7 +25,8 @@
83159 struct sensor_device_attribute{
83160 struct device_attribute dev_attr;
83161 int index;
83162-};
83163+} __do_const;
83164+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
83165 #define to_sensor_dev_attr(_dev_attr) \
83166 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
83167
83168@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
83169 struct device_attribute dev_attr;
83170 u8 index;
83171 u8 nr;
83172-};
83173+} __do_const;
83174+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
83175 #define to_sensor_dev_attr_2(_dev_attr) \
83176 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
83177
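
This hunk is the first of many below (pppox_proto, iommu_ops, irq_chip, key_type, kgdb_arch, kgdb_io, kobj_type, ata_port_operations, and others) applying the constify pattern: structures that are effectively function-pointer tables get __do_const so the PaX constify plugin can move every instance into read-only memory, with a __no_const typedef as the escape hatch for the rare instance that must be written at runtime. Stripped of the plugin machinery, the core idea is just const placement:

/* An ops table declared const lands in .rodata, so an attacker with an
 * arbitrary write cannot redirect its function pointers. */
#include <stdio.h>

struct sensor_ops {
        int (*read)(int channel);
        void (*reset)(void);
};

static int read_chan(int channel) { return 42 + channel; }
static void reset_dev(void) { puts("reset"); }

static const struct sensor_ops ops = {
        .read  = read_chan,
        .reset = reset_dev,
};

int main(void)
{
        printf("%d\n", ops.read(3));
        ops.reset();
        return 0;
}

The plugin's value is doing this exhaustively: it constifies every instance of a tagged type across the tree, instead of relying on each declaration site to remember the qualifier.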
83178diff --git a/include/linux/i2c.h b/include/linux/i2c.h
83179index f17da50..2f8b203 100644
83180--- a/include/linux/i2c.h
83181+++ b/include/linux/i2c.h
83182@@ -409,6 +409,7 @@ struct i2c_algorithm {
83183 int (*unreg_slave)(struct i2c_client *client);
83184 #endif
83185 };
83186+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
83187
83188 /**
83189 * struct i2c_bus_recovery_info - I2C bus recovery information
83190diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
83191index aff7ad8..3942bbd 100644
83192--- a/include/linux/if_pppox.h
83193+++ b/include/linux/if_pppox.h
83194@@ -76,7 +76,7 @@ struct pppox_proto {
83195 int (*ioctl)(struct socket *sock, unsigned int cmd,
83196 unsigned long arg);
83197 struct module *owner;
83198-};
83199+} __do_const;
83200
83201 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
83202 extern void unregister_pppox_proto(int proto_num);
83203diff --git a/include/linux/init.h b/include/linux/init.h
83204index 2df8e8d..3e1280d 100644
83205--- a/include/linux/init.h
83206+++ b/include/linux/init.h
83207@@ -37,9 +37,17 @@
83208 * section.
83209 */
83210
83211+#define add_init_latent_entropy __latent_entropy
83212+
83213+#ifdef CONFIG_MEMORY_HOTPLUG
83214+#define add_meminit_latent_entropy
83215+#else
83216+#define add_meminit_latent_entropy __latent_entropy
83217+#endif
83218+
83219 /* These are for everybody (although not all archs will actually
83220 discard it in modules) */
83221-#define __init __section(.init.text) __cold notrace
83222+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
83223 #define __initdata __section(.init.data)
83224 #define __initconst __constsection(.init.rodata)
83225 #define __exitdata __section(.exit.data)
83226@@ -100,7 +108,7 @@
83227 #define __cpuexitconst
83228
83229 /* Used for MEMORY_HOTPLUG */
83230-#define __meminit __section(.meminit.text) __cold notrace
83231+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
83232 #define __meminitdata __section(.meminit.data)
83233 #define __meminitconst __constsection(.meminit.rodata)
83234 #define __memexit __section(.memexit.text) __exitused __cold notrace
83235diff --git a/include/linux/init_task.h b/include/linux/init_task.h
83236index 696d223..6d6b39f 100644
83237--- a/include/linux/init_task.h
83238+++ b/include/linux/init_task.h
83239@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
83240
83241 #define INIT_TASK_COMM "swapper"
83242
83243+#ifdef CONFIG_X86
83244+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
83245+#else
83246+#define INIT_TASK_THREAD_INFO
83247+#endif
83248+
83249 #ifdef CONFIG_RT_MUTEXES
83250 # define INIT_RT_MUTEXES(tsk) \
83251 .pi_waiters = RB_ROOT, \
83252@@ -224,6 +230,7 @@ extern struct task_group root_task_group;
83253 RCU_POINTER_INITIALIZER(cred, &init_cred), \
83254 .comm = INIT_TASK_COMM, \
83255 .thread = INIT_THREAD, \
83256+ INIT_TASK_THREAD_INFO \
83257 .fs = &init_fs, \
83258 .files = &init_files, \
83259 .signal = &init_signals, \
83260diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
83261index 2e88580..f6a99a0 100644
83262--- a/include/linux/interrupt.h
83263+++ b/include/linux/interrupt.h
83264@@ -420,8 +420,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
83265
83266 struct softirq_action
83267 {
83268- void (*action)(struct softirq_action *);
83269-};
83270+ void (*action)(void);
83271+} __no_const;
83272
83273 asmlinkage void do_softirq(void);
83274 asmlinkage void __do_softirq(void);
83275@@ -435,7 +435,7 @@ static inline void do_softirq_own_stack(void)
83276 }
83277 #endif
83278
83279-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
83280+extern void open_softirq(int nr, void (*action)(void));
83281 extern void softirq_init(void);
83282 extern void __raise_softirq_irqoff(unsigned int nr);
83283
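
The softirq handlers never used their struct softirq_action * argument — each handler already knows which softirq it serves — so the signature is tightened to void (*)(void); __no_const is required because open_softirq() populates the table at runtime, which would otherwise clash with constification. The registration/dispatch shape, reduced to a standalone sketch (vector size and index are illustrative):

#include <stdio.h>

#define NR_SOFTIRQS 10

struct softirq_action {
        void (*action)(void);
};

static struct softirq_action softirq_vec[NR_SOFTIRQS];

static void open_softirq(int nr, void (*action)(void))
{
        softirq_vec[nr].action = action;
}

static void tasklet_action(void) { puts("tasklet softirq"); }

int main(void)
{
        open_softirq(6, tasklet_action);
        if (softirq_vec[6].action)
                softirq_vec[6].action();   /* dispatch, as __do_softirq would */
        return 0;
}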
83284diff --git a/include/linux/iommu.h b/include/linux/iommu.h
83285index 38daa45..4de4317 100644
83286--- a/include/linux/iommu.h
83287+++ b/include/linux/iommu.h
83288@@ -147,7 +147,7 @@ struct iommu_ops {
83289
83290 unsigned long pgsize_bitmap;
83291 void *priv;
83292-};
83293+} __do_const;
83294
83295 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
83296 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
83297diff --git a/include/linux/ioport.h b/include/linux/ioport.h
83298index 2c525022..345b106 100644
83299--- a/include/linux/ioport.h
83300+++ b/include/linux/ioport.h
83301@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
83302 int adjust_resource(struct resource *res, resource_size_t start,
83303 resource_size_t size);
83304 resource_size_t resource_alignment(struct resource *res);
83305-static inline resource_size_t resource_size(const struct resource *res)
83306+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
83307 {
83308 return res->end - res->start + 1;
83309 }
83310diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
83311index 1eee6bc..9cf4912 100644
83312--- a/include/linux/ipc_namespace.h
83313+++ b/include/linux/ipc_namespace.h
83314@@ -60,7 +60,7 @@ struct ipc_namespace {
83315 struct user_namespace *user_ns;
83316
83317 struct ns_common ns;
83318-};
83319+} __randomize_layout;
83320
83321 extern struct ipc_namespace init_ipc_ns;
83322 extern atomic_t nr_ipc_ns;
83323diff --git a/include/linux/irq.h b/include/linux/irq.h
83324index d09ec7a..f373eb5 100644
83325--- a/include/linux/irq.h
83326+++ b/include/linux/irq.h
83327@@ -364,7 +364,8 @@ struct irq_chip {
83328 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
83329
83330 unsigned long flags;
83331-};
83332+} __do_const;
83333+typedef struct irq_chip __no_const irq_chip_no_const;
83334
83335 /*
83336 * irq_chip specific flags
83337diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
83338index 71d706d..817cdec 100644
83339--- a/include/linux/irqchip/arm-gic.h
83340+++ b/include/linux/irqchip/arm-gic.h
83341@@ -95,7 +95,7 @@
83342
83343 struct device_node;
83344
83345-extern struct irq_chip gic_arch_extn;
83346+extern irq_chip_no_const gic_arch_extn;
83347
83348 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
83349 u32 offset, struct device_node *);
83350diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
83351index dd1109f..4f4fdda 100644
83352--- a/include/linux/irqdesc.h
83353+++ b/include/linux/irqdesc.h
83354@@ -61,7 +61,7 @@ struct irq_desc {
83355 unsigned int irq_count; /* For detecting broken IRQs */
83356 unsigned long last_unhandled; /* Aging timer for unhandled count */
83357 unsigned int irqs_unhandled;
83358- atomic_t threads_handled;
83359+ atomic_unchecked_t threads_handled;
83360 int threads_handled_last;
83361 raw_spinlock_t lock;
83362 struct cpumask *percpu_enabled;
83363diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
83364index c367cbd..c9b79e6 100644
83365--- a/include/linux/jiffies.h
83366+++ b/include/linux/jiffies.h
83367@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
83368 /*
83369 * Convert various time units to each other:
83370 */
83371-extern unsigned int jiffies_to_msecs(const unsigned long j);
83372-extern unsigned int jiffies_to_usecs(const unsigned long j);
83373+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
83374+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
83375
83376-static inline u64 jiffies_to_nsecs(const unsigned long j)
83377+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
83378 {
83379 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
83380 }
83381
83382-extern unsigned long msecs_to_jiffies(const unsigned int m);
83383-extern unsigned long usecs_to_jiffies(const unsigned int u);
83384+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
83385+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
83386 extern unsigned long timespec_to_jiffies(const struct timespec *value);
83387 extern void jiffies_to_timespec(const unsigned long jiffies,
83388- struct timespec *value);
83389-extern unsigned long timeval_to_jiffies(const struct timeval *value);
83390+ struct timespec *value) __intentional_overflow(-1);
83391+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
83392 extern void jiffies_to_timeval(const unsigned long jiffies,
83393 struct timeval *value);
83394
83395diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
83396index 6883e19..e854fcb 100644
83397--- a/include/linux/kallsyms.h
83398+++ b/include/linux/kallsyms.h
83399@@ -15,7 +15,8 @@
83400
83401 struct module;
83402
83403-#ifdef CONFIG_KALLSYMS
83404+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
83405+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
83406 /* Lookup the address for a symbol. Returns 0 if not found. */
83407 unsigned long kallsyms_lookup_name(const char *name);
83408
83409@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
83410 /* Stupid that this does nothing, but I didn't create this mess. */
83411 #define __print_symbol(fmt, addr)
83412 #endif /*CONFIG_KALLSYMS*/
83413+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
83414+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
83415+extern unsigned long kallsyms_lookup_name(const char *name);
83416+extern void __print_symbol(const char *fmt, unsigned long address);
83417+extern int sprint_backtrace(char *buffer, unsigned long address);
83418+extern int sprint_symbol(char *buffer, unsigned long address);
83419+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
83420+const char *kallsyms_lookup(unsigned long addr,
83421+ unsigned long *symbolsize,
83422+ unsigned long *offset,
83423+ char **modname, char *namebuf);
83424+extern int kallsyms_lookup_size_offset(unsigned long addr,
83425+ unsigned long *symbolsize,
83426+ unsigned long *offset);
83427+#endif
83428
83429 /* This macro allows us to keep printk typechecking */
83430 static __printf(1, 2)
83431diff --git a/include/linux/kernel.h b/include/linux/kernel.h
83432index d6d630d..feea1f5 100644
83433--- a/include/linux/kernel.h
83434+++ b/include/linux/kernel.h
83435@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
83436 /* Obsolete, do not use. Use kstrto<foo> instead */
83437
83438 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
83439-extern long simple_strtol(const char *,char **,unsigned int);
83440+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
83441 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
83442 extern long long simple_strtoll(const char *,char **,unsigned int);
83443
83444diff --git a/include/linux/key-type.h b/include/linux/key-type.h
83445index ff9f1d3..6712be5 100644
83446--- a/include/linux/key-type.h
83447+++ b/include/linux/key-type.h
83448@@ -152,7 +152,7 @@ struct key_type {
83449 /* internal fields */
83450 struct list_head link; /* link in types list */
83451 struct lock_class_key lock_class; /* key->sem lock class */
83452-};
83453+} __do_const;
83454
83455 extern struct key_type key_type_keyring;
83456
83457diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
83458index e465bb1..19f605fd 100644
83459--- a/include/linux/kgdb.h
83460+++ b/include/linux/kgdb.h
83461@@ -52,7 +52,7 @@ extern int kgdb_connected;
83462 extern int kgdb_io_module_registered;
83463
83464 extern atomic_t kgdb_setting_breakpoint;
83465-extern atomic_t kgdb_cpu_doing_single_step;
83466+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
83467
83468 extern struct task_struct *kgdb_usethread;
83469 extern struct task_struct *kgdb_contthread;
83470@@ -254,7 +254,7 @@ struct kgdb_arch {
83471 void (*correct_hw_break)(void);
83472
83473 void (*enable_nmi)(bool on);
83474-};
83475+} __do_const;
83476
83477 /**
83478 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83479@@ -279,7 +279,7 @@ struct kgdb_io {
83480 void (*pre_exception) (void);
83481 void (*post_exception) (void);
83482 int is_console;
83483-};
83484+} __do_const;
83485
83486 extern struct kgdb_arch arch_kgdb_ops;
83487
83488diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
83489index e705467..a92471d 100644
83490--- a/include/linux/kmemleak.h
83491+++ b/include/linux/kmemleak.h
83492@@ -27,7 +27,7 @@
83493
83494 extern void kmemleak_init(void) __ref;
83495 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83496- gfp_t gfp) __ref;
83497+ gfp_t gfp) __ref __size_overflow(2);
83498 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
83499 extern void kmemleak_free(const void *ptr) __ref;
83500 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
83501@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
83502 static inline void kmemleak_init(void)
83503 {
83504 }
83505-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83506+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
83507 gfp_t gfp)
83508 {
83509 }
83510diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83511index 0555cc6..40116ce 100644
83512--- a/include/linux/kmod.h
83513+++ b/include/linux/kmod.h
83514@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83515 * usually useless though. */
83516 extern __printf(2, 3)
83517 int __request_module(bool wait, const char *name, ...);
83518+extern __printf(3, 4)
83519+int ___request_module(bool wait, char *param_name, const char *name, ...);
83520 #define request_module(mod...) __request_module(true, mod)
83521 #define request_module_nowait(mod...) __request_module(false, mod)
83522 #define try_then_request_module(x, mod...) \
83523@@ -57,6 +59,9 @@ struct subprocess_info {
83524 struct work_struct work;
83525 struct completion *complete;
83526 char *path;
83527+#ifdef CONFIG_GRKERNSEC
83528+ char *origpath;
83529+#endif
83530 char **argv;
83531 char **envp;
83532 int wait;
83533diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83534index 2d61b90..a1d0a13 100644
83535--- a/include/linux/kobject.h
83536+++ b/include/linux/kobject.h
83537@@ -118,7 +118,7 @@ struct kobj_type {
83538 struct attribute **default_attrs;
83539 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83540 const void *(*namespace)(struct kobject *kobj);
83541-};
83542+} __do_const;
83543
83544 struct kobj_uevent_env {
83545 char *argv[3];
83546@@ -142,6 +142,7 @@ struct kobj_attribute {
83547 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83548 const char *buf, size_t count);
83549 };
83550+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83551
83552 extern const struct sysfs_ops kobj_sysfs_ops;
83553
83554@@ -169,7 +170,7 @@ struct kset {
83555 spinlock_t list_lock;
83556 struct kobject kobj;
83557 const struct kset_uevent_ops *uevent_ops;
83558-};
83559+} __randomize_layout;
83560
83561 extern void kset_init(struct kset *kset);
83562 extern int __must_check kset_register(struct kset *kset);
83563diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83564index df32d25..fb52e27 100644
83565--- a/include/linux/kobject_ns.h
83566+++ b/include/linux/kobject_ns.h
83567@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83568 const void *(*netlink_ns)(struct sock *sk);
83569 const void *(*initial_ns)(void);
83570 void (*drop_ns)(void *);
83571-};
83572+} __do_const;
83573
83574 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83575 int kobj_ns_type_registered(enum kobj_ns_type type);
83576diff --git a/include/linux/kref.h b/include/linux/kref.h
83577index 484604d..0f6c5b6 100644
83578--- a/include/linux/kref.h
83579+++ b/include/linux/kref.h
83580@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83581 static inline int kref_sub(struct kref *kref, unsigned int count,
83582 void (*release)(struct kref *kref))
83583 {
83584- WARN_ON(release == NULL);
83585+ BUG_ON(release == NULL);
83586
83587 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83588 release(kref);
83589diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83590index d12b210..d91fd76 100644
83591--- a/include/linux/kvm_host.h
83592+++ b/include/linux/kvm_host.h
83593@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
83594 {
83595 }
83596 #endif
83597-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83598+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83599 struct module *module);
83600 void kvm_exit(void);
83601
83602@@ -633,7 +633,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83603 struct kvm_guest_debug *dbg);
83604 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83605
83606-int kvm_arch_init(void *opaque);
83607+int kvm_arch_init(const void *opaque);
83608 void kvm_arch_exit(void);
83609
83610 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83611diff --git a/include/linux/libata.h b/include/linux/libata.h
83612index 6b08cc1..248c5e9 100644
83613--- a/include/linux/libata.h
83614+++ b/include/linux/libata.h
83615@@ -980,7 +980,7 @@ struct ata_port_operations {
83616 * fields must be pointers.
83617 */
83618 const struct ata_port_operations *inherits;
83619-};
83620+} __do_const;
83621
83622 struct ata_port_info {
83623 unsigned long flags;
83624diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83625index a6a42dd..6c5ebce 100644
83626--- a/include/linux/linkage.h
83627+++ b/include/linux/linkage.h
83628@@ -36,6 +36,7 @@
83629 #endif
83630
83631 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83632+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83633 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83634
83635 /*
83636diff --git a/include/linux/list.h b/include/linux/list.h
83637index feb773c..98f3075 100644
83638--- a/include/linux/list.h
83639+++ b/include/linux/list.h
83640@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
83641 extern void list_del(struct list_head *entry);
83642 #endif
83643
83644+extern void __pax_list_add(struct list_head *new,
83645+ struct list_head *prev,
83646+ struct list_head *next);
83647+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83648+{
83649+ __pax_list_add(new, head, head->next);
83650+}
83651+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83652+{
83653+ __pax_list_add(new, head->prev, head);
83654+}
83655+extern void pax_list_del(struct list_head *entry);
83656+
83657 /**
83658 * list_replace - replace old entry by new one
83659 * @old : the element to be replaced
83660@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83661 INIT_LIST_HEAD(entry);
83662 }
83663
83664+extern void pax_list_del_init(struct list_head *entry);
83665+
83666 /**
83667 * list_move - delete from one list and add as another's head
83668 * @list: the entry to move
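
The list.h hunk adds out-of-line pax_list_* counterparts to the inline list helpers. The sketch below shows the pointer surgery __pax_list_add() performs; that the real versions additionally bracket these stores with PaX's kernel write-open/close primitives (so constified, read-only nodes can be updated) is an assumption based on the wider patch, not on this hunk.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void __pax_list_add_sketch(struct list_head *new,
				  struct list_head *prev,
				  struct list_head *next)
{
	/* pax_open_kernel() would go here in the real implementation */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	/* pax_close_kernel() would go here */
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head a, b;

	__pax_list_add_sketch(&a, &head, head.next);	/* pax_list_add */
	__pax_list_add_sketch(&b, head.prev, &head);	/* pax_list_add_tail */
	printf("head -> %s -> %s\n",
	       head.next == &a ? "a" : "?",
	       head.next->next == &b ? "b" : "?");
	return 0;
}
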
83669diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83670index b10b122..d37b3de 100644
83671--- a/include/linux/lockref.h
83672+++ b/include/linux/lockref.h
83673@@ -28,7 +28,7 @@ struct lockref {
83674 #endif
83675 struct {
83676 spinlock_t lock;
83677- int count;
83678+ atomic_t count;
83679 };
83680 };
83681 };
83682@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
83683 extern int lockref_get_not_dead(struct lockref *);
83684
83685 /* Must be called under spinlock for reliable results */
83686-static inline int __lockref_is_dead(const struct lockref *l)
83687+static inline int __lockref_is_dead(const struct lockref *lockref)
83688 {
83689- return ((int)l->count < 0);
83690+ return atomic_read(&lockref->count) < 0;
83691+}
83692+
83693+static inline int __lockref_read(const struct lockref *lockref)
83694+{
83695+ return atomic_read(&lockref->count);
83696+}
83697+
83698+static inline void __lockref_set(struct lockref *lockref, int count)
83699+{
83700+ atomic_set(&lockref->count, count);
83701+}
83702+
83703+static inline void __lockref_inc(struct lockref *lockref)
83704+{
83705+ atomic_inc(&lockref->count);
83706+}
83707+
83708+static inline void __lockref_dec(struct lockref *lockref)
83709+{
83710+ atomic_dec(&lockref->count);
83711 }
83712
83713 #endif /* __LINUX_LOCKREF_H */
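
With the lockref count switched to atomic_t above, every user goes through a small accessor set instead of touching the field directly, so the checked/unchecked semantics live in exactly one place. A userspace sketch of the same accessors over C11 atomics (names deliberately without the reserved __ prefix):

#include <stdatomic.h>
#include <stdio.h>

struct lockref_sketch {
	atomic_int count;	/* stands in for atomic_t count */
};

static int lockref_read(struct lockref_sketch *lockref)
{
	return atomic_load(&lockref->count);
}

static void lockref_inc(struct lockref_sketch *lockref)
{
	atomic_fetch_add(&lockref->count, 1);
}

static void lockref_dec(struct lockref_sketch *lockref)
{
	atomic_fetch_sub(&lockref->count, 1);
}

static int lockref_is_dead(struct lockref_sketch *lockref)
{
	return atomic_load(&lockref->count) < 0;	/* mirrors __lockref_is_dead() */
}

int main(void)
{
	struct lockref_sketch l;

	atomic_init(&l.count, 1);
	lockref_inc(&l);
	lockref_dec(&l);
	printf("count=%d dead=%d\n", lockref_read(&l), lockref_is_dead(&l));
	return 0;
}
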
83714diff --git a/include/linux/math64.h b/include/linux/math64.h
83715index c45c089..298841c 100644
83716--- a/include/linux/math64.h
83717+++ b/include/linux/math64.h
83718@@ -15,7 +15,7 @@
83719 * This is commonly provided by 32bit archs to provide an optimized 64bit
83720 * divide.
83721 */
83722-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83723+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83724 {
83725 *remainder = dividend % divisor;
83726 return dividend / divisor;
83727@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83728 /**
83729 * div64_u64 - unsigned 64bit divide with 64bit divisor
83730 */
83731-static inline u64 div64_u64(u64 dividend, u64 divisor)
83732+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83733 {
83734 return dividend / divisor;
83735 }
83736@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83737 #define div64_ul(x, y) div_u64((x), (y))
83738
83739 #ifndef div_u64_rem
83740-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83741+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83742 {
83743 *remainder = do_div(dividend, divisor);
83744 return dividend;
83745@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83746 #endif
83747
83748 #ifndef div64_u64
83749-extern u64 div64_u64(u64 dividend, u64 divisor);
83750+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83751 #endif
83752
83753 #ifndef div64_s64
83754@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83755 * divide.
83756 */
83757 #ifndef div_u64
83758-static inline u64 div_u64(u64 dividend, u32 divisor)
83759+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83760 {
83761 u32 remainder;
83762 return div_u64_rem(dividend, divisor, &remainder);
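
The __intentional_overflow(-1) markers in the math64.h hunk only speak to the size_overflow gcc plugin that ships with this patch; the helpers' arithmetic is unchanged. For reference, a userspace sketch of the div_u64_rem() contract, quotient returned and remainder stored through the pointer:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_u64_rem_sketch(uint64_t dividend, uint32_t divisor,
				   uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

int main(void)
{
	uint32_t rem;
	uint64_t q = div_u64_rem_sketch(1000000000000ULL, 7, &rem);

	printf("q=%llu rem=%u\n", (unsigned long long)q, rem);	/* q=142857142857 rem=1 */
	return 0;
}
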
83763diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83764index 3d385c8..deacb6a 100644
83765--- a/include/linux/mempolicy.h
83766+++ b/include/linux/mempolicy.h
83767@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83768 }
83769
83770 #define vma_policy(vma) ((vma)->vm_policy)
83771+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83772+{
83773+ vma->vm_policy = pol;
83774+}
83775
83776 static inline void mpol_get(struct mempolicy *pol)
83777 {
83778@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83779 }
83780
83781 #define vma_policy(vma) NULL
83782+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83783+{
83784+}
83785
83786 static inline int
83787 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
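
The two set_vma_policy() definitions above follow the usual config-stub idiom: a real setter when NUMA is built in, an empty inline otherwise, so callers stay free of #ifdefs. A compressed userspace illustration (FEATURE_NUMA is a stand-in for CONFIG_NUMA):

#include <stdio.h>

#define FEATURE_NUMA 1	/* flip to 0 to get the no-op stub */

struct policy { int node; };
struct vma_sketch { struct policy *pol; };

#if FEATURE_NUMA
static inline void set_vma_policy(struct vma_sketch *vma, struct policy *pol)
{
	vma->pol = pol;
}
#else
static inline void set_vma_policy(struct vma_sketch *vma, struct policy *pol)
{
	(void)vma; (void)pol;	/* feature disabled: nothing to store */
}
#endif

int main(void)
{
	struct policy p = { 1 };
	struct vma_sketch v = { 0 };

	set_vma_policy(&v, &p);
	printf("policy node: %d\n", v.pol ? v.pol->node : -1);
	return 0;
}
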
83788diff --git a/include/linux/mm.h b/include/linux/mm.h
83789index 47a9392..ef645bc 100644
83790--- a/include/linux/mm.h
83791+++ b/include/linux/mm.h
83792@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83793
83794 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83795 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83796+
83797+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83798+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83799+#endif
83800+
83801 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83802 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83803 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83804@@ -254,8 +259,8 @@ struct vm_operations_struct {
83805 /* called by access_process_vm when get_user_pages() fails, typically
83806 * for use by special VMAs that can switch between memory and hardware
83807 */
83808- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83809- void *buf, int len, int write);
83810+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83811+ void *buf, size_t len, int write);
83812
83813 /* Called by the /proc/PID/maps code to ask the vma whether it
83814 * has a special name. Returning non-NULL will also cause this
83815@@ -293,6 +298,7 @@ struct vm_operations_struct {
83816 struct page *(*find_special_page)(struct vm_area_struct *vma,
83817 unsigned long addr);
83818 };
83819+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83820
83821 struct mmu_gather;
83822 struct inode;
83823@@ -1213,8 +1219,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83824 unsigned long *pfn);
83825 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83826 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83827-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83828- void *buf, int len, int write);
83829+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83830+ void *buf, size_t len, int write);
83831
83832 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83833 loff_t const holebegin, loff_t const holelen)
83834@@ -1254,9 +1260,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83835 }
83836 #endif
83837
83838-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83839-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83840- void *buf, int len, int write);
83841+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83842+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83843+ void *buf, size_t len, int write);
83844
83845 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83846 unsigned long start, unsigned long nr_pages,
83847@@ -1299,34 +1305,6 @@ int set_page_dirty_lock(struct page *page);
83848 int clear_page_dirty_for_io(struct page *page);
83849 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83850
83851-/* Is the vma a continuation of the stack vma above it? */
83852-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83853-{
83854- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83855-}
83856-
83857-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83858- unsigned long addr)
83859-{
83860- return (vma->vm_flags & VM_GROWSDOWN) &&
83861- (vma->vm_start == addr) &&
83862- !vma_growsdown(vma->vm_prev, addr);
83863-}
83864-
83865-/* Is the vma a continuation of the stack vma below it? */
83866-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83867-{
83868- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83869-}
83870-
83871-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83872- unsigned long addr)
83873-{
83874- return (vma->vm_flags & VM_GROWSUP) &&
83875- (vma->vm_end == addr) &&
83876- !vma_growsup(vma->vm_next, addr);
83877-}
83878-
83879 extern struct task_struct *task_of_stack(struct task_struct *task,
83880 struct vm_area_struct *vma, bool in_group);
83881
83882@@ -1449,8 +1427,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83883 {
83884 return 0;
83885 }
83886+
83887+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83888+ unsigned long address)
83889+{
83890+ return 0;
83891+}
83892 #else
83893 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83894+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83895 #endif
83896
83897 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
83898@@ -1460,6 +1445,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83899 return 0;
83900 }
83901
83902+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83903+ unsigned long address)
83904+{
83905+ return 0;
83906+}
83907+
83908 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
83909
83910 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
83911@@ -1472,6 +1463,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
83912
83913 #else
83914 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83915+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83916
83917 static inline void mm_nr_pmds_init(struct mm_struct *mm)
83918 {
83919@@ -1509,11 +1501,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83920 NULL: pud_offset(pgd, address);
83921 }
83922
83923+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83924+{
83925+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83926+ NULL: pud_offset(pgd, address);
83927+}
83928+
83929 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83930 {
83931 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83932 NULL: pmd_offset(pud, address);
83933 }
83934+
83935+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83936+{
83937+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83938+ NULL: pmd_offset(pud, address);
83939+}
83940 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83941
83942 #if USE_SPLIT_PTE_PTLOCKS
83943@@ -1890,12 +1894,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83944 bool *need_rmap_locks);
83945 extern void exit_mmap(struct mm_struct *);
83946
83947+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83948+extern void gr_learn_resource(const struct task_struct *task, const int res,
83949+ const unsigned long wanted, const int gt);
83950+#else
83951+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83952+ const unsigned long wanted, const int gt)
83953+{
83954+}
83955+#endif
83956+
83957 static inline int check_data_rlimit(unsigned long rlim,
83958 unsigned long new,
83959 unsigned long start,
83960 unsigned long end_data,
83961 unsigned long start_data)
83962 {
83963+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83964 if (rlim < RLIM_INFINITY) {
83965 if (((new - start) + (end_data - start_data)) > rlim)
83966 return -ENOSPC;
83967@@ -1920,7 +1935,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83968 unsigned long addr, unsigned long len,
83969 unsigned long flags, struct page **pages);
83970
83971-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83972+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83973
83974 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83975 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83976@@ -1928,6 +1943,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83977 unsigned long len, unsigned long prot, unsigned long flags,
83978 unsigned long pgoff, unsigned long *populate);
83979 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83980+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83981
83982 #ifdef CONFIG_MMU
83983 extern int __mm_populate(unsigned long addr, unsigned long len,
83984@@ -1956,10 +1972,11 @@ struct vm_unmapped_area_info {
83985 unsigned long high_limit;
83986 unsigned long align_mask;
83987 unsigned long align_offset;
83988+ unsigned long threadstack_offset;
83989 };
83990
83991-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83992-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83993+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83994+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83995
83996 /*
83997 * Search for an unmapped address range.
83998@@ -1971,7 +1988,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83999 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
84000 */
84001 static inline unsigned long
84002-vm_unmapped_area(struct vm_unmapped_area_info *info)
84003+vm_unmapped_area(const struct vm_unmapped_area_info *info)
84004 {
84005 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
84006 return unmapped_area(info);
84007@@ -2033,6 +2050,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
84008 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
84009 struct vm_area_struct **pprev);
84010
84011+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
84012+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
84013+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
84014+
84015 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
84016 NULL if none. Assume start_addr < end_addr. */
84017 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
84018@@ -2062,10 +2083,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
84019 }
84020
84021 #ifdef CONFIG_MMU
84022-pgprot_t vm_get_page_prot(unsigned long vm_flags);
84023+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
84024 void vma_set_page_prot(struct vm_area_struct *vma);
84025 #else
84026-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
84027+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
84028 {
84029 return __pgprot(0);
84030 }
84031@@ -2127,6 +2148,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
84032 static inline void vm_stat_account(struct mm_struct *mm,
84033 unsigned long flags, struct file *file, long pages)
84034 {
84035+
84036+#ifdef CONFIG_PAX_RANDMMAP
84037+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
84038+#endif
84039+
84040 mm->total_vm += pages;
84041 }
84042 #endif /* CONFIG_PROC_FS */
84043@@ -2229,7 +2255,7 @@ extern int unpoison_memory(unsigned long pfn);
84044 extern int sysctl_memory_failure_early_kill;
84045 extern int sysctl_memory_failure_recovery;
84046 extern void shake_page(struct page *p, int access);
84047-extern atomic_long_t num_poisoned_pages;
84048+extern atomic_long_unchecked_t num_poisoned_pages;
84049 extern int soft_offline_page(struct page *page, int flags);
84050
84051 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
84052@@ -2280,5 +2306,11 @@ void __init setup_nr_node_ids(void);
84053 static inline void setup_nr_node_ids(void) {}
84054 #endif
84055
84056+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84057+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
84058+#else
84059+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
84060+#endif
84061+
84062 #endif /* __KERNEL__ */
84063 #endif /* _LINUX_MM_H */
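
Several prototypes in the mm.h hunk widen int lengths to size_t/ssize_t. The sketch below shows what is at stake: pushing a large length through an int truncates it (the result shown is the typical two's-complement outcome; strictly, the conversion is implementation-defined):

#include <stdio.h>

static int through_int_len(int len)
{
	return len;	/* what the old int-based prototypes saw */
}

int main(void)
{
	unsigned long long big = 5ULL * 1024 * 1024 * 1024;	/* 5 GiB */

	printf("full length:     %llu\n", big);
	printf("through int len: %d\n", through_int_len((int)big));
	return 0;
}
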
84064diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
84065index 199a03a..7328440 100644
84066--- a/include/linux/mm_types.h
84067+++ b/include/linux/mm_types.h
84068@@ -313,7 +313,9 @@ struct vm_area_struct {
84069 #ifdef CONFIG_NUMA
84070 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
84071 #endif
84072-};
84073+
84074+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
84075+} __randomize_layout;
84076
84077 struct core_thread {
84078 struct task_struct *task;
84079@@ -464,7 +466,25 @@ struct mm_struct {
84080 /* address of the bounds directory */
84081 void __user *bd_addr;
84082 #endif
84083-};
84084+
84085+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84086+ unsigned long pax_flags;
84087+#endif
84088+
84089+#ifdef CONFIG_PAX_DLRESOLVE
84090+ unsigned long call_dl_resolve;
84091+#endif
84092+
84093+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
84094+ unsigned long call_syscall;
84095+#endif
84096+
84097+#ifdef CONFIG_PAX_ASLR
84098+ unsigned long delta_mmap; /* randomized offset */
84099+ unsigned long delta_stack; /* randomized offset */
84100+#endif
84101+
84102+} __randomize_layout;
84103
84104 static inline void mm_init_cpumask(struct mm_struct *mm)
84105 {
84106diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
84107index 160448f..7b332b7 100644
84108--- a/include/linux/mmc/core.h
84109+++ b/include/linux/mmc/core.h
84110@@ -79,7 +79,7 @@ struct mmc_command {
84111 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
84112
84113 unsigned int retries; /* max number of retries */
84114- unsigned int error; /* command error */
84115+ int error; /* command error */
84116
84117 /*
84118 * Standard errno values are used for errors, but some have specific
84119diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
84120index c5d5278..f0b68c8 100644
84121--- a/include/linux/mmiotrace.h
84122+++ b/include/linux/mmiotrace.h
84123@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
84124 /* Called from ioremap.c */
84125 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
84126 void __iomem *addr);
84127-extern void mmiotrace_iounmap(volatile void __iomem *addr);
84128+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
84129
84130 /* For anyone to insert markers. Remember trailing newline. */
84131 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
84132@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
84133 {
84134 }
84135
84136-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
84137+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
84138 {
84139 }
84140
84141diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
84142index 2782df4..abe756e 100644
84143--- a/include/linux/mmzone.h
84144+++ b/include/linux/mmzone.h
84145@@ -526,7 +526,7 @@ struct zone {
84146
84147 ZONE_PADDING(_pad3_)
84148 /* Zone statistics */
84149- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84150+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84151 } ____cacheline_internodealigned_in_smp;
84152
84153 enum zone_flags {
84154diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
84155index e530533..c9620c7 100644
84156--- a/include/linux/mod_devicetable.h
84157+++ b/include/linux/mod_devicetable.h
84158@@ -139,7 +139,7 @@ struct usb_device_id {
84159 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
84160 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
84161
84162-#define HID_ANY_ID (~0)
84163+#define HID_ANY_ID (~0U)
84164 #define HID_BUS_ANY 0xffff
84165 #define HID_GROUP_ANY 0x0000
84166
84167@@ -470,7 +470,7 @@ struct dmi_system_id {
84168 const char *ident;
84169 struct dmi_strmatch matches[4];
84170 void *driver_data;
84171-};
84172+} __do_const;
84173 /*
84174 * struct dmi_device_id appears during expansion of
84175 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
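
Why HID_ANY_ID moves from (~0) to (~0U): plain ~0 is the signed int -1 and sign-extends when widened, while ~0U stays a 32-bit all-ones mask. A three-line demonstration:

#include <stdio.h>

int main(void)
{
	printf("~0  as int:            %d\n", ~0);
	printf("~0  widened to 64 bit: 0x%llx\n", (unsigned long long)~0);
	printf("~0U widened to 64 bit: 0x%llx\n", (unsigned long long)~0U);
	return 0;
}
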
84176diff --git a/include/linux/module.h b/include/linux/module.h
84177index b03485b..a26974f 100644
84178--- a/include/linux/module.h
84179+++ b/include/linux/module.h
84180@@ -17,9 +17,11 @@
84181 #include <linux/moduleparam.h>
84182 #include <linux/jump_label.h>
84183 #include <linux/export.h>
84184+#include <linux/fs.h>
84185
84186 #include <linux/percpu.h>
84187 #include <asm/module.h>
84188+#include <asm/pgtable.h>
84189
84190 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
84191 #define MODULE_SIG_STRING "~Module signature appended~\n"
84192@@ -42,7 +44,7 @@ struct module_kobject {
84193 struct kobject *drivers_dir;
84194 struct module_param_attrs *mp;
84195 struct completion *kobj_completion;
84196-};
84197+} __randomize_layout;
84198
84199 struct module_attribute {
84200 struct attribute attr;
84201@@ -54,12 +56,13 @@ struct module_attribute {
84202 int (*test)(struct module *);
84203 void (*free)(struct module *);
84204 };
84205+typedef struct module_attribute __no_const module_attribute_no_const;
84206
84207 struct module_version_attribute {
84208 struct module_attribute mattr;
84209 const char *module_name;
84210 const char *version;
84211-} __attribute__ ((__aligned__(sizeof(void *))));
84212+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
84213
84214 extern ssize_t __modver_version_show(struct module_attribute *,
84215 struct module_kobject *, char *);
84216@@ -221,7 +224,7 @@ struct module {
84217
84218 /* Sysfs stuff. */
84219 struct module_kobject mkobj;
84220- struct module_attribute *modinfo_attrs;
84221+ module_attribute_no_const *modinfo_attrs;
84222 const char *version;
84223 const char *srcversion;
84224 struct kobject *holders_dir;
84225@@ -270,19 +273,16 @@ struct module {
84226 int (*init)(void);
84227
84228 /* If this is non-NULL, vfree after init() returns */
84229- void *module_init;
84230+ void *module_init_rx, *module_init_rw;
84231
84232 /* Here is the actual code + data, vfree'd on unload. */
84233- void *module_core;
84234+ void *module_core_rx, *module_core_rw;
84235
84236 /* Here are the sizes of the init and core sections */
84237- unsigned int init_size, core_size;
84238+ unsigned int init_size_rw, core_size_rw;
84239
84240 /* The size of the executable code in each section. */
84241- unsigned int init_text_size, core_text_size;
84242-
84243- /* Size of RO sections of the module (text+rodata) */
84244- unsigned int init_ro_size, core_ro_size;
84245+ unsigned int init_size_rx, core_size_rx;
84246
84247 /* Arch-specific module values */
84248 struct mod_arch_specific arch;
84249@@ -338,6 +338,10 @@ struct module {
84250 #ifdef CONFIG_EVENT_TRACING
84251 struct ftrace_event_call **trace_events;
84252 unsigned int num_trace_events;
84253+ struct file_operations trace_id;
84254+ struct file_operations trace_enable;
84255+ struct file_operations trace_format;
84256+ struct file_operations trace_filter;
84257 #endif
84258 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
84259 unsigned int num_ftrace_callsites;
84260@@ -365,7 +369,7 @@ struct module {
84261 ctor_fn_t *ctors;
84262 unsigned int num_ctors;
84263 #endif
84264-};
84265+} __randomize_layout;
84266 #ifndef MODULE_ARCH_INIT
84267 #define MODULE_ARCH_INIT {}
84268 #endif
84269@@ -386,18 +390,48 @@ bool is_module_address(unsigned long addr);
84270 bool is_module_percpu_address(unsigned long addr);
84271 bool is_module_text_address(unsigned long addr);
84272
84273+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
84274+{
84275+
84276+#ifdef CONFIG_PAX_KERNEXEC
84277+ if (ktla_ktva(addr) >= (unsigned long)start &&
84278+ ktla_ktva(addr) < (unsigned long)start + size)
84279+ return 1;
84280+#endif
84281+
84282+ return ((void *)addr >= start && (void *)addr < start + size);
84283+}
84284+
84285+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
84286+{
84287+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
84288+}
84289+
84290+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
84291+{
84292+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
84293+}
84294+
84295+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
84296+{
84297+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
84298+}
84299+
84300+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
84301+{
84302+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
84303+}
84304+
84305 static inline bool within_module_core(unsigned long addr,
84306 const struct module *mod)
84307 {
84308- return (unsigned long)mod->module_core <= addr &&
84309- addr < (unsigned long)mod->module_core + mod->core_size;
84310+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
84311 }
84312
84313 static inline bool within_module_init(unsigned long addr,
84314 const struct module *mod)
84315 {
84316- return (unsigned long)mod->module_init <= addr &&
84317- addr < (unsigned long)mod->module_init + mod->init_size;
84318+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
84319 }
84320
84321 static inline bool within_module(unsigned long addr, const struct module *mod)
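
With the module core and init areas split into rx and rw halves above, within_module_core()/within_module_init() become two instances of the same bounds test. A userspace sketch of that test (the ktla_ktva() KERNEXEC address translation from the hunk is omitted):

#include <stdio.h>

static int within_range(unsigned long addr, const void *start, unsigned long size)
{
	unsigned long s = (unsigned long)start;

	return addr >= s && addr < s + size;
}

int main(void)
{
	char core_rx[64], core_rw[64];
	unsigned long p = (unsigned long)&core_rx[10];

	printf("in rx: %d  in rw: %d  in module core: %d\n",
	       within_range(p, core_rx, sizeof(core_rx)),
	       within_range(p, core_rw, sizeof(core_rw)),
	       within_range(p, core_rx, sizeof(core_rx)) ||
	       within_range(p, core_rw, sizeof(core_rw)));
	return 0;
}
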
84322diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
84323index 4d0cb9b..3169ac7 100644
84324--- a/include/linux/moduleloader.h
84325+++ b/include/linux/moduleloader.h
84326@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
84327 sections. Returns NULL on failure. */
84328 void *module_alloc(unsigned long size);
84329
84330+#ifdef CONFIG_PAX_KERNEXEC
84331+void *module_alloc_exec(unsigned long size);
84332+#else
84333+#define module_alloc_exec(x) module_alloc(x)
84334+#endif
84335+
84336 /* Free memory returned from module_alloc. */
84337 void module_memfree(void *module_region);
84338
84339+#ifdef CONFIG_PAX_KERNEXEC
84340+void module_memfree_exec(void *module_region);
84341+#else
84342+#define module_memfree_exec(x) module_memfree((x))
84343+#endif
84344+
84345 /*
84346 * Apply the given relocation to the (simplified) ELF. Return -error
84347 * or 0.
84348@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
84349 unsigned int relsec,
84350 struct module *me)
84351 {
84352+#ifdef CONFIG_MODULES
84353 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84354 module_name(me));
84355+#endif
84356 return -ENOEXEC;
84357 }
84358 #endif
84359@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
84360 unsigned int relsec,
84361 struct module *me)
84362 {
84363+#ifdef CONFIG_MODULES
84364 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84365 module_name(me));
84366+#endif
84367 return -ENOEXEC;
84368 }
84369 #endif
84370diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
84371index 1c9effa..1160bdd 100644
84372--- a/include/linux/moduleparam.h
84373+++ b/include/linux/moduleparam.h
84374@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
84375 * @len is usually just sizeof(string).
84376 */
84377 #define module_param_string(name, string, len, perm) \
84378- static const struct kparam_string __param_string_##name \
84379+ static const struct kparam_string __param_string_##name __used \
84380 = { len, string }; \
84381 __module_param_call(MODULE_PARAM_PREFIX, name, \
84382 &param_ops_string, \
84383@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
84384 */
84385 #define module_param_array_named(name, array, type, nump, perm) \
84386 param_check_##type(name, &(array)[0]); \
84387- static const struct kparam_array __param_arr_##name \
84388+ static const struct kparam_array __param_arr_##name __used \
84389 = { .max = ARRAY_SIZE(array), .num = nump, \
84390 .ops = &param_ops_##type, \
84391 .elemsize = sizeof(array[0]), .elem = array }; \
84392diff --git a/include/linux/mount.h b/include/linux/mount.h
84393index c2c561d..a5f2a8c 100644
84394--- a/include/linux/mount.h
84395+++ b/include/linux/mount.h
84396@@ -66,7 +66,7 @@ struct vfsmount {
84397 struct dentry *mnt_root; /* root of the mounted tree */
84398 struct super_block *mnt_sb; /* pointer to superblock */
84399 int mnt_flags;
84400-};
84401+} __randomize_layout;
84402
84403 struct file; /* forward dec */
84404 struct path;
84405diff --git a/include/linux/namei.h b/include/linux/namei.h
84406index c899077..b9a2010 100644
84407--- a/include/linux/namei.h
84408+++ b/include/linux/namei.h
84409@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
84410 extern void unlock_rename(struct dentry *, struct dentry *);
84411
84412 extern void nd_jump_link(struct nameidata *nd, struct path *path);
84413-extern void nd_set_link(struct nameidata *nd, char *path);
84414-extern char *nd_get_link(struct nameidata *nd);
84415+extern void nd_set_link(struct nameidata *nd, const char *path);
84416+extern const char *nd_get_link(const struct nameidata *nd);
84417
84418 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
84419 {
84420diff --git a/include/linux/net.h b/include/linux/net.h
84421index 17d8339..81656c0 100644
84422--- a/include/linux/net.h
84423+++ b/include/linux/net.h
84424@@ -192,7 +192,7 @@ struct net_proto_family {
84425 int (*create)(struct net *net, struct socket *sock,
84426 int protocol, int kern);
84427 struct module *owner;
84428-};
84429+} __do_const;
84430
84431 struct iovec;
84432 struct kvec;
84433diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
84434index 2787388..1dd8e88 100644
84435--- a/include/linux/netdevice.h
84436+++ b/include/linux/netdevice.h
84437@@ -1198,6 +1198,7 @@ struct net_device_ops {
84438 u8 state);
84439 #endif
84440 };
84441+typedef struct net_device_ops __no_const net_device_ops_no_const;
84442
84443 /**
84444 * enum net_device_priv_flags - &struct net_device priv_flags
84445@@ -1546,10 +1547,10 @@ struct net_device {
84446
84447 struct net_device_stats stats;
84448
84449- atomic_long_t rx_dropped;
84450- atomic_long_t tx_dropped;
84451+ atomic_long_unchecked_t rx_dropped;
84452+ atomic_long_unchecked_t tx_dropped;
84453
84454- atomic_t carrier_changes;
84455+ atomic_unchecked_t carrier_changes;
84456
84457 #ifdef CONFIG_WIRELESS_EXT
84458 const struct iw_handler_def * wireless_handlers;
84459diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
84460index 2517ece..0bbfcfb 100644
84461--- a/include/linux/netfilter.h
84462+++ b/include/linux/netfilter.h
84463@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
84464 #endif
84465 /* Use the module struct to lock set/get code in place */
84466 struct module *owner;
84467-};
84468+} __do_const;
84469
84470 /* Function to register/unregister hook points. */
84471 int nf_register_hook(struct nf_hook_ops *reg);
84472diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
84473index e955d47..04a5338 100644
84474--- a/include/linux/netfilter/nfnetlink.h
84475+++ b/include/linux/netfilter/nfnetlink.h
84476@@ -19,7 +19,7 @@ struct nfnl_callback {
84477 const struct nlattr * const cda[]);
84478 const struct nla_policy *policy; /* netlink attribute policy */
84479 const u_int16_t attr_count; /* number of nlattr's */
84480-};
84481+} __do_const;
84482
84483 struct nfnetlink_subsystem {
84484 const char *name;
84485diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
84486new file mode 100644
84487index 0000000..33f4af8
84488--- /dev/null
84489+++ b/include/linux/netfilter/xt_gradm.h
84490@@ -0,0 +1,9 @@
84491+#ifndef _LINUX_NETFILTER_XT_GRADM_H
84492+#define _LINUX_NETFILTER_XT_GRADM_H 1
84493+
84494+struct xt_gradm_mtinfo {
84495+ __u16 flags;
84496+ __u16 invflags;
84497+};
84498+
84499+#endif
84500diff --git a/include/linux/nls.h b/include/linux/nls.h
84501index 520681b..2b7fabb 100644
84502--- a/include/linux/nls.h
84503+++ b/include/linux/nls.h
84504@@ -31,7 +31,7 @@ struct nls_table {
84505 const unsigned char *charset2upper;
84506 struct module *owner;
84507 struct nls_table *next;
84508-};
84509+} __do_const;
84510
84511 /* this value hold the maximum octet of charset */
84512 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84513@@ -46,7 +46,7 @@ enum utf16_endian {
84514 /* nls_base.c */
84515 extern int __register_nls(struct nls_table *, struct module *);
84516 extern int unregister_nls(struct nls_table *);
84517-extern struct nls_table *load_nls(char *);
84518+extern struct nls_table *load_nls(const char *);
84519 extern void unload_nls(struct nls_table *);
84520 extern struct nls_table *load_nls_default(void);
84521 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84522diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84523index d14a4c3..a078786 100644
84524--- a/include/linux/notifier.h
84525+++ b/include/linux/notifier.h
84526@@ -54,7 +54,8 @@ struct notifier_block {
84527 notifier_fn_t notifier_call;
84528 struct notifier_block __rcu *next;
84529 int priority;
84530-};
84531+} __do_const;
84532+typedef struct notifier_block __no_const notifier_block_no_const;
84533
84534 struct atomic_notifier_head {
84535 spinlock_t lock;
84536diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84537index b2a0f15..4d7da32 100644
84538--- a/include/linux/oprofile.h
84539+++ b/include/linux/oprofile.h
84540@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84541 int oprofilefs_create_ro_ulong(struct dentry * root,
84542 char const * name, ulong * val);
84543
84544-/** Create a file for read-only access to an atomic_t. */
84545+/** Create a file for read-only access to an atomic_unchecked_t. */
84546 int oprofilefs_create_ro_atomic(struct dentry * root,
84547- char const * name, atomic_t * val);
84548+ char const * name, atomic_unchecked_t * val);
84549
84550 /** create a directory */
84551 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84552diff --git a/include/linux/padata.h b/include/linux/padata.h
84553index 4386946..f50c615 100644
84554--- a/include/linux/padata.h
84555+++ b/include/linux/padata.h
84556@@ -129,7 +129,7 @@ struct parallel_data {
84557 struct padata_serial_queue __percpu *squeue;
84558 atomic_t reorder_objects;
84559 atomic_t refcnt;
84560- atomic_t seq_nr;
84561+ atomic_unchecked_t seq_nr;
84562 struct padata_cpumask cpumask;
84563 spinlock_t lock ____cacheline_aligned;
84564 unsigned int processed;
84565diff --git a/include/linux/path.h b/include/linux/path.h
84566index d137218..be0c176 100644
84567--- a/include/linux/path.h
84568+++ b/include/linux/path.h
84569@@ -1,13 +1,15 @@
84570 #ifndef _LINUX_PATH_H
84571 #define _LINUX_PATH_H
84572
84573+#include <linux/compiler.h>
84574+
84575 struct dentry;
84576 struct vfsmount;
84577
84578 struct path {
84579 struct vfsmount *mnt;
84580 struct dentry *dentry;
84581-};
84582+} __randomize_layout;
84583
84584 extern void path_get(const struct path *);
84585 extern void path_put(const struct path *);
84586diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84587index 8c78950..0d74ed9 100644
84588--- a/include/linux/pci_hotplug.h
84589+++ b/include/linux/pci_hotplug.h
84590@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84591 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84592 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84593 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84594-};
84595+} __do_const;
84596+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84597
84598 /**
84599 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84600diff --git a/include/linux/percpu.h b/include/linux/percpu.h
84601index caebf2a..4c3ae9d 100644
84602--- a/include/linux/percpu.h
84603+++ b/include/linux/percpu.h
84604@@ -34,7 +34,7 @@
84605 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
84606 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
84607 */
84608-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
84609+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
84610 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
84611
84612 /*
84613diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84614index 2b62198..2b74233 100644
84615--- a/include/linux/perf_event.h
84616+++ b/include/linux/perf_event.h
84617@@ -343,8 +343,8 @@ struct perf_event {
84618
84619 enum perf_event_active_state state;
84620 unsigned int attach_state;
84621- local64_t count;
84622- atomic64_t child_count;
84623+ local64_t count; /* PaX: fix it one day */
84624+ atomic64_unchecked_t child_count;
84625
84626 /*
84627 * These are the total time in nanoseconds that the event
84628@@ -395,8 +395,8 @@ struct perf_event {
84629 * These accumulate total time (in nanoseconds) that children
84630 * events have been enabled and running, respectively.
84631 */
84632- atomic64_t child_total_time_enabled;
84633- atomic64_t child_total_time_running;
84634+ atomic64_unchecked_t child_total_time_enabled;
84635+ atomic64_unchecked_t child_total_time_running;
84636
84637 /*
84638 * Protect attach/detach and child_list:
84639@@ -752,7 +752,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84640 entry->ip[entry->nr++] = ip;
84641 }
84642
84643-extern int sysctl_perf_event_paranoid;
84644+extern int sysctl_perf_event_legitimately_concerned;
84645 extern int sysctl_perf_event_mlock;
84646 extern int sysctl_perf_event_sample_rate;
84647 extern int sysctl_perf_cpu_time_max_percent;
84648@@ -767,19 +767,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84649 loff_t *ppos);
84650
84651
84652+static inline bool perf_paranoid_any(void)
84653+{
84654+ return sysctl_perf_event_legitimately_concerned > 2;
84655+}
84656+
84657 static inline bool perf_paranoid_tracepoint_raw(void)
84658 {
84659- return sysctl_perf_event_paranoid > -1;
84660+ return sysctl_perf_event_legitimately_concerned > -1;
84661 }
84662
84663 static inline bool perf_paranoid_cpu(void)
84664 {
84665- return sysctl_perf_event_paranoid > 0;
84666+ return sysctl_perf_event_legitimately_concerned > 0;
84667 }
84668
84669 static inline bool perf_paranoid_kernel(void)
84670 {
84671- return sysctl_perf_event_paranoid > 1;
84672+ return sysctl_perf_event_legitimately_concerned > 1;
84673 }
84674
84675 extern void perf_event_init(void);
84676@@ -912,7 +917,7 @@ struct perf_pmu_events_attr {
84677 struct device_attribute attr;
84678 u64 id;
84679 const char *event_str;
84680-};
84681+} __do_const;
84682
84683 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
84684 char *page);
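
The perf_event.h hunk renames the paranoia sysctl and adds a fourth level, perf_paranoid_any(), above the existing three. The helpers are plain threshold tests, tabulated by this sketch:

#include <stdio.h>

static int paranoid;	/* stand-in for the sysctl */

static int perf_paranoid_any(void)            { return paranoid > 2; }
static int perf_paranoid_kernel(void)         { return paranoid > 1; }
static int perf_paranoid_cpu(void)            { return paranoid > 0; }
static int perf_paranoid_tracepoint_raw(void) { return paranoid > -1; }

int main(void)
{
	for (paranoid = -1; paranoid <= 3; paranoid++)
		printf("level %2d: any=%d kernel=%d cpu=%d raw_tp=%d\n",
		       paranoid, perf_paranoid_any(), perf_paranoid_kernel(),
		       perf_paranoid_cpu(), perf_paranoid_tracepoint_raw());
	return 0;
}
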
84685diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84686index 918b117..7af374b7 100644
84687--- a/include/linux/pid_namespace.h
84688+++ b/include/linux/pid_namespace.h
84689@@ -45,7 +45,7 @@ struct pid_namespace {
84690 int hide_pid;
84691 int reboot; /* group exit code if this pidns was rebooted */
84692 struct ns_common ns;
84693-};
84694+} __randomize_layout;
84695
84696 extern struct pid_namespace init_pid_ns;
84697
84698diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84699index eb8b8ac..62649e1 100644
84700--- a/include/linux/pipe_fs_i.h
84701+++ b/include/linux/pipe_fs_i.h
84702@@ -47,10 +47,10 @@ struct pipe_inode_info {
84703 struct mutex mutex;
84704 wait_queue_head_t wait;
84705 unsigned int nrbufs, curbuf, buffers;
84706- unsigned int readers;
84707- unsigned int writers;
84708- unsigned int files;
84709- unsigned int waiting_writers;
84710+ atomic_t readers;
84711+ atomic_t writers;
84712+ atomic_t files;
84713+ atomic_t waiting_writers;
84714 unsigned int r_counter;
84715 unsigned int w_counter;
84716 struct page *tmp_page;
84717diff --git a/include/linux/pm.h b/include/linux/pm.h
84718index e2f1be6..78a0506 100644
84719--- a/include/linux/pm.h
84720+++ b/include/linux/pm.h
84721@@ -608,6 +608,7 @@ struct dev_pm_domain {
84722 struct dev_pm_ops ops;
84723 void (*detach)(struct device *dev, bool power_off);
84724 };
84725+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84726
84727 /*
84728 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84729diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84730index 080e778..cbdaef7 100644
84731--- a/include/linux/pm_domain.h
84732+++ b/include/linux/pm_domain.h
84733@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84734 int (*save_state)(struct device *dev);
84735 int (*restore_state)(struct device *dev);
84736 bool (*active_wakeup)(struct device *dev);
84737-};
84738+} __no_const;
84739
84740 struct gpd_cpuidle_data {
84741 unsigned int saved_exit_latency;
84742- struct cpuidle_state *idle_state;
84743+ cpuidle_state_no_const *idle_state;
84744 };
84745
84746 struct generic_pm_domain {
84747diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84748index 30e84d4..22278b4 100644
84749--- a/include/linux/pm_runtime.h
84750+++ b/include/linux/pm_runtime.h
84751@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84752
84753 static inline void pm_runtime_mark_last_busy(struct device *dev)
84754 {
84755- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84756+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84757 }
84758
84759 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84760diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84761index 6512e9c..ec27fa2 100644
84762--- a/include/linux/pnp.h
84763+++ b/include/linux/pnp.h
84764@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84765 struct pnp_fixup {
84766 char id[7];
84767 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84768-};
84769+} __do_const;
84770
84771 /* config parameters */
84772 #define PNP_CONFIG_NORMAL 0x0001
84773diff --git a/include/linux/poison.h b/include/linux/poison.h
84774index 2110a81..13a11bb 100644
84775--- a/include/linux/poison.h
84776+++ b/include/linux/poison.h
84777@@ -19,8 +19,8 @@
84778 * under normal circumstances, used to verify that nobody uses
84779 * non-initialized list entries.
84780 */
84781-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84782-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84783+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84784+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84785
84786 /********** include/linux/timer.h **********/
84787 /*
84788diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84789index d8b187c3..9a9257a 100644
84790--- a/include/linux/power/smartreflex.h
84791+++ b/include/linux/power/smartreflex.h
84792@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84793 int (*notify)(struct omap_sr *sr, u32 status);
84794 u8 notify_flags;
84795 u8 class_type;
84796-};
84797+} __do_const;
84798
84799 /**
84800 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84801diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84802index 4ea1d37..80f4b33 100644
84803--- a/include/linux/ppp-comp.h
84804+++ b/include/linux/ppp-comp.h
84805@@ -84,7 +84,7 @@ struct compressor {
84806 struct module *owner;
84807 /* Extra skb space needed by the compressor algorithm */
84808 unsigned int comp_extra;
84809-};
84810+} __do_const;
84811
84812 /*
84813 * The return value from decompress routine is the length of the
84814diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84815index de83b4e..c4b997d 100644
84816--- a/include/linux/preempt.h
84817+++ b/include/linux/preempt.h
84818@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84819 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84820 #endif
84821
84822+#define raw_preempt_count_add(val) __preempt_count_add(val)
84823+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84824+
84825 #define __preempt_count_inc() __preempt_count_add(1)
84826 #define __preempt_count_dec() __preempt_count_sub(1)
84827
84828 #define preempt_count_inc() preempt_count_add(1)
84829+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84830 #define preempt_count_dec() preempt_count_sub(1)
84831+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84832
84833 #ifdef CONFIG_PREEMPT_COUNT
84834
84835@@ -41,6 +46,12 @@ do { \
84836 barrier(); \
84837 } while (0)
84838
84839+#define raw_preempt_disable() \
84840+do { \
84841+ raw_preempt_count_inc(); \
84842+ barrier(); \
84843+} while (0)
84844+
84845 #define sched_preempt_enable_no_resched() \
84846 do { \
84847 barrier(); \
84848@@ -49,6 +60,12 @@ do { \
84849
84850 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84851
84852+#define raw_preempt_enable_no_resched() \
84853+do { \
84854+ barrier(); \
84855+ raw_preempt_count_dec(); \
84856+} while (0)
84857+
84858 #ifdef CONFIG_PREEMPT
84859 #define preempt_enable() \
84860 do { \
84861@@ -113,8 +130,10 @@ do { \
84862 * region.
84863 */
84864 #define preempt_disable() barrier()
84865+#define raw_preempt_disable() barrier()
84866 #define sched_preempt_enable_no_resched() barrier()
84867 #define preempt_enable_no_resched() barrier()
84868+#define raw_preempt_enable_no_resched() barrier()
84869 #define preempt_enable() barrier()
84870 #define preempt_check_resched() do { } while (0)
84871
84872@@ -128,11 +147,13 @@ do { \
84873 /*
84874 * Modules have no business playing preemption tricks.
84875 */
84876+#ifndef CONFIG_PAX_KERNEXEC
84877 #undef sched_preempt_enable_no_resched
84878 #undef preempt_enable_no_resched
84879 #undef preempt_enable_no_resched_notrace
84880 #undef preempt_check_resched
84881 #endif
84882+#endif
84883
84884 #define preempt_set_need_resched() \
84885 do { \
84886diff --git a/include/linux/printk.h b/include/linux/printk.h
84887index baa3f97..168cff1 100644
84888--- a/include/linux/printk.h
84889+++ b/include/linux/printk.h
84890@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84891 #endif
84892
84893 typedef int(*printk_func_t)(const char *fmt, va_list args);
84894+extern int kptr_restrict;
84895
84896 #ifdef CONFIG_PRINTK
84897 asmlinkage __printf(5, 0)
84898@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84899
84900 extern int printk_delay_msec;
84901 extern int dmesg_restrict;
84902-extern int kptr_restrict;
84903
84904 extern void wake_up_klogd(void);
84905
84906diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84907index b97bf2e..f14c92d4 100644
84908--- a/include/linux/proc_fs.h
84909+++ b/include/linux/proc_fs.h
84910@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84911 extern struct proc_dir_entry *proc_symlink(const char *,
84912 struct proc_dir_entry *, const char *);
84913 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84914+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84915 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84916 struct proc_dir_entry *, void *);
84917+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84918+ struct proc_dir_entry *, void *);
84919 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84920 struct proc_dir_entry *);
84921
84922@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84923 return proc_create_data(name, mode, parent, proc_fops, NULL);
84924 }
84925
84926+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84927+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84928+{
84929+#ifdef CONFIG_GRKERNSEC_PROC_USER
84930+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84931+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84932+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84933+#else
84934+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84935+#endif
84936+}
84937+
84938+
84939 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84940 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84941 extern void *PDE_DATA(const struct inode *);
84942@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84943 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84944 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84945 struct proc_dir_entry *parent) {return NULL;}
84946+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84947+ struct proc_dir_entry *parent) { return NULL; }
84948 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84949 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84950+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84951+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84952 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84953 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84954 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84955@@ -79,7 +99,7 @@ struct net;
84956 static inline struct proc_dir_entry *proc_net_mkdir(
84957 struct net *net, const char *name, struct proc_dir_entry *parent)
84958 {
84959- return proc_mkdir_data(name, 0, parent, net);
84960+ return proc_mkdir_data_restrict(name, 0, parent, net);
84961 }
84962
84963 #endif /* _LINUX_PROC_FS_H */
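
proc_create_grsec() in the hunk above clamps the requested mode at build time, to owner-only or owner-plus-group read, depending on the GRKERNSEC_PROC_* options. The same idiom in a userspace sketch (GRSEC_PROC_USER stands in for CONFIG_GRKERNSEC_PROC_USER):

#include <stdio.h>

#define GRSEC_PROC_USER 1	/* flip the options to see the other modes */

static unsigned int effective_mode(unsigned int requested)
{
#if GRSEC_PROC_USER
	return 0400;		/* S_IRUSR: owner read only, request ignored */
#elif GRSEC_PROC_USERGROUP
	return 0440;		/* S_IRUSR | S_IRGRP: owner + group read */
#else
	return requested;	/* default: honour the caller */
#endif
}

int main(void)
{
	printf("requested 0644 -> effective %o\n", effective_mode(0644));
	return 0;
}
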
84964diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84965index 42dfc61..8113a99 100644
84966--- a/include/linux/proc_ns.h
84967+++ b/include/linux/proc_ns.h
84968@@ -16,7 +16,7 @@ struct proc_ns_operations {
84969 struct ns_common *(*get)(struct task_struct *task);
84970 void (*put)(struct ns_common *ns);
84971 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84972-};
84973+} __do_const __randomize_layout;
84974
84975 extern const struct proc_ns_operations netns_operations;
84976 extern const struct proc_ns_operations utsns_operations;
84977diff --git a/include/linux/quota.h b/include/linux/quota.h
84978index d534e8e..782e604 100644
84979--- a/include/linux/quota.h
84980+++ b/include/linux/quota.h
84981@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84982
84983 extern bool qid_eq(struct kqid left, struct kqid right);
84984 extern bool qid_lt(struct kqid left, struct kqid right);
84985-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84986+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84987 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84988 extern bool qid_valid(struct kqid qid);
84989
84990diff --git a/include/linux/random.h b/include/linux/random.h
84991index b05856e..0a9f14e 100644
84992--- a/include/linux/random.h
84993+++ b/include/linux/random.h
84994@@ -9,9 +9,19 @@
84995 #include <uapi/linux/random.h>
84996
84997 extern void add_device_randomness(const void *, unsigned int);
84998+
84999+static inline void add_latent_entropy(void)
85000+{
85001+
85002+#ifdef LATENT_ENTROPY_PLUGIN
85003+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
85004+#endif
85005+
85006+}
85007+
85008 extern void add_input_randomness(unsigned int type, unsigned int code,
85009- unsigned int value);
85010-extern void add_interrupt_randomness(int irq, int irq_flags);
85011+ unsigned int value) __latent_entropy;
85012+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
85013
85014 extern void get_random_bytes(void *buf, int nbytes);
85015 extern void get_random_bytes_arch(void *buf, int nbytes);
85016@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
85017 extern const struct file_operations random_fops, urandom_fops;
85018 #endif
85019
85020-unsigned int get_random_int(void);
85021+unsigned int __intentional_overflow(-1) get_random_int(void);
85022 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
85023
85024-u32 prandom_u32(void);
85025+u32 prandom_u32(void) __intentional_overflow(-1);
85026 void prandom_bytes(void *buf, size_t nbytes);
85027 void prandom_seed(u32 seed);
85028 void prandom_reseed_late(void);
85029@@ -37,6 +47,11 @@ struct rnd_state {
85030 u32 prandom_u32_state(struct rnd_state *state);
85031 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
85032
85033+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
85034+{
85035+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
85036+}
85037+
85038 /**
85039 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
85040 * @ep_ro: right open interval endpoint
85041@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
85042 *
85043 * Returns: pseudo-random number in interval [0, ep_ro)
85044 */
85045-static inline u32 prandom_u32_max(u32 ep_ro)
85046+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
85047 {
85048 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
85049 }
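
pax_get_random_long() above splices two prandom_u32() draws into one long. The sketch below mirrors that expression with a xorshift32 stand-in generator (illustration only, not the kernel's PRNG; like the kernel expression, the shift assumes a 64-bit long):

#include <stdint.h>
#include <stdio.h>

static uint32_t state = 2463534242u;

static uint32_t prandom_u32_sketch(void)
{
	/* Marsaglia xorshift32, a stand-in for prandom_u32() */
	state ^= state << 13;
	state ^= state >> 17;
	state ^= state << 5;
	return state;
}

static unsigned long get_random_long_sketch(void)
{
	return prandom_u32_sketch() +
	       (sizeof(long) > 4 ? (unsigned long)prandom_u32_sketch() << 32 : 0);
}

int main(void)
{
	printf("0x%lx\n", get_random_long_sketch());
	return 0;
}
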
85050diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
85051index 378c5ee..aa84a47 100644
85052--- a/include/linux/rbtree_augmented.h
85053+++ b/include/linux/rbtree_augmented.h
85054@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
85055 old->rbaugmented = rbcompute(old); \
85056 } \
85057 rbstatic const struct rb_augment_callbacks rbname = { \
85058- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
85059+ .propagate = rbname ## _propagate, \
85060+ .copy = rbname ## _copy, \
85061+ .rotate = rbname ## _rotate \
85062 };
85063
85064
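
The rbtree_augmented.h hunk rewrites the rb_augment_callbacks initializer from positional to designated form. Designated initializers bind by field name, which keeps such tables correct if members are ever reordered, for instance under the __randomize_layout attribute this patch applies elsewhere. A sketch:

#include <stdio.h>

struct callbacks {
	void (*propagate)(int);
	void (*copy)(int);
	void (*rotate)(int);
};

static void my_propagate(int n) { printf("propagate %d\n", n); }
static void my_copy(int n)      { printf("copy %d\n", n); }
static void my_rotate(int n)    { printf("rotate %d\n", n); }

/* Binding by name stays correct no matter how the members are ordered. */
static const struct callbacks cb = {
	.propagate = my_propagate,
	.copy      = my_copy,
	.rotate    = my_rotate,
};

int main(void)
{
	cb.propagate(1);
	cb.rotate(2);
	return 0;
}
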
85065diff --git a/include/linux/rculist.h b/include/linux/rculist.h
85066index a18b16f..2683096 100644
85067--- a/include/linux/rculist.h
85068+++ b/include/linux/rculist.h
85069@@ -29,8 +29,8 @@
85070 */
85071 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
85072 {
85073- ACCESS_ONCE(list->next) = list;
85074- ACCESS_ONCE(list->prev) = list;
85075+ ACCESS_ONCE_RW(list->next) = list;
85076+ ACCESS_ONCE_RW(list->prev) = list;
85077 }
85078
85079 /*
85080@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
85081 struct list_head *prev, struct list_head *next);
85082 #endif
85083
85084+void __pax_list_add_rcu(struct list_head *new,
85085+ struct list_head *prev, struct list_head *next);
85086+
85087 /**
85088 * list_add_rcu - add a new entry to rcu-protected list
85089 * @new: new entry to be added
85090@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
85091 __list_add_rcu(new, head, head->next);
85092 }
85093
85094+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
85095+{
85096+ __pax_list_add_rcu(new, head, head->next);
85097+}
85098+
85099 /**
85100 * list_add_tail_rcu - add a new entry to rcu-protected list
85101 * @new: new entry to be added
85102@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
85103 __list_add_rcu(new, head->prev, head);
85104 }
85105
85106+static inline void pax_list_add_tail_rcu(struct list_head *new,
85107+ struct list_head *head)
85108+{
85109+ __pax_list_add_rcu(new, head->prev, head);
85110+}
85111+
85112 /**
85113 * list_del_rcu - deletes entry from list without re-initialization
85114 * @entry: the element to delete from the list.
85115@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
85116 entry->prev = LIST_POISON2;
85117 }
85118
85119+extern void pax_list_del_rcu(struct list_head *entry);
85120+
85121 /**
85122 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
85123 * @n: the element to delete from the hash list.
85124diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
85125index 7809749..1cd9315 100644
85126--- a/include/linux/rcupdate.h
85127+++ b/include/linux/rcupdate.h
85128@@ -333,7 +333,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
85129 do { \
85130 rcu_all_qs(); \
85131 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
85132- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
85133+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
85134 } while (0)
85135 #else /* #ifdef CONFIG_TASKS_RCU */
85136 #define TASKS_RCU(x) do { } while (0)
85137diff --git a/include/linux/reboot.h b/include/linux/reboot.h
85138index 67fc8fc..a90f7d8 100644
85139--- a/include/linux/reboot.h
85140+++ b/include/linux/reboot.h
85141@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
85142 */
85143
85144 extern void migrate_to_reboot_cpu(void);
85145-extern void machine_restart(char *cmd);
85146-extern void machine_halt(void);
85147-extern void machine_power_off(void);
85148+extern void machine_restart(char *cmd) __noreturn;
85149+extern void machine_halt(void) __noreturn;
85150+extern void machine_power_off(void) __noreturn;
85151
85152 extern void machine_shutdown(void);
85153 struct pt_regs;
85154@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
85155 */
85156
85157 extern void kernel_restart_prepare(char *cmd);
85158-extern void kernel_restart(char *cmd);
85159-extern void kernel_halt(void);
85160-extern void kernel_power_off(void);
85161+extern void kernel_restart(char *cmd) __noreturn;
85162+extern void kernel_halt(void) __noreturn;
85163+extern void kernel_power_off(void) __noreturn;
85164
85165 extern int C_A_D; /* for sysctl */
85166 void ctrl_alt_del(void);
85167@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
85168 * Emergency restart, callable from an interrupt handler.
85169 */
85170
85171-extern void emergency_restart(void);
85172+extern void emergency_restart(void) __noreturn;
85173 #include <asm/emergency-restart.h>
85174
85175 #endif /* _LINUX_REBOOT_H */
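
Tagging the machine_*() and kernel_*() shutdown entry points __noreturn lets the compiler (and the gcc plugins this patch relies on) prune the impossible fall-through paths after each call. The effect in ordinary C:

#include <stdio.h>
#include <stdlib.h>

/* in the kernel, __noreturn expands to this attribute */
__attribute__((noreturn))
static void die(const char *msg)
{
        fprintf(stderr, "fatal: %s\n", msg);
        exit(1);
}

static int checked_div(int a, int b)
{
        if (b == 0)
                die("division by zero");
        /* no return needed on the error path: the compiler knows
         * die() never comes back, so there is no "control reaches
         * end of non-void function" warning and no dead code */
        return a / b;
}

int main(void)
{
        printf("%d\n", checked_div(10, 2));
        return 0;
}
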
85176diff --git a/include/linux/regset.h b/include/linux/regset.h
85177index 8e0c9fe..ac4d221 100644
85178--- a/include/linux/regset.h
85179+++ b/include/linux/regset.h
85180@@ -161,7 +161,8 @@ struct user_regset {
85181 unsigned int align;
85182 unsigned int bias;
85183 unsigned int core_note_type;
85184-};
85185+} __do_const;
85186+typedef struct user_regset __no_const user_regset_no_const;
85187
85188 /**
85189 * struct user_regset_view - available regsets
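
The __do_const / __no_const pair on struct user_regset is the constify plugin's idiom: ops-like structs made up of function pointers become const everywhere by default, and the *_no_const typedef is the escape hatch for the handful of sites that must fill one in at runtime. What the pattern amounts to, sketched by hand with a hypothetical struct:

#include <stdio.h>

struct regset_sketch {
        int (*get)(void);
        unsigned int n;
};

/* the opt-out the typedef above provides: a writable alias for
 * call sites that build the structure dynamically */
typedef struct regset_sketch regset_sketch_no_const;

static int get_static(void) { return 42; }

static const struct regset_sketch fixed = {
        .get = get_static,
        .n   = 1,
};

int main(void)
{
        regset_sketch_no_const dynamic = fixed;   /* runtime copy    */

        dynamic.n = 2;                            /* legal: not const */
        printf("%d %u\n", dynamic.get(), dynamic.n);
        return 0;
}
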
85190diff --git a/include/linux/relay.h b/include/linux/relay.h
85191index d7c8359..818daf5 100644
85192--- a/include/linux/relay.h
85193+++ b/include/linux/relay.h
85194@@ -157,7 +157,7 @@ struct rchan_callbacks
85195 * The callback should return 0 if successful, negative if not.
85196 */
85197 int (*remove_buf_file)(struct dentry *dentry);
85198-};
85199+} __no_const;
85200
85201 /*
85202 * CONFIG_RELAY kernel API, kernel/relay.c
85203diff --git a/include/linux/rio.h b/include/linux/rio.h
85204index 6bda06f..bf39a9b 100644
85205--- a/include/linux/rio.h
85206+++ b/include/linux/rio.h
85207@@ -358,7 +358,7 @@ struct rio_ops {
85208 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
85209 u64 rstart, u32 size, u32 flags);
85210 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
85211-};
85212+} __no_const;
85213
85214 #define RIO_RESOURCE_MEM 0x00000100
85215 #define RIO_RESOURCE_DOORBELL 0x00000200
85216diff --git a/include/linux/rmap.h b/include/linux/rmap.h
85217index c4c559a..6ba9a26 100644
85218--- a/include/linux/rmap.h
85219+++ b/include/linux/rmap.h
85220@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
85221 void anon_vma_init(void); /* create anon_vma_cachep */
85222 int anon_vma_prepare(struct vm_area_struct *);
85223 void unlink_anon_vmas(struct vm_area_struct *);
85224-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
85225-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
85226+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
85227+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
85228
85229 static inline void anon_vma_merge(struct vm_area_struct *vma,
85230 struct vm_area_struct *next)
85231diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
85232index ed8f9e70..999bc96 100644
85233--- a/include/linux/scatterlist.h
85234+++ b/include/linux/scatterlist.h
85235@@ -1,6 +1,7 @@
85236 #ifndef _LINUX_SCATTERLIST_H
85237 #define _LINUX_SCATTERLIST_H
85238
85239+#include <linux/sched.h>
85240 #include <linux/string.h>
85241 #include <linux/bug.h>
85242 #include <linux/mm.h>
85243@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
85244 #ifdef CONFIG_DEBUG_SG
85245 BUG_ON(!virt_addr_valid(buf));
85246 #endif
85247+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85248+ if (object_starts_on_stack(buf)) {
85249+ void *adjbuf = buf - current->stack + current->lowmem_stack;
85250+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
85251+ } else
85252+#endif
85253 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
85254 }
85255
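
The sg_set_buf() hunk handles GRKERNSEC_KSTACKOVERFLOW, under which kernel stacks are vmalloc-backed and therefore outside what virt_to_page() can translate; a stack buffer is rebased onto the task's lowmem_stack alias before the scatterlist entry is built. The rebasing is plain pointer arithmetic, same offset against a different base:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        char stack_alias[64];    /* stands in for current->stack        */
        char lowmem_alias[64];   /* stands in for current->lowmem_stack */
        char *buf, *adjbuf;

        memset(stack_alias, 0, sizeof(stack_alias));
        stack_alias[10] = 'X';
        memcpy(lowmem_alias, stack_alias, sizeof(lowmem_alias));

        buf = &stack_alias[10];
        /* exactly the expression in the hunk:
         * adjbuf = buf - current->stack + current->lowmem_stack */
        adjbuf = buf - stack_alias + lowmem_alias;

        assert(*adjbuf == 'X');
        printf("offset preserved: %td\n", adjbuf - lowmem_alias);
        return 0;
}
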
85256diff --git a/include/linux/sched.h b/include/linux/sched.h
85257index a419b65..6dd8f3f 100644
85258--- a/include/linux/sched.h
85259+++ b/include/linux/sched.h
85260@@ -133,6 +133,7 @@ struct fs_struct;
85261 struct perf_event_context;
85262 struct blk_plug;
85263 struct filename;
85264+struct linux_binprm;
85265
85266 #define VMACACHE_BITS 2
85267 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
85268@@ -412,7 +413,7 @@ extern char __sched_text_start[], __sched_text_end[];
85269 extern int in_sched_functions(unsigned long addr);
85270
85271 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
85272-extern signed long schedule_timeout(signed long timeout);
85273+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
85274 extern signed long schedule_timeout_interruptible(signed long timeout);
85275 extern signed long schedule_timeout_killable(signed long timeout);
85276 extern signed long schedule_timeout_uninterruptible(signed long timeout);
85277@@ -430,6 +431,19 @@ struct nsproxy;
85278 struct user_namespace;
85279
85280 #ifdef CONFIG_MMU
85281+
85282+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
85283+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
85284+#else
85285+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
85286+{
85287+ return 0;
85288+}
85289+#endif
85290+
85291+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
85292+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
85293+
85294 extern void arch_pick_mmap_layout(struct mm_struct *mm);
85295 extern unsigned long
85296 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
85297@@ -728,6 +742,17 @@ struct signal_struct {
85298 #ifdef CONFIG_TASKSTATS
85299 struct taskstats *stats;
85300 #endif
85301+
85302+#ifdef CONFIG_GRKERNSEC
85303+ u32 curr_ip;
85304+ u32 saved_ip;
85305+ u32 gr_saddr;
85306+ u32 gr_daddr;
85307+ u16 gr_sport;
85308+ u16 gr_dport;
85309+ u8 used_accept:1;
85310+#endif
85311+
85312 #ifdef CONFIG_AUDIT
85313 unsigned audit_tty;
85314 unsigned audit_tty_log_passwd;
85315@@ -754,7 +779,7 @@ struct signal_struct {
85316 struct mutex cred_guard_mutex; /* guard against foreign influences on
85317 * credential calculations
85318 * (notably. ptrace) */
85319-};
85320+} __randomize_layout;
85321
85322 /*
85323 * Bits in flags field of signal_struct.
85324@@ -807,6 +832,14 @@ struct user_struct {
85325 struct key *session_keyring; /* UID's default session keyring */
85326 #endif
85327
85328+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
85329+ unsigned char kernel_banned;
85330+#endif
85331+#ifdef CONFIG_GRKERNSEC_BRUTE
85332+ unsigned char suid_banned;
85333+ unsigned long suid_ban_expires;
85334+#endif
85335+
85336 /* Hash table maintenance information */
85337 struct hlist_node uidhash_node;
85338 kuid_t uid;
85339@@ -814,7 +847,7 @@ struct user_struct {
85340 #ifdef CONFIG_PERF_EVENTS
85341 atomic_long_t locked_vm;
85342 #endif
85343-};
85344+} __randomize_layout;
85345
85346 extern int uids_sysfs_init(void);
85347
85348@@ -1278,6 +1311,9 @@ enum perf_event_task_context {
85349 struct task_struct {
85350 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
85351 void *stack;
85352+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85353+ void *lowmem_stack;
85354+#endif
85355 atomic_t usage;
85356 unsigned int flags; /* per process flags, defined below */
85357 unsigned int ptrace;
85358@@ -1411,8 +1447,8 @@ struct task_struct {
85359 struct list_head thread_node;
85360
85361 struct completion *vfork_done; /* for vfork() */
85362- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
85363- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85364+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
85365+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85366
85367 cputime_t utime, stime, utimescaled, stimescaled;
85368 cputime_t gtime;
85369@@ -1437,11 +1473,6 @@ struct task_struct {
85370 struct task_cputime cputime_expires;
85371 struct list_head cpu_timers[3];
85372
85373-/* process credentials */
85374- const struct cred __rcu *real_cred; /* objective and real subjective task
85375- * credentials (COW) */
85376- const struct cred __rcu *cred; /* effective (overridable) subjective task
85377- * credentials (COW) */
85378 char comm[TASK_COMM_LEN]; /* executable name excluding path
85379 - access with [gs]et_task_comm (which lock
85380 it with task_lock())
85381@@ -1459,6 +1490,10 @@ struct task_struct {
85382 #endif
85383 /* CPU-specific state of this task */
85384 struct thread_struct thread;
85385+/* thread_info moved to task_struct */
85386+#ifdef CONFIG_X86
85387+ struct thread_info tinfo;
85388+#endif
85389 /* filesystem information */
85390 struct fs_struct *fs;
85391 /* open file information */
85392@@ -1533,6 +1568,10 @@ struct task_struct {
85393 gfp_t lockdep_reclaim_gfp;
85394 #endif
85395
85396+/* process credentials */
85397+ const struct cred __rcu *real_cred; /* objective and real subjective task
85398+ * credentials (COW) */
85399+
85400 /* journalling filesystem info */
85401 void *journal_info;
85402
85403@@ -1571,6 +1610,10 @@ struct task_struct {
85404 /* cg_list protected by css_set_lock and tsk->alloc_lock */
85405 struct list_head cg_list;
85406 #endif
85407+
85408+ const struct cred __rcu *cred; /* effective (overridable) subjective task
85409+ * credentials (COW) */
85410+
85411 #ifdef CONFIG_FUTEX
85412 struct robust_list_head __user *robust_list;
85413 #ifdef CONFIG_COMPAT
85414@@ -1682,7 +1725,7 @@ struct task_struct {
85415 * Number of functions that haven't been traced
85416 * because of depth overrun.
85417 */
85418- atomic_t trace_overrun;
85419+ atomic_unchecked_t trace_overrun;
85420 /* Pause for the tracing */
85421 atomic_t tracing_graph_pause;
85422 #endif
85423@@ -1710,7 +1753,78 @@ struct task_struct {
85424 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
85425 unsigned long task_state_change;
85426 #endif
85427-};
85428+
85429+#ifdef CONFIG_GRKERNSEC
85430+ /* grsecurity */
85431+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85432+ u64 exec_id;
85433+#endif
85434+#ifdef CONFIG_GRKERNSEC_SETXID
85435+ const struct cred *delayed_cred;
85436+#endif
85437+ struct dentry *gr_chroot_dentry;
85438+ struct acl_subject_label *acl;
85439+ struct acl_subject_label *tmpacl;
85440+ struct acl_role_label *role;
85441+ struct file *exec_file;
85442+ unsigned long brute_expires;
85443+ u16 acl_role_id;
85444+ u8 inherited;
85445+ /* is this the task that authenticated to the special role */
85446+ u8 acl_sp_role;
85447+ u8 is_writable;
85448+ u8 brute;
85449+ u8 gr_is_chrooted;
85450+#endif
85451+
85452+} __randomize_layout;
85453+
85454+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
85455+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
85456+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
85457+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
85458+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
85459+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
85460+
85461+#ifdef CONFIG_PAX_SOFTMODE
85462+extern int pax_softmode;
85463+#endif
85464+
85465+extern int pax_check_flags(unsigned long *);
85466+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
85467+
85468+/* if tsk != current then task_lock must be held on it */
85469+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85470+static inline unsigned long pax_get_flags(struct task_struct *tsk)
85471+{
85472+ if (likely(tsk->mm))
85473+ return tsk->mm->pax_flags;
85474+ else
85475+ return 0UL;
85476+}
85477+
85478+/* if tsk != current then task_lock must be held on it */
85479+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
85480+{
85481+ if (likely(tsk->mm)) {
85482+ tsk->mm->pax_flags = flags;
85483+ return 0;
85484+ }
85485+ return -EINVAL;
85486+}
85487+#endif
85488+
85489+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
85490+extern void pax_set_initial_flags(struct linux_binprm *bprm);
85491+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
85492+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
85493+#endif
85494+
85495+struct path;
85496+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
85497+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
85498+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
85499+extern void pax_report_refcount_overflow(struct pt_regs *regs);
85500
85501 /* Future-safe accessor for struct task_struct's cpus_allowed. */
85502 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
85503@@ -1793,7 +1907,7 @@ struct pid_namespace;
85504 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
85505 struct pid_namespace *ns);
85506
85507-static inline pid_t task_pid_nr(struct task_struct *tsk)
85508+static inline pid_t task_pid_nr(const struct task_struct *tsk)
85509 {
85510 return tsk->pid;
85511 }
85512@@ -2161,6 +2275,25 @@ extern u64 sched_clock_cpu(int cpu);
85513
85514 extern void sched_clock_init(void);
85515
85516+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85517+static inline void populate_stack(void)
85518+{
85519+ struct task_struct *curtask = current;
85520+ int c;
85521+ int *ptr = curtask->stack;
85522+ int *end = curtask->stack + THREAD_SIZE;
85523+
85524+ while (ptr < end) {
85525+ c = *(volatile int *)ptr;
85526+ ptr += PAGE_SIZE/sizeof(int);
85527+ }
85528+}
85529+#else
85530+static inline void populate_stack(void)
85531+{
85532+}
85533+#endif
85534+
85535 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85536 static inline void sched_clock_tick(void)
85537 {
85538@@ -2294,7 +2427,9 @@ void yield(void);
85539 extern struct exec_domain default_exec_domain;
85540
85541 union thread_union {
85542+#ifndef CONFIG_X86
85543 struct thread_info thread_info;
85544+#endif
85545 unsigned long stack[THREAD_SIZE/sizeof(long)];
85546 };
85547
85548@@ -2327,6 +2462,7 @@ extern struct pid_namespace init_pid_ns;
85549 */
85550
85551 extern struct task_struct *find_task_by_vpid(pid_t nr);
85552+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85553 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85554 struct pid_namespace *ns);
85555
85556@@ -2491,7 +2627,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85557 extern void exit_itimers(struct signal_struct *);
85558 extern void flush_itimer_signals(void);
85559
85560-extern void do_group_exit(int);
85561+extern __noreturn void do_group_exit(int);
85562
85563 extern int do_execve(struct filename *,
85564 const char __user * const __user *,
85565@@ -2712,9 +2848,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85566 #define task_stack_end_corrupted(task) \
85567 (*(end_of_stack(task)) != STACK_END_MAGIC)
85568
85569-static inline int object_is_on_stack(void *obj)
85570+static inline int object_starts_on_stack(const void *obj)
85571 {
85572- void *stack = task_stack_page(current);
85573+ const void *stack = task_stack_page(current);
85574
85575 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85576 }
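
Worth pausing on populate_stack() in the sched.h hunk: it walks the stack one page at a time, doing a single volatile read per page, so that every page of a freshly allocated (possibly lazily mapped) stack is faulted in before the task depends on it. The same pattern in userland:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096UL

static void populate(void *base, size_t size)
{
        int *ptr = base;
        int *end = (int *)((char *)base + size);

        /* one volatile read per page, exactly as populate_stack()
         * does; the volatile cast stops the compiler from deleting
         * the otherwise unused loads */
        while (ptr < end) {
                (void)*(volatile int *)ptr;
                ptr += PAGE_SZ / sizeof(int);
        }
}

int main(void)
{
        size_t size = 4 * PAGE_SZ;
        void *region = calloc(1, size);

        if (!region)
                return 1;
        populate(region, size);
        puts("region touched page by page");
        free(region);
        return 0;
}
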
85577diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85578index 596a0e0..bea77ec 100644
85579--- a/include/linux/sched/sysctl.h
85580+++ b/include/linux/sched/sysctl.h
85581@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85582 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85583
85584 extern int sysctl_max_map_count;
85585+extern unsigned long sysctl_heap_stack_gap;
85586
85587 extern unsigned int sysctl_sched_latency;
85588 extern unsigned int sysctl_sched_min_granularity;
85589diff --git a/include/linux/security.h b/include/linux/security.h
85590index a1b7dbd..036f47f 100644
85591--- a/include/linux/security.h
85592+++ b/include/linux/security.h
85593@@ -27,6 +27,7 @@
85594 #include <linux/slab.h>
85595 #include <linux/err.h>
85596 #include <linux/string.h>
85597+#include <linux/grsecurity.h>
85598
85599 struct linux_binprm;
85600 struct cred;
85601@@ -116,8 +117,6 @@ struct seq_file;
85602
85603 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85604
85605-void reset_security_ops(void);
85606-
85607 #ifdef CONFIG_MMU
85608 extern unsigned long mmap_min_addr;
85609 extern unsigned long dac_mmap_min_addr;
85610@@ -1756,7 +1755,7 @@ struct security_operations {
85611 struct audit_context *actx);
85612 void (*audit_rule_free) (void *lsmrule);
85613 #endif /* CONFIG_AUDIT */
85614-};
85615+} __randomize_layout;
85616
85617 /* prototypes */
85618 extern int security_init(void);
85619diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85620index dc368b8..e895209 100644
85621--- a/include/linux/semaphore.h
85622+++ b/include/linux/semaphore.h
85623@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85624 }
85625
85626 extern void down(struct semaphore *sem);
85627-extern int __must_check down_interruptible(struct semaphore *sem);
85628+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85629 extern int __must_check down_killable(struct semaphore *sem);
85630 extern int __must_check down_trylock(struct semaphore *sem);
85631 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85632diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85633index afbb1fd..e1d205d 100644
85634--- a/include/linux/seq_file.h
85635+++ b/include/linux/seq_file.h
85636@@ -27,6 +27,9 @@ struct seq_file {
85637 struct mutex lock;
85638 const struct seq_operations *op;
85639 int poll_event;
85640+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85641+ u64 exec_id;
85642+#endif
85643 #ifdef CONFIG_USER_NS
85644 struct user_namespace *user_ns;
85645 #endif
85646@@ -39,6 +42,7 @@ struct seq_operations {
85647 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85648 int (*show) (struct seq_file *m, void *v);
85649 };
85650+typedef struct seq_operations __no_const seq_operations_no_const;
85651
85652 #define SEQ_SKIP 1
85653
85654@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
85655
85656 char *mangle_path(char *s, const char *p, const char *esc);
85657 int seq_open(struct file *, const struct seq_operations *);
85658+int seq_open_restrict(struct file *, const struct seq_operations *);
85659 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85660 loff_t seq_lseek(struct file *, loff_t, int);
85661 int seq_release(struct inode *, struct file *);
85662@@ -128,6 +133,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
85663 const struct path *root, const char *esc);
85664
85665 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85666+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85667 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85668 int single_release(struct inode *, struct file *);
85669 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85670diff --git a/include/linux/shm.h b/include/linux/shm.h
85671index 6fb8016..ab4465e 100644
85672--- a/include/linux/shm.h
85673+++ b/include/linux/shm.h
85674@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85675 /* The task created the shm object. NULL if the task is dead. */
85676 struct task_struct *shm_creator;
85677 struct list_head shm_clist; /* list by creator */
85678+#ifdef CONFIG_GRKERNSEC
85679+ u64 shm_createtime;
85680+ pid_t shm_lapid;
85681+#endif
85682 };
85683
85684 /* shm_mode upper byte flags */
85685diff --git a/include/linux/signal.h b/include/linux/signal.h
85686index ab1e039..ad4229e 100644
85687--- a/include/linux/signal.h
85688+++ b/include/linux/signal.h
85689@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85690 * know it'll be handled, so that they don't get converted to
85691 * SIGKILL or just silently dropped.
85692 */
85693- kernel_sigaction(sig, (__force __sighandler_t)2);
85694+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85695 }
85696
85697 static inline void disallow_signal(int sig)
85698diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85699index f54d665..e41848d 100644
85700--- a/include/linux/skbuff.h
85701+++ b/include/linux/skbuff.h
85702@@ -770,7 +770,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85703 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85704 int node);
85705 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85706-static inline struct sk_buff *alloc_skb(unsigned int size,
85707+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85708 gfp_t priority)
85709 {
85710 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85711@@ -1966,7 +1966,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85712 return skb->inner_transport_header - skb->inner_network_header;
85713 }
85714
85715-static inline int skb_network_offset(const struct sk_buff *skb)
85716+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85717 {
85718 return skb_network_header(skb) - skb->data;
85719 }
85720@@ -2026,7 +2026,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85721 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85722 */
85723 #ifndef NET_SKB_PAD
85724-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85725+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85726 #endif
85727
85728 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85729@@ -2668,9 +2668,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85730 int *err);
85731 unsigned int datagram_poll(struct file *file, struct socket *sock,
85732 struct poll_table_struct *wait);
85733-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85734+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85735 struct iov_iter *to, int size);
85736-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85737+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85738 struct msghdr *msg, int size)
85739 {
85740 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85741@@ -3180,6 +3180,9 @@ static inline void nf_reset(struct sk_buff *skb)
85742 nf_bridge_put(skb->nf_bridge);
85743 skb->nf_bridge = NULL;
85744 #endif
85745+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85746+ skb->nf_trace = 0;
85747+#endif
85748 }
85749
85750 static inline void nf_reset_trace(struct sk_buff *skb)
85751diff --git a/include/linux/slab.h b/include/linux/slab.h
85752index 76f1fee..d95e6d2 100644
85753--- a/include/linux/slab.h
85754+++ b/include/linux/slab.h
85755@@ -14,15 +14,29 @@
85756 #include <linux/gfp.h>
85757 #include <linux/types.h>
85758 #include <linux/workqueue.h>
85759-
85760+#include <linux/err.h>
85761
85762 /*
85763 * Flags to pass to kmem_cache_create().
85764 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85765 */
85766 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85767+
85768+#ifdef CONFIG_PAX_USERCOPY_SLABS
85769+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85770+#else
85771+#define SLAB_USERCOPY 0x00000000UL
85772+#endif
85773+
85774 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85775 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85776+
85777+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85778+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85779+#else
85780+#define SLAB_NO_SANITIZE 0x00000000UL
85781+#endif
85782+
85783 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85784 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85785 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85786@@ -98,10 +112,13 @@
85787 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85788 * Both make kfree a no-op.
85789 */
85790-#define ZERO_SIZE_PTR ((void *)16)
85791+#define ZERO_SIZE_PTR \
85792+({ \
85793+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85794+ (void *)(-MAX_ERRNO-1L); \
85795+})
85796
85797-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85798- (unsigned long)ZERO_SIZE_PTR)
85799+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85800
85801 #include <linux/kmemleak.h>
85802 #include <linux/kasan.h>
85803@@ -143,6 +160,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85804 void kfree(const void *);
85805 void kzfree(const void *);
85806 size_t ksize(const void *);
85807+const char *check_heap_object(const void *ptr, unsigned long n);
85808+bool is_usercopy_object(const void *ptr);
85809
85810 /*
85811 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85812@@ -235,6 +254,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85813 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85814 #endif
85815
85816+#ifdef CONFIG_PAX_USERCOPY_SLABS
85817+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85818+#endif
85819+
85820 /*
85821 * Figure out which kmalloc slab an allocation of a certain size
85822 * belongs to.
85823@@ -243,7 +266,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85824 * 2 = 120 .. 192 bytes
85825 * n = 2^(n-1) .. 2^n -1
85826 */
85827-static __always_inline int kmalloc_index(size_t size)
85828+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85829 {
85830 if (!size)
85831 return 0;
85832@@ -286,15 +309,15 @@ static __always_inline int kmalloc_index(size_t size)
85833 }
85834 #endif /* !CONFIG_SLOB */
85835
85836-void *__kmalloc(size_t size, gfp_t flags);
85837+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85838 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85839 void kmem_cache_free(struct kmem_cache *, void *);
85840
85841 #ifdef CONFIG_NUMA
85842-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85843+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85844 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85845 #else
85846-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85847+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85848 {
85849 return __kmalloc(size, flags);
85850 }
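
Two related tricks in the slab.h hunk deserve a note. ZERO_SIZE_PTR moves from (void *)16 to (void *)(-MAX_ERRNO-1), i.e. just below the ERR_PTR range at the very top of the address space, keeping the sentinel out of the low, potentially mappable region. ZERO_OR_NULL_PTR() then becomes a single unsigned comparison: subtracting 1 makes NULL wrap to ULONG_MAX, so one >= test catches both NULL and the sentinel. Worked through:

#include <stdio.h>

#define MAX_ERRNO_SKETCH     4095L
#define ZERO_SIZE_PTR_SKETCH ((void *)(-MAX_ERRNO_SKETCH - 1L))

/* one unsigned compare covers both cases:
 *   x == NULL           -> 0UL - 1 wraps to ULONG_MAX   (>= threshold)
 *   x == ZERO_SIZE_PTR  -> exactly the threshold        (>= threshold)
 *   ordinary pointer    -> far below the threshold      (check fails) */
#define ZERO_OR_NULL_PTR_SKETCH(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR_SKETCH - 1)

int main(void)
{
        int obj;

        printf("NULL          -> %d\n", ZERO_OR_NULL_PTR_SKETCH(NULL));
        printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR_SKETCH(ZERO_SIZE_PTR_SKETCH));
        printf("&obj          -> %d\n", ZERO_OR_NULL_PTR_SKETCH(&obj));
        return 0;
}
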
85851diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85852index 33d0490..70a6313 100644
85853--- a/include/linux/slab_def.h
85854+++ b/include/linux/slab_def.h
85855@@ -40,7 +40,7 @@ struct kmem_cache {
85856 /* 4) cache creation/removal */
85857 const char *name;
85858 struct list_head list;
85859- int refcount;
85860+ atomic_t refcount;
85861 int object_size;
85862 int align;
85863
85864@@ -56,10 +56,14 @@ struct kmem_cache {
85865 unsigned long node_allocs;
85866 unsigned long node_frees;
85867 unsigned long node_overflow;
85868- atomic_t allochit;
85869- atomic_t allocmiss;
85870- atomic_t freehit;
85871- atomic_t freemiss;
85872+ atomic_unchecked_t allochit;
85873+ atomic_unchecked_t allocmiss;
85874+ atomic_unchecked_t freehit;
85875+ atomic_unchecked_t freemiss;
85876+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85877+ atomic_unchecked_t sanitized;
85878+ atomic_unchecked_t not_sanitized;
85879+#endif
85880
85881 /*
85882 * If debugging is enabled, then the allocator can add additional
85883diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85884index 3388511..6252f90 100644
85885--- a/include/linux/slub_def.h
85886+++ b/include/linux/slub_def.h
85887@@ -74,7 +74,7 @@ struct kmem_cache {
85888 struct kmem_cache_order_objects max;
85889 struct kmem_cache_order_objects min;
85890 gfp_t allocflags; /* gfp flags to use on each alloc */
85891- int refcount; /* Refcount for slab cache destroy */
85892+ atomic_t refcount; /* Refcount for slab cache destroy */
85893 void (*ctor)(void *);
85894 int inuse; /* Offset to metadata */
85895 int align; /* Alignment */
85896diff --git a/include/linux/smp.h b/include/linux/smp.h
85897index be91db2..3f23232 100644
85898--- a/include/linux/smp.h
85899+++ b/include/linux/smp.h
85900@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
85901 #endif
85902
85903 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85904+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85905 #define put_cpu() preempt_enable()
85906+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85907
85908 /*
85909 * Callback to arch code if there's nosmp or maxcpus=0 on the
85910diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85911index 46cca4c..3323536 100644
85912--- a/include/linux/sock_diag.h
85913+++ b/include/linux/sock_diag.h
85914@@ -11,7 +11,7 @@ struct sock;
85915 struct sock_diag_handler {
85916 __u8 family;
85917 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85918-};
85919+} __do_const;
85920
85921 int sock_diag_register(const struct sock_diag_handler *h);
85922 void sock_diag_unregister(const struct sock_diag_handler *h);
85923diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85924index 680f9a3..f13aeb0 100644
85925--- a/include/linux/sonet.h
85926+++ b/include/linux/sonet.h
85927@@ -7,7 +7,7 @@
85928 #include <uapi/linux/sonet.h>
85929
85930 struct k_sonet_stats {
85931-#define __HANDLE_ITEM(i) atomic_t i
85932+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85933 __SONET_ITEMS
85934 #undef __HANDLE_ITEM
85935 };
85936diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85937index 07d8e53..dc934c9 100644
85938--- a/include/linux/sunrpc/addr.h
85939+++ b/include/linux/sunrpc/addr.h
85940@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85941 {
85942 switch (sap->sa_family) {
85943 case AF_INET:
85944- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85945+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85946 case AF_INET6:
85947- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85948+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85949 }
85950 return 0;
85951 }
85952@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85953 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85954 const struct sockaddr *src)
85955 {
85956- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85957+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85958 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85959
85960 dsin->sin_family = ssin->sin_family;
85961@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85962 if (sa->sa_family != AF_INET6)
85963 return 0;
85964
85965- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85966+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85967 }
85968
85969 #endif /* _LINUX_SUNRPC_ADDR_H */
85970diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85971index 598ba80..d90cba6 100644
85972--- a/include/linux/sunrpc/clnt.h
85973+++ b/include/linux/sunrpc/clnt.h
85974@@ -100,7 +100,7 @@ struct rpc_procinfo {
85975 unsigned int p_timer; /* Which RTT timer to use */
85976 u32 p_statidx; /* Which procedure to account */
85977 const char * p_name; /* name of procedure */
85978-};
85979+} __do_const;
85980
85981 #ifdef __KERNEL__
85982
85983diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85984index fae6fb9..023fbcd 100644
85985--- a/include/linux/sunrpc/svc.h
85986+++ b/include/linux/sunrpc/svc.h
85987@@ -420,7 +420,7 @@ struct svc_procedure {
85988 unsigned int pc_count; /* call count */
85989 unsigned int pc_cachetype; /* cache info (NFS) */
85990 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85991-};
85992+} __do_const;
85993
85994 /*
85995 * Function prototypes.
85996diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85997index df8edf8..d140fec 100644
85998--- a/include/linux/sunrpc/svc_rdma.h
85999+++ b/include/linux/sunrpc/svc_rdma.h
86000@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
86001 extern unsigned int svcrdma_max_requests;
86002 extern unsigned int svcrdma_max_req_size;
86003
86004-extern atomic_t rdma_stat_recv;
86005-extern atomic_t rdma_stat_read;
86006-extern atomic_t rdma_stat_write;
86007-extern atomic_t rdma_stat_sq_starve;
86008-extern atomic_t rdma_stat_rq_starve;
86009-extern atomic_t rdma_stat_rq_poll;
86010-extern atomic_t rdma_stat_rq_prod;
86011-extern atomic_t rdma_stat_sq_poll;
86012-extern atomic_t rdma_stat_sq_prod;
86013+extern atomic_unchecked_t rdma_stat_recv;
86014+extern atomic_unchecked_t rdma_stat_read;
86015+extern atomic_unchecked_t rdma_stat_write;
86016+extern atomic_unchecked_t rdma_stat_sq_starve;
86017+extern atomic_unchecked_t rdma_stat_rq_starve;
86018+extern atomic_unchecked_t rdma_stat_rq_poll;
86019+extern atomic_unchecked_t rdma_stat_rq_prod;
86020+extern atomic_unchecked_t rdma_stat_sq_poll;
86021+extern atomic_unchecked_t rdma_stat_sq_prod;
86022
86023 /*
86024 * Contexts are built when an RDMA request is created and are a
86025diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
86026index 8d71d65..f79586e 100644
86027--- a/include/linux/sunrpc/svcauth.h
86028+++ b/include/linux/sunrpc/svcauth.h
86029@@ -120,7 +120,7 @@ struct auth_ops {
86030 int (*release)(struct svc_rqst *rq);
86031 void (*domain_release)(struct auth_domain *);
86032 int (*set_client)(struct svc_rqst *rq);
86033-};
86034+} __do_const;
86035
86036 #define SVC_GARBAGE 1
86037 #define SVC_SYSERR 2
86038diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
86039index e7a018e..49f8b17 100644
86040--- a/include/linux/swiotlb.h
86041+++ b/include/linux/swiotlb.h
86042@@ -60,7 +60,8 @@ extern void
86043
86044 extern void
86045 swiotlb_free_coherent(struct device *hwdev, size_t size,
86046- void *vaddr, dma_addr_t dma_handle);
86047+ void *vaddr, dma_addr_t dma_handle,
86048+ struct dma_attrs *attrs);
86049
86050 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
86051 unsigned long offset, size_t size,
86052diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
86053index 76d1e38..d92ff38 100644
86054--- a/include/linux/syscalls.h
86055+++ b/include/linux/syscalls.h
86056@@ -102,7 +102,12 @@ union bpf_attr;
86057 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
86058 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
86059 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
86060-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
86061+#define __SC_LONG(t, a) __typeof__( \
86062+ __builtin_choose_expr( \
86063+ sizeof(t) > sizeof(int), \
86064+ (t) 0, \
86065+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
86066+ )) a
86067 #define __SC_CAST(t, a) (t) a
86068 #define __SC_ARGS(t, a) a
86069 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
86070@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
86071 asmlinkage long sys_fsync(unsigned int fd);
86072 asmlinkage long sys_fdatasync(unsigned int fd);
86073 asmlinkage long sys_bdflush(int func, long data);
86074-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
86075- char __user *type, unsigned long flags,
86076+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
86077+ const char __user *type, unsigned long flags,
86078 void __user *data);
86079-asmlinkage long sys_umount(char __user *name, int flags);
86080-asmlinkage long sys_oldumount(char __user *name);
86081+asmlinkage long sys_umount(const char __user *name, int flags);
86082+asmlinkage long sys_oldumount(const char __user *name);
86083 asmlinkage long sys_truncate(const char __user *path, long length);
86084 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
86085 asmlinkage long sys_stat(const char __user *filename,
86086@@ -604,7 +609,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
86087 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
86088 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
86089 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
86090- struct sockaddr __user *, int);
86091+ struct sockaddr __user *, int) __intentional_overflow(0);
86092 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
86093 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
86094 unsigned int vlen, unsigned flags);
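
The __SC_LONG rewrite changes how syscall arguments are widened for the wrapper layer: the stock macro widens everything to a signed (long) long, sign-extending unsigned values in a way the size_overflow plugin then flags; the replacement keeps arguments wider than int as their own type and widens narrower ones to a long of matching signedness (__type_is_unsigned is supplied elsewhere in this patch). The selection logic, reduced to a standalone sketch using the same GCC extensions:

#include <stdio.h>

/* integer-constant-expression signedness test, usable inside
 * __builtin_choose_expr */
#define IS_UNSIGNED(t) ((t)0 - 1 > 0)

/* the widened type an argument of type t should get */
#define SC_LONG_SKETCH(t)                                           \
        __typeof__(__builtin_choose_expr(sizeof(t) > sizeof(int),   \
                (t)0,                                               \
                __builtin_choose_expr(IS_UNSIGNED(t), 0UL, 0L)))

int main(void)
{
        SC_LONG_SKETCH(unsigned int) u = 0;
        SC_LONG_SKETCH(int) s = 0;

        u -= 1;         /* widened type stayed unsigned: wraps high  */
        s -= 1;         /* widened type stayed signed: goes negative */
        printf("unsigned stays unsigned: %d\n", u > 0);
        printf("signed stays signed:     %d\n", s < 0);
        return 0;
}
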
86095diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
86096index 27b3b0b..e093dd9 100644
86097--- a/include/linux/syscore_ops.h
86098+++ b/include/linux/syscore_ops.h
86099@@ -16,7 +16,7 @@ struct syscore_ops {
86100 int (*suspend)(void);
86101 void (*resume)(void);
86102 void (*shutdown)(void);
86103-};
86104+} __do_const;
86105
86106 extern void register_syscore_ops(struct syscore_ops *ops);
86107 extern void unregister_syscore_ops(struct syscore_ops *ops);
86108diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
86109index b7361f8..341a15a 100644
86110--- a/include/linux/sysctl.h
86111+++ b/include/linux/sysctl.h
86112@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
86113
86114 extern int proc_dostring(struct ctl_table *, int,
86115 void __user *, size_t *, loff_t *);
86116+extern int proc_dostring_modpriv(struct ctl_table *, int,
86117+ void __user *, size_t *, loff_t *);
86118 extern int proc_dointvec(struct ctl_table *, int,
86119 void __user *, size_t *, loff_t *);
86120 extern int proc_dointvec_minmax(struct ctl_table *, int,
86121@@ -113,7 +115,8 @@ struct ctl_table
86122 struct ctl_table_poll *poll;
86123 void *extra1;
86124 void *extra2;
86125-};
86126+} __do_const __randomize_layout;
86127+typedef struct ctl_table __no_const ctl_table_no_const;
86128
86129 struct ctl_node {
86130 struct rb_node node;
86131diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
86132index ddad161..a3efd26 100644
86133--- a/include/linux/sysfs.h
86134+++ b/include/linux/sysfs.h
86135@@ -34,7 +34,8 @@ struct attribute {
86136 struct lock_class_key *key;
86137 struct lock_class_key skey;
86138 #endif
86139-};
86140+} __do_const;
86141+typedef struct attribute __no_const attribute_no_const;
86142
86143 /**
86144 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
86145@@ -63,7 +64,8 @@ struct attribute_group {
86146 struct attribute *, int);
86147 struct attribute **attrs;
86148 struct bin_attribute **bin_attrs;
86149-};
86150+} __do_const;
86151+typedef struct attribute_group __no_const attribute_group_no_const;
86152
86153 /**
86154 * Use these macros to make defining attributes easier. See include/linux/device.h
86155@@ -137,7 +139,8 @@ struct bin_attribute {
86156 char *, loff_t, size_t);
86157 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
86158 struct vm_area_struct *vma);
86159-};
86160+} __do_const;
86161+typedef struct bin_attribute __no_const bin_attribute_no_const;
86162
86163 /**
86164 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
86165diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
86166index 387fa7d..3fcde6b 100644
86167--- a/include/linux/sysrq.h
86168+++ b/include/linux/sysrq.h
86169@@ -16,6 +16,7 @@
86170
86171 #include <linux/errno.h>
86172 #include <linux/types.h>
86173+#include <linux/compiler.h>
86174
86175 /* Possible values of bitmask for enabling sysrq functions */
86176 /* 0x0001 is reserved for enable everything */
86177@@ -33,7 +34,7 @@ struct sysrq_key_op {
86178 char *help_msg;
86179 char *action_msg;
86180 int enable_mask;
86181-};
86182+} __do_const;
86183
86184 #ifdef CONFIG_MAGIC_SYSRQ
86185
86186diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
86187index ff307b5..f1a4468 100644
86188--- a/include/linux/thread_info.h
86189+++ b/include/linux/thread_info.h
86190@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
86191 #error "no set_restore_sigmask() provided and default one won't work"
86192 #endif
86193
86194+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
86195+
86196+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
86197+{
86198+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
86199+}
86200+
86201 #endif /* __KERNEL__ */
86202
86203 #endif /* _LINUX_THREAD_INFO_H */
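
check_object_size() is the PAX_USERCOPY entry point: user-copy helpers route through it, and the __builtin_constant_p(n) argument tells __check_object_size() whether the copy length was a compile-time constant, presumably so constant and runtime lengths can be policed differently. The forwarding shape, with a stub standing in for the real __check_object_size():

#include <stdbool.h>
#include <stdio.h>

/* stub for __check_object_size(): just reports what it was told */
static void check_sketch(const void *ptr, unsigned long n,
                         bool to_user, bool const_size)
{
        (void)ptr; (void)to_user;
        printf("copy of %lu byte(s), %s length\n",
               n, const_size ? "compile-time" : "runtime");
}

#define check_object_size_sketch(ptr, n, to_user) \
        check_sketch((ptr), (n), (to_user), __builtin_constant_p(n))

int main(int argc, char **argv)
{
        char buf[32];

        (void)argv;
        check_object_size_sketch(buf, sizeof(buf), true);          /* constant */
        check_object_size_sketch(buf, (unsigned long)argc, true);  /* runtime  */
        return 0;
}
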
86204diff --git a/include/linux/tty.h b/include/linux/tty.h
86205index 358a337..8829c1f 100644
86206--- a/include/linux/tty.h
86207+++ b/include/linux/tty.h
86208@@ -225,7 +225,7 @@ struct tty_port {
86209 const struct tty_port_operations *ops; /* Port operations */
86210 spinlock_t lock; /* Lock protecting tty field */
86211 int blocked_open; /* Waiting to open */
86212- int count; /* Usage count */
86213+ atomic_t count; /* Usage count */
86214 wait_queue_head_t open_wait; /* Open waiters */
86215 wait_queue_head_t close_wait; /* Close waiters */
86216 wait_queue_head_t delta_msr_wait; /* Modem status change */
86217@@ -313,7 +313,7 @@ struct tty_struct {
86218 /* If the tty has a pending do_SAK, queue it here - akpm */
86219 struct work_struct SAK_work;
86220 struct tty_port *port;
86221-};
86222+} __randomize_layout;
86223
86224 /* Each of a tty's open files has private_data pointing to tty_file_private */
86225 struct tty_file_private {
86226@@ -572,7 +572,7 @@ extern int tty_port_open(struct tty_port *port,
86227 struct tty_struct *tty, struct file *filp);
86228 static inline int tty_port_users(struct tty_port *port)
86229 {
86230- return port->count + port->blocked_open;
86231+ return atomic_read(&port->count) + port->blocked_open;
86232 }
86233
86234 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
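
The tty_port conversion is the recurring counter hardening in this patch: a plain int usage count becomes an atomic_t, and every reader (tty_port_users() here) goes through atomic_read(), so concurrent open/close paths cannot lose updates. The same shape with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

struct port_sketch {
        atomic_int count;       /* was: int count */
        int blocked_open;
};

/* mirrors tty_port_users(): atomic_read(&port->count) + blocked_open */
static int port_users(struct port_sketch *p)
{
        return atomic_load(&p->count) + p->blocked_open;
}

int main(void)
{
        struct port_sketch p = { .count = 0, .blocked_open = 1 };

        atomic_fetch_add(&p.count, 1);  /* open  */
        atomic_fetch_add(&p.count, 1);  /* open  */
        atomic_fetch_sub(&p.count, 1);  /* close */

        printf("users: %d\n", port_users(&p));
        return 0;
}
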
86235diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
86236index 92e337c..f46757b 100644
86237--- a/include/linux/tty_driver.h
86238+++ b/include/linux/tty_driver.h
86239@@ -291,7 +291,7 @@ struct tty_operations {
86240 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
86241 #endif
86242 const struct file_operations *proc_fops;
86243-};
86244+} __do_const __randomize_layout;
86245
86246 struct tty_driver {
86247 int magic; /* magic number for this structure */
86248@@ -325,7 +325,7 @@ struct tty_driver {
86249
86250 const struct tty_operations *ops;
86251 struct list_head tty_drivers;
86252-};
86253+} __randomize_layout;
86254
86255 extern struct list_head tty_drivers;
86256
86257diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
86258index 00c9d68..bc0188b 100644
86259--- a/include/linux/tty_ldisc.h
86260+++ b/include/linux/tty_ldisc.h
86261@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
86262
86263 struct module *owner;
86264
86265- int refcount;
86266+ atomic_t refcount;
86267 };
86268
86269 struct tty_ldisc {
86270diff --git a/include/linux/types.h b/include/linux/types.h
86271index 6747247..fc7ec8b 100644
86272--- a/include/linux/types.h
86273+++ b/include/linux/types.h
86274@@ -174,10 +174,26 @@ typedef struct {
86275 int counter;
86276 } atomic_t;
86277
86278+#ifdef CONFIG_PAX_REFCOUNT
86279+typedef struct {
86280+ int counter;
86281+} atomic_unchecked_t;
86282+#else
86283+typedef atomic_t atomic_unchecked_t;
86284+#endif
86285+
86286 #ifdef CONFIG_64BIT
86287 typedef struct {
86288 long counter;
86289 } atomic64_t;
86290+
86291+#ifdef CONFIG_PAX_REFCOUNT
86292+typedef struct {
86293+ long counter;
86294+} atomic64_unchecked_t;
86295+#else
86296+typedef atomic64_t atomic64_unchecked_t;
86297+#endif
86298 #endif
86299
86300 struct list_head {
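
types.h is where the atomic_unchecked_t split used throughout this patch is defined: with PAX_REFCOUNT enabled, ordinary atomic_t operations are instrumented to catch overflow (a reference count wrapping through zero is a classic use-after-free primitive), while counters that may legitimately wrap, such as statistics and event counts, move to the _unchecked type and skip the instrumentation; without the option the two types are identical. A non-atomic analogue of the distinction:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* analogue of what PAX_REFCOUNT adds around atomic_inc(): catch the
 * wrap instead of letting a refcount overflow (the kernel reports
 * via pax_report_refcount_overflow(), declared in the sched.h hunk) */
static void checked_inc(int *counter)
{
        if (*counter == INT_MAX) {
                fprintf(stderr, "refcount overflow caught\n");
                abort();
        }
        (*counter)++;
}

int main(void)
{
        /* atomic_unchecked_t analogue: wrapping statistics are fine */
        unsigned int events = UINT_MAX;
        int refs = INT_MAX;

        events++;
        printf("events wrapped to %u\n", events);

        checked_inc(&refs);     /* aborts before the wrap */
        return 0;
}
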
86301diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
86302index ecd3319..8a36ded 100644
86303--- a/include/linux/uaccess.h
86304+++ b/include/linux/uaccess.h
86305@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
86306 long ret; \
86307 mm_segment_t old_fs = get_fs(); \
86308 \
86309- set_fs(KERNEL_DS); \
86310 pagefault_disable(); \
86311- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
86312- pagefault_enable(); \
86313+ set_fs(KERNEL_DS); \
86314+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
86315 set_fs(old_fs); \
86316+ pagefault_enable(); \
86317 ret; \
86318 })
86319
86320diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
86321index 2d1f9b6..d7a9fce 100644
86322--- a/include/linux/uidgid.h
86323+++ b/include/linux/uidgid.h
86324@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
86325
86326 #endif /* CONFIG_USER_NS */
86327
86328+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
86329+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
86330+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
86331+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
86332+
86333 #endif /* _LINUX_UIDGID_H */
86334diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
86335index 32c0e83..671eb35 100644
86336--- a/include/linux/uio_driver.h
86337+++ b/include/linux/uio_driver.h
86338@@ -67,7 +67,7 @@ struct uio_device {
86339 struct module *owner;
86340 struct device *dev;
86341 int minor;
86342- atomic_t event;
86343+ atomic_unchecked_t event;
86344 struct fasync_struct *async_queue;
86345 wait_queue_head_t wait;
86346 struct uio_info *info;
86347diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
86348index 99c1b4d..562e6f3 100644
86349--- a/include/linux/unaligned/access_ok.h
86350+++ b/include/linux/unaligned/access_ok.h
86351@@ -4,34 +4,34 @@
86352 #include <linux/kernel.h>
86353 #include <asm/byteorder.h>
86354
86355-static inline u16 get_unaligned_le16(const void *p)
86356+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
86357 {
86358- return le16_to_cpup((__le16 *)p);
86359+ return le16_to_cpup((const __le16 *)p);
86360 }
86361
86362-static inline u32 get_unaligned_le32(const void *p)
86363+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
86364 {
86365- return le32_to_cpup((__le32 *)p);
86366+ return le32_to_cpup((const __le32 *)p);
86367 }
86368
86369-static inline u64 get_unaligned_le64(const void *p)
86370+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
86371 {
86372- return le64_to_cpup((__le64 *)p);
86373+ return le64_to_cpup((const __le64 *)p);
86374 }
86375
86376-static inline u16 get_unaligned_be16(const void *p)
86377+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
86378 {
86379- return be16_to_cpup((__be16 *)p);
86380+ return be16_to_cpup((const __be16 *)p);
86381 }
86382
86383-static inline u32 get_unaligned_be32(const void *p)
86384+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
86385 {
86386- return be32_to_cpup((__be32 *)p);
86387+ return be32_to_cpup((const __be32 *)p);
86388 }
86389
86390-static inline u64 get_unaligned_be64(const void *p)
86391+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
86392 {
86393- return be64_to_cpup((__be64 *)p);
86394+ return be64_to_cpup((const __be64 *)p);
86395 }
86396
86397 static inline void put_unaligned_le16(u16 val, void *p)
86398diff --git a/include/linux/usb.h b/include/linux/usb.h
86399index 7ee1b5c..82e2c1a 100644
86400--- a/include/linux/usb.h
86401+++ b/include/linux/usb.h
86402@@ -566,7 +566,7 @@ struct usb_device {
86403 int maxchild;
86404
86405 u32 quirks;
86406- atomic_t urbnum;
86407+ atomic_unchecked_t urbnum;
86408
86409 unsigned long active_duration;
86410
86411@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
86412
86413 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
86414 __u8 request, __u8 requesttype, __u16 value, __u16 index,
86415- void *data, __u16 size, int timeout);
86416+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
86417 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
86418 void *data, int len, int *actual_length, int timeout);
86419 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
86420diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
86421index 9fd9e48..e2c5f35 100644
86422--- a/include/linux/usb/renesas_usbhs.h
86423+++ b/include/linux/usb/renesas_usbhs.h
86424@@ -39,7 +39,7 @@ enum {
86425 */
86426 struct renesas_usbhs_driver_callback {
86427 int (*notify_hotplug)(struct platform_device *pdev);
86428-};
86429+} __no_const;
86430
86431 /*
86432 * callback functions for platform
86433diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
86434index 8297e5b..0dfae27 100644
86435--- a/include/linux/user_namespace.h
86436+++ b/include/linux/user_namespace.h
86437@@ -39,7 +39,7 @@ struct user_namespace {
86438 struct key *persistent_keyring_register;
86439 struct rw_semaphore persistent_keyring_register_sem;
86440 #endif
86441-};
86442+} __randomize_layout;
86443
86444 extern struct user_namespace init_user_ns;
86445
86446diff --git a/include/linux/utsname.h b/include/linux/utsname.h
86447index 5093f58..c103e58 100644
86448--- a/include/linux/utsname.h
86449+++ b/include/linux/utsname.h
86450@@ -25,7 +25,7 @@ struct uts_namespace {
86451 struct new_utsname name;
86452 struct user_namespace *user_ns;
86453 struct ns_common ns;
86454-};
86455+} __randomize_layout;
86456 extern struct uts_namespace init_uts_ns;
86457
86458 #ifdef CONFIG_UTS_NS
86459diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
86460index 6f8fbcf..4efc177 100644
86461--- a/include/linux/vermagic.h
86462+++ b/include/linux/vermagic.h
86463@@ -25,9 +25,42 @@
86464 #define MODULE_ARCH_VERMAGIC ""
86465 #endif
86466
86467+#ifdef CONFIG_PAX_REFCOUNT
86468+#define MODULE_PAX_REFCOUNT "REFCOUNT "
86469+#else
86470+#define MODULE_PAX_REFCOUNT ""
86471+#endif
86472+
86473+#ifdef CONSTIFY_PLUGIN
86474+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
86475+#else
86476+#define MODULE_CONSTIFY_PLUGIN ""
86477+#endif
86478+
86479+#ifdef STACKLEAK_PLUGIN
86480+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
86481+#else
86482+#define MODULE_STACKLEAK_PLUGIN ""
86483+#endif
86484+
86485+#ifdef RANDSTRUCT_PLUGIN
86486+#include <generated/randomize_layout_hash.h>
86487+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
86488+#else
86489+#define MODULE_RANDSTRUCT_PLUGIN
86490+#endif
86491+
86492+#ifdef CONFIG_GRKERNSEC
86493+#define MODULE_GRSEC "GRSEC "
86494+#else
86495+#define MODULE_GRSEC ""
86496+#endif
86497+
86498 #define VERMAGIC_STRING \
86499 UTS_RELEASE " " \
86500 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86501 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86502- MODULE_ARCH_VERMAGIC
86503+ MODULE_ARCH_VERMAGIC \
86504+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86505+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86506
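
The vermagic.h hunk folds each hardening feature into the module version-magic string (RANDSTRUCT additionally folds in the hashed randomization seed), so a module built without a feature the kernel was built with fails the vermagic comparison at load time instead of running with mismatched structure layouts or atomics. The mechanism is just string composition and comparison:

#include <stdio.h>
#include <string.h>

#define MODULE_PAX_REFCOUNT_ON  "REFCOUNT "
#define MODULE_PAX_REFCOUNT_OFF ""

/* feature tags are concatenated onto the base string, as above */
#define VERMAGIC(refcount) "4.0.1 SMP mod_unload " refcount

int main(void)
{
        const char *kernel = VERMAGIC(MODULE_PAX_REFCOUNT_ON);
        const char *module = VERMAGIC(MODULE_PAX_REFCOUNT_OFF);

        /* module load effectively strcmp()s the two strings */
        if (strcmp(kernel, module) != 0)
                printf("refused: \"%s\" != \"%s\"\n", kernel, module);
        return 0;
}
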
86507diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86508index b483abd..af305ad 100644
86509--- a/include/linux/vga_switcheroo.h
86510+++ b/include/linux/vga_switcheroo.h
86511@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86512
86513 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86514
86515-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86516+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86517 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86518-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86519+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86520 #else
86521
86522 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86523@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86524
86525 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86526
86527-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86528+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86529 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86530-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86531+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86532
86533 #endif
86534 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86535diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86536index 0ec5983..cc61051 100644
86537--- a/include/linux/vmalloc.h
86538+++ b/include/linux/vmalloc.h
86539@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86540 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86541 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
86542 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
86543+
86544+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86545+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
86546+#endif
86547+
86548 /* bits [20..32] reserved for arch specific ioremap internals */
86549
86550 /*
86551@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86552 unsigned long flags, pgprot_t prot);
86553 extern void vunmap(const void *addr);
86554
86555+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86556+extern void unmap_process_stacks(struct task_struct *task);
86557+#endif
86558+
86559 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86560 unsigned long uaddr, void *kaddr,
86561 unsigned long size);
86562@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
86563
86564 /* for /dev/kmem */
86565 extern long vread(char *buf, char *addr, unsigned long count);
86566-extern long vwrite(char *buf, char *addr, unsigned long count);
86567+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86568
86569 /*
86570 * Internals. Dont't use..
86571diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86572index 82e7db7..f8ce3d0 100644
86573--- a/include/linux/vmstat.h
86574+++ b/include/linux/vmstat.h
86575@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86576 /*
86577 * Zone based page accounting with per cpu differentials.
86578 */
86579-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86580+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86581
86582 static inline void zone_page_state_add(long x, struct zone *zone,
86583 enum zone_stat_item item)
86584 {
86585- atomic_long_add(x, &zone->vm_stat[item]);
86586- atomic_long_add(x, &vm_stat[item]);
86587+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86588+ atomic_long_add_unchecked(x, &vm_stat[item]);
86589 }
86590
86591-static inline unsigned long global_page_state(enum zone_stat_item item)
86592+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86593 {
86594- long x = atomic_long_read(&vm_stat[item]);
86595+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86596 #ifdef CONFIG_SMP
86597 if (x < 0)
86598 x = 0;
86599@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86600 return x;
86601 }
86602
86603-static inline unsigned long zone_page_state(struct zone *zone,
86604+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86605 enum zone_stat_item item)
86606 {
86607- long x = atomic_long_read(&zone->vm_stat[item]);
86608+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86609 #ifdef CONFIG_SMP
86610 if (x < 0)
86611 x = 0;
86612@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86613 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86614 enum zone_stat_item item)
86615 {
86616- long x = atomic_long_read(&zone->vm_stat[item]);
86617+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86618
86619 #ifdef CONFIG_SMP
86620 int cpu;
86621@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86622
86623 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86624 {
86625- atomic_long_inc(&zone->vm_stat[item]);
86626- atomic_long_inc(&vm_stat[item]);
86627+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86628+ atomic_long_inc_unchecked(&vm_stat[item]);
86629 }
86630
86631 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86632 {
86633- atomic_long_dec(&zone->vm_stat[item]);
86634- atomic_long_dec(&vm_stat[item]);
86635+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86636+ atomic_long_dec_unchecked(&vm_stat[item]);
86637 }
86638
86639 static inline void __inc_zone_page_state(struct page *page,
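The vmstat hunk converts the zone statistics to atomic_long_unchecked_t. Under PaX REFCOUNT, the plain atomic ops are hardened so reference counts cannot silently wrap; counters that are mere statistics, where wrap-around is harmless, use the *_unchecked variants to keep the original semantics and avoid false positives. A small userspace model of the two behaviours (the kernel builds the check into the atomic ops themselves; here it is open-coded):

#include <limits.h>
#include <stdio.h>

/* Model of a REFCOUNT-protected add: saturate instead of wrapping.
 * The kernel additionally reports the event. */
static long checked_add(long v, long x)
{
	if (x > 0 && v > LONG_MAX - x)
		return LONG_MAX;
	return v + x;
}

/* Model of atomic_long_add_unchecked(): wraps modulo 2^N via
 * unsigned arithmetic, which is fine for statistics. */
static long unchecked_add(long v, long x)
{
	return (long)((unsigned long)v + (unsigned long)x);
}

int main(void)
{
	long refcount = LONG_MAX, stat = LONG_MAX;

	refcount = checked_add(refcount, 1);	/* pinned at LONG_MAX */
	stat = unchecked_add(stat, 1);		/* wraps negative */
	printf("checked: %ld  unchecked: %ld\n", refcount, stat);
	return 0;
}
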
86640diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86641index 91b0a68..0e9adf6 100644
86642--- a/include/linux/xattr.h
86643+++ b/include/linux/xattr.h
86644@@ -28,7 +28,7 @@ struct xattr_handler {
86645 size_t size, int handler_flags);
86646 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86647 size_t size, int flags, int handler_flags);
86648-};
86649+} __do_const;
86650
86651 struct xattr {
86652 const char *name;
86653@@ -37,6 +37,9 @@ struct xattr {
86654 };
86655
86656 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86657+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86658+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86659+#endif
86660 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86661 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86662 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86663diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86664index 92dbbd3..13ab0b3 100644
86665--- a/include/linux/zlib.h
86666+++ b/include/linux/zlib.h
86667@@ -31,6 +31,7 @@
86668 #define _ZLIB_H
86669
86670 #include <linux/zconf.h>
86671+#include <linux/compiler.h>
86672
86673 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86674 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86675@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86676
86677 /* basic functions */
86678
86679-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86680+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86681 /*
86682 Returns the number of bytes that need to be allocated for a per-
86683 stream workspace with the specified parameters. A pointer to this
86684diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86685index 3e4fddf..5ec9104 100644
86686--- a/include/media/v4l2-dev.h
86687+++ b/include/media/v4l2-dev.h
86688@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86689 int (*mmap) (struct file *, struct vm_area_struct *);
86690 int (*open) (struct file *);
86691 int (*release) (struct file *);
86692-};
86693+} __do_const;
86694
86695 /*
86696 * Newer version of video_device, handled by videodev2.c
86697diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86698index ffb69da..040393e 100644
86699--- a/include/media/v4l2-device.h
86700+++ b/include/media/v4l2-device.h
86701@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86702 this function returns 0. If the name ends with a digit (e.g. cx18),
86703 then the name will be set to cx18-0 since cx180 looks really odd. */
86704 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86705- atomic_t *instance);
86706+ atomic_unchecked_t *instance);
86707
86708 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86709 Since the parent disappears this ensures that v4l2_dev doesn't have an
86710diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86711index 2a25dec..bf6dd8a 100644
86712--- a/include/net/9p/transport.h
86713+++ b/include/net/9p/transport.h
86714@@ -62,7 +62,7 @@ struct p9_trans_module {
86715 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86716 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86717 char *, char *, int , int, int, int);
86718-};
86719+} __do_const;
86720
86721 void v9fs_register_trans(struct p9_trans_module *m);
86722 void v9fs_unregister_trans(struct p9_trans_module *m);
86723diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86724index a175ba4..196eb8242 100644
86725--- a/include/net/af_unix.h
86726+++ b/include/net/af_unix.h
86727@@ -36,7 +36,7 @@ struct unix_skb_parms {
86728 u32 secid; /* Security ID */
86729 #endif
86730 u32 consumed;
86731-};
86732+} __randomize_layout;
86733
86734 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86735 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86736diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86737index 2239a37..a83461f 100644
86738--- a/include/net/bluetooth/l2cap.h
86739+++ b/include/net/bluetooth/l2cap.h
86740@@ -609,7 +609,7 @@ struct l2cap_ops {
86741 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86742 unsigned long hdr_len,
86743 unsigned long len, int nb);
86744-};
86745+} __do_const;
86746
86747 struct l2cap_conn {
86748 struct hci_conn *hcon;
86749diff --git a/include/net/bonding.h b/include/net/bonding.h
86750index fda6fee..dbdf83c 100644
86751--- a/include/net/bonding.h
86752+++ b/include/net/bonding.h
86753@@ -665,7 +665,7 @@ extern struct rtnl_link_ops bond_link_ops;
86754
86755 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86756 {
86757- atomic_long_inc(&dev->tx_dropped);
86758+ atomic_long_inc_unchecked(&dev->tx_dropped);
86759 dev_kfree_skb_any(skb);
86760 }
86761
86762diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86763index f2ae33d..c457cf0 100644
86764--- a/include/net/caif/cfctrl.h
86765+++ b/include/net/caif/cfctrl.h
86766@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86767 void (*radioset_rsp)(void);
86768 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86769 struct cflayer *client_layer);
86770-};
86771+} __no_const;
86772
86773 /* Link Setup Parameters for CAIF-Links. */
86774 struct cfctrl_link_param {
86775@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86776 struct cfctrl {
86777 struct cfsrvl serv;
86778 struct cfctrl_rsp res;
86779- atomic_t req_seq_no;
86780- atomic_t rsp_seq_no;
86781+ atomic_unchecked_t req_seq_no;
86782+ atomic_unchecked_t rsp_seq_no;
86783 struct list_head list;
86784 /* Protects from simultaneous access to first_req list */
86785 spinlock_t info_list_lock;
86786diff --git a/include/net/flow.h b/include/net/flow.h
86787index 8109a15..504466d 100644
86788--- a/include/net/flow.h
86789+++ b/include/net/flow.h
86790@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86791
86792 void flow_cache_flush(struct net *net);
86793 void flow_cache_flush_deferred(struct net *net);
86794-extern atomic_t flow_cache_genid;
86795+extern atomic_unchecked_t flow_cache_genid;
86796
86797 #endif
86798diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86799index 0574abd..0f16881 100644
86800--- a/include/net/genetlink.h
86801+++ b/include/net/genetlink.h
86802@@ -130,7 +130,7 @@ struct genl_ops {
86803 u8 cmd;
86804 u8 internal_flags;
86805 u8 flags;
86806-};
86807+} __do_const;
86808
86809 int __genl_register_family(struct genl_family *family);
86810
86811diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86812index 0f712c0..cd762c4 100644
86813--- a/include/net/gro_cells.h
86814+++ b/include/net/gro_cells.h
86815@@ -27,7 +27,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86816 cell = this_cpu_ptr(gcells->cells);
86817
86818 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86819- atomic_long_inc(&dev->rx_dropped);
86820+ atomic_long_inc_unchecked(&dev->rx_dropped);
86821 kfree_skb(skb);
86822 return;
86823 }
86824diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86825index 5976bde..3a81660 100644
86826--- a/include/net/inet_connection_sock.h
86827+++ b/include/net/inet_connection_sock.h
86828@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86829 int (*bind_conflict)(const struct sock *sk,
86830 const struct inet_bind_bucket *tb, bool relax);
86831 void (*mtu_reduced)(struct sock *sk);
86832-};
86833+} __do_const;
86834
86835 /** inet_connection_sock - INET connection oriented sock
86836 *
86837diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86838index 80479ab..0c3f647 100644
86839--- a/include/net/inetpeer.h
86840+++ b/include/net/inetpeer.h
86841@@ -47,7 +47,7 @@ struct inet_peer {
86842 */
86843 union {
86844 struct {
86845- atomic_t rid; /* Frag reception counter */
86846+ atomic_unchecked_t rid; /* Frag reception counter */
86847 };
86848 struct rcu_head rcu;
86849 struct inet_peer *gc_next;
86850diff --git a/include/net/ip.h b/include/net/ip.h
86851index 6cc1eaf..14059b0 100644
86852--- a/include/net/ip.h
86853+++ b/include/net/ip.h
86854@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86855 }
86856 }
86857
86858-u32 ip_idents_reserve(u32 hash, int segs);
86859+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86860 void __ip_select_ident(struct iphdr *iph, int segs);
86861
86862 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86863diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86864index 5bd120e4..03fb812 100644
86865--- a/include/net/ip_fib.h
86866+++ b/include/net/ip_fib.h
86867@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86868
86869 #define FIB_RES_SADDR(net, res) \
86870 ((FIB_RES_NH(res).nh_saddr_genid == \
86871- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86872+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86873 FIB_RES_NH(res).nh_saddr : \
86874 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86875 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86876diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86877index 615b20b..fd4cbd8 100644
86878--- a/include/net/ip_vs.h
86879+++ b/include/net/ip_vs.h
86880@@ -534,7 +534,7 @@ struct ip_vs_conn {
86881 struct ip_vs_conn *control; /* Master control connection */
86882 atomic_t n_control; /* Number of controlled ones */
86883 struct ip_vs_dest *dest; /* real server */
86884- atomic_t in_pkts; /* incoming packet counter */
86885+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86886
86887 /* Packet transmitter for different forwarding methods. If it
86888 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86889@@ -682,7 +682,7 @@ struct ip_vs_dest {
86890 __be16 port; /* port number of the server */
86891 union nf_inet_addr addr; /* IP address of the server */
86892 volatile unsigned int flags; /* dest status flags */
86893- atomic_t conn_flags; /* flags to copy to conn */
86894+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86895 atomic_t weight; /* server weight */
86896
86897 atomic_t refcnt; /* reference counter */
86898@@ -928,11 +928,11 @@ struct netns_ipvs {
86899 /* ip_vs_lblc */
86900 int sysctl_lblc_expiration;
86901 struct ctl_table_header *lblc_ctl_header;
86902- struct ctl_table *lblc_ctl_table;
86903+ ctl_table_no_const *lblc_ctl_table;
86904 /* ip_vs_lblcr */
86905 int sysctl_lblcr_expiration;
86906 struct ctl_table_header *lblcr_ctl_header;
86907- struct ctl_table *lblcr_ctl_table;
86908+ ctl_table_no_const *lblcr_ctl_table;
86909 /* ip_vs_est */
86910 struct list_head est_list; /* estimator list */
86911 spinlock_t est_lock;
86912diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86913index 8d4f588..2e37ad2 100644
86914--- a/include/net/irda/ircomm_tty.h
86915+++ b/include/net/irda/ircomm_tty.h
86916@@ -33,6 +33,7 @@
86917 #include <linux/termios.h>
86918 #include <linux/timer.h>
86919 #include <linux/tty.h> /* struct tty_struct */
86920+#include <asm/local.h>
86921
86922 #include <net/irda/irias_object.h>
86923 #include <net/irda/ircomm_core.h>
86924diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86925index 714cc9a..ea05f3e 100644
86926--- a/include/net/iucv/af_iucv.h
86927+++ b/include/net/iucv/af_iucv.h
86928@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86929 struct iucv_sock_list {
86930 struct hlist_head head;
86931 rwlock_t lock;
86932- atomic_t autobind_name;
86933+ atomic_unchecked_t autobind_name;
86934 };
86935
86936 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86937diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86938index f3be818..bf46196 100644
86939--- a/include/net/llc_c_ac.h
86940+++ b/include/net/llc_c_ac.h
86941@@ -87,7 +87,7 @@
86942 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86943 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86944
86945-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86946+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86947
86948 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86949 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86950diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86951index 3948cf1..83b28c4 100644
86952--- a/include/net/llc_c_ev.h
86953+++ b/include/net/llc_c_ev.h
86954@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86955 return (struct llc_conn_state_ev *)skb->cb;
86956 }
86957
86958-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86959-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86960+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86961+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86962
86963 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86964 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86965diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86966index 48f3f89..0e92c50 100644
86967--- a/include/net/llc_c_st.h
86968+++ b/include/net/llc_c_st.h
86969@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86970 u8 next_state;
86971 const llc_conn_ev_qfyr_t *ev_qualifiers;
86972 const llc_conn_action_t *ev_actions;
86973-};
86974+} __do_const;
86975
86976 struct llc_conn_state {
86977 u8 current_state;
86978diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86979index a61b98c..aade1eb 100644
86980--- a/include/net/llc_s_ac.h
86981+++ b/include/net/llc_s_ac.h
86982@@ -23,7 +23,7 @@
86983 #define SAP_ACT_TEST_IND 9
86984
86985 /* All action functions must look like this */
86986-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86987+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86988
86989 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86990 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
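The llc typedef changes above fold const into the function-pointer type itself (int (* const ...)), so every dispatch table declared with these typedefs becomes immutable without editing each declaration site. A minimal demonstration of the effect:

#include <stdio.h>

typedef int (* const action_t)(int);

static int ac_one(int x) { return x + 1; }
static int ac_two(int x) { return x * 2; }

/* An array of const pointers: the compiler can place it in .rodata,
 * and reassigning an entry is now a compile-time error. */
static action_t actions[] = { ac_one, ac_two };

int main(void)
{
	/* actions[0] = ac_two;    <- would no longer compile */
	printf("%d %d\n", actions[0](1), actions[1](21));
	return 0;
}
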
86991diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86992index c4359e2..76dbc4a 100644
86993--- a/include/net/llc_s_st.h
86994+++ b/include/net/llc_s_st.h
86995@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86996 llc_sap_ev_t ev;
86997 u8 next_state;
86998 const llc_sap_action_t *ev_actions;
86999-};
87000+} __do_const;
87001
87002 struct llc_sap_state {
87003 u8 curr_state;
87004diff --git a/include/net/mac80211.h b/include/net/mac80211.h
87005index d52914b..2b13cec 100644
87006--- a/include/net/mac80211.h
87007+++ b/include/net/mac80211.h
87008@@ -4915,7 +4915,7 @@ struct rate_control_ops {
87009 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
87010
87011 u32 (*get_expected_throughput)(void *priv_sta);
87012-};
87013+} __do_const;
87014
87015 static inline int rate_supported(struct ieee80211_sta *sta,
87016 enum ieee80211_band band,
87017diff --git a/include/net/neighbour.h b/include/net/neighbour.h
87018index 76f7084..8f36e39 100644
87019--- a/include/net/neighbour.h
87020+++ b/include/net/neighbour.h
87021@@ -163,7 +163,7 @@ struct neigh_ops {
87022 void (*error_report)(struct neighbour *, struct sk_buff *);
87023 int (*output)(struct neighbour *, struct sk_buff *);
87024 int (*connected_output)(struct neighbour *, struct sk_buff *);
87025-};
87026+} __do_const;
87027
87028 struct pneigh_entry {
87029 struct pneigh_entry *next;
87030@@ -217,7 +217,7 @@ struct neigh_table {
87031 struct neigh_statistics __percpu *stats;
87032 struct neigh_hash_table __rcu *nht;
87033 struct pneigh_entry **phash_buckets;
87034-};
87035+} __randomize_layout;
87036
87037 enum {
87038 NEIGH_ARP_TABLE = 0,
87039diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
87040index 36faf49..6927638 100644
87041--- a/include/net/net_namespace.h
87042+++ b/include/net/net_namespace.h
87043@@ -131,8 +131,8 @@ struct net {
87044 struct netns_ipvs *ipvs;
87045 #endif
87046 struct sock *diag_nlsk;
87047- atomic_t fnhe_genid;
87048-};
87049+ atomic_unchecked_t fnhe_genid;
87050+} __randomize_layout;
87051
87052 #include <linux/seq_file_net.h>
87053
87054@@ -288,7 +288,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
87055 #define __net_init __init
87056 #define __net_exit __exit_refok
87057 #define __net_initdata __initdata
87058+#ifdef CONSTIFY_PLUGIN
87059 #define __net_initconst __initconst
87060+#else
87061+#define __net_initconst __initdata
87062+#endif
87063 #endif
87064
87065 int peernet2id(struct net *net, struct net *peer);
87066@@ -301,7 +305,7 @@ struct pernet_operations {
87067 void (*exit_batch)(struct list_head *net_exit_list);
87068 int *id;
87069 size_t size;
87070-};
87071+} __do_const;
87072
87073 /*
87074 * Use these carefully. If you implement a network device and it
87075@@ -349,12 +353,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
87076
87077 static inline int rt_genid_ipv4(struct net *net)
87078 {
87079- return atomic_read(&net->ipv4.rt_genid);
87080+ return atomic_read_unchecked(&net->ipv4.rt_genid);
87081 }
87082
87083 static inline void rt_genid_bump_ipv4(struct net *net)
87084 {
87085- atomic_inc(&net->ipv4.rt_genid);
87086+ atomic_inc_unchecked(&net->ipv4.rt_genid);
87087 }
87088
87089 extern void (*__fib6_flush_trees)(struct net *net);
87090@@ -381,12 +385,12 @@ static inline void rt_genid_bump_all(struct net *net)
87091
87092 static inline int fnhe_genid(struct net *net)
87093 {
87094- return atomic_read(&net->fnhe_genid);
87095+ return atomic_read_unchecked(&net->fnhe_genid);
87096 }
87097
87098 static inline void fnhe_genid_bump(struct net *net)
87099 {
87100- atomic_inc(&net->fnhe_genid);
87101+ atomic_inc_unchecked(&net->fnhe_genid);
87102 }
87103
87104 #endif /* __NET_NET_NAMESPACE_H */
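The net namespace hunks convert the generation counters (rt_genid, fnhe_genid) to atomic_unchecked_t. These counters gate cache validity purely by equality: bumping one invalidates every cached entry at once, and wrap-around is harmless, so overflow protection would be pure overhead. A userspace model of the genid pattern:

#include <stdio.h>

static unsigned int rt_genid;		/* global generation counter */

struct cached_route {
	unsigned int genid;		/* generation when cached */
	const char *dst;
};

/* Valid only while the recorded generation matches the global one. */
static int cache_valid(const struct cached_route *rt)
{
	return rt->genid == rt_genid;
}

int main(void)
{
	struct cached_route rt = { .genid = rt_genid, .dst = "192.0.2.1" };

	printf("valid before bump: %d\n", cache_valid(&rt));
	rt_genid++;			/* rt_genid_bump_ipv4() analogue */
	printf("valid after bump:  %d\n", cache_valid(&rt));
	return 0;
}
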
87105diff --git a/include/net/netlink.h b/include/net/netlink.h
87106index e010ee8..405b9f4 100644
87107--- a/include/net/netlink.h
87108+++ b/include/net/netlink.h
87109@@ -518,7 +518,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
87110 {
87111 if (mark) {
87112 WARN_ON((unsigned char *) mark < skb->data);
87113- skb_trim(skb, (unsigned char *) mark - skb->data);
87114+ skb_trim(skb, (const unsigned char *) mark - skb->data);
87115 }
87116 }
87117
87118diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
87119index 29d6a94..235d3d84 100644
87120--- a/include/net/netns/conntrack.h
87121+++ b/include/net/netns/conntrack.h
87122@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
87123 struct nf_proto_net {
87124 #ifdef CONFIG_SYSCTL
87125 struct ctl_table_header *ctl_table_header;
87126- struct ctl_table *ctl_table;
87127+ ctl_table_no_const *ctl_table;
87128 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
87129 struct ctl_table_header *ctl_compat_header;
87130- struct ctl_table *ctl_compat_table;
87131+ ctl_table_no_const *ctl_compat_table;
87132 #endif
87133 #endif
87134 unsigned int users;
87135@@ -60,7 +60,7 @@ struct nf_ip_net {
87136 struct nf_icmp_net icmpv6;
87137 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
87138 struct ctl_table_header *ctl_table_header;
87139- struct ctl_table *ctl_table;
87140+ ctl_table_no_const *ctl_table;
87141 #endif
87142 };
87143
87144diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
87145index dbe2254..ed0c151 100644
87146--- a/include/net/netns/ipv4.h
87147+++ b/include/net/netns/ipv4.h
87148@@ -87,7 +87,7 @@ struct netns_ipv4 {
87149
87150 struct ping_group_range ping_group_range;
87151
87152- atomic_t dev_addr_genid;
87153+ atomic_unchecked_t dev_addr_genid;
87154
87155 #ifdef CONFIG_SYSCTL
87156 unsigned long *sysctl_local_reserved_ports;
87157@@ -101,6 +101,6 @@ struct netns_ipv4 {
87158 struct fib_rules_ops *mr_rules_ops;
87159 #endif
87160 #endif
87161- atomic_t rt_genid;
87162+ atomic_unchecked_t rt_genid;
87163 };
87164 #endif
87165diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
87166index 69ae41f..4f94868 100644
87167--- a/include/net/netns/ipv6.h
87168+++ b/include/net/netns/ipv6.h
87169@@ -75,8 +75,8 @@ struct netns_ipv6 {
87170 struct fib_rules_ops *mr6_rules_ops;
87171 #endif
87172 #endif
87173- atomic_t dev_addr_genid;
87174- atomic_t fib6_sernum;
87175+ atomic_unchecked_t dev_addr_genid;
87176+ atomic_unchecked_t fib6_sernum;
87177 };
87178
87179 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
87180diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
87181index 730d82a..045f2c4 100644
87182--- a/include/net/netns/xfrm.h
87183+++ b/include/net/netns/xfrm.h
87184@@ -78,7 +78,7 @@ struct netns_xfrm {
87185
87186 /* flow cache part */
87187 struct flow_cache flow_cache_global;
87188- atomic_t flow_cache_genid;
87189+ atomic_unchecked_t flow_cache_genid;
87190 struct list_head flow_cache_gc_list;
87191 spinlock_t flow_cache_gc_lock;
87192 struct work_struct flow_cache_gc_work;
87193diff --git a/include/net/ping.h b/include/net/ping.h
87194index cc16d41..664f40b 100644
87195--- a/include/net/ping.h
87196+++ b/include/net/ping.h
87197@@ -54,7 +54,7 @@ struct ping_iter_state {
87198
87199 extern struct proto ping_prot;
87200 #if IS_ENABLED(CONFIG_IPV6)
87201-extern struct pingv6_ops pingv6_ops;
87202+extern struct pingv6_ops *pingv6_ops;
87203 #endif
87204
87205 struct pingfakehdr {
87206diff --git a/include/net/protocol.h b/include/net/protocol.h
87207index d6fcc1f..ca277058 100644
87208--- a/include/net/protocol.h
87209+++ b/include/net/protocol.h
87210@@ -49,7 +49,7 @@ struct net_protocol {
87211 * socket lookup?
87212 */
87213 icmp_strict_tag_validation:1;
87214-};
87215+} __do_const;
87216
87217 #if IS_ENABLED(CONFIG_IPV6)
87218 struct inet6_protocol {
87219@@ -62,7 +62,7 @@ struct inet6_protocol {
87220 u8 type, u8 code, int offset,
87221 __be32 info);
87222 unsigned int flags; /* INET6_PROTO_xxx */
87223-};
87224+} __do_const;
87225
87226 #define INET6_PROTO_NOPOLICY 0x1
87227 #define INET6_PROTO_FINAL 0x2
87228diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
87229index 6c6d539..af70817 100644
87230--- a/include/net/rtnetlink.h
87231+++ b/include/net/rtnetlink.h
87232@@ -95,7 +95,7 @@ struct rtnl_link_ops {
87233 const struct net_device *dev,
87234 const struct net_device *slave_dev);
87235 struct net *(*get_link_net)(const struct net_device *dev);
87236-};
87237+} __do_const;
87238
87239 int __rtnl_link_register(struct rtnl_link_ops *ops);
87240 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
87241diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
87242index 4a5b9a3..ca27d73 100644
87243--- a/include/net/sctp/checksum.h
87244+++ b/include/net/sctp/checksum.h
87245@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
87246 unsigned int offset)
87247 {
87248 struct sctphdr *sh = sctp_hdr(skb);
87249- __le32 ret, old = sh->checksum;
87250- const struct skb_checksum_ops ops = {
87251+ __le32 ret, old = sh->checksum;
87252+ static const struct skb_checksum_ops ops = {
87253 .update = sctp_csum_update,
87254 .combine = sctp_csum_combine,
87255 };
87256diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
87257index 487ef34..d457f98 100644
87258--- a/include/net/sctp/sm.h
87259+++ b/include/net/sctp/sm.h
87260@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
87261 typedef struct {
87262 sctp_state_fn_t *fn;
87263 const char *name;
87264-} sctp_sm_table_entry_t;
87265+} __do_const sctp_sm_table_entry_t;
87266
87267 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
87268 * currently in use.
87269@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
87270 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
87271
87272 /* Extern declarations for major data structures. */
87273-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87274+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87275
87276
87277 /* Get the size of a DATA chunk payload. */
87278diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
87279index 2bb2fcf..d17c291 100644
87280--- a/include/net/sctp/structs.h
87281+++ b/include/net/sctp/structs.h
87282@@ -509,7 +509,7 @@ struct sctp_pf {
87283 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
87284 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
87285 struct sctp_af *af;
87286-};
87287+} __do_const;
87288
87289
87290 /* Structure to track chunk fragments that have been acked, but peer
87291diff --git a/include/net/sock.h b/include/net/sock.h
87292index e4079c2..79c5d3a 100644
87293--- a/include/net/sock.h
87294+++ b/include/net/sock.h
87295@@ -362,7 +362,7 @@ struct sock {
87296 unsigned int sk_napi_id;
87297 unsigned int sk_ll_usec;
87298 #endif
87299- atomic_t sk_drops;
87300+ atomic_unchecked_t sk_drops;
87301 int sk_rcvbuf;
87302
87303 struct sk_filter __rcu *sk_filter;
87304@@ -1039,7 +1039,7 @@ struct proto {
87305 void (*destroy_cgroup)(struct mem_cgroup *memcg);
87306 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
87307 #endif
87308-};
87309+} __randomize_layout;
87310
87311 /*
87312 * Bits in struct cg_proto.flags
87313@@ -1212,7 +1212,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
87314 page_counter_uncharge(&prot->memory_allocated, amt);
87315 }
87316
87317-static inline long
87318+static inline long __intentional_overflow(-1)
87319 sk_memory_allocated(const struct sock *sk)
87320 {
87321 struct proto *prot = sk->sk_prot;
87322@@ -1778,7 +1778,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
87323 }
87324
87325 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
87326- struct iov_iter *from, char *to,
87327+ struct iov_iter *from, unsigned char *to,
87328 int copy, int offset)
87329 {
87330 if (skb->ip_summed == CHECKSUM_NONE) {
87331@@ -2025,7 +2025,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
87332 }
87333 }
87334
87335-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87336+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87337
87338 /**
87339 * sk_page_frag - return an appropriate page_frag
87340diff --git a/include/net/tcp.h b/include/net/tcp.h
87341index 8d6b983..5813205 100644
87342--- a/include/net/tcp.h
87343+++ b/include/net/tcp.h
87344@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
87345 void tcp_xmit_retransmit_queue(struct sock *);
87346 void tcp_simple_retransmit(struct sock *);
87347 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
87348-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87349+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87350
87351 void tcp_send_probe0(struct sock *);
87352 void tcp_send_partial(struct sock *);
87353@@ -694,8 +694,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
87354 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
87355 */
87356 struct tcp_skb_cb {
87357- __u32 seq; /* Starting sequence number */
87358- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
87359+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
87360+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
87361 union {
87362 /* Note : tcp_tw_isn is used in input path only
87363 * (isn chosen by tcp_timewait_state_process())
87364@@ -720,7 +720,7 @@ struct tcp_skb_cb {
87365
87366 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
87367 /* 1 byte hole */
87368- __u32 ack_seq; /* Sequence number ACK'd */
87369+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
87370 union {
87371 struct inet_skb_parm h4;
87372 #if IS_ENABLED(CONFIG_IPV6)
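The tcp.h hunks tag the sequence-number fields and tcp_fragment() with __intentional_overflow because TCP sequence arithmetic is modular by design: the wrap is the algorithm, not a bug, so the size_overflow plugin must not flag it. The kernel's own before() comparison depends on exactly this wrap:

#include <stdint.h>
#include <stdio.h>

/* Same shape as before() in include/net/tcp.h: the unsigned
 * subtraction wraps, and the sign of the 32-bit result orders
 * the sequence numbers across the wrap point. */
static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t a = 0xfffffff0u;	/* just before the wrap */
	uint32_t b = 0x00000010u;	/* just after the wrap */

	printf("before(a, b) = %d\n", before(a, b));	/* 1, despite a > b */
	return 0;
}
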
87373diff --git a/include/net/xfrm.h b/include/net/xfrm.h
87374index dc4865e..152ee4c 100644
87375--- a/include/net/xfrm.h
87376+++ b/include/net/xfrm.h
87377@@ -285,7 +285,6 @@ struct xfrm_dst;
87378 struct xfrm_policy_afinfo {
87379 unsigned short family;
87380 struct dst_ops *dst_ops;
87381- void (*garbage_collect)(struct net *net);
87382 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
87383 const xfrm_address_t *saddr,
87384 const xfrm_address_t *daddr);
87385@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
87386 struct net_device *dev,
87387 const struct flowi *fl);
87388 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
87389-};
87390+} __do_const;
87391
87392 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
87393 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
87394@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
87395 int (*transport_finish)(struct sk_buff *skb,
87396 int async);
87397 void (*local_error)(struct sk_buff *skb, u32 mtu);
87398-};
87399+} __do_const;
87400
87401 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
87402 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
87403@@ -437,7 +436,7 @@ struct xfrm_mode {
87404 struct module *owner;
87405 unsigned int encap;
87406 int flags;
87407-};
87408+} __do_const;
87409
87410 /* Flags for xfrm_mode. */
87411 enum {
87412@@ -534,7 +533,7 @@ struct xfrm_policy {
87413 struct timer_list timer;
87414
87415 struct flow_cache_object flo;
87416- atomic_t genid;
87417+ atomic_unchecked_t genid;
87418 u32 priority;
87419 u32 index;
87420 struct xfrm_mark mark;
87421@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
87422 }
87423
87424 void xfrm_garbage_collect(struct net *net);
87425+void xfrm_garbage_collect_deferred(struct net *net);
87426
87427 #else
87428
87429@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
87430 static inline void xfrm_garbage_collect(struct net *net)
87431 {
87432 }
87433+static inline void xfrm_garbage_collect_deferred(struct net *net)
87434+{
87435+}
87436 #endif
87437
87438 static __inline__
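The xfrm hunk removes the garbage_collect callback from xfrm_policy_afinfo and adds a direct export, xfrm_garbage_collect_deferred(), in its place; with the runtime-varying member gone, the remaining all-function-pointer struct can be marked __do_const. The same refactoring in miniature (names here are illustrative):

#include <stdio.h>

struct afinfo {
	int (*lookup)(int id);
	/* void (*garbage_collect)(void);   <- removed */
};

static int my_lookup(int id) { return id * 2; }

/* Every member is now fixed at build time, so the instance can be
 * const and live in read-only memory. */
static const struct afinfo my_afinfo = {
	.lookup = my_lookup,
};

/* The varying behaviour becomes a direct call instead of a
 * writable hook. */
static void garbage_collect_deferred(void)
{
	puts("deferred GC scheduled");
}

int main(void)
{
	printf("lookup: %d\n", my_afinfo.lookup(21));
	garbage_collect_deferred();
	return 0;
}
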
87439diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
87440index 1017e0b..227aa4d 100644
87441--- a/include/rdma/iw_cm.h
87442+++ b/include/rdma/iw_cm.h
87443@@ -122,7 +122,7 @@ struct iw_cm_verbs {
87444 int backlog);
87445
87446 int (*destroy_listen)(struct iw_cm_id *cm_id);
87447-};
87448+} __no_const;
87449
87450 /**
87451 * iw_create_cm_id - Create an IW CM identifier.
87452diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
87453index 93d14da..734b3d8 100644
87454--- a/include/scsi/libfc.h
87455+++ b/include/scsi/libfc.h
87456@@ -771,6 +771,7 @@ struct libfc_function_template {
87457 */
87458 void (*disc_stop_final) (struct fc_lport *);
87459 };
87460+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
87461
87462 /**
87463 * struct fc_disc - Discovery context
87464@@ -875,7 +876,7 @@ struct fc_lport {
87465 struct fc_vport *vport;
87466
87467 /* Operational Information */
87468- struct libfc_function_template tt;
87469+ libfc_function_template_no_const tt;
87470 u8 link_up;
87471 u8 qfull;
87472 enum fc_lport_state state;
87473diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
87474index a4c9336..d6f8f34 100644
87475--- a/include/scsi/scsi_device.h
87476+++ b/include/scsi/scsi_device.h
87477@@ -185,9 +185,9 @@ struct scsi_device {
87478 unsigned int max_device_blocked; /* what device_blocked counts down from */
87479 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
87480
87481- atomic_t iorequest_cnt;
87482- atomic_t iodone_cnt;
87483- atomic_t ioerr_cnt;
87484+ atomic_unchecked_t iorequest_cnt;
87485+ atomic_unchecked_t iodone_cnt;
87486+ atomic_unchecked_t ioerr_cnt;
87487
87488 struct device sdev_gendev,
87489 sdev_dev;
87490diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87491index 007a0bc..7188db8 100644
87492--- a/include/scsi/scsi_transport_fc.h
87493+++ b/include/scsi/scsi_transport_fc.h
87494@@ -756,7 +756,8 @@ struct fc_function_template {
87495 unsigned long show_host_system_hostname:1;
87496
87497 unsigned long disable_target_scan:1;
87498-};
87499+} __do_const;
87500+typedef struct fc_function_template __no_const fc_function_template_no_const;
87501
87502
87503 /**
87504diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87505index f48089d..73abe48 100644
87506--- a/include/sound/compress_driver.h
87507+++ b/include/sound/compress_driver.h
87508@@ -130,7 +130,7 @@ struct snd_compr_ops {
87509 struct snd_compr_caps *caps);
87510 int (*get_codec_caps) (struct snd_compr_stream *stream,
87511 struct snd_compr_codec_caps *codec);
87512-};
87513+} __no_const;
87514
87515 /**
87516 * struct snd_compr: Compressed device
87517diff --git a/include/sound/soc.h b/include/sound/soc.h
87518index 0d1ade1..34e77d3 100644
87519--- a/include/sound/soc.h
87520+++ b/include/sound/soc.h
87521@@ -856,7 +856,7 @@ struct snd_soc_codec_driver {
87522 enum snd_soc_dapm_type, int);
87523
87524 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
87525-};
87526+} __do_const;
87527
87528 /* SoC platform interface */
87529 struct snd_soc_platform_driver {
87530@@ -883,7 +883,7 @@ struct snd_soc_platform_driver {
87531 const struct snd_compr_ops *compr_ops;
87532
87533 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87534-};
87535+} __do_const;
87536
87537 struct snd_soc_dai_link_component {
87538 const char *name;
87539diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87540index 672150b..9d4bec4 100644
87541--- a/include/target/target_core_base.h
87542+++ b/include/target/target_core_base.h
87543@@ -767,7 +767,7 @@ struct se_device {
87544 atomic_long_t write_bytes;
87545 /* Active commands on this virtual SE device */
87546 atomic_t simple_cmds;
87547- atomic_t dev_ordered_id;
87548+ atomic_unchecked_t dev_ordered_id;
87549 atomic_t dev_ordered_sync;
87550 atomic_t dev_qf_count;
87551 int export_count;
87552diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87553new file mode 100644
87554index 0000000..fb634b7
87555--- /dev/null
87556+++ b/include/trace/events/fs.h
87557@@ -0,0 +1,53 @@
87558+#undef TRACE_SYSTEM
87559+#define TRACE_SYSTEM fs
87560+
87561+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87562+#define _TRACE_FS_H
87563+
87564+#include <linux/fs.h>
87565+#include <linux/tracepoint.h>
87566+
87567+TRACE_EVENT(do_sys_open,
87568+
87569+ TP_PROTO(const char *filename, int flags, int mode),
87570+
87571+ TP_ARGS(filename, flags, mode),
87572+
87573+ TP_STRUCT__entry(
87574+ __string( filename, filename )
87575+ __field( int, flags )
87576+ __field( int, mode )
87577+ ),
87578+
87579+ TP_fast_assign(
87580+ __assign_str(filename, filename);
87581+ __entry->flags = flags;
87582+ __entry->mode = mode;
87583+ ),
87584+
87585+ TP_printk("\"%s\" %x %o",
87586+ __get_str(filename), __entry->flags, __entry->mode)
87587+);
87588+
87589+TRACE_EVENT(open_exec,
87590+
87591+ TP_PROTO(const char *filename),
87592+
87593+ TP_ARGS(filename),
87594+
87595+ TP_STRUCT__entry(
87596+ __string( filename, filename )
87597+ ),
87598+
87599+ TP_fast_assign(
87600+ __assign_str(filename, filename);
87601+ ),
87602+
87603+ TP_printk("\"%s\"",
87604+ __get_str(filename))
87605+);
87606+
87607+#endif /* _TRACE_FS_H */
87608+
87609+/* This part must be outside protection */
87610+#include <trace/define_trace.h>
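The new trace/events/fs.h above defines two tracepoints, do_sys_open and open_exec; the corresponding trace_do_sys_open()/trace_open_exec() calls are added elsewhere in this patch. On a kernel built with the events, they appear under tracefs; a sketch of enabling one from userspace, assuming the conventional debugfs mount point and root privileges:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/fs/do_sys_open/enable";

	if (write_str(enable, "1")) {
		perror("enable fs:do_sys_open");
		return 1;
	}
	/* Records formatted as in TP_printk above ("\"%s\" %x %o") can
	 * now be read from .../tracing/trace_pipe. */
	return 0;
}
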
87611diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87612index 3608beb..df39d8a 100644
87613--- a/include/trace/events/irq.h
87614+++ b/include/trace/events/irq.h
87615@@ -36,7 +36,7 @@ struct softirq_action;
87616 */
87617 TRACE_EVENT(irq_handler_entry,
87618
87619- TP_PROTO(int irq, struct irqaction *action),
87620+ TP_PROTO(int irq, const struct irqaction *action),
87621
87622 TP_ARGS(irq, action),
87623
87624@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87625 */
87626 TRACE_EVENT(irq_handler_exit,
87627
87628- TP_PROTO(int irq, struct irqaction *action, int ret),
87629+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87630
87631 TP_ARGS(irq, action, ret),
87632
87633diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87634index 7caf44c..23c6f27 100644
87635--- a/include/uapi/linux/a.out.h
87636+++ b/include/uapi/linux/a.out.h
87637@@ -39,6 +39,14 @@ enum machine_type {
87638 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87639 };
87640
87641+/* Constants for the N_FLAGS field */
87642+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87643+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87644+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87645+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87646+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87647+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87648+
87649 #if !defined (N_MAGIC)
87650 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87651 #endif
87652diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87653index 22b6ad3..aeba37e 100644
87654--- a/include/uapi/linux/bcache.h
87655+++ b/include/uapi/linux/bcache.h
87656@@ -5,6 +5,7 @@
87657 * Bcache on disk data structures
87658 */
87659
87660+#include <linux/compiler.h>
87661 #include <asm/types.h>
87662
87663 #define BITMASK(name, type, field, offset, size) \
87664@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87665 /* Btree keys - all units are in sectors */
87666
87667 struct bkey {
87668- __u64 high;
87669- __u64 low;
87670+ __u64 high __intentional_overflow(-1);
87671+ __u64 low __intentional_overflow(-1);
87672 __u64 ptr[];
87673 };
87674
87675diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87676index d876736..ccce5c0 100644
87677--- a/include/uapi/linux/byteorder/little_endian.h
87678+++ b/include/uapi/linux/byteorder/little_endian.h
87679@@ -42,51 +42,51 @@
87680
87681 static inline __le64 __cpu_to_le64p(const __u64 *p)
87682 {
87683- return (__force __le64)*p;
87684+ return (__force const __le64)*p;
87685 }
87686-static inline __u64 __le64_to_cpup(const __le64 *p)
87687+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87688 {
87689- return (__force __u64)*p;
87690+ return (__force const __u64)*p;
87691 }
87692 static inline __le32 __cpu_to_le32p(const __u32 *p)
87693 {
87694- return (__force __le32)*p;
87695+ return (__force const __le32)*p;
87696 }
87697 static inline __u32 __le32_to_cpup(const __le32 *p)
87698 {
87699- return (__force __u32)*p;
87700+ return (__force const __u32)*p;
87701 }
87702 static inline __le16 __cpu_to_le16p(const __u16 *p)
87703 {
87704- return (__force __le16)*p;
87705+ return (__force const __le16)*p;
87706 }
87707 static inline __u16 __le16_to_cpup(const __le16 *p)
87708 {
87709- return (__force __u16)*p;
87710+ return (__force const __u16)*p;
87711 }
87712 static inline __be64 __cpu_to_be64p(const __u64 *p)
87713 {
87714- return (__force __be64)__swab64p(p);
87715+ return (__force const __be64)__swab64p(p);
87716 }
87717 static inline __u64 __be64_to_cpup(const __be64 *p)
87718 {
87719- return __swab64p((__u64 *)p);
87720+ return __swab64p((const __u64 *)p);
87721 }
87722 static inline __be32 __cpu_to_be32p(const __u32 *p)
87723 {
87724- return (__force __be32)__swab32p(p);
87725+ return (__force const __be32)__swab32p(p);
87726 }
87727-static inline __u32 __be32_to_cpup(const __be32 *p)
87728+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87729 {
87730- return __swab32p((__u32 *)p);
87731+ return __swab32p((const __u32 *)p);
87732 }
87733 static inline __be16 __cpu_to_be16p(const __u16 *p)
87734 {
87735- return (__force __be16)__swab16p(p);
87736+ return (__force const __be16)__swab16p(p);
87737 }
87738 static inline __u16 __be16_to_cpup(const __be16 *p)
87739 {
87740- return __swab16p((__u16 *)p);
87741+ return __swab16p((const __u16 *)p);
87742 }
87743 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87744 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87745diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87746index 71e1d0e..6cc9caf 100644
87747--- a/include/uapi/linux/elf.h
87748+++ b/include/uapi/linux/elf.h
87749@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87750 #define PT_GNU_EH_FRAME 0x6474e550
87751
87752 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87753+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87754+
87755+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87756+
87757+/* Constants for the e_flags field */
87758+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87759+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87760+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87761+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87762+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87763+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87764
87765 /*
87766 * Extended Numbering
87767@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87768 #define DT_DEBUG 21
87769 #define DT_TEXTREL 22
87770 #define DT_JMPREL 23
87771+#define DT_FLAGS 30
87772+ #define DF_TEXTREL 0x00000004
87773 #define DT_ENCODING 32
87774 #define OLD_DT_LOOS 0x60000000
87775 #define DT_LOOS 0x6000000d
87776@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87777 #define PF_W 0x2
87778 #define PF_X 0x1
87779
87780+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87781+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87782+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87783+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87784+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87785+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87786+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87787+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87788+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87789+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87790+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87791+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87792+
87793 typedef struct elf32_phdr{
87794 Elf32_Word p_type;
87795 Elf32_Off p_offset;
87796@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87797 #define EI_OSABI 7
87798 #define EI_PAD 8
87799
87800+#define EI_PAX 14
87801+
87802 #define ELFMAG0 0x7f /* EI_MAG */
87803 #define ELFMAG1 'E'
87804 #define ELFMAG2 'L'
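The elf.h hunk defines PT_PAX_FLAGS and the PF_(NO)PAGEEXEC/SEGMEXEC/MPROTECT/EMUTRAMP/RANDMMAP bits that PaX reads from a marked binary's program headers. A small userspace reader for the marking, as a sketch using only the constants introduced above (64-bit ELF only; it assumes e_phentsize == sizeof(Elf64_Phdr)):

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS	(PT_LOOS + 0x5041580)
#define PF_MPROTECT	(1U << 8)
#define PF_NOMPROTECT	(1U << 9)

int main(int argc, char **argv)
{
	FILE *f;
	Elf64_Ehdr eh;
	int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f || fread(&eh, sizeof(eh), 1, f) != 1) {
		perror(argv[1]);
		return 1;
	}
	fseek(f, (long)eh.e_phoff, SEEK_SET);
	for (i = 0; i < eh.e_phnum; i++) {
		Elf64_Phdr ph;

		if (fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type != PT_PAX_FLAGS)
			continue;
		printf("PT_PAX_FLAGS = 0x%x%s%s\n", (unsigned)ph.p_flags,
		       (ph.p_flags & PF_MPROTECT) ? " MPROTECT" : "",
		       (ph.p_flags & PF_NOMPROTECT) ? " NOMPROTECT" : "");
	}
	fclose(f);
	return 0;
}
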
87805diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87806index aa169c4..6a2771d 100644
87807--- a/include/uapi/linux/personality.h
87808+++ b/include/uapi/linux/personality.h
87809@@ -30,6 +30,7 @@ enum {
87810 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87811 ADDR_NO_RANDOMIZE | \
87812 ADDR_COMPAT_LAYOUT | \
87813+ ADDR_LIMIT_3GB | \
87814 MMAP_PAGE_ZERO)
87815
87816 /*
87817diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87818index 7530e74..e714828 100644
87819--- a/include/uapi/linux/screen_info.h
87820+++ b/include/uapi/linux/screen_info.h
87821@@ -43,7 +43,8 @@ struct screen_info {
87822 __u16 pages; /* 0x32 */
87823 __u16 vesa_attributes; /* 0x34 */
87824 __u32 capabilities; /* 0x36 */
87825- __u8 _reserved[6]; /* 0x3a */
87826+ __u16 vesapm_size; /* 0x3a */
87827+ __u8 _reserved[4]; /* 0x3c */
87828 } __attribute__((packed));
87829
87830 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87831diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87832index 0e011eb..82681b1 100644
87833--- a/include/uapi/linux/swab.h
87834+++ b/include/uapi/linux/swab.h
87835@@ -43,7 +43,7 @@
87836 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87837 */
87838
87839-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87840+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87841 {
87842 #ifdef __HAVE_BUILTIN_BSWAP16__
87843 return __builtin_bswap16(val);
87844@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87845 #endif
87846 }
87847
87848-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87849+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87850 {
87851 #ifdef __HAVE_BUILTIN_BSWAP32__
87852 return __builtin_bswap32(val);
87853@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87854 #endif
87855 }
87856
87857-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87858+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87859 {
87860 #ifdef __HAVE_BUILTIN_BSWAP64__
87861 return __builtin_bswap64(val);
87862diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87863index 1590c49..5eab462 100644
87864--- a/include/uapi/linux/xattr.h
87865+++ b/include/uapi/linux/xattr.h
87866@@ -73,5 +73,9 @@
87867 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87868 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87869
87870+/* User namespace */
87871+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87872+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87873+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87874
87875 #endif /* _UAPI_LINUX_XATTR_H */
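The uapi xattr hunk names the attribute ("user.pax.flags") that, with CONFIG_PAX_XATTR_PAX_FLAGS, carries per-binary PaX flags as an alternative to ELF header marking. A sketch of setting it from userspace; the flag-letter values (for example "m" to relax MPROTECT for a given binary) follow PaX userland convention and are parsed by the kernel, not defined in this header:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

#define XATTR_NAME_PAX_FLAGS "user.pax.flags"

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <file> <flags>\n", argv[0]);
		return 1;
	}
	if (setxattr(argv[1], XATTR_NAME_PAX_FLAGS,
		     argv[2], strlen(argv[2]), 0) != 0) {
		perror("setxattr");
		return 1;
	}
	return 0;
}

This is equivalent to setfattr -n user.pax.flags -v <flags> <file>.
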
87876diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87877index f9466fa..f4e2b81 100644
87878--- a/include/video/udlfb.h
87879+++ b/include/video/udlfb.h
87880@@ -53,10 +53,10 @@ struct dlfb_data {
87881 u32 pseudo_palette[256];
87882 int blank_mode; /*one of FB_BLANK_ */
87883 /* blit-only rendering path metrics, exposed through sysfs */
87884- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87885- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87886- atomic_t bytes_sent; /* to usb, after compression including overhead */
87887- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87888+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87889+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87890+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87891+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87892 };
87893
87894 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87895diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87896index 30f5362..8ed8ac9 100644
87897--- a/include/video/uvesafb.h
87898+++ b/include/video/uvesafb.h
87899@@ -122,6 +122,7 @@ struct uvesafb_par {
87900 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87901 u8 pmi_setpal; /* PMI for palette changes */
87902 u16 *pmi_base; /* protected mode interface location */
87903+ u8 *pmi_code; /* protected mode code location */
87904 void *pmi_start;
87905 void *pmi_pal;
87906 u8 *vbe_state_orig; /*
87907diff --git a/init/Kconfig b/init/Kconfig
87908index f5dbc6d..8259396 100644
87909--- a/init/Kconfig
87910+++ b/init/Kconfig
87911@@ -1136,6 +1136,7 @@ endif # CGROUPS
87912
87913 config CHECKPOINT_RESTORE
87914 bool "Checkpoint/restore support" if EXPERT
87915+ depends on !GRKERNSEC
87916 default n
87917 help
87918 Enables additional kernel features for the sake of checkpoint/restore.
87919@@ -1646,7 +1647,7 @@ config SLUB_DEBUG
87920
87921 config COMPAT_BRK
87922 bool "Disable heap randomization"
87923- default y
87924+ default n
87925 help
87926 Randomizing heap placement makes heap exploits harder, but it
87927 also breaks ancient binaries (including anything libc5 based).
87928@@ -1977,7 +1978,7 @@ config INIT_ALL_POSSIBLE
87929 config STOP_MACHINE
87930 bool
87931 default y
87932- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87933+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87934 help
87935 Need stop_machine() primitive.
87936
87937diff --git a/init/Makefile b/init/Makefile
87938index 7bc47ee..6da2dc7 100644
87939--- a/init/Makefile
87940+++ b/init/Makefile
87941@@ -2,6 +2,9 @@
87942 # Makefile for the linux kernel.
87943 #
87944
87945+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87946+asflags-y := $(GCC_PLUGINS_AFLAGS)
87947+
87948 obj-y := main.o version.o mounts.o
87949 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87950 obj-y += noinitramfs.o
87951diff --git a/init/do_mounts.c b/init/do_mounts.c
87952index eb41008..f5dbbf9 100644
87953--- a/init/do_mounts.c
87954+++ b/init/do_mounts.c
87955@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87956 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87957 {
87958 struct super_block *s;
87959- int err = sys_mount(name, "/root", fs, flags, data);
87960+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87961 if (err)
87962 return err;
87963
87964- sys_chdir("/root");
87965+ sys_chdir((const char __force_user *)"/root");
87966 s = current->fs->pwd.dentry->d_sb;
87967 ROOT_DEV = s->s_dev;
87968 printk(KERN_INFO
87969@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87970 va_start(args, fmt);
87971 vsprintf(buf, fmt, args);
87972 va_end(args);
87973- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87974+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87975 if (fd >= 0) {
87976 sys_ioctl(fd, FDEJECT, 0);
87977 sys_close(fd);
87978 }
87979 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87980- fd = sys_open("/dev/console", O_RDWR, 0);
87981+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87982 if (fd >= 0) {
87983 sys_ioctl(fd, TCGETS, (long)&termios);
87984 termios.c_lflag &= ~ICANON;
87985 sys_ioctl(fd, TCSETSF, (long)&termios);
87986- sys_read(fd, &c, 1);
87987+ sys_read(fd, (char __user *)&c, 1);
87988 termios.c_lflag |= ICANON;
87989 sys_ioctl(fd, TCSETSF, (long)&termios);
87990 sys_close(fd);
87991@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87992 mount_root();
87993 out:
87994 devtmpfs_mount("dev");
87995- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87996- sys_chroot(".");
87997+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87998+ sys_chroot((const char __force_user *)".");
87999 }
88000
88001 static bool is_tmpfs;
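The do_mounts changes wrap every string passed to the syscall helpers in (char __force_user *) casts. These are sparse address-space annotations: under the grsecurity checker rules, kernel and user pointers live in different address spaces, and early-init code that feeds kernel strings to sys_*() entry points must force the conversion explicitly. A compilable model of the mechanism (plain gcc compiles it silently; run sparse over it to see the warning the casts suppress):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

/* Stand-in for a syscall entry that expects a user-space pointer. */
static long sys_unlink_model(const char __user *name)
{
	(void)name;
	return 0;
}

int main(void)
{
	const char *kname = "/initrd.image";	/* kernel-space string */

	/* Without the cast, sparse reports an address-space mismatch: */
	return (int)sys_unlink_model((const char __force_user *)kname);
}
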
88002diff --git a/init/do_mounts.h b/init/do_mounts.h
88003index f5b978a..69dbfe8 100644
88004--- a/init/do_mounts.h
88005+++ b/init/do_mounts.h
88006@@ -15,15 +15,15 @@ extern int root_mountflags;
88007
88008 static inline int create_dev(char *name, dev_t dev)
88009 {
88010- sys_unlink(name);
88011- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
88012+ sys_unlink((char __force_user *)name);
88013+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
88014 }
88015
88016 #if BITS_PER_LONG == 32
88017 static inline u32 bstat(char *name)
88018 {
88019 struct stat64 stat;
88020- if (sys_stat64(name, &stat) != 0)
88021+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
88022 return 0;
88023 if (!S_ISBLK(stat.st_mode))
88024 return 0;
88025@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
88026 static inline u32 bstat(char *name)
88027 {
88028 struct stat stat;
88029- if (sys_newstat(name, &stat) != 0)
88030+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
88031 return 0;
88032 if (!S_ISBLK(stat.st_mode))
88033 return 0;
88034diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
88035index 3e0878e..8a9d7a0 100644
88036--- a/init/do_mounts_initrd.c
88037+++ b/init/do_mounts_initrd.c
88038@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
88039 {
88040 sys_unshare(CLONE_FS | CLONE_FILES);
88041 /* stdin/stdout/stderr for /linuxrc */
88042- sys_open("/dev/console", O_RDWR, 0);
88043+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
88044 sys_dup(0);
88045 sys_dup(0);
88046 /* move initrd over / and chdir/chroot in initrd root */
88047- sys_chdir("/root");
88048- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88049- sys_chroot(".");
88050+ sys_chdir((const char __force_user *)"/root");
88051+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88052+ sys_chroot((const char __force_user *)".");
88053 sys_setsid();
88054 return 0;
88055 }
88056@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
88057 create_dev("/dev/root.old", Root_RAM0);
88058 /* mount initrd on rootfs' /root */
88059 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
88060- sys_mkdir("/old", 0700);
88061- sys_chdir("/old");
88062+ sys_mkdir((const char __force_user *)"/old", 0700);
88063+ sys_chdir((const char __force_user *)"/old");
88064
88065 /* try loading default modules from initrd */
88066 load_default_modules();
88067@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
88068 current->flags &= ~PF_FREEZER_SKIP;
88069
88070 /* move initrd to rootfs' /old */
88071- sys_mount("..", ".", NULL, MS_MOVE, NULL);
88072+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
88073 /* switch root and cwd back to / of rootfs */
88074- sys_chroot("..");
88075+ sys_chroot((const char __force_user *)"..");
88076
88077 if (new_decode_dev(real_root_dev) == Root_RAM0) {
88078- sys_chdir("/old");
88079+ sys_chdir((const char __force_user *)"/old");
88080 return;
88081 }
88082
88083- sys_chdir("/");
88084+ sys_chdir((const char __force_user *)"/");
88085 ROOT_DEV = new_decode_dev(real_root_dev);
88086 mount_root();
88087
88088 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
88089- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
88090+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
88091 if (!error)
88092 printk("okay\n");
88093 else {
88094- int fd = sys_open("/dev/root.old", O_RDWR, 0);
88095+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
88096 if (error == -ENOENT)
88097 printk("/initrd does not exist. Ignored.\n");
88098 else
88099 printk("failed\n");
88100 printk(KERN_NOTICE "Unmounting old root\n");
88101- sys_umount("/old", MNT_DETACH);
88102+ sys_umount((char __force_user *)"/old", MNT_DETACH);
88103 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
88104 if (fd < 0) {
88105 error = fd;
88106@@ -127,11 +127,11 @@ int __init initrd_load(void)
88107 * mounted in the normal path.
88108 */
88109 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
88110- sys_unlink("/initrd.image");
88111+ sys_unlink((const char __force_user *)"/initrd.image");
88112 handle_initrd();
88113 return 1;
88114 }
88115 }
88116- sys_unlink("/initrd.image");
88117+ sys_unlink((const char __force_user *)"/initrd.image");
88118 return 0;
88119 }
88120diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
88121index 8cb6db5..d729f50 100644
88122--- a/init/do_mounts_md.c
88123+++ b/init/do_mounts_md.c
88124@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
88125 partitioned ? "_d" : "", minor,
88126 md_setup_args[ent].device_names);
88127
88128- fd = sys_open(name, 0, 0);
88129+ fd = sys_open((char __force_user *)name, 0, 0);
88130 if (fd < 0) {
88131 printk(KERN_ERR "md: open failed - cannot start "
88132 "array %s\n", name);
88133@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
88134 * array without it
88135 */
88136 sys_close(fd);
88137- fd = sys_open(name, 0, 0);
88138+ fd = sys_open((char __force_user *)name, 0, 0);
88139 sys_ioctl(fd, BLKRRPART, 0);
88140 }
88141 sys_close(fd);
88142@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
88143
88144 wait_for_device_probe();
88145
88146- fd = sys_open("/dev/md0", 0, 0);
88147+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
88148 if (fd >= 0) {
88149 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
88150 sys_close(fd);
88151diff --git a/init/init_task.c b/init/init_task.c
88152index ba0a7f36..2bcf1d5 100644
88153--- a/init/init_task.c
88154+++ b/init/init_task.c
88155@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
88156 * Initial thread structure. Alignment of this is handled by a special
88157 * linker map entry.
88158 */
88159+#ifdef CONFIG_X86
88160+union thread_union init_thread_union __init_task_data;
88161+#else
88162 union thread_union init_thread_union __init_task_data =
88163 { INIT_THREAD_INFO(init_task) };
88164+#endif
88165diff --git a/init/initramfs.c b/init/initramfs.c
88166index ad1bd77..dca2c1b 100644
88167--- a/init/initramfs.c
88168+++ b/init/initramfs.c
88169@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
88170
88171 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
88172 while (count) {
88173- ssize_t rv = sys_write(fd, p, count);
88174+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
88175
88176 if (rv < 0) {
88177 if (rv == -EINTR || rv == -EAGAIN)
88178@@ -107,7 +107,7 @@ static void __init free_hash(void)
88179 }
88180 }
88181
88182-static long __init do_utime(char *filename, time_t mtime)
88183+static long __init do_utime(char __force_user *filename, time_t mtime)
88184 {
88185 struct timespec t[2];
88186
88187@@ -142,7 +142,7 @@ static void __init dir_utime(void)
88188 struct dir_entry *de, *tmp;
88189 list_for_each_entry_safe(de, tmp, &dir_list, list) {
88190 list_del(&de->list);
88191- do_utime(de->name, de->mtime);
88192+ do_utime((char __force_user *)de->name, de->mtime);
88193 kfree(de->name);
88194 kfree(de);
88195 }
88196@@ -304,7 +304,7 @@ static int __init maybe_link(void)
88197 if (nlink >= 2) {
88198 char *old = find_link(major, minor, ino, mode, collected);
88199 if (old)
88200- return (sys_link(old, collected) < 0) ? -1 : 1;
88201+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
88202 }
88203 return 0;
88204 }
88205@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
88206 {
88207 struct stat st;
88208
88209- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
88210+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
88211 if (S_ISDIR(st.st_mode))
88212- sys_rmdir(path);
88213+ sys_rmdir((char __force_user *)path);
88214 else
88215- sys_unlink(path);
88216+ sys_unlink((char __force_user *)path);
88217 }
88218 }
88219
88220@@ -338,7 +338,7 @@ static int __init do_name(void)
88221 int openflags = O_WRONLY|O_CREAT;
88222 if (ml != 1)
88223 openflags |= O_TRUNC;
88224- wfd = sys_open(collected, openflags, mode);
88225+ wfd = sys_open((char __force_user *)collected, openflags, mode);
88226
88227 if (wfd >= 0) {
88228 sys_fchown(wfd, uid, gid);
88229@@ -350,17 +350,17 @@ static int __init do_name(void)
88230 }
88231 }
88232 } else if (S_ISDIR(mode)) {
88233- sys_mkdir(collected, mode);
88234- sys_chown(collected, uid, gid);
88235- sys_chmod(collected, mode);
88236+ sys_mkdir((char __force_user *)collected, mode);
88237+ sys_chown((char __force_user *)collected, uid, gid);
88238+ sys_chmod((char __force_user *)collected, mode);
88239 dir_add(collected, mtime);
88240 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
88241 S_ISFIFO(mode) || S_ISSOCK(mode)) {
88242 if (maybe_link() == 0) {
88243- sys_mknod(collected, mode, rdev);
88244- sys_chown(collected, uid, gid);
88245- sys_chmod(collected, mode);
88246- do_utime(collected, mtime);
88247+ sys_mknod((char __force_user *)collected, mode, rdev);
88248+ sys_chown((char __force_user *)collected, uid, gid);
88249+ sys_chmod((char __force_user *)collected, mode);
88250+ do_utime((char __force_user *)collected, mtime);
88251 }
88252 }
88253 return 0;
88254@@ -372,7 +372,7 @@ static int __init do_copy(void)
88255 if (xwrite(wfd, victim, body_len) != body_len)
88256 error("write error");
88257 sys_close(wfd);
88258- do_utime(vcollected, mtime);
88259+ do_utime((char __force_user *)vcollected, mtime);
88260 kfree(vcollected);
88261 eat(body_len);
88262 state = SkipIt;
88263@@ -390,9 +390,9 @@ static int __init do_symlink(void)
88264 {
88265 collected[N_ALIGN(name_len) + body_len] = '\0';
88266 clean_path(collected, 0);
88267- sys_symlink(collected + N_ALIGN(name_len), collected);
88268- sys_lchown(collected, uid, gid);
88269- do_utime(collected, mtime);
88270+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
88271+ sys_lchown((char __force_user *)collected, uid, gid);
88272+ do_utime((char __force_user *)collected, mtime);
88273 state = SkipIt;
88274 next_state = Reset;
88275 return 0;
88276diff --git a/init/main.c b/init/main.c
88277index 6f0f1c5f..a542824 100644
88278--- a/init/main.c
88279+++ b/init/main.c
88280@@ -96,6 +96,8 @@ extern void radix_tree_init(void);
88281 static inline void mark_rodata_ro(void) { }
88282 #endif
88283
88284+extern void grsecurity_init(void);
88285+
88286 /*
88287 * Debug helper: via this flag we know that we are in 'early bootup code'
88288 * where only the boot processor is running with IRQ disabled. This means
88289@@ -157,6 +159,85 @@ static int __init set_reset_devices(char *str)
88290
88291 __setup("reset_devices", set_reset_devices);
88292
88293+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
88294+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
88295+static int __init setup_grsec_proc_gid(char *str)
88296+{
88297+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
88298+ return 1;
88299+}
88300+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
88301+#endif
88302+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
88303+int grsec_enable_sysfs_restrict = 1;
88304+static int __init setup_grsec_sysfs_restrict(char *str)
88305+{
88306+ if (!simple_strtol(str, NULL, 0))
88307+ grsec_enable_sysfs_restrict = 0;
88308+ return 1;
88309+}
88310+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
88311+#endif
88312+
88313+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
88314+unsigned long pax_user_shadow_base __read_only;
88315+EXPORT_SYMBOL(pax_user_shadow_base);
88316+extern char pax_enter_kernel_user[];
88317+extern char pax_exit_kernel_user[];
88318+#endif
88319+
88320+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
88321+static int __init setup_pax_nouderef(char *str)
88322+{
88323+#ifdef CONFIG_X86_32
88324+ unsigned int cpu;
88325+ struct desc_struct *gdt;
88326+
88327+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
88328+ gdt = get_cpu_gdt_table(cpu);
88329+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
88330+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
88331+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
88332+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
88333+ }
88334+ loadsegment(ds, __KERNEL_DS);
88335+ loadsegment(es, __KERNEL_DS);
88336+ loadsegment(ss, __KERNEL_DS);
88337+#else
88338+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
88339+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
88340+ clone_pgd_mask = ~(pgdval_t)0UL;
88341+ pax_user_shadow_base = 0UL;
88342+ setup_clear_cpu_cap(X86_FEATURE_PCID);
88343+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
88344+#endif
88345+
88346+ return 0;
88347+}
88348+early_param("pax_nouderef", setup_pax_nouderef);
88349+
88350+#ifdef CONFIG_X86_64
88351+static int __init setup_pax_weakuderef(char *str)
88352+{
88353+ if (clone_pgd_mask != ~(pgdval_t)0UL)
88354+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
88355+ return 1;
88356+}
88357+__setup("pax_weakuderef", setup_pax_weakuderef);
88358+#endif
88359+#endif
88360+
88361+#ifdef CONFIG_PAX_SOFTMODE
88362+int pax_softmode;
88363+
88364+static int __init setup_pax_softmode(char *str)
88365+{
88366+ get_option(&str, &pax_softmode);
88367+ return 1;
88368+}
88369+__setup("pax_softmode=", setup_pax_softmode);
88370+#endif
88371+
88372 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
88373 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
88374 static const char *panic_later, *panic_param;
88375@@ -722,7 +803,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
88376 struct blacklist_entry *entry;
88377 char *fn_name;
88378
88379- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
88380+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
88381 if (!fn_name)
88382 return false;
88383
88384@@ -774,7 +855,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
88385 {
88386 int count = preempt_count();
88387 int ret;
88388- char msgbuf[64];
88389+ const char *msg1 = "", *msg2 = "";
88390
88391 if (initcall_blacklisted(fn))
88392 return -EPERM;
88393@@ -784,18 +865,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
88394 else
88395 ret = fn();
88396
88397- msgbuf[0] = 0;
88398-
88399 if (preempt_count() != count) {
88400- sprintf(msgbuf, "preemption imbalance ");
88401+ msg1 = " preemption imbalance";
88402 preempt_count_set(count);
88403 }
88404 if (irqs_disabled()) {
88405- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
88406+ msg2 = " disabled interrupts";
88407 local_irq_enable();
88408 }
88409- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
88410+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
88411
88412+ add_latent_entropy();
88413 return ret;
88414 }
88415
88416@@ -901,8 +981,8 @@ static int run_init_process(const char *init_filename)
88417 {
88418 argv_init[0] = init_filename;
88419 return do_execve(getname_kernel(init_filename),
88420- (const char __user *const __user *)argv_init,
88421- (const char __user *const __user *)envp_init);
88422+ (const char __user *const __force_user *)argv_init,
88423+ (const char __user *const __force_user *)envp_init);
88424 }
88425
88426 static int try_to_run_init_process(const char *init_filename)
88427@@ -919,6 +999,10 @@ static int try_to_run_init_process(const char *init_filename)
88428 return ret;
88429 }
88430
88431+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88432+extern int gr_init_ran;
88433+#endif
88434+
88435 static noinline void __init kernel_init_freeable(void);
88436
88437 static int __ref kernel_init(void *unused)
88438@@ -943,6 +1027,11 @@ static int __ref kernel_init(void *unused)
88439 ramdisk_execute_command, ret);
88440 }
88441
88442+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88443+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
88444+ gr_init_ran = 1;
88445+#endif
88446+
88447 /*
88448 * We try each of these until one succeeds.
88449 *
88450@@ -998,7 +1087,7 @@ static noinline void __init kernel_init_freeable(void)
88451 do_basic_setup();
88452
88453 /* Open the /dev/console on the rootfs, this should never fail */
88454- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
88455+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
88456 pr_err("Warning: unable to open an initial console.\n");
88457
88458 (void) sys_dup(0);
88459@@ -1011,11 +1100,13 @@ static noinline void __init kernel_init_freeable(void)
88460 if (!ramdisk_execute_command)
88461 ramdisk_execute_command = "/init";
88462
88463- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
88464+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
88465 ramdisk_execute_command = NULL;
88466 prepare_namespace();
88467 }
88468
88469+ grsecurity_init();
88470+
88471 /*
88472 * Ok, we have completed the initial bootup, and
88473 * we're essentially up and running. Get rid of the
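
[editor's note] The new grsec/PaX toggles above are all wired through the kernel's standard command-line hooks: __setup() for ordinary options parsed once the early parser runs, and early_param() for ones like pax_nouderef that must take effect before the rest of setup. A hedged sketch of the same wiring for a hypothetical flag (kernel-context code, only buildable inside a kernel tree; example_flag is made up):

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int example_flag __read_mostly = 1;

/* booting with "example_flag=0" clears the flag; returning 1 tells the
 * __setup() option parser that the argument was consumed */
static int __init setup_example_flag(char *str)
{
	if (!simple_strtol(str, NULL, 0))
		example_flag = 0;
	return 1;
}
__setup("example_flag=", setup_example_flag);
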
88474diff --git a/ipc/compat.c b/ipc/compat.c
88475index 9b3c85f..1c4d897 100644
88476--- a/ipc/compat.c
88477+++ b/ipc/compat.c
88478@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
88479 COMPAT_SHMLBA);
88480 if (err < 0)
88481 return err;
88482- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
88483+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
88484 }
88485 case SHMDT:
88486 return sys_shmdt(compat_ptr(ptr));
88487diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88488index 8ad93c2..efd80f8 100644
88489--- a/ipc/ipc_sysctl.c
88490+++ b/ipc/ipc_sysctl.c
88491@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88492 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88493 void __user *buffer, size_t *lenp, loff_t *ppos)
88494 {
88495- struct ctl_table ipc_table;
88496+ ctl_table_no_const ipc_table;
88497
88498 memcpy(&ipc_table, table, sizeof(ipc_table));
88499 ipc_table.data = get_ipc(table);
88500@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88501 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88502 void __user *buffer, size_t *lenp, loff_t *ppos)
88503 {
88504- struct ctl_table ipc_table;
88505+ ctl_table_no_const ipc_table;
88506
88507 memcpy(&ipc_table, table, sizeof(ipc_table));
88508 ipc_table.data = get_ipc(table);
88509@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88510 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88511 void __user *buffer, size_t *lenp, loff_t *ppos)
88512 {
88513- struct ctl_table ipc_table;
88514+ ctl_table_no_const ipc_table;
88515 memcpy(&ipc_table, table, sizeof(ipc_table));
88516 ipc_table.data = get_ipc(table);
88517
88518@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88519 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
88520 void __user *buffer, size_t *lenp, loff_t *ppos)
88521 {
88522- struct ctl_table ipc_table;
88523+ ctl_table_no_const ipc_table;
88524 int dummy = 0;
88525
88526 memcpy(&ipc_table, table, sizeof(ipc_table));
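
[editor's note] ctl_table_no_const belongs to the patch's constification work: a GCC plugin makes sysctl tables read-only at build time, so a handler that needs a scratch copy whose .data pointer can be repointed at per-namespace storage declares that copy with a non-constified variant of the type. A rough userspace model of the shape, assuming ctl_table_no_const is essentially struct ctl_table minus the forced qualifier (the real definition lives in the patched include/linux/sysctl.h):

#include <stdio.h>
#include <string.h>

struct ctl_table {			/* trimmed-down stand-in */
	const char *procname;
	void *data;
};

typedef struct ctl_table ctl_table_no_const;	/* the mutable twin */

static int fake_proc_dointvec(const struct ctl_table *table, void *ns_data)
{
	ctl_table_no_const tmp;

	memcpy(&tmp, table, sizeof(tmp));	/* copy the read-only template */
	tmp.data = ns_data;			/* legal: tmp is not constified */
	printf("%s now points at per-namespace data\n", tmp.procname);
	return 0;
}

int main(void)
{
	static const struct ctl_table shmmax = { .procname = "shmmax" };
	int value = 0;

	return fake_proc_dointvec(&shmmax, &value);
}
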
88527diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88528index 68d4e95..1477ded 100644
88529--- a/ipc/mq_sysctl.c
88530+++ b/ipc/mq_sysctl.c
88531@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88532 static int proc_mq_dointvec(struct ctl_table *table, int write,
88533 void __user *buffer, size_t *lenp, loff_t *ppos)
88534 {
88535- struct ctl_table mq_table;
88536+ ctl_table_no_const mq_table;
88537 memcpy(&mq_table, table, sizeof(mq_table));
88538 mq_table.data = get_mq(table);
88539
88540@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88541 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88542 void __user *buffer, size_t *lenp, loff_t *ppos)
88543 {
88544- struct ctl_table mq_table;
88545+ ctl_table_no_const mq_table;
88546 memcpy(&mq_table, table, sizeof(mq_table));
88547 mq_table.data = get_mq(table);
88548
88549diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88550index 7635a1c..7432cb6 100644
88551--- a/ipc/mqueue.c
88552+++ b/ipc/mqueue.c
88553@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88554 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88555 info->attr.mq_msgsize);
88556
88557+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88558 spin_lock(&mq_lock);
88559 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88560 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88561diff --git a/ipc/shm.c b/ipc/shm.c
88562index 19633b4..d454904 100644
88563--- a/ipc/shm.c
88564+++ b/ipc/shm.c
88565@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88566 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88567 #endif
88568
88569+#ifdef CONFIG_GRKERNSEC
88570+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88571+ const u64 shm_createtime, const kuid_t cuid,
88572+ const int shmid);
88573+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88574+ const u64 shm_createtime);
88575+#endif
88576+
88577 void shm_init_ns(struct ipc_namespace *ns)
88578 {
88579 ns->shm_ctlmax = SHMMAX;
88580@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88581 shp->shm_lprid = 0;
88582 shp->shm_atim = shp->shm_dtim = 0;
88583 shp->shm_ctim = get_seconds();
88584+#ifdef CONFIG_GRKERNSEC
88585+ shp->shm_createtime = ktime_get_ns();
88586+#endif
88587 shp->shm_segsz = size;
88588 shp->shm_nattch = 0;
88589 shp->shm_file = file;
88590@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88591 f_mode = FMODE_READ | FMODE_WRITE;
88592 }
88593 if (shmflg & SHM_EXEC) {
88594+
88595+#ifdef CONFIG_PAX_MPROTECT
88596+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88597+ goto out;
88598+#endif
88599+
88600 prot |= PROT_EXEC;
88601 acc_mode |= S_IXUGO;
88602 }
88603@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88604 if (err)
88605 goto out_unlock;
88606
88607+#ifdef CONFIG_GRKERNSEC
88608+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88609+ shp->shm_perm.cuid, shmid) ||
88610+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88611+ err = -EACCES;
88612+ goto out_unlock;
88613+ }
88614+#endif
88615+
88616 ipc_lock_object(&shp->shm_perm);
88617
88618 /* check if shm_destroy() is tearing down shp */
88619@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88620 path = shp->shm_file->f_path;
88621 path_get(&path);
88622 shp->shm_nattch++;
88623+#ifdef CONFIG_GRKERNSEC
88624+ shp->shm_lapid = current->pid;
88625+#endif
88626 size = i_size_read(path.dentry->d_inode);
88627 ipc_unlock_object(&shp->shm_perm);
88628 rcu_read_unlock();
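
[editor's note] Under PaX MPROTECT the hunk above refuses SHM_EXEC attaches outright instead of granting PROT_EXEC, and the grsec hooks additionally veto attaches based on the segment's creator pid and creation timestamp. A small userspace probe of the SHM_EXEC path (on a stock kernel the attach succeeds for the segment owner; under MPROTECT it should fail with EACCES):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_EXEC
#define SHM_EXEC 0100000	/* from linux/shm.h: request execute access */
#endif

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	void *p;

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	p = shmat(id, NULL, SHM_EXEC);		/* ask for an executable mapping */
	if (p == (void *)-1)
		perror("shmat(SHM_EXEC)");	/* EACCES expected under MPROTECT */
	else
		shmdt(p);
	shmctl(id, IPC_RMID, NULL);		/* clean up the segment */
	return 0;
}
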
88629diff --git a/ipc/util.c b/ipc/util.c
88630index 106bed0..f851429 100644
88631--- a/ipc/util.c
88632+++ b/ipc/util.c
88633@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88634 int (*show)(struct seq_file *, void *);
88635 };
88636
88637+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88638+
88639 /**
88640 * ipc_init - initialise ipc subsystem
88641 *
88642@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88643 granted_mode >>= 6;
88644 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88645 granted_mode >>= 3;
88646+
88647+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88648+ return -1;
88649+
88650 /* is there some bit set in requested_mode but not in granted_mode? */
88651 if ((requested_mode & ~granted_mode & 0007) &&
88652 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
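
[editor's note] For reference, the surrounding ipcperms() logic that the gr_ipc_permitted() hook slots into: the object's mode is shifted so the relevant rwx triplet (owner, group, or other) lands in the low three bits, and access is denied if any requested bit is absent from the granted set. Worked standalone example:

#include <stdio.h>

int main(void)
{
	unsigned int granted = 0640;	/* rw-r----- on the IPC object */
	unsigned int requested = 0006;	/* caller wants read + write */

	granted >>= 3;			/* caller matched the group, not the owner */
	/* requested-but-not-granted bits; non-zero means denial unless the
	 * caller holds CAP_IPC_OWNER; here group has only read, so the
	 * write bit (2) comes back as missing */
	printf("missing bits: %o\n", requested & ~granted & 0007);
	return 0;
}
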
88653diff --git a/kernel/audit.c b/kernel/audit.c
88654index 72ab759..757deba 100644
88655--- a/kernel/audit.c
88656+++ b/kernel/audit.c
88657@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88658 3) suppressed due to audit_rate_limit
88659 4) suppressed due to audit_backlog_limit
88660 */
88661-static atomic_t audit_lost = ATOMIC_INIT(0);
88662+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88663
88664 /* The netlink socket. */
88665 static struct sock *audit_sock;
88666@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88667 unsigned long now;
88668 int print;
88669
88670- atomic_inc(&audit_lost);
88671+ atomic_inc_unchecked(&audit_lost);
88672
88673 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88674
88675@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88676 if (print) {
88677 if (printk_ratelimit())
88678 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88679- atomic_read(&audit_lost),
88680+ atomic_read_unchecked(&audit_lost),
88681 audit_rate_limit,
88682 audit_backlog_limit);
88683 audit_panic(message);
88684@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88685 s.pid = audit_pid;
88686 s.rate_limit = audit_rate_limit;
88687 s.backlog_limit = audit_backlog_limit;
88688- s.lost = atomic_read(&audit_lost);
88689+ s.lost = atomic_read_unchecked(&audit_lost);
88690 s.backlog = skb_queue_len(&audit_skb_queue);
88691 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88692 s.backlog_wait_time = audit_backlog_wait_time;
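
[editor's note] atomic_unchecked_t is the escape hatch for PaX's REFCOUNT hardening: once plain atomic_t operations trap on overflow, statistics counters such as audit_lost, which may legitimately wrap and never guard an object lifetime, are moved to an *_unchecked variant with ordinary wrap-around semantics. A userspace model with C11 atomics, on the assumption that the kernel variant is the same struct-wrapped counter with non-trapping ops:

#include <stdatomic.h>
#include <stdio.h>

/* wrap-allowed counter: overflow is harmless for pure statistics */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static int atomic_read_unchecked(atomic_unchecked_t *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

int main(void)
{
	atomic_unchecked_t lost = { 0 };

	atomic_inc_unchecked(&lost);
	printf("audit_lost=%d\n", atomic_read_unchecked(&lost));
	return 0;
}
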
88693diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88694index dc4ae70..2a2bddc 100644
88695--- a/kernel/auditsc.c
88696+++ b/kernel/auditsc.c
88697@@ -1955,7 +1955,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88698 }
88699
88700 /* global counter which is incremented every time something logs in */
88701-static atomic_t session_id = ATOMIC_INIT(0);
88702+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88703
88704 static int audit_set_loginuid_perm(kuid_t loginuid)
88705 {
88706@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
88707
88708 /* are we setting or clearing? */
88709 if (uid_valid(loginuid))
88710- sessionid = (unsigned int)atomic_inc_return(&session_id);
88711+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88712
88713 task->sessionid = sessionid;
88714 task->loginuid = loginuid;
88715diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88716index a64e7a2..2e69448 100644
88717--- a/kernel/bpf/core.c
88718+++ b/kernel/bpf/core.c
88719@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88720 * random section of illegal instructions.
88721 */
88722 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88723- hdr = module_alloc(size);
88724+ hdr = module_alloc_exec(size);
88725 if (hdr == NULL)
88726 return NULL;
88727
88728 /* Fill space with illegal/arch-dep instructions. */
88729 bpf_fill_ill_insns(hdr, size);
88730
88731+ pax_open_kernel();
88732 hdr->pages = size / PAGE_SIZE;
88733+ pax_close_kernel();
88734+
88735 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88736 PAGE_SIZE - sizeof(*hdr));
88737 start = (prandom_u32() % hole) & ~(alignment - 1);
88738@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88739
88740 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88741 {
88742- module_memfree(hdr);
88743+ module_memfree_exec(hdr);
88744 }
88745 #endif /* CONFIG_BPF_JIT */
88746
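
[editor's note] pax_open_kernel()/pax_close_kernel() bracket the single store to hdr->pages because under KERNEXEC the JIT image returned by module_alloc_exec() is mapped read-only. A hedged, simplified sketch of what such a write window can look like on x86 (illustrative my_ helpers, not the verbatim PaX implementation, which has per-arch and UDEREF-aware variants):

/* kernel-context sketch: briefly stop the CPU from honoring page-level
 * write protection so one word in a read-only mapping can be updated,
 * with preemption off for the duration of the window */
static inline unsigned long my_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);
	return cr0;
}

static inline void my_pax_close_kernel(unsigned long cr0)
{
	write_cr0(cr0);			/* re-arm CR0.WP */
	preempt_enable();
}

/* usage mirrors the hunk above:
 *	cr0 = my_pax_open_kernel();
 *	hdr->pages = size / PAGE_SIZE;
 *	my_pax_close_kernel(cr0);
 */
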
88747diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88748index 536edc2..d28c85d 100644
88749--- a/kernel/bpf/syscall.c
88750+++ b/kernel/bpf/syscall.c
88751@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88752 int err;
88753
88754 /* the syscall is limited to root temporarily. This restriction will be
88755- * lifted when security audit is clean. Note that eBPF+tracing must have
88756- * this restriction, since it may pass kernel data to user space
88757+ * lifted by upstream when a half-assed security audit is clean. Note
88758+ * that eBPF+tracing must have this restriction, since it may pass
88759+ * kernel data to user space
88760 */
88761 if (!capable(CAP_SYS_ADMIN))
88762 return -EPERM;
88763+#ifdef CONFIG_GRKERNSEC
88764+ return -EPERM;
88765+#endif
88766
88767 if (!access_ok(VERIFY_READ, uattr, 1))
88768 return -EFAULT;
88769diff --git a/kernel/capability.c b/kernel/capability.c
88770index 989f5bf..d317ca0 100644
88771--- a/kernel/capability.c
88772+++ b/kernel/capability.c
88773@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88774 * before modification is attempted and the application
88775 * fails.
88776 */
88777+ if (tocopy > ARRAY_SIZE(kdata))
88778+ return -EFAULT;
88779+
88780 if (copy_to_user(dataptr, kdata, tocopy
88781 * sizeof(struct __user_cap_data_struct))) {
88782 return -EFAULT;
88783@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88784 int ret;
88785
88786 rcu_read_lock();
88787- ret = security_capable(__task_cred(t), ns, cap);
88788+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88789+ gr_task_is_capable(t, __task_cred(t), cap);
88790 rcu_read_unlock();
88791
88792- return (ret == 0);
88793+ return ret;
88794 }
88795
88796 /**
88797@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88798 int ret;
88799
88800 rcu_read_lock();
88801- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88802+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88803 rcu_read_unlock();
88804
88805- return (ret == 0);
88806+ return ret;
88807 }
88808
88809 /**
88810@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88811 BUG();
88812 }
88813
88814- if (security_capable(current_cred(), ns, cap) == 0) {
88815+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88816 current->flags |= PF_SUPERPRIV;
88817 return true;
88818 }
88819@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88820 }
88821 EXPORT_SYMBOL(ns_capable);
88822
88823+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88824+{
88825+ if (unlikely(!cap_valid(cap))) {
88826+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88827+ BUG();
88828+ }
88829+
88830+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88831+ current->flags |= PF_SUPERPRIV;
88832+ return true;
88833+ }
88834+ return false;
88835+}
88836+EXPORT_SYMBOL(ns_capable_nolog);
88837+
88838 /**
88839 * file_ns_capable - Determine if the file's opener had a capability in effect
88840 * @file: The file we want to check
88841@@ -427,6 +446,12 @@ bool capable(int cap)
88842 }
88843 EXPORT_SYMBOL(capable);
88844
88845+bool capable_nolog(int cap)
88846+{
88847+ return ns_capable_nolog(&init_user_ns, cap);
88848+}
88849+EXPORT_SYMBOL(capable_nolog);
88850+
88851 /**
88852 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88853 * @inode: The inode in question
88854@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88855 kgid_has_mapping(ns, inode->i_gid);
88856 }
88857 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88858+
88859+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88860+{
88861+ struct user_namespace *ns = current_user_ns();
88862+
88863+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88864+ kgid_has_mapping(ns, inode->i_gid);
88865+}
88866+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
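
[editor's note] The three added lines in capget() are a straight bounds-check fix: tocopy derives from the caller-supplied header version, and it must be clamped to the kernel array before it scales a copy_to_user() length. The same defensive shape, modeled standalone:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned int effective, permitted, inheritable; };

/* tocopy is caller-influenced: validate it before it sizes the copy */
static int copy_caps_out(struct cap_data *dst, unsigned int tocopy)
{
	struct cap_data kdata[2] = { { 0 } };

	if (tocopy > ARRAY_SIZE(kdata))
		return -EFAULT;
	memcpy(dst, kdata, tocopy * sizeof(*kdata));
	return 0;
}

int main(void)
{
	struct cap_data out[2];

	printf("tocopy=2 -> %d, tocopy=9 -> %d\n",
	       copy_caps_out(out, 2), copy_caps_out(out, 9));
	return 0;
}
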
88867diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88868index 29a7b2c..a64e30a 100644
88869--- a/kernel/cgroup.c
88870+++ b/kernel/cgroup.c
88871@@ -5347,6 +5347,9 @@ static void cgroup_release_agent(struct work_struct *work)
88872 if (!pathbuf || !agentbuf)
88873 goto out;
88874
88875+ if (agentbuf[0] == '\0')
88876+ goto out;
88877+
88878 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88879 if (!path)
88880 goto out;
88881@@ -5532,7 +5535,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88882 struct task_struct *task;
88883 int count = 0;
88884
88885- seq_printf(seq, "css_set %p\n", cset);
88886+ seq_printf(seq, "css_set %pK\n", cset);
88887
88888 list_for_each_entry(task, &cset->tasks, cg_list) {
88889 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
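
[editor's note] Printing the css_set pointer with %pK instead of %p ties the output to the kptr_restrict policy, so the debug file stops leaking kernel addresses to unprivileged readers. Quick userspace check of the governing knob:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
	int v = -1;

	if (f) {
		if (fscanf(f, "%d", &v) != 1)
			v = -1;
		fclose(f);
	}
	/* 0: %pK prints real addresses; 1: zeroes unless the reader has
	 * CAP_SYSLOG; 2: always zeroes */
	printf("kptr_restrict=%d\n", v);
	return 0;
}
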
88890diff --git a/kernel/compat.c b/kernel/compat.c
88891index 24f0061..ea80802 100644
88892--- a/kernel/compat.c
88893+++ b/kernel/compat.c
88894@@ -13,6 +13,7 @@
88895
88896 #include <linux/linkage.h>
88897 #include <linux/compat.h>
88898+#include <linux/module.h>
88899 #include <linux/errno.h>
88900 #include <linux/time.h>
88901 #include <linux/signal.h>
88902@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88903 mm_segment_t oldfs;
88904 long ret;
88905
88906- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88907+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88908 oldfs = get_fs();
88909 set_fs(KERNEL_DS);
88910 ret = hrtimer_nanosleep_restart(restart);
88911@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88912 oldfs = get_fs();
88913 set_fs(KERNEL_DS);
88914 ret = hrtimer_nanosleep(&tu,
88915- rmtp ? (struct timespec __user *)&rmt : NULL,
88916+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88917 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88918 set_fs(oldfs);
88919
88920@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88921 mm_segment_t old_fs = get_fs();
88922
88923 set_fs(KERNEL_DS);
88924- ret = sys_sigpending((old_sigset_t __user *) &s);
88925+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88926 set_fs(old_fs);
88927 if (ret == 0)
88928 ret = put_user(s, set);
88929@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88930 mm_segment_t old_fs = get_fs();
88931
88932 set_fs(KERNEL_DS);
88933- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88934+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88935 set_fs(old_fs);
88936
88937 if (!ret) {
88938@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88939 set_fs (KERNEL_DS);
88940 ret = sys_wait4(pid,
88941 (stat_addr ?
88942- (unsigned int __user *) &status : NULL),
88943- options, (struct rusage __user *) &r);
88944+ (unsigned int __force_user *) &status : NULL),
88945+ options, (struct rusage __force_user *) &r);
88946 set_fs (old_fs);
88947
88948 if (ret > 0) {
88949@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88950 memset(&info, 0, sizeof(info));
88951
88952 set_fs(KERNEL_DS);
88953- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88954- uru ? (struct rusage __user *)&ru : NULL);
88955+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88956+ uru ? (struct rusage __force_user *)&ru : NULL);
88957 set_fs(old_fs);
88958
88959 if ((ret < 0) || (info.si_signo == 0))
88960@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88961 oldfs = get_fs();
88962 set_fs(KERNEL_DS);
88963 err = sys_timer_settime(timer_id, flags,
88964- (struct itimerspec __user *) &newts,
88965- (struct itimerspec __user *) &oldts);
88966+ (struct itimerspec __force_user *) &newts,
88967+ (struct itimerspec __force_user *) &oldts);
88968 set_fs(oldfs);
88969 if (!err && old && put_compat_itimerspec(old, &oldts))
88970 return -EFAULT;
88971@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88972 oldfs = get_fs();
88973 set_fs(KERNEL_DS);
88974 err = sys_timer_gettime(timer_id,
88975- (struct itimerspec __user *) &ts);
88976+ (struct itimerspec __force_user *) &ts);
88977 set_fs(oldfs);
88978 if (!err && put_compat_itimerspec(setting, &ts))
88979 return -EFAULT;
88980@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88981 oldfs = get_fs();
88982 set_fs(KERNEL_DS);
88983 err = sys_clock_settime(which_clock,
88984- (struct timespec __user *) &ts);
88985+ (struct timespec __force_user *) &ts);
88986 set_fs(oldfs);
88987 return err;
88988 }
88989@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88990 oldfs = get_fs();
88991 set_fs(KERNEL_DS);
88992 err = sys_clock_gettime(which_clock,
88993- (struct timespec __user *) &ts);
88994+ (struct timespec __force_user *) &ts);
88995 set_fs(oldfs);
88996 if (!err && compat_put_timespec(&ts, tp))
88997 return -EFAULT;
88998@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88999
89000 oldfs = get_fs();
89001 set_fs(KERNEL_DS);
89002- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
89003+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
89004 set_fs(oldfs);
89005
89006 err = compat_put_timex(utp, &txc);
89007@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
89008 oldfs = get_fs();
89009 set_fs(KERNEL_DS);
89010 err = sys_clock_getres(which_clock,
89011- (struct timespec __user *) &ts);
89012+ (struct timespec __force_user *) &ts);
89013 set_fs(oldfs);
89014 if (!err && tp && compat_put_timespec(&ts, tp))
89015 return -EFAULT;
89016@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
89017 struct timespec tu;
89018 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
89019
89020- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
89021+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
89022 oldfs = get_fs();
89023 set_fs(KERNEL_DS);
89024 err = clock_nanosleep_restart(restart);
89025@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
89026 oldfs = get_fs();
89027 set_fs(KERNEL_DS);
89028 err = sys_clock_nanosleep(which_clock, flags,
89029- (struct timespec __user *) &in,
89030- (struct timespec __user *) &out);
89031+ (struct timespec __force_user *) &in,
89032+ (struct timespec __force_user *) &out);
89033 set_fs(oldfs);
89034
89035 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
89036@@ -1145,7 +1146,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
89037 mm_segment_t old_fs = get_fs();
89038
89039 set_fs(KERNEL_DS);
89040- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
89041+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
89042 set_fs(old_fs);
89043 if (compat_put_timespec(&t, interval))
89044 return -EFAULT;
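
[editor's note] Every compat hunk above is the same idiom: widen the address limit with set_fs(KERNEL_DS), invoke the native syscall on an on-stack kernel struct (the __force_user casts keep sparse quiet about it, per the annotation sketch earlier), restore the old limit, then translate the result into the 32-bit layout. Condensed shape of the pattern (kernel-context sketch, not a complete compat handler; needs linux/syscalls.h and the uaccess headers):

long compat_pattern_example(void)
{
	struct timespec ts;
	mm_segment_t oldfs = get_fs();	/* remember the current limit */
	long err;

	set_fs(KERNEL_DS);		/* "user" accesses may now hit kernel memory */
	err = sys_clock_gettime(CLOCK_MONOTONIC,
				(struct timespec __force_user *)&ts);
	set_fs(oldfs);			/* restore before anything else runs */

	/* ... on success, copy ts back out in the compat layout ... */
	return err;
}
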
89045diff --git a/kernel/configs.c b/kernel/configs.c
89046index c18b1f1..b9a0132 100644
89047--- a/kernel/configs.c
89048+++ b/kernel/configs.c
89049@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
89050 struct proc_dir_entry *entry;
89051
89052 /* create the current config file */
89053+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
89054+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
89055+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
89056+ &ikconfig_file_ops);
89057+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
89058+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
89059+ &ikconfig_file_ops);
89060+#endif
89061+#else
89062 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
89063 &ikconfig_file_ops);
89064+#endif
89065+
89066 if (!entry)
89067 return -ENOMEM;
89068
89069diff --git a/kernel/cred.c b/kernel/cred.c
89070index e0573a4..26c0fd3 100644
89071--- a/kernel/cred.c
89072+++ b/kernel/cred.c
89073@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
89074 validate_creds(cred);
89075 alter_cred_subscribers(cred, -1);
89076 put_cred(cred);
89077+
89078+#ifdef CONFIG_GRKERNSEC_SETXID
89079+ cred = (struct cred *) tsk->delayed_cred;
89080+ if (cred != NULL) {
89081+ tsk->delayed_cred = NULL;
89082+ validate_creds(cred);
89083+ alter_cred_subscribers(cred, -1);
89084+ put_cred(cred);
89085+ }
89086+#endif
89087 }
89088
89089 /**
89090@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
89091 * Always returns 0 thus allowing this function to be tail-called at the end
89092 * of, say, sys_setgid().
89093 */
89094-int commit_creds(struct cred *new)
89095+static int __commit_creds(struct cred *new)
89096 {
89097 struct task_struct *task = current;
89098 const struct cred *old = task->real_cred;
89099@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
89100
89101 get_cred(new); /* we will require a ref for the subj creds too */
89102
89103+ gr_set_role_label(task, new->uid, new->gid);
89104+
89105 /* dumpability changes */
89106 if (!uid_eq(old->euid, new->euid) ||
89107 !gid_eq(old->egid, new->egid) ||
89108@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
89109 put_cred(old);
89110 return 0;
89111 }
89112+#ifdef CONFIG_GRKERNSEC_SETXID
89113+extern int set_user(struct cred *new);
89114+
89115+void gr_delayed_cred_worker(void)
89116+{
89117+ const struct cred *new = current->delayed_cred;
89118+ struct cred *ncred;
89119+
89120+ current->delayed_cred = NULL;
89121+
89122+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
89123+ // from doing get_cred on it when queueing this
89124+ put_cred(new);
89125+ return;
89126+ } else if (new == NULL)
89127+ return;
89128+
89129+ ncred = prepare_creds();
89130+ if (!ncred)
89131+ goto die;
89132+ // uids
89133+ ncred->uid = new->uid;
89134+ ncred->euid = new->euid;
89135+ ncred->suid = new->suid;
89136+ ncred->fsuid = new->fsuid;
89137+ // gids
89138+ ncred->gid = new->gid;
89139+ ncred->egid = new->egid;
89140+ ncred->sgid = new->sgid;
89141+ ncred->fsgid = new->fsgid;
89142+ // groups
89143+ set_groups(ncred, new->group_info);
89144+ // caps
89145+ ncred->securebits = new->securebits;
89146+ ncred->cap_inheritable = new->cap_inheritable;
89147+ ncred->cap_permitted = new->cap_permitted;
89148+ ncred->cap_effective = new->cap_effective;
89149+ ncred->cap_bset = new->cap_bset;
89150+
89151+ if (set_user(ncred)) {
89152+ abort_creds(ncred);
89153+ goto die;
89154+ }
89155+
89156+ // from doing get_cred on it when queueing this
89157+ put_cred(new);
89158+
89159+ __commit_creds(ncred);
89160+ return;
89161+die:
89162+ // from doing get_cred on it when queueing this
89163+ put_cred(new);
89164+ do_group_exit(SIGKILL);
89165+}
89166+#endif
89167+
89168+int commit_creds(struct cred *new)
89169+{
89170+#ifdef CONFIG_GRKERNSEC_SETXID
89171+ int ret;
89172+ int schedule_it = 0;
89173+ struct task_struct *t;
89174+ unsigned oldsecurebits = current_cred()->securebits;
89175+
89176+ /* we won't get called with tasklist_lock held for writing
89177+ and interrupts disabled as the cred struct in that case is
89178+ init_cred
89179+ */
89180+ if (grsec_enable_setxid && !current_is_single_threaded() &&
89181+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
89182+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
89183+ schedule_it = 1;
89184+ }
89185+ ret = __commit_creds(new);
89186+ if (schedule_it) {
89187+ rcu_read_lock();
89188+ read_lock(&tasklist_lock);
89189+ for (t = next_thread(current); t != current;
89190+ t = next_thread(t)) {
89191+ /* we'll check if the thread has uid 0 in
89192+ * the delayed worker routine
89193+ */
89194+ if (task_securebits(t) == oldsecurebits &&
89195+ t->delayed_cred == NULL) {
89196+ t->delayed_cred = get_cred(new);
89197+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
89198+ set_tsk_need_resched(t);
89199+ }
89200+ }
89201+ read_unlock(&tasklist_lock);
89202+ rcu_read_unlock();
89203+ }
89204+
89205+ return ret;
89206+#else
89207+ return __commit_creds(new);
89208+#endif
89209+}
89210+
89211 EXPORT_SYMBOL(commit_creds);
89212
89213 /**
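
[editor's note] The delayed-cred machinery above exists because Linux credentials are per-task, i.e. per-thread: the raw set*id syscalls change only the calling thread, and it is normally glibc that broadcasts a setuid() to every thread from userspace. GRKERNSEC_SETXID repeats that broadcast inside the kernel so a compromised thread cannot quietly keep root. A userspace demo of the underlying per-thread behaviour (run as root, build with -pthread; uses the raw syscall on purpose):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *worker(void *arg)
{
	(void)arg;
	sleep(1);			/* let main() drop privileges first */
	printf("worker euid: %d\n", (int)geteuid());	/* still 0 */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	/* raw syscall: changes ONLY this thread's ids; the glibc
	 * setresuid() wrapper would broadcast to every thread instead */
	syscall(SYS_setresuid, 65534, 65534, 65534);
	printf("main euid:   %d\n", (int)geteuid());
	pthread_join(t, NULL);
	return 0;
}
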
89214diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
89215index 0874e2e..5b32cc9 100644
89216--- a/kernel/debug/debug_core.c
89217+++ b/kernel/debug/debug_core.c
89218@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
89219 */
89220 static atomic_t masters_in_kgdb;
89221 static atomic_t slaves_in_kgdb;
89222-static atomic_t kgdb_break_tasklet_var;
89223+static atomic_unchecked_t kgdb_break_tasklet_var;
89224 atomic_t kgdb_setting_breakpoint;
89225
89226 struct task_struct *kgdb_usethread;
89227@@ -137,7 +137,7 @@ int kgdb_single_step;
89228 static pid_t kgdb_sstep_pid;
89229
89230 /* to keep track of the CPU which is doing the single stepping*/
89231-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89232+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89233
89234 /*
89235 * If you are debugging a problem where roundup (the collection of
89236@@ -552,7 +552,7 @@ return_normal:
89237 * kernel will only try for the value of sstep_tries before
89238 * giving up and continuing on.
89239 */
89240- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
89241+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
89242 (kgdb_info[cpu].task &&
89243 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
89244 atomic_set(&kgdb_active, -1);
89245@@ -654,8 +654,8 @@ cpu_master_loop:
89246 }
89247
89248 kgdb_restore:
89249- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
89250- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
89251+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
89252+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
89253 if (kgdb_info[sstep_cpu].task)
89254 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
89255 else
89256@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
89257 static void kgdb_tasklet_bpt(unsigned long ing)
89258 {
89259 kgdb_breakpoint();
89260- atomic_set(&kgdb_break_tasklet_var, 0);
89261+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
89262 }
89263
89264 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
89265
89266 void kgdb_schedule_breakpoint(void)
89267 {
89268- if (atomic_read(&kgdb_break_tasklet_var) ||
89269+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
89270 atomic_read(&kgdb_active) != -1 ||
89271 atomic_read(&kgdb_setting_breakpoint))
89272 return;
89273- atomic_inc(&kgdb_break_tasklet_var);
89274+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
89275 tasklet_schedule(&kgdb_tasklet_breakpoint);
89276 }
89277 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
89278diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
89279index 4121345..861e178 100644
89280--- a/kernel/debug/kdb/kdb_main.c
89281+++ b/kernel/debug/kdb/kdb_main.c
89282@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
89283 continue;
89284
89285 kdb_printf("%-20s%8u 0x%p ", mod->name,
89286- mod->core_size, (void *)mod);
89287+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
89288 #ifdef CONFIG_MODULE_UNLOAD
89289 kdb_printf("%4d ", module_refcount(mod));
89290 #endif
89291@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
89292 kdb_printf(" (Loading)");
89293 else
89294 kdb_printf(" (Live)");
89295- kdb_printf(" 0x%p", mod->module_core);
89296+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
89297
89298 #ifdef CONFIG_MODULE_UNLOAD
89299 {
89300diff --git a/kernel/events/core.c b/kernel/events/core.c
89301index 2fabc06..79cceec 100644
89302--- a/kernel/events/core.c
89303+++ b/kernel/events/core.c
89304@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
89305 * 0 - disallow raw tracepoint access for unpriv
89306 * 1 - disallow cpu events for unpriv
89307 * 2 - disallow kernel profiling for unpriv
89308+ * 3 - disallow all unpriv perf event use
89309 */
89310-int sysctl_perf_event_paranoid __read_mostly = 1;
89311+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89312+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
89313+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
89314+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
89315+#else
89316+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
89317+#endif
89318
89319 /* Minimum for 512 kiB + 1 user control page */
89320 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
89321@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
89322
89323 tmp *= sysctl_perf_cpu_time_max_percent;
89324 do_div(tmp, 100);
89325- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
89326+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
89327 }
89328
89329 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
89330@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
89331 }
89332 }
89333
89334-static atomic64_t perf_event_id;
89335+static atomic64_unchecked_t perf_event_id;
89336
89337 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
89338 enum event_type_t event_type);
89339@@ -3220,7 +3227,7 @@ static void __perf_event_read(void *info)
89340
89341 static inline u64 perf_event_count(struct perf_event *event)
89342 {
89343- return local64_read(&event->count) + atomic64_read(&event->child_count);
89344+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
89345 }
89346
89347 static u64 perf_event_read(struct perf_event *event)
89348@@ -3656,9 +3663,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
89349 mutex_lock(&event->child_mutex);
89350 total += perf_event_read(event);
89351 *enabled += event->total_time_enabled +
89352- atomic64_read(&event->child_total_time_enabled);
89353+ atomic64_read_unchecked(&event->child_total_time_enabled);
89354 *running += event->total_time_running +
89355- atomic64_read(&event->child_total_time_running);
89356+ atomic64_read_unchecked(&event->child_total_time_running);
89357
89358 list_for_each_entry(child, &event->child_list, child_list) {
89359 total += perf_event_read(child);
89360@@ -4147,10 +4154,10 @@ void perf_event_update_userpage(struct perf_event *event)
89361 userpg->offset -= local64_read(&event->hw.prev_count);
89362
89363 userpg->time_enabled = enabled +
89364- atomic64_read(&event->child_total_time_enabled);
89365+ atomic64_read_unchecked(&event->child_total_time_enabled);
89366
89367 userpg->time_running = running +
89368- atomic64_read(&event->child_total_time_running);
89369+ atomic64_read_unchecked(&event->child_total_time_running);
89370
89371 arch_perf_update_userpage(event, userpg, now);
89372
89373@@ -4740,7 +4747,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
89374
89375 /* Data. */
89376 sp = perf_user_stack_pointer(regs);
89377- rem = __output_copy_user(handle, (void *) sp, dump_size);
89378+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
89379 dyn_size = dump_size - rem;
89380
89381 perf_output_skip(handle, rem);
89382@@ -4831,11 +4838,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
89383 values[n++] = perf_event_count(event);
89384 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
89385 values[n++] = enabled +
89386- atomic64_read(&event->child_total_time_enabled);
89387+ atomic64_read_unchecked(&event->child_total_time_enabled);
89388 }
89389 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
89390 values[n++] = running +
89391- atomic64_read(&event->child_total_time_running);
89392+ atomic64_read_unchecked(&event->child_total_time_running);
89393 }
89394 if (read_format & PERF_FORMAT_ID)
89395 values[n++] = primary_event_id(event);
89396@@ -7180,7 +7187,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
89397 event->parent = parent_event;
89398
89399 event->ns = get_pid_ns(task_active_pid_ns(current));
89400- event->id = atomic64_inc_return(&perf_event_id);
89401+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
89402
89403 event->state = PERF_EVENT_STATE_INACTIVE;
89404
89405@@ -7470,6 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
89406 if (flags & ~PERF_FLAG_ALL)
89407 return -EINVAL;
89408
89409+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89410+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
89411+ return -EACCES;
89412+#endif
89413+
89414 err = perf_copy_attr(attr_uptr, &attr);
89415 if (err)
89416 return err;
89417@@ -7892,10 +7904,10 @@ static void sync_child_event(struct perf_event *child_event,
89418 /*
89419 * Add back the child's count to the parent's count:
89420 */
89421- atomic64_add(child_val, &parent_event->child_count);
89422- atomic64_add(child_event->total_time_enabled,
89423+ atomic64_add_unchecked(child_val, &parent_event->child_count);
89424+ atomic64_add_unchecked(child_event->total_time_enabled,
89425 &parent_event->child_total_time_enabled);
89426- atomic64_add(child_event->total_time_running,
89427+ atomic64_add_unchecked(child_event->total_time_running,
89428 &parent_event->child_total_time_running);
89429
89430 /*
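
[editor's note] PERF_HARDEN introduces paranoia level 3 on top of the stock 0-2 scale, and perf_event_open() now refuses unprivileged callers entirely once the level demands it. The helper it calls is presumably along these lines (the name is taken from the hunk above; the definition is an assumption, the real one sits in the patched include/linux/perf_event.h):

/* kernel-context sketch */
extern int sysctl_perf_event_legitimately_concerned;

/* level 3 and above: no perf_event use at all without CAP_SYS_ADMIN */
static inline bool perf_paranoid_any(void)
{
	return sysctl_perf_event_legitimately_concerned > 2;
}
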
89431diff --git a/kernel/events/internal.h b/kernel/events/internal.h
89432index 569b2187..19940d9 100644
89433--- a/kernel/events/internal.h
89434+++ b/kernel/events/internal.h
89435@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
89436 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
89437 }
89438
89439-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
89440+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
89441 static inline unsigned long \
89442 func_name(struct perf_output_handle *handle, \
89443- const void *buf, unsigned long len) \
89444+ const void user *buf, unsigned long len) \
89445 { \
89446 unsigned long size, written; \
89447 \
89448@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
89449 return 0;
89450 }
89451
89452-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
89453+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
89454
89455 static inline unsigned long
89456 memcpy_skip(void *dst, const void *src, unsigned long n)
89457@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
89458 return 0;
89459 }
89460
89461-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
89462+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
89463
89464 #ifndef arch_perf_out_copy_user
89465 #define arch_perf_out_copy_user arch_perf_out_copy_user
89466@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
89467 }
89468 #endif
89469
89470-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
89471+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
89472
89473 /* Callchain handling */
89474 extern struct perf_callchain_entry *
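
[editor's note] The DEFINE_OUTPUT_COPY change above threads a sparse address-space token through the macro, so one template stamps out both a kernel-to-kernel and a user-to-kernel copier with correct annotations. A minimal standalone model of the trick (the annotation collapses to nothing outside sparse, and the second macro argument is either empty or the literal token __user):

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

#include <stddef.h>

#define DEFINE_COPY(name, space)					\
static size_t name(void *dst, const void space *src, size_t n)		\
{									\
	(void)dst; (void)src;	/* body elided in this sketch */	\
	return n;							\
}

DEFINE_COPY(copy_common, )		/* kernel-to-kernel variant */
DEFINE_COPY(copy_from_user_buf, __user)	/* sparse-checked user variant */

int main(void)
{
	char dst[8];

	/* copy_from_user_buf is left uncalled here: invoking it with a
	 * plain pointer would itself trip sparse, which is the point */
	return (int)copy_common(dst, "x", 1) - 1;
}
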
89475diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
89476index cb346f2..e4dc317 100644
89477--- a/kernel/events/uprobes.c
89478+++ b/kernel/events/uprobes.c
89479@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
89480 {
89481 struct page *page;
89482 uprobe_opcode_t opcode;
89483- int result;
89484+ long result;
89485
89486 pagefault_disable();
89487 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89488diff --git a/kernel/exit.c b/kernel/exit.c
89489index feff10b..f623dd5 100644
89490--- a/kernel/exit.c
89491+++ b/kernel/exit.c
89492@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
89493 struct task_struct *leader;
89494 int zap_leader;
89495 repeat:
89496+#ifdef CONFIG_NET
89497+ gr_del_task_from_ip_table(p);
89498+#endif
89499+
89500 /* don't need to get the RCU readlock here - the process is dead and
89501 * can't be modifying its own credentials. But shut RCU-lockdep up */
89502 rcu_read_lock();
89503@@ -656,6 +660,8 @@ void do_exit(long code)
89504 int group_dead;
89505 TASKS_RCU(int tasks_rcu_i);
89506
89507+ set_fs(USER_DS);
89508+
89509 profile_task_exit(tsk);
89510
89511 WARN_ON(blk_needs_flush_plug(tsk));
89512@@ -672,7 +678,6 @@ void do_exit(long code)
89513 * mm_release()->clear_child_tid() from writing to a user-controlled
89514 * kernel address.
89515 */
89516- set_fs(USER_DS);
89517
89518 ptrace_event(PTRACE_EVENT_EXIT, code);
89519
89520@@ -730,6 +735,9 @@ void do_exit(long code)
89521 tsk->exit_code = code;
89522 taskstats_exit(tsk, group_dead);
89523
89524+ gr_acl_handle_psacct(tsk, code);
89525+ gr_acl_handle_exit();
89526+
89527 exit_mm(tsk);
89528
89529 if (group_dead)
89530@@ -849,7 +857,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89531 * Take down every thread in the group. This is called by fatal signals
89532 * as well as by sys_exit_group (below).
89533 */
89534-void
89535+__noreturn void
89536 do_group_exit(int exit_code)
89537 {
89538 struct signal_struct *sig = current->signal;
89539diff --git a/kernel/fork.c b/kernel/fork.c
89540index cf65139..704476e 100644
89541--- a/kernel/fork.c
89542+++ b/kernel/fork.c
89543@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
89544 void thread_info_cache_init(void)
89545 {
89546 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
89547- THREAD_SIZE, 0, NULL);
89548+ THREAD_SIZE, SLAB_USERCOPY, NULL);
89549 BUG_ON(thread_info_cache == NULL);
89550 }
89551 # endif
89552 #endif
89553
89554+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89555+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89556+ int node, void **lowmem_stack)
89557+{
89558+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89559+ void *ret = NULL;
89560+ unsigned int i;
89561+
89562+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89563+ if (*lowmem_stack == NULL)
89564+ goto out;
89565+
89566+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89567+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89568+
89569+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89570+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89571+ if (ret == NULL) {
89572+ free_thread_info(*lowmem_stack);
89573+ *lowmem_stack = NULL;
89574+ }
89575+
89576+out:
89577+ return ret;
89578+}
89579+
89580+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89581+{
89582+ unmap_process_stacks(tsk);
89583+}
89584+#else
89585+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89586+ int node, void **lowmem_stack)
89587+{
89588+ return alloc_thread_info_node(tsk, node);
89589+}
89590+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89591+{
89592+ free_thread_info(ti);
89593+}
89594+#endif
89595+
89596 /* SLAB cache for signal_struct structures (tsk->signal) */
89597 static struct kmem_cache *signal_cachep;
89598
89599@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89600 /* SLAB cache for mm_struct structures (tsk->mm) */
89601 static struct kmem_cache *mm_cachep;
89602
89603-static void account_kernel_stack(struct thread_info *ti, int account)
89604+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89605 {
89606+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89607+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89608+#else
89609 struct zone *zone = page_zone(virt_to_page(ti));
89610+#endif
89611
89612 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89613 }
89614
89615 void free_task(struct task_struct *tsk)
89616 {
89617- account_kernel_stack(tsk->stack, -1);
89618+ account_kernel_stack(tsk, tsk->stack, -1);
89619 arch_release_thread_info(tsk->stack);
89620- free_thread_info(tsk->stack);
89621+ gr_free_thread_info(tsk, tsk->stack);
89622 rt_mutex_debug_task_free(tsk);
89623 ftrace_graph_exit_task(tsk);
89624 put_seccomp_filter(tsk);
89625@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89626 {
89627 struct task_struct *tsk;
89628 struct thread_info *ti;
89629+ void *lowmem_stack;
89630 int node = tsk_fork_get_node(orig);
89631 int err;
89632
89633@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89634 if (!tsk)
89635 return NULL;
89636
89637- ti = alloc_thread_info_node(tsk, node);
89638+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89639 if (!ti)
89640 goto free_tsk;
89641
89642@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89643 goto free_ti;
89644
89645 tsk->stack = ti;
89646+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89647+ tsk->lowmem_stack = lowmem_stack;
89648+#endif
89649 #ifdef CONFIG_SECCOMP
89650 /*
89651 * We must handle setting up seccomp filters once we're under
89652@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89653 set_task_stack_end_magic(tsk);
89654
89655 #ifdef CONFIG_CC_STACKPROTECTOR
89656- tsk->stack_canary = get_random_int();
89657+ tsk->stack_canary = pax_get_random_long();
89658 #endif
89659
89660 /*
89661@@ -352,24 +402,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89662 tsk->splice_pipe = NULL;
89663 tsk->task_frag.page = NULL;
89664
89665- account_kernel_stack(ti, 1);
89666+ account_kernel_stack(tsk, ti, 1);
89667
89668 return tsk;
89669
89670 free_ti:
89671- free_thread_info(ti);
89672+ gr_free_thread_info(tsk, ti);
89673 free_tsk:
89674 free_task_struct(tsk);
89675 return NULL;
89676 }
89677
89678 #ifdef CONFIG_MMU
89679-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89680+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89681+{
89682+ struct vm_area_struct *tmp;
89683+ unsigned long charge;
89684+ struct file *file;
89685+ int retval;
89686+
89687+ charge = 0;
89688+ if (mpnt->vm_flags & VM_ACCOUNT) {
89689+ unsigned long len = vma_pages(mpnt);
89690+
89691+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89692+ goto fail_nomem;
89693+ charge = len;
89694+ }
89695+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89696+ if (!tmp)
89697+ goto fail_nomem;
89698+ *tmp = *mpnt;
89699+ tmp->vm_mm = mm;
89700+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89701+ retval = vma_dup_policy(mpnt, tmp);
89702+ if (retval)
89703+ goto fail_nomem_policy;
89704+ if (anon_vma_fork(tmp, mpnt))
89705+ goto fail_nomem_anon_vma_fork;
89706+ tmp->vm_flags &= ~VM_LOCKED;
89707+ tmp->vm_next = tmp->vm_prev = NULL;
89708+ tmp->vm_mirror = NULL;
89709+ file = tmp->vm_file;
89710+ if (file) {
89711+ struct inode *inode = file_inode(file);
89712+ struct address_space *mapping = file->f_mapping;
89713+
89714+ get_file(file);
89715+ if (tmp->vm_flags & VM_DENYWRITE)
89716+ atomic_dec(&inode->i_writecount);
89717+ i_mmap_lock_write(mapping);
89718+ if (tmp->vm_flags & VM_SHARED)
89719+ atomic_inc(&mapping->i_mmap_writable);
89720+ flush_dcache_mmap_lock(mapping);
89721+ /* insert tmp into the share list, just after mpnt */
89722+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89723+ flush_dcache_mmap_unlock(mapping);
89724+ i_mmap_unlock_write(mapping);
89725+ }
89726+
89727+ /*
89728+ * Clear hugetlb-related page reserves for children. This only
89729+ * affects MAP_PRIVATE mappings. Faults generated by the child
89730+ * are not guaranteed to succeed, even if read-only
89731+ */
89732+ if (is_vm_hugetlb_page(tmp))
89733+ reset_vma_resv_huge_pages(tmp);
89734+
89735+ return tmp;
89736+
89737+fail_nomem_anon_vma_fork:
89738+ mpol_put(vma_policy(tmp));
89739+fail_nomem_policy:
89740+ kmem_cache_free(vm_area_cachep, tmp);
89741+fail_nomem:
89742+ vm_unacct_memory(charge);
89743+ return NULL;
89744+}
89745+
89746+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89747 {
89748 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89749 struct rb_node **rb_link, *rb_parent;
89750 int retval;
89751- unsigned long charge;
89752
89753 uprobe_start_dup_mmap();
89754 down_write(&oldmm->mmap_sem);
89755@@ -397,51 +512,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89756
89757 prev = NULL;
89758 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89759- struct file *file;
89760-
89761 if (mpnt->vm_flags & VM_DONTCOPY) {
89762 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89763 -vma_pages(mpnt));
89764 continue;
89765 }
89766- charge = 0;
89767- if (mpnt->vm_flags & VM_ACCOUNT) {
89768- unsigned long len = vma_pages(mpnt);
89769-
89770- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89771- goto fail_nomem;
89772- charge = len;
89773- }
89774- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89775- if (!tmp)
89776- goto fail_nomem;
89777- *tmp = *mpnt;
89778- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89779- retval = vma_dup_policy(mpnt, tmp);
89780- if (retval)
89781- goto fail_nomem_policy;
89782- tmp->vm_mm = mm;
89783- if (anon_vma_fork(tmp, mpnt))
89784- goto fail_nomem_anon_vma_fork;
89785- tmp->vm_flags &= ~VM_LOCKED;
89786- tmp->vm_next = tmp->vm_prev = NULL;
89787- file = tmp->vm_file;
89788- if (file) {
89789- struct inode *inode = file_inode(file);
89790- struct address_space *mapping = file->f_mapping;
89791-
89792- get_file(file);
89793- if (tmp->vm_flags & VM_DENYWRITE)
89794- atomic_dec(&inode->i_writecount);
89795- i_mmap_lock_write(mapping);
89796- if (tmp->vm_flags & VM_SHARED)
89797- atomic_inc(&mapping->i_mmap_writable);
89798- flush_dcache_mmap_lock(mapping);
89799- /* insert tmp into the share list, just after mpnt */
89800- vma_interval_tree_insert_after(tmp, mpnt,
89801- &mapping->i_mmap);
89802- flush_dcache_mmap_unlock(mapping);
89803- i_mmap_unlock_write(mapping);
89804+ tmp = dup_vma(mm, oldmm, mpnt);
89805+ if (!tmp) {
89806+ retval = -ENOMEM;
89807+ goto out;
89808 }
89809
89810 /*
89811@@ -473,6 +552,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89812 if (retval)
89813 goto out;
89814 }
89815+
89816+#ifdef CONFIG_PAX_SEGMEXEC
89817+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89818+ struct vm_area_struct *mpnt_m;
89819+
89820+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89821+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89822+
89823+ if (!mpnt->vm_mirror)
89824+ continue;
89825+
89826+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89827+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89828+ mpnt->vm_mirror = mpnt_m;
89829+ } else {
89830+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89831+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89832+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89833+ mpnt->vm_mirror->vm_mirror = mpnt;
89834+ }
89835+ }
89836+ BUG_ON(mpnt_m);
89837+ }
89838+#endif
89839+
89840 /* a new mm has just been created */
89841 arch_dup_mmap(oldmm, mm);
89842 retval = 0;
89843@@ -482,14 +586,6 @@ out:
89844 up_write(&oldmm->mmap_sem);
89845 uprobe_end_dup_mmap();
89846 return retval;
89847-fail_nomem_anon_vma_fork:
89848- mpol_put(vma_policy(tmp));
89849-fail_nomem_policy:
89850- kmem_cache_free(vm_area_cachep, tmp);
89851-fail_nomem:
89852- retval = -ENOMEM;
89853- vm_unacct_memory(charge);
89854- goto out;
89855 }
89856
89857 static inline int mm_alloc_pgd(struct mm_struct *mm)
89858@@ -739,8 +835,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89859 return ERR_PTR(err);
89860
89861 mm = get_task_mm(task);
89862- if (mm && mm != current->mm &&
89863- !ptrace_may_access(task, mode)) {
89864+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89865+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89866 mmput(mm);
89867 mm = ERR_PTR(-EACCES);
89868 }
89869@@ -943,13 +1039,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89870 spin_unlock(&fs->lock);
89871 return -EAGAIN;
89872 }
89873- fs->users++;
89874+ atomic_inc(&fs->users);
89875 spin_unlock(&fs->lock);
89876 return 0;
89877 }
89878 tsk->fs = copy_fs_struct(fs);
89879 if (!tsk->fs)
89880 return -ENOMEM;
89881+ /* Carry through gr_chroot_dentry and is_chrooted instead
89882+ of recomputing them here; they were already copied when the
89883+ task struct was duplicated. This allows pivot_root not to be
89884+ treated as a chroot.
89885+ */
89886+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89887+
89888 return 0;
89889 }
89890
89891@@ -1187,7 +1290,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89892 * parts of the process environment (as per the clone
89893 * flags). The actual kick-off is left to the caller.
89894 */
89895-static struct task_struct *copy_process(unsigned long clone_flags,
89896+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89897 unsigned long stack_start,
89898 unsigned long stack_size,
89899 int __user *child_tidptr,
89900@@ -1258,6 +1361,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89901 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89902 #endif
89903 retval = -EAGAIN;
89904+
89905+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89906+
89907 if (atomic_read(&p->real_cred->user->processes) >=
89908 task_rlimit(p, RLIMIT_NPROC)) {
89909 if (p->real_cred->user != INIT_USER &&
89910@@ -1507,6 +1613,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89911 goto bad_fork_free_pid;
89912 }
89913
89914+ /* synchronizes with gr_set_acls();
89915+ we need to call this past the point of no return for fork()
89916+ */
89917+ gr_copy_label(p);
89918+
89919 if (likely(p->pid)) {
89920 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89921
89922@@ -1597,6 +1708,8 @@ bad_fork_cleanup_count:
89923 bad_fork_free:
89924 free_task(p);
89925 fork_out:
89926+ gr_log_forkfail(retval);
89927+
89928 return ERR_PTR(retval);
89929 }
89930
89931@@ -1658,6 +1771,7 @@ long do_fork(unsigned long clone_flags,
89932
89933 p = copy_process(clone_flags, stack_start, stack_size,
89934 child_tidptr, NULL, trace);
89935+ add_latent_entropy();
89936 /*
89937 * Do this prior waking up the new thread - the thread pointer
89938 * might get invalid after that point, if the thread exits quickly.
89939@@ -1674,6 +1788,8 @@ long do_fork(unsigned long clone_flags,
89940 if (clone_flags & CLONE_PARENT_SETTID)
89941 put_user(nr, parent_tidptr);
89942
89943+ gr_handle_brute_check();
89944+
89945 if (clone_flags & CLONE_VFORK) {
89946 p->vfork_done = &vfork;
89947 init_completion(&vfork);
89948@@ -1792,7 +1908,7 @@ void __init proc_caches_init(void)
89949 mm_cachep = kmem_cache_create("mm_struct",
89950 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89951 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89952- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89953+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89954 mmap_init();
89955 nsproxy_cache_init();
89956 }
89957@@ -1832,7 +1948,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89958 return 0;
89959
89960 /* don't need lock here; in the worst case we'll do useless copy */
89961- if (fs->users == 1)
89962+ if (atomic_read(&fs->users) == 1)
89963 return 0;
89964
89965 *new_fsp = copy_fs_struct(fs);
89966@@ -1944,7 +2060,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89967 fs = current->fs;
89968 spin_lock(&fs->lock);
89969 current->fs = new_fs;
89970- if (--fs->users)
89971+ gr_set_chroot_entries(current, &current->fs->root);
89972+ if (atomic_dec_return(&fs->users))
89973 new_fs = NULL;
89974 else
89975 new_fs = fs;
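
The copy_fs() and unshare hunks above convert fs_struct's user count from a spinlock-guarded plain int into an atomic_t (atomic_inc / atomic_read / atomic_dec_return). A minimal userspace sketch of the same put-side pattern, with C11 stdatomic standing in for the kernel atomic API; the struct and function names below are invented for the demo and are not part of the patch:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct fs_struct_demo {
		atomic_int users;	/* was: int users, guarded by fs->lock */
	};

	static struct fs_struct_demo *copy_fs_struct_demo(void)
	{
		struct fs_struct_demo *fs = malloc(sizeof(*fs));
		if (fs)
			atomic_init(&fs->users, 1);
		return fs;
	}

	static void put_fs_struct_demo(struct fs_struct_demo *fs)
	{
		/* mirrors "if (atomic_dec_return(&fs->users)) ... else reuse":
		 * only the caller that drops the count to zero releases it */
		if (atomic_fetch_sub(&fs->users, 1) == 1)
			free(fs);
	}

	int main(void)
	{
		struct fs_struct_demo *fs = copy_fs_struct_demo();
		if (!fs)
			return 1;
		atomic_fetch_add(&fs->users, 1);	/* a second task shares fs */
		put_fs_struct_demo(fs);			/* 2 -> 1: still in use */
		put_fs_struct_demo(fs);			/* 1 -> 0: freed */
		puts("refcount demo done");
		return 0;
	}

As in the SYSCALL_DEFINE1(unshare) hunk, the read-modify-write and the "last user" decision become a single atomic operation instead of two steps under a lock.
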
89976diff --git a/kernel/futex.c b/kernel/futex.c
89977index 2a5e383..878bac6 100644
89978--- a/kernel/futex.c
89979+++ b/kernel/futex.c
89980@@ -201,7 +201,7 @@ struct futex_pi_state {
89981 atomic_t refcount;
89982
89983 union futex_key key;
89984-};
89985+} __randomize_layout;
89986
89987 /**
89988 * struct futex_q - The hashed futex queue entry, one per waiting task
89989@@ -235,7 +235,7 @@ struct futex_q {
89990 struct rt_mutex_waiter *rt_waiter;
89991 union futex_key *requeue_pi_key;
89992 u32 bitset;
89993-};
89994+} __randomize_layout;
89995
89996 static const struct futex_q futex_q_init = {
89997 /* list gets initialized in queue_me()*/
89998@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89999 struct page *page, *page_head;
90000 int err, ro = 0;
90001
90002+#ifdef CONFIG_PAX_SEGMEXEC
90003+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
90004+ return -EFAULT;
90005+#endif
90006+
90007 /*
90008 * The futex address must be "naturally" aligned.
90009 */
90010@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
90011
90012 static int get_futex_value_locked(u32 *dest, u32 __user *from)
90013 {
90014- int ret;
90015+ unsigned long ret;
90016
90017 pagefault_disable();
90018 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
90019@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
90020 {
90021 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
90022 u32 curval;
90023+ mm_segment_t oldfs;
90024
90025 /*
90026 * This will fail and we want it. Some arch implementations do
90027@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
90028 * implementation, the non-functional ones will return
90029 * -ENOSYS.
90030 */
90031+ oldfs = get_fs();
90032+ set_fs(USER_DS);
90033 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
90034 futex_cmpxchg_enabled = 1;
90035+ set_fs(oldfs);
90036 #endif
90037 }
90038
90039diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
90040index 55c8c93..9ba7ad6 100644
90041--- a/kernel/futex_compat.c
90042+++ b/kernel/futex_compat.c
90043@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
90044 return 0;
90045 }
90046
90047-static void __user *futex_uaddr(struct robust_list __user *entry,
90048+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
90049 compat_long_t futex_offset)
90050 {
90051 compat_uptr_t base = ptr_to_compat(entry);
90052diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
90053index b358a80..fc25240 100644
90054--- a/kernel/gcov/base.c
90055+++ b/kernel/gcov/base.c
90056@@ -114,11 +114,6 @@ void gcov_enable_events(void)
90057 }
90058
90059 #ifdef CONFIG_MODULES
90060-static inline int within(void *addr, void *start, unsigned long size)
90061-{
90062- return ((addr >= start) && (addr < start + size));
90063-}
90064-
90065 /* Update list and generate events when modules are unloaded. */
90066 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90067 void *data)
90068@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90069
90070 /* Remove entries located in module from linked list. */
90071 while ((info = gcov_info_next(info))) {
90072- if (within(info, mod->module_core, mod->core_size)) {
90073+ if (within_module_core_rw((unsigned long)info, mod)) {
90074 gcov_info_unlink(prev, info);
90075 if (gcov_events_enabled)
90076 gcov_event(GCOV_REMOVE, info);
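
The gcov hunk above drops the file-local within() helper in favor of within_module_core_rw(), which knows about the RW/RX split of module memory introduced elsewhere in the patch. The removed helper is a plain half-open range test, shown here as a standalone sketch:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdio.h>

	static int within(void *addr, void *start, unsigned long size)
	{
		return (char *)addr >= (char *)start &&
		       (char *)addr <  (char *)start + size;
	}

	int main(void)
	{
		char region[64];

		printf("%d\n", within(region + 10, region, sizeof(region)));	/* 1 */
		printf("%d\n", within(region + 64, region, sizeof(region)));	/* 0: end exclusive */
		return 0;
	}
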
90077diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
90078index 886d09e..c7ff4e5 100644
90079--- a/kernel/irq/manage.c
90080+++ b/kernel/irq/manage.c
90081@@ -874,7 +874,7 @@ static int irq_thread(void *data)
90082
90083 action_ret = handler_fn(desc, action);
90084 if (action_ret == IRQ_HANDLED)
90085- atomic_inc(&desc->threads_handled);
90086+ atomic_inc_unchecked(&desc->threads_handled);
90087
90088 wake_threads_waitq(desc);
90089 }
90090diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
90091index e2514b0..de3dfe0 100644
90092--- a/kernel/irq/spurious.c
90093+++ b/kernel/irq/spurious.c
90094@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
90095 * count. We just care about the count being
90096 * different than the one we saw before.
90097 */
90098- handled = atomic_read(&desc->threads_handled);
90099+ handled = atomic_read_unchecked(&desc->threads_handled);
90100 handled |= SPURIOUS_DEFERRED;
90101 if (handled != desc->threads_handled_last) {
90102 action_ret = IRQ_HANDLED;
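
The two IRQ hunks above switch threads_handled to the PaX atomic_*_unchecked API. The counter is only ever compared against a previous snapshot, so wraparound is harmless and it can safely be exempted from REFCOUNT overflow detection. A userspace sketch of why the consumer tolerates wraparound:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint threads_handled;

	int main(void)
	{
		unsigned int last = atomic_load(&threads_handled);

		atomic_fetch_add(&threads_handled, 1);	/* an IRQ thread made progress */

		/* the spurious-IRQ detector only cares that the value changed,
		 * not what it is, so overflow checking buys nothing here */
		unsigned int now = atomic_load(&threads_handled);
		puts(now != last ? "progress seen" : "possibly spurious");
		return 0;
	}
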
90103diff --git a/kernel/jump_label.c b/kernel/jump_label.c
90104index 9019f15..9a3c42e 100644
90105--- a/kernel/jump_label.c
90106+++ b/kernel/jump_label.c
90107@@ -14,6 +14,7 @@
90108 #include <linux/err.h>
90109 #include <linux/static_key.h>
90110 #include <linux/jump_label_ratelimit.h>
90111+#include <linux/mm.h>
90112
90113 #ifdef HAVE_JUMP_LABEL
90114
90115@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
90116
90117 size = (((unsigned long)stop - (unsigned long)start)
90118 / sizeof(struct jump_entry));
90119+ pax_open_kernel();
90120 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
90121+ pax_close_kernel();
90122 }
90123
90124 static void jump_label_update(struct static_key *key, int enable);
90125@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
90126 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
90127 struct jump_entry *iter;
90128
90129+ pax_open_kernel();
90130 for (iter = iter_start; iter < iter_stop; iter++) {
90131 if (within_module_init(iter->code, mod))
90132 iter->code = 0;
90133 }
90134+ pax_close_kernel();
90135 }
90136
90137 static int
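
The jump_label hunks above bracket the in-place sort of the jump table and the init-entry invalidation loop with pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection for a sanctioned write to otherwise read-only kernel data. A rough userspace analogue using mprotect(); the mapping and string are invented for the demo:

	/* illustrative userspace analogue, not part of the patch */
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (page == MAP_FAILED)
			return 1;

		strcpy(page, "jump table");
		mprotect(page, pagesz, PROT_READ);		/* steady state: read-only */

		mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()" */
		page[0] = 'J';					/* the one sanctioned write */
		mprotect(page, pagesz, PROT_READ);		/* "pax_close_kernel()" */

		printf("%s\n", page);
		return 0;
	}
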
90138diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
90139index 5c5987f..bc502b0 100644
90140--- a/kernel/kallsyms.c
90141+++ b/kernel/kallsyms.c
90142@@ -11,6 +11,9 @@
90143 * Changed the compression method from stem compression to "table lookup"
90144 * compression (see scripts/kallsyms.c for a more complete description)
90145 */
90146+#ifdef CONFIG_GRKERNSEC_HIDESYM
90147+#define __INCLUDED_BY_HIDESYM 1
90148+#endif
90149 #include <linux/kallsyms.h>
90150 #include <linux/module.h>
90151 #include <linux/init.h>
90152@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
90153
90154 static inline int is_kernel_inittext(unsigned long addr)
90155 {
90156+ if (system_state != SYSTEM_BOOTING)
90157+ return 0;
90158+
90159 if (addr >= (unsigned long)_sinittext
90160 && addr <= (unsigned long)_einittext)
90161 return 1;
90162 return 0;
90163 }
90164
90165+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90166+#ifdef CONFIG_MODULES
90167+static inline int is_module_text(unsigned long addr)
90168+{
90169+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
90170+ return 1;
90171+
90172+ addr = ktla_ktva(addr);
90173+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
90174+}
90175+#else
90176+static inline int is_module_text(unsigned long addr)
90177+{
90178+ return 0;
90179+}
90180+#endif
90181+#endif
90182+
90183 static inline int is_kernel_text(unsigned long addr)
90184 {
90185 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
90186@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
90187
90188 static inline int is_kernel(unsigned long addr)
90189 {
90190+
90191+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90192+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
90193+ return 1;
90194+
90195+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
90196+#else
90197 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
90198+#endif
90199+
90200 return 1;
90201 return in_gate_area_no_mm(addr);
90202 }
90203
90204 static int is_ksym_addr(unsigned long addr)
90205 {
90206+
90207+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90208+ if (is_module_text(addr))
90209+ return 0;
90210+#endif
90211+
90212 if (all_var)
90213 return is_kernel(addr);
90214
90215@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
90216
90217 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
90218 {
90219- iter->name[0] = '\0';
90220 iter->nameoff = get_symbol_offset(new_pos);
90221 iter->pos = new_pos;
90222 }
90223@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
90224 {
90225 struct kallsym_iter *iter = m->private;
90226
90227+#ifdef CONFIG_GRKERNSEC_HIDESYM
90228+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
90229+ return 0;
90230+#endif
90231+
90232 /* Some debugging symbols have no name. Ignore them. */
90233 if (!iter->name[0])
90234 return 0;
90235@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
90236 */
90237 type = iter->exported ? toupper(iter->type) :
90238 tolower(iter->type);
90239+
90240 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
90241 type, iter->name, iter->module_name);
90242 } else
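
Under HIDESYM the s_show() hunk above makes /proc/kallsyms emit nothing for unprivileged readers: returning 0 from a seq_file show routine means "produced no output, keep iterating", so the file simply appears empty. A userspace sketch of that shape; note the real kernel gate compares kuids (GLOBAL_ROOT_UID), while the raw getuid() test below is a simplification:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdio.h>
	#include <unistd.h>

	static int s_show_demo(const char *name, unsigned long value)
	{
		if (getuid() != 0)
			return 0;	/* emit nothing, but keep iterating */
		printf("%016lx T %s\n", value, name);
		return 0;
	}

	int main(void)
	{
		s_show_demo("demo_symbol", 0xc0de000UL);
		return 0;
	}
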
90243diff --git a/kernel/kcmp.c b/kernel/kcmp.c
90244index 0aa69ea..a7fcafb 100644
90245--- a/kernel/kcmp.c
90246+++ b/kernel/kcmp.c
90247@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
90248 struct task_struct *task1, *task2;
90249 int ret;
90250
90251+#ifdef CONFIG_GRKERNSEC
90252+ return -ENOSYS;
90253+#endif
90254+
90255 rcu_read_lock();
90256
90257 /*
90258diff --git a/kernel/kexec.c b/kernel/kexec.c
90259index 38c25b1..12b3f69 100644
90260--- a/kernel/kexec.c
90261+++ b/kernel/kexec.c
90262@@ -1348,7 +1348,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
90263 compat_ulong_t, flags)
90264 {
90265 struct compat_kexec_segment in;
90266- struct kexec_segment out, __user *ksegments;
90267+ struct kexec_segment out;
90268+ struct kexec_segment __user *ksegments;
90269 unsigned long i, result;
90270
90271 /* Don't allow clients that don't understand the native
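
The kexec hunk above splits "struct kexec_segment out, __user *ksegments;" into two declarations so the sparse __user annotation visibly binds to the pointer alone. The same declarator pitfall exists with a plain '*', as this short sketch shows:

	/* illustrative sketch, not part of the patch */
	#include <stdio.h>

	int main(void)
	{
		int n = 5, *p = &n;	/* '*' binds to p alone; n stays a plain int */

		/* split form, one declaration per idea, as in the hunk above: */
		int out = 7;
		int *ksegments = &out;

		printf("%d %d %d\n", n, *p, *ksegments);
		return 0;
	}
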
90272diff --git a/kernel/kmod.c b/kernel/kmod.c
90273index 2777f40..a689506 100644
90274--- a/kernel/kmod.c
90275+++ b/kernel/kmod.c
90276@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
90277 kfree(info->argv);
90278 }
90279
90280-static int call_modprobe(char *module_name, int wait)
90281+static int call_modprobe(char *module_name, char *module_param, int wait)
90282 {
90283 struct subprocess_info *info;
90284 static char *envp[] = {
90285@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
90286 NULL
90287 };
90288
90289- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
90290+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
90291 if (!argv)
90292 goto out;
90293
90294@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
90295 argv[1] = "-q";
90296 argv[2] = "--";
90297 argv[3] = module_name; /* check free_modprobe_argv() */
90298- argv[4] = NULL;
90299+ argv[4] = module_param;
90300+ argv[5] = NULL;
90301
90302 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
90303 NULL, free_modprobe_argv, NULL);
90304@@ -122,9 +123,8 @@ out:
90305 * If module auto-loading support is disabled then this function
90306 * becomes a no-operation.
90307 */
90308-int __request_module(bool wait, const char *fmt, ...)
90309+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
90310 {
90311- va_list args;
90312 char module_name[MODULE_NAME_LEN];
90313 unsigned int max_modprobes;
90314 int ret;
90315@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
90316 if (!modprobe_path[0])
90317 return 0;
90318
90319- va_start(args, fmt);
90320- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
90321- va_end(args);
90322+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
90323 if (ret >= MODULE_NAME_LEN)
90324 return -ENAMETOOLONG;
90325
90326@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
90327 if (ret)
90328 return ret;
90329
90330+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90331+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90332+ /* hack to work around consolekit/udisks stupidity */
90333+ read_lock(&tasklist_lock);
90334+ if (!strcmp(current->comm, "mount") &&
90335+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
90336+ read_unlock(&tasklist_lock);
90337+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
90338+ return -EPERM;
90339+ }
90340+ read_unlock(&tasklist_lock);
90341+ }
90342+#endif
90343+
90344 /* If modprobe needs a service that is in a module, we get a recursive
90345 * loop. Limit the number of running kmod threads to max_threads/2 or
90346 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
90347@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
90348
90349 trace_module_request(module_name, wait, _RET_IP_);
90350
90351- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90352+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90353
90354 atomic_dec(&kmod_concurrent);
90355 return ret;
90356 }
90357+
90358+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
90359+{
90360+ va_list args;
90361+ int ret;
90362+
90363+ va_start(args, fmt);
90364+ ret = ____request_module(wait, module_param, fmt, args);
90365+ va_end(args);
90366+
90367+ return ret;
90368+}
90369+
90370+int __request_module(bool wait, const char *fmt, ...)
90371+{
90372+ va_list args;
90373+ int ret;
90374+
90375+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90376+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90377+ char module_param[MODULE_NAME_LEN];
90378+
90379+ memset(module_param, 0, sizeof(module_param));
90380+
90381+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
90382+
90383+ va_start(args, fmt);
90384+ ret = ____request_module(wait, module_param, fmt, args);
90385+ va_end(args);
90386+
90387+ return ret;
90388+ }
90389+#endif
90390+
90391+ va_start(args, fmt);
90392+ ret = ____request_module(wait, NULL, fmt, args);
90393+ va_end(args);
90394+
90395+ return ret;
90396+}
90397+
90398 EXPORT_SYMBOL(__request_module);
90399 #endif /* CONFIG_MODULES */
90400
90401 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
90402 {
90403+#ifdef CONFIG_GRKERNSEC
90404+ kfree(info->path);
90405+ info->path = info->origpath;
90406+#endif
90407 if (info->cleanup)
90408 (*info->cleanup)(info);
90409 kfree(info);
90410@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
90411 */
90412 set_user_nice(current, 0);
90413
90414+#ifdef CONFIG_GRKERNSEC
90415+ /* this is race-free as far as userland is concerned because we copied
90416+ out the path to be used prior to this point and are now operating
90417+ on that copy
90418+ */
90419+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
90420+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
90421+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
90422+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
90423+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
90424+ retval = -EPERM;
90425+ goto out;
90426+ }
90427+#endif
90428+
90429 retval = -ENOMEM;
90430 new = prepare_kernel_cred(current);
90431 if (!new)
90432@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
90433 commit_creds(new);
90434
90435 retval = do_execve(getname_kernel(sub_info->path),
90436- (const char __user *const __user *)sub_info->argv,
90437- (const char __user *const __user *)sub_info->envp);
90438+ (const char __user *const __force_user *)sub_info->argv,
90439+ (const char __user *const __force_user *)sub_info->envp);
90440 out:
90441 sub_info->retval = retval;
90442 /* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
90443@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
90444 *
90445 * Thus the __user pointer cast is valid here.
90446 */
90447- sys_wait4(pid, (int __user *)&ret, 0, NULL);
90448+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
90449
90450 /*
90451 * If ret is 0, either ____call_usermodehelper failed and the
90452@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
90453 goto out;
90454
90455 INIT_WORK(&sub_info->work, __call_usermodehelper);
90456+#ifdef CONFIG_GRKERNSEC
90457+ sub_info->origpath = path;
90458+ sub_info->path = kstrdup(path, gfp_mask);
90459+#else
90460 sub_info->path = path;
90461+#endif
90462 sub_info->argv = argv;
90463 sub_info->envp = envp;
90464
90465@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
90466 static int proc_cap_handler(struct ctl_table *table, int write,
90467 void __user *buffer, size_t *lenp, loff_t *ppos)
90468 {
90469- struct ctl_table t;
90470+ ctl_table_no_const t;
90471 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
90472 kernel_cap_t new_cap;
90473 int err, i;
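
The kmod hunks above apply the classic vprintf-style split: __request_module() becomes a variadic front-end over a va_list worker (____request_module), so a second front-end can inject an extra module parameter without duplicating the formatting logic. A self-contained sketch of the pattern; the names and the printed modprobe line are illustrative only:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdarg.h>
	#include <stdio.h>

	static int request_core(const char *param, const char *fmt, va_list ap)
	{
		char name[64];
		int ret = vsnprintf(name, sizeof(name), fmt, ap);
		if (ret < 0 || (size_t)ret >= sizeof(name))
			return -1;			/* -ENAMETOOLONG in the kernel */
		printf("modprobe -q -- %s %s\n", name, param ? param : "");
		return 0;
	}

	static int request_with_param(const char *param, const char *fmt, ...)
	{
		va_list ap;
		va_start(ap, fmt);
		int ret = request_core(param, fmt, ap);
		va_end(ap);
		return ret;
	}

	static int request(const char *fmt, ...)
	{
		va_list ap;
		va_start(ap, fmt);
		int ret = request_core(NULL, fmt, ap);
		va_end(ap);
		return ret;
	}

	int main(void)
	{
		request("fs-%s", "ext4");
		request_with_param("grsec_modharden_normal1000_", "net-pf-%d", 10);
		return 0;
	}

This is also why call_modprobe() grows from a 5- to a 6-slot argv: the optional parameter slides in before the terminating NULL.
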
90474diff --git a/kernel/kprobes.c b/kernel/kprobes.c
90475index c90e417..e6c515d 100644
90476--- a/kernel/kprobes.c
90477+++ b/kernel/kprobes.c
90478@@ -31,6 +31,9 @@
90479 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90480 * <prasanna@in.ibm.com> added function-return probes.
90481 */
90482+#ifdef CONFIG_GRKERNSEC_HIDESYM
90483+#define __INCLUDED_BY_HIDESYM 1
90484+#endif
90485 #include <linux/kprobes.h>
90486 #include <linux/hash.h>
90487 #include <linux/init.h>
90488@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90489
90490 static void *alloc_insn_page(void)
90491 {
90492- return module_alloc(PAGE_SIZE);
90493+ return module_alloc_exec(PAGE_SIZE);
90494 }
90495
90496 static void free_insn_page(void *page)
90497 {
90498- module_memfree(page);
90499+ module_memfree_exec(page);
90500 }
90501
90502 struct kprobe_insn_cache kprobe_insn_slots = {
90503@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90504 kprobe_type = "k";
90505
90506 if (sym)
90507- seq_printf(pi, "%p %s %s+0x%x %s ",
90508+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90509 p->addr, kprobe_type, sym, offset,
90510 (modname ? modname : " "));
90511 else
90512- seq_printf(pi, "%p %s %p ",
90513+ seq_printf(pi, "%pK %s %pK ",
90514 p->addr, kprobe_type, p->addr);
90515
90516 if (!pp)
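
The kprobes hunk above switches the debugfs listing from %p to %pK so probe addresses are censored for unprivileged readers. A rough userspace sketch of the policy shape; the kernel's real %pK decision is driven by kptr_restrict and CAP_SYSLOG, not the raw uid test used here:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdio.h>
	#include <unistd.h>

	static void print_pK_demo(const void *addr)
	{
		if (getuid() == 0)
			printf("%p\n", addr);
		else
			printf("%p\n", (void *)0);	/* censored pointer */
	}

	int main(void)
	{
		int x;
		print_pK_demo(&x);
		return 0;
	}
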
90517diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90518index 6683cce..daf8999 100644
90519--- a/kernel/ksysfs.c
90520+++ b/kernel/ksysfs.c
90521@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90522 {
90523 if (count+1 > UEVENT_HELPER_PATH_LEN)
90524 return -ENOENT;
90525+ if (!capable(CAP_SYS_ADMIN))
90526+ return -EPERM;
90527 memcpy(uevent_helper, buf, count);
90528 uevent_helper[count] = '\0';
90529 if (count && uevent_helper[count-1] == '\n')
90530@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90531 return count;
90532 }
90533
90534-static struct bin_attribute notes_attr = {
90535+static bin_attribute_no_const notes_attr __read_only = {
90536 .attr = {
90537 .name = "notes",
90538 .mode = S_IRUGO,
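
The ksysfs hunk above adds a CAP_SYS_ADMIN gate before the uevent_helper path can be rewritten. The surrounding store logic (bounds check, NUL termination, trailing-newline strip) is worth seeing in one piece; a userspace sketch, with the buffer size invented for the demo:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdio.h>
	#include <string.h>

	#define HELPER_LEN 256

	static char helper[HELPER_LEN];

	static long helper_store(const char *buf, size_t count)
	{
		if (count + 1 > HELPER_LEN)
			return -1;	/* the kernel returns -ENOENT here */
		memcpy(helper, buf, count);
		helper[count] = '\0';
		if (count && helper[count - 1] == '\n')
			helper[count - 1] = '\0';
		return (long)count;
	}

	int main(void)
	{
		const char *in = "/sbin/hotplug\n";

		helper_store(in, strlen(in));
		printf("[%s]\n", helper);
		return 0;
	}
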
90539diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90540index ba77ab5..d6a3e20 100644
90541--- a/kernel/locking/lockdep.c
90542+++ b/kernel/locking/lockdep.c
90543@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90544 end = (unsigned long) &_end,
90545 addr = (unsigned long) obj;
90546
90547+#ifdef CONFIG_PAX_KERNEXEC
90548+ start = ktla_ktva(start);
90549+#endif
90550+
90551 /*
90552 * static variable?
90553 */
90554@@ -743,6 +747,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90555 if (!static_obj(lock->key)) {
90556 debug_locks_off();
90557 printk("INFO: trying to register non-static key.\n");
90558+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90559 printk("the code is fine but needs lockdep annotation.\n");
90560 printk("turning off the locking correctness validator.\n");
90561 dump_stack();
90562@@ -3088,7 +3093,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90563 if (!class)
90564 return 0;
90565 }
90566- atomic_inc((atomic_t *)&class->ops);
90567+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90568 if (very_verbose(class)) {
90569 printk("\nacquire class [%p] %s", class->key, class->name);
90570 if (class->name_version > 1)
90571diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90572index ef43ac4..2720dfa 100644
90573--- a/kernel/locking/lockdep_proc.c
90574+++ b/kernel/locking/lockdep_proc.c
90575@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90576 return 0;
90577 }
90578
90579- seq_printf(m, "%p", class->key);
90580+ seq_printf(m, "%pK", class->key);
90581 #ifdef CONFIG_DEBUG_LOCKDEP
90582 seq_printf(m, " OPS:%8ld", class->ops);
90583 #endif
90584@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90585
90586 list_for_each_entry(entry, &class->locks_after, entry) {
90587 if (entry->distance == 1) {
90588- seq_printf(m, " -> [%p] ", entry->class->key);
90589+ seq_printf(m, " -> [%pK] ", entry->class->key);
90590 print_name(m, entry->class);
90591 seq_puts(m, "\n");
90592 }
90593@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90594 if (!class->key)
90595 continue;
90596
90597- seq_printf(m, "[%p] ", class->key);
90598+ seq_printf(m, "[%pK] ", class->key);
90599 print_name(m, class);
90600 seq_puts(m, "\n");
90601 }
90602@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90603 if (!i)
90604 seq_line(m, '-', 40-namelen, namelen);
90605
90606- snprintf(ip, sizeof(ip), "[<%p>]",
90607+ snprintf(ip, sizeof(ip), "[<%pK>]",
90608 (void *)class->contention_point[i]);
90609 seq_printf(m, "%40s %14lu %29s %pS\n",
90610 name, stats->contention_point[i],
90611@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90612 if (!i)
90613 seq_line(m, '-', 40-namelen, namelen);
90614
90615- snprintf(ip, sizeof(ip), "[<%p>]",
90616+ snprintf(ip, sizeof(ip), "[<%pK>]",
90617 (void *)class->contending_point[i]);
90618 seq_printf(m, "%40s %14lu %29s %pS\n",
90619 name, stats->contending_point[i],
90620diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90621index d1fe2ba..180cd65e 100644
90622--- a/kernel/locking/mcs_spinlock.h
90623+++ b/kernel/locking/mcs_spinlock.h
90624@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90625 */
90626 return;
90627 }
90628- ACCESS_ONCE(prev->next) = node;
90629+ ACCESS_ONCE_RW(prev->next) = node;
90630
90631 /* Wait until the lock holder passes the lock down. */
90632 arch_mcs_spin_lock_contended(&node->locked);
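
The mcs_spinlock hunk above (and the osq_lock hunks further down) replace ACCESS_ONCE with the PaX ACCESS_ONCE_RW variant, which marks the store as an intentional write to data that constification otherwise protects. The underlying mechanism is the same single volatile access; a sketch using the GNU C __typeof__ extension, as the kernel macro itself does:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdio.h>

	#define ACCESS_ONCE_DEMO(x) (*(volatile __typeof__(x) *)&(x))

	struct node { struct node *next; };

	int main(void)
	{
		struct node a = { 0 }, b = { 0 };

		ACCESS_ONCE_DEMO(a.next) = &b;	/* one un-torn, un-cached store */
		printf("%p\n", (void *)a.next);
		return 0;
	}
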
90633diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90634index 3ef3736..9c951fa 100644
90635--- a/kernel/locking/mutex-debug.c
90636+++ b/kernel/locking/mutex-debug.c
90637@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90638 }
90639
90640 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90641- struct thread_info *ti)
90642+ struct task_struct *task)
90643 {
90644 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90645
90646 /* Mark the current thread as blocked on the lock: */
90647- ti->task->blocked_on = waiter;
90648+ task->blocked_on = waiter;
90649 }
90650
90651 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90652- struct thread_info *ti)
90653+ struct task_struct *task)
90654 {
90655 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90656- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90657- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90658- ti->task->blocked_on = NULL;
90659+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90660+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90661+ task->blocked_on = NULL;
90662
90663 list_del_init(&waiter->list);
90664 waiter->task = NULL;
90665diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90666index 0799fd3..d06ae3b 100644
90667--- a/kernel/locking/mutex-debug.h
90668+++ b/kernel/locking/mutex-debug.h
90669@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90670 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90671 extern void debug_mutex_add_waiter(struct mutex *lock,
90672 struct mutex_waiter *waiter,
90673- struct thread_info *ti);
90674+ struct task_struct *task);
90675 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90676- struct thread_info *ti);
90677+ struct task_struct *task);
90678 extern void debug_mutex_unlock(struct mutex *lock);
90679 extern void debug_mutex_init(struct mutex *lock, const char *name,
90680 struct lock_class_key *key);
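
The mutex-debug hunks above change the waiter hooks to take a task_struct directly instead of a thread_info, dropping the ti->task hop at every call site. A pared-down sketch of the before/after shapes; the structs are stand-ins, not the kernel definitions:

	/* illustrative userspace sketch, not part of the patch */
	#include <stdio.h>

	struct task_demo { const char *comm; void *blocked_on; };
	struct thread_info_demo { struct task_demo *task; };

	/* old shape: the callee needs ti only to reach ti->task */
	static void add_waiter_old(struct thread_info_demo *ti, void *waiter)
	{
		ti->task->blocked_on = waiter;
	}

	/* new shape: pass the task directly and drop the indirection */
	static void add_waiter_new(struct task_demo *task, void *waiter)
	{
		task->blocked_on = waiter;
	}

	int main(void)
	{
		struct task_demo t = { "demo", NULL };
		struct thread_info_demo ti = { &t };
		int waiter;

		add_waiter_old(&ti, &waiter);
		add_waiter_new(&t, &waiter);
		printf("%s blocked_on=%p\n", t.comm, t.blocked_on);
		return 0;
	}
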
90681diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90682index 94674e5..de4966f 100644
90683--- a/kernel/locking/mutex.c
90684+++ b/kernel/locking/mutex.c
90685@@ -542,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90686 goto skip_wait;
90687
90688 debug_mutex_lock_common(lock, &waiter);
90689- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90690+ debug_mutex_add_waiter(lock, &waiter, task);
90691
90692 /* add waiting tasks to the end of the waitqueue (FIFO): */
90693 list_add_tail(&waiter.list, &lock->wait_list);
90694@@ -589,7 +589,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90695 }
90696 __set_task_state(task, TASK_RUNNING);
90697
90698- mutex_remove_waiter(lock, &waiter, current_thread_info());
90699+ mutex_remove_waiter(lock, &waiter, task);
90700 /* set it to 0 if there are no waiters left: */
90701 if (likely(list_empty(&lock->wait_list)))
90702 atomic_set(&lock->count, 0);
90703@@ -610,7 +610,7 @@ skip_wait:
90704 return 0;
90705
90706 err:
90707- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90708+ mutex_remove_waiter(lock, &waiter, task);
90709 spin_unlock_mutex(&lock->wait_lock, flags);
90710 debug_mutex_free_waiter(&waiter);
90711 mutex_release(&lock->dep_map, 1, ip);
90712diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
90713index c112d00..1946ad9 100644
90714--- a/kernel/locking/osq_lock.c
90715+++ b/kernel/locking/osq_lock.c
90716@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90717
90718 prev = decode_cpu(old);
90719 node->prev = prev;
90720- ACCESS_ONCE(prev->next) = node;
90721+ ACCESS_ONCE_RW(prev->next) = node;
90722
90723 /*
90724 * Normally @prev is untouchable after the above store; because at that
90725@@ -170,8 +170,8 @@ unqueue:
90726 * it will wait in Step-A.
90727 */
90728
90729- ACCESS_ONCE(next->prev) = prev;
90730- ACCESS_ONCE(prev->next) = next;
90731+ ACCESS_ONCE_RW(next->prev) = prev;
90732+ ACCESS_ONCE_RW(prev->next) = next;
90733
90734 return false;
90735 }
90736@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90737 node = this_cpu_ptr(&osq_node);
90738 next = xchg(&node->next, NULL);
90739 if (next) {
90740- ACCESS_ONCE(next->locked) = 1;
90741+ ACCESS_ONCE_RW(next->locked) = 1;
90742 return;
90743 }
90744
90745 next = osq_wait_next(lock, node, NULL);
90746 if (next)
90747- ACCESS_ONCE(next->locked) = 1;
90748+ ACCESS_ONCE_RW(next->locked) = 1;
90749 }
90750diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90751index 1d96dd0..994ff19 100644
90752--- a/kernel/locking/rtmutex-tester.c
90753+++ b/kernel/locking/rtmutex-tester.c
90754@@ -22,7 +22,7 @@
90755 #define MAX_RT_TEST_MUTEXES 8
90756
90757 static spinlock_t rttest_lock;
90758-static atomic_t rttest_event;
90759+static atomic_unchecked_t rttest_event;
90760
90761 struct test_thread_data {
90762 int opcode;
90763@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90764
90765 case RTTEST_LOCKCONT:
90766 td->mutexes[td->opdata] = 1;
90767- td->event = atomic_add_return(1, &rttest_event);
90768+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90769 return 0;
90770
90771 case RTTEST_RESET:
90772@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90773 return 0;
90774
90775 case RTTEST_RESETEVENT:
90776- atomic_set(&rttest_event, 0);
90777+ atomic_set_unchecked(&rttest_event, 0);
90778 return 0;
90779
90780 default:
90781@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90782 return ret;
90783
90784 td->mutexes[id] = 1;
90785- td->event = atomic_add_return(1, &rttest_event);
90786+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90787 rt_mutex_lock(&mutexes[id]);
90788- td->event = atomic_add_return(1, &rttest_event);
90789+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90790 td->mutexes[id] = 4;
90791 return 0;
90792
90793@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90794 return ret;
90795
90796 td->mutexes[id] = 1;
90797- td->event = atomic_add_return(1, &rttest_event);
90798+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90799 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90800- td->event = atomic_add_return(1, &rttest_event);
90801+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90802 td->mutexes[id] = ret ? 0 : 4;
90803 return ret ? -EINTR : 0;
90804
90805@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90806 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90807 return ret;
90808
90809- td->event = atomic_add_return(1, &rttest_event);
90810+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90811 rt_mutex_unlock(&mutexes[id]);
90812- td->event = atomic_add_return(1, &rttest_event);
90813+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90814 td->mutexes[id] = 0;
90815 return 0;
90816
90817@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90818 break;
90819
90820 td->mutexes[dat] = 2;
90821- td->event = atomic_add_return(1, &rttest_event);
90822+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90823 break;
90824
90825 default:
90826@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90827 return;
90828
90829 td->mutexes[dat] = 3;
90830- td->event = atomic_add_return(1, &rttest_event);
90831+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90832 break;
90833
90834 case RTTEST_LOCKNOWAIT:
90835@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90836 return;
90837
90838 td->mutexes[dat] = 1;
90839- td->event = atomic_add_return(1, &rttest_event);
90840+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90841 return;
90842
90843 default:
90844diff --git a/kernel/module.c b/kernel/module.c
90845index ec53f59..67d9655 100644
90846--- a/kernel/module.c
90847+++ b/kernel/module.c
90848@@ -59,6 +59,7 @@
90849 #include <linux/jump_label.h>
90850 #include <linux/pfn.h>
90851 #include <linux/bsearch.h>
90852+#include <linux/grsecurity.h>
90853 #include <uapi/linux/module.h>
90854 #include "module-internal.h"
90855
90856@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90857
90858 /* Bounds of module allocation, for speeding __module_address.
90859 * Protected by module_mutex. */
90860-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90861+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90862+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90863
90864 int register_module_notifier(struct notifier_block *nb)
90865 {
90866@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90867 return true;
90868
90869 list_for_each_entry_rcu(mod, &modules, list) {
90870- struct symsearch arr[] = {
90871+ struct symsearch modarr[] = {
90872 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90873 NOT_GPL_ONLY, false },
90874 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90875@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90876 if (mod->state == MODULE_STATE_UNFORMED)
90877 continue;
90878
90879- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90880+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90881 return true;
90882 }
90883 return false;
90884@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90885 if (!pcpusec->sh_size)
90886 return 0;
90887
90888- if (align > PAGE_SIZE) {
90889+ if (align-1 >= PAGE_SIZE) {
90890 pr_warn("%s: per-cpu alignment %li > %li\n",
90891 mod->name, align, PAGE_SIZE);
90892 align = PAGE_SIZE;
90893@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90894 static ssize_t show_coresize(struct module_attribute *mattr,
90895 struct module_kobject *mk, char *buffer)
90896 {
90897- return sprintf(buffer, "%u\n", mk->mod->core_size);
90898+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90899 }
90900
90901 static struct module_attribute modinfo_coresize =
90902@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90903 static ssize_t show_initsize(struct module_attribute *mattr,
90904 struct module_kobject *mk, char *buffer)
90905 {
90906- return sprintf(buffer, "%u\n", mk->mod->init_size);
90907+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90908 }
90909
90910 static struct module_attribute modinfo_initsize =
90911@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90912 goto bad_version;
90913 }
90914
90915+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90916+ /*
90917+ * avoid potentially printing gibberish on attempted load
90918+ * of a module randomized with a different seed
90919+ */
90920+ pr_warn("no symbol version for %s\n", symname);
90921+#else
90922 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90923+#endif
90924 return 0;
90925
90926 bad_version:
90927+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90928+ /*
90929+ * avoid potentially printing gibberish on attempted load
90930+ * of a module randomized with a different seed
90931+ */
90932+ pr_warn("attempted module disagrees about version of symbol %s\n",
90933+ symname);
90934+#else
90935 pr_warn("%s: disagrees about version of symbol %s\n",
90936 mod->name, symname);
90937+#endif
90938 return 0;
90939 }
90940
90941@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
90942 */
90943 #ifdef CONFIG_SYSFS
90944
90945-#ifdef CONFIG_KALLSYMS
90946+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90947 static inline bool sect_empty(const Elf_Shdr *sect)
90948 {
90949 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90950@@ -1419,7 +1438,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90951 {
90952 unsigned int notes, loaded, i;
90953 struct module_notes_attrs *notes_attrs;
90954- struct bin_attribute *nattr;
90955+ bin_attribute_no_const *nattr;
90956
90957 /* failed to create section attributes, so can't create notes */
90958 if (!mod->sect_attrs)
90959@@ -1531,7 +1550,7 @@ static void del_usage_links(struct module *mod)
90960 static int module_add_modinfo_attrs(struct module *mod)
90961 {
90962 struct module_attribute *attr;
90963- struct module_attribute *temp_attr;
90964+ module_attribute_no_const *temp_attr;
90965 int error = 0;
90966 int i;
90967
90968@@ -1741,21 +1760,21 @@ static void set_section_ro_nx(void *base,
90969
90970 static void unset_module_core_ro_nx(struct module *mod)
90971 {
90972- set_page_attributes(mod->module_core + mod->core_text_size,
90973- mod->module_core + mod->core_size,
90974+ set_page_attributes(mod->module_core_rw,
90975+ mod->module_core_rw + mod->core_size_rw,
90976 set_memory_x);
90977- set_page_attributes(mod->module_core,
90978- mod->module_core + mod->core_ro_size,
90979+ set_page_attributes(mod->module_core_rx,
90980+ mod->module_core_rx + mod->core_size_rx,
90981 set_memory_rw);
90982 }
90983
90984 static void unset_module_init_ro_nx(struct module *mod)
90985 {
90986- set_page_attributes(mod->module_init + mod->init_text_size,
90987- mod->module_init + mod->init_size,
90988+ set_page_attributes(mod->module_init_rw,
90989+ mod->module_init_rw + mod->init_size_rw,
90990 set_memory_x);
90991- set_page_attributes(mod->module_init,
90992- mod->module_init + mod->init_ro_size,
90993+ set_page_attributes(mod->module_init_rx,
90994+ mod->module_init_rx + mod->init_size_rx,
90995 set_memory_rw);
90996 }
90997
90998@@ -1768,14 +1787,14 @@ void set_all_modules_text_rw(void)
90999 list_for_each_entry_rcu(mod, &modules, list) {
91000 if (mod->state == MODULE_STATE_UNFORMED)
91001 continue;
91002- if ((mod->module_core) && (mod->core_text_size)) {
91003- set_page_attributes(mod->module_core,
91004- mod->module_core + mod->core_text_size,
91005+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91006+ set_page_attributes(mod->module_core_rx,
91007+ mod->module_core_rx + mod->core_size_rx,
91008 set_memory_rw);
91009 }
91010- if ((mod->module_init) && (mod->init_text_size)) {
91011- set_page_attributes(mod->module_init,
91012- mod->module_init + mod->init_text_size,
91013+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91014+ set_page_attributes(mod->module_init_rx,
91015+ mod->module_init_rx + mod->init_size_rx,
91016 set_memory_rw);
91017 }
91018 }
91019@@ -1791,14 +1810,14 @@ void set_all_modules_text_ro(void)
91020 list_for_each_entry_rcu(mod, &modules, list) {
91021 if (mod->state == MODULE_STATE_UNFORMED)
91022 continue;
91023- if ((mod->module_core) && (mod->core_text_size)) {
91024- set_page_attributes(mod->module_core,
91025- mod->module_core + mod->core_text_size,
91026+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91027+ set_page_attributes(mod->module_core_rx,
91028+ mod->module_core_rx + mod->core_size_rx,
91029 set_memory_ro);
91030 }
91031- if ((mod->module_init) && (mod->init_text_size)) {
91032- set_page_attributes(mod->module_init,
91033- mod->module_init + mod->init_text_size,
91034+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91035+ set_page_attributes(mod->module_init_rx,
91036+ mod->module_init_rx + mod->init_size_rx,
91037 set_memory_ro);
91038 }
91039 }
91040@@ -1807,7 +1826,15 @@ void set_all_modules_text_ro(void)
91041 #else
91042 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
91043 static void unset_module_core_ro_nx(struct module *mod) { }
91044-static void unset_module_init_ro_nx(struct module *mod) { }
91045+static void unset_module_init_ro_nx(struct module *mod)
91046+{
91047+
91048+#ifdef CONFIG_PAX_KERNEXEC
91049+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
91050+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
91051+#endif
91052+
91053+}
91054 #endif
91055
91056 void __weak module_memfree(void *module_region)
91057@@ -1861,16 +1888,19 @@ static void free_module(struct module *mod)
91058 /* This may be NULL, but that's OK */
91059 unset_module_init_ro_nx(mod);
91060 module_arch_freeing_init(mod);
91061- module_memfree(mod->module_init);
91062+ module_memfree(mod->module_init_rw);
91063+ module_memfree_exec(mod->module_init_rx);
91064 kfree(mod->args);
91065 percpu_modfree(mod);
91066
91067 /* Free lock-classes; relies on the preceding sync_rcu(). */
91068- lockdep_free_key_range(mod->module_core, mod->core_size);
91069+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91070+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91071
91072 /* Finally, free the core (containing the module structure) */
91073 unset_module_core_ro_nx(mod);
91074- module_memfree(mod->module_core);
91075+ module_memfree_exec(mod->module_core_rx);
91076+ module_memfree(mod->module_core_rw);
91077
91078 #ifdef CONFIG_MPU
91079 update_protections(current->mm);
91080@@ -1939,9 +1969,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91081 int ret = 0;
91082 const struct kernel_symbol *ksym;
91083
91084+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91085+ int is_fs_load = 0;
91086+ int register_filesystem_found = 0;
91087+ char *p;
91088+
91089+ p = strstr(mod->args, "grsec_modharden_fs");
91090+ if (p) {
91091+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
91092+ /* copy \0 as well */
91093+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
91094+ is_fs_load = 1;
91095+ }
91096+#endif
91097+
91098 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
91099 const char *name = info->strtab + sym[i].st_name;
91100
91101+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91102+ /* it's a real shame this will never get ripped and copied
91103+ upstream! ;(
91104+ */
91105+ if (is_fs_load && !strcmp(name, "register_filesystem"))
91106+ register_filesystem_found = 1;
91107+#endif
91108+
91109 switch (sym[i].st_shndx) {
91110 case SHN_COMMON:
91111 /* Ignore common symbols */
91112@@ -1966,7 +2018,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91113 ksym = resolve_symbol_wait(mod, info, name);
91114 /* Ok if resolved. */
91115 if (ksym && !IS_ERR(ksym)) {
91116+ pax_open_kernel();
91117 sym[i].st_value = ksym->value;
91118+ pax_close_kernel();
91119 break;
91120 }
91121
91122@@ -1985,11 +2039,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91123 secbase = (unsigned long)mod_percpu(mod);
91124 else
91125 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
91126+ pax_open_kernel();
91127 sym[i].st_value += secbase;
91128+ pax_close_kernel();
91129 break;
91130 }
91131 }
91132
91133+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91134+ if (is_fs_load && !register_filesystem_found) {
91135+ printk(KERN_ALERT "grsec: denied attempt to load non-fs module %.64s through mount\n", mod->name);
91136+ ret = -EPERM;
91137+ }
91138+#endif
91139+
91140 return ret;
91141 }
91142
91143@@ -2073,22 +2136,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
91144 || s->sh_entsize != ~0UL
91145 || strstarts(sname, ".init"))
91146 continue;
91147- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
91148+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91149+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
91150+ else
91151+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
91152 pr_debug("\t%s\n", sname);
91153 }
91154- switch (m) {
91155- case 0: /* executable */
91156- mod->core_size = debug_align(mod->core_size);
91157- mod->core_text_size = mod->core_size;
91158- break;
91159- case 1: /* RO: text and ro-data */
91160- mod->core_size = debug_align(mod->core_size);
91161- mod->core_ro_size = mod->core_size;
91162- break;
91163- case 3: /* whole core */
91164- mod->core_size = debug_align(mod->core_size);
91165- break;
91166- }
91167 }
91168
91169 pr_debug("Init section allocation order:\n");
91170@@ -2102,23 +2155,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
91171 || s->sh_entsize != ~0UL
91172 || !strstarts(sname, ".init"))
91173 continue;
91174- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
91175- | INIT_OFFSET_MASK);
91176+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91177+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
91178+ else
91179+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
91180+ s->sh_entsize |= INIT_OFFSET_MASK;
91181 pr_debug("\t%s\n", sname);
91182 }
91183- switch (m) {
91184- case 0: /* executable */
91185- mod->init_size = debug_align(mod->init_size);
91186- mod->init_text_size = mod->init_size;
91187- break;
91188- case 1: /* RO: text and ro-data */
91189- mod->init_size = debug_align(mod->init_size);
91190- mod->init_ro_size = mod->init_size;
91191- break;
91192- case 3: /* whole init */
91193- mod->init_size = debug_align(mod->init_size);
91194- break;
91195- }
91196 }
91197 }
91198
91199@@ -2291,7 +2334,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91200
91201 /* Put symbol section at end of init part of module. */
91202 symsect->sh_flags |= SHF_ALLOC;
91203- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
91204+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
91205 info->index.sym) | INIT_OFFSET_MASK;
91206 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
91207
91208@@ -2308,16 +2351,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91209 }
91210
91211 /* Append room for core symbols at end of core part. */
91212- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
91213- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
91214- mod->core_size += strtab_size;
91215- mod->core_size = debug_align(mod->core_size);
91216+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
91217+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
91218+ mod->core_size_rx += strtab_size;
91219+ mod->core_size_rx = debug_align(mod->core_size_rx);
91220
91221 /* Put string table section at end of init part of module. */
91222 strsect->sh_flags |= SHF_ALLOC;
91223- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
91224+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
91225 info->index.str) | INIT_OFFSET_MASK;
91226- mod->init_size = debug_align(mod->init_size);
91227+ mod->init_size_rx = debug_align(mod->init_size_rx);
91228 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
91229 }
91230
91231@@ -2334,12 +2377,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91232 /* Make sure we get permanent strtab: don't use info->strtab. */
91233 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
91234
91235+ pax_open_kernel();
91236+
91237 /* Set types up while we still have access to sections. */
91238 for (i = 0; i < mod->num_symtab; i++)
91239 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
91240
91241- mod->core_symtab = dst = mod->module_core + info->symoffs;
91242- mod->core_strtab = s = mod->module_core + info->stroffs;
91243+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
91244+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
91245 src = mod->symtab;
91246 for (ndst = i = 0; i < mod->num_symtab; i++) {
91247 if (i == 0 ||
91248@@ -2351,6 +2396,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91249 }
91250 }
91251 mod->core_num_syms = ndst;
91252+
91253+ pax_close_kernel();
91254 }
91255 #else
91256 static inline void layout_symtab(struct module *mod, struct load_info *info)
91257@@ -2384,17 +2431,33 @@ void * __weak module_alloc(unsigned long size)
91258 return vmalloc_exec(size);
91259 }
91260
91261-static void *module_alloc_update_bounds(unsigned long size)
91262+static void *module_alloc_update_bounds_rw(unsigned long size)
91263 {
91264 void *ret = module_alloc(size);
91265
91266 if (ret) {
91267 mutex_lock(&module_mutex);
91268 /* Update module bounds. */
91269- if ((unsigned long)ret < module_addr_min)
91270- module_addr_min = (unsigned long)ret;
91271- if ((unsigned long)ret + size > module_addr_max)
91272- module_addr_max = (unsigned long)ret + size;
91273+ if ((unsigned long)ret < module_addr_min_rw)
91274+ module_addr_min_rw = (unsigned long)ret;
91275+ if ((unsigned long)ret + size > module_addr_max_rw)
91276+ module_addr_max_rw = (unsigned long)ret + size;
91277+ mutex_unlock(&module_mutex);
91278+ }
91279+ return ret;
91280+}
91281+
91282+static void *module_alloc_update_bounds_rx(unsigned long size)
91283+{
91284+ void *ret = module_alloc_exec(size);
91285+
91286+ if (ret) {
91287+ mutex_lock(&module_mutex);
91288+ /* Update module bounds. */
91289+ if ((unsigned long)ret < module_addr_min_rx)
91290+ module_addr_min_rx = (unsigned long)ret;
91291+ if ((unsigned long)ret + size > module_addr_max_rx)
91292+ module_addr_max_rx = (unsigned long)ret + size;
91293 mutex_unlock(&module_mutex);
91294 }
91295 return ret;
91296@@ -2665,7 +2728,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91297 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
91298
91299 if (info->index.sym == 0) {
91300+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91301+ /*
91302+ * avoid potentially printing gibberish on attempted load
91303+ * of a module randomized with a different seed
91304+ */
91305+ pr_warn("module has no symbols (stripped?)\n");
91306+#else
91307 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
91308+#endif
91309 return ERR_PTR(-ENOEXEC);
91310 }
91311
91312@@ -2681,8 +2752,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91313 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91314 {
91315 const char *modmagic = get_modinfo(info, "vermagic");
91316+ const char *license = get_modinfo(info, "license");
91317 int err;
91318
91319+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
91320+ if (!license || !license_is_gpl_compatible(license))
91321+ return -ENOEXEC;
91322+#endif
91323+
91324 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
91325 modmagic = NULL;
91326
91327@@ -2707,7 +2784,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91328 }
91329
91330 /* Set up license info based on the info section */
91331- set_license(mod, get_modinfo(info, "license"));
91332+ set_license(mod, license);
91333
91334 return 0;
91335 }
91336@@ -2801,7 +2878,7 @@ static int move_module(struct module *mod, struct load_info *info)
91337 void *ptr;
91338
91339 /* Do the allocs. */
91340- ptr = module_alloc_update_bounds(mod->core_size);
91341+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
91342 /*
91343 * The pointer to this block is stored in the module structure
91344 * which is inside the block. Just mark it as not being a
91345@@ -2811,11 +2888,11 @@ static int move_module(struct module *mod, struct load_info *info)
91346 if (!ptr)
91347 return -ENOMEM;
91348
91349- memset(ptr, 0, mod->core_size);
91350- mod->module_core = ptr;
91351+ memset(ptr, 0, mod->core_size_rw);
91352+ mod->module_core_rw = ptr;
91353
91354- if (mod->init_size) {
91355- ptr = module_alloc_update_bounds(mod->init_size);
91356+ if (mod->init_size_rw) {
91357+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
91358 /*
91359 * The pointer to this block is stored in the module structure
91360 * which is inside the block. This block doesn't need to be
91361@@ -2824,13 +2901,45 @@ static int move_module(struct module *mod, struct load_info *info)
91362 */
91363 kmemleak_ignore(ptr);
91364 if (!ptr) {
91365- module_memfree(mod->module_core);
91366+ module_memfree(mod->module_core_rw);
91367 return -ENOMEM;
91368 }
91369- memset(ptr, 0, mod->init_size);
91370- mod->module_init = ptr;
91371+ memset(ptr, 0, mod->init_size_rw);
91372+ mod->module_init_rw = ptr;
91373 } else
91374- mod->module_init = NULL;
91375+ mod->module_init_rw = NULL;
91376+
91377+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
91378+ kmemleak_not_leak(ptr);
91379+ if (!ptr) {
91380+ if (mod->module_init_rw)
91381+ module_memfree(mod->module_init_rw);
91382+ module_memfree(mod->module_core_rw);
91383+ return -ENOMEM;
91384+ }
91385+
91386+ pax_open_kernel();
91387+ memset(ptr, 0, mod->core_size_rx);
91388+ pax_close_kernel();
91389+ mod->module_core_rx = ptr;
91390+
91391+ if (mod->init_size_rx) {
91392+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
91393+ kmemleak_ignore(ptr);
91394+ if (!ptr && mod->init_size_rx) {
91395+ module_memfree_exec(mod->module_core_rx);
91396+ if (mod->module_init_rw)
91397+ module_memfree(mod->module_init_rw);
91398+ module_memfree(mod->module_core_rw);
91399+ return -ENOMEM;
91400+ }
91401+
91402+ pax_open_kernel();
91403+ memset(ptr, 0, mod->init_size_rx);
91404+ pax_close_kernel();
91405+ mod->module_init_rx = ptr;
91406+ } else
91407+ mod->module_init_rx = NULL;
91408
91409 /* Transfer each section which specifies SHF_ALLOC */
91410 pr_debug("final section addresses:\n");
91411@@ -2841,16 +2950,45 @@ static int move_module(struct module *mod, struct load_info *info)
91412 if (!(shdr->sh_flags & SHF_ALLOC))
91413 continue;
91414
91415- if (shdr->sh_entsize & INIT_OFFSET_MASK)
91416- dest = mod->module_init
91417- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91418- else
91419- dest = mod->module_core + shdr->sh_entsize;
91420+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
91421+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91422+ dest = mod->module_init_rw
91423+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91424+ else
91425+ dest = mod->module_init_rx
91426+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91427+ } else {
91428+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91429+ dest = mod->module_core_rw + shdr->sh_entsize;
91430+ else
91431+ dest = mod->module_core_rx + shdr->sh_entsize;
91432+ }
91433+
91434+ if (shdr->sh_type != SHT_NOBITS) {
91435+
91436+#ifdef CONFIG_PAX_KERNEXEC
91437+#ifdef CONFIG_X86_64
91438+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
91439+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91440+#endif
91441+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
91442+ pax_open_kernel();
91443+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91444+ pax_close_kernel();
91445+ } else
91446+#endif
91447
91448- if (shdr->sh_type != SHT_NOBITS)
91449 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91450+ }
91451 /* Update sh_addr to point to copy in image. */
91452- shdr->sh_addr = (unsigned long)dest;
91453+
91454+#ifdef CONFIG_PAX_KERNEXEC
91455+ if (shdr->sh_flags & SHF_EXECINSTR)
91456+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
91457+ else
91458+#endif
91459+
91460+ shdr->sh_addr = (unsigned long)dest;
91461 pr_debug("\t0x%lx %s\n",
91462 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
91463 }
91464@@ -2907,12 +3045,12 @@ static void flush_module_icache(const struct module *mod)
91465 * Do it before processing of module parameters, so the module
91466 * can provide parameter accessor functions of its own.
91467 */
91468- if (mod->module_init)
91469- flush_icache_range((unsigned long)mod->module_init,
91470- (unsigned long)mod->module_init
91471- + mod->init_size);
91472- flush_icache_range((unsigned long)mod->module_core,
91473- (unsigned long)mod->module_core + mod->core_size);
91474+ if (mod->module_init_rx)
91475+ flush_icache_range((unsigned long)mod->module_init_rx,
91476+ (unsigned long)mod->module_init_rx
91477+ + mod->init_size_rx);
91478+ flush_icache_range((unsigned long)mod->module_core_rx,
91479+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91480
91481 set_fs(old_fs);
91482 }
91483@@ -2970,8 +3108,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
91484 {
91485 percpu_modfree(mod);
91486 module_arch_freeing_init(mod);
91487- module_memfree(mod->module_init);
91488- module_memfree(mod->module_core);
91489+ module_memfree_exec(mod->module_init_rx);
91490+ module_memfree_exec(mod->module_core_rx);
91491+ module_memfree(mod->module_init_rw);
91492+ module_memfree(mod->module_core_rw);
91493 }
91494
91495 int __weak module_finalize(const Elf_Ehdr *hdr,
91496@@ -2984,7 +3124,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91497 static int post_relocation(struct module *mod, const struct load_info *info)
91498 {
91499 /* Sort exception table now relocations are done. */
91500+ pax_open_kernel();
91501 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91502+ pax_close_kernel();
91503
91504 /* Copy relocated percpu area over. */
91505 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91506@@ -3032,13 +3174,15 @@ static void do_mod_ctors(struct module *mod)
91507 /* For freeing module_init on success, in case kallsyms traversing */
91508 struct mod_initfree {
91509 struct rcu_head rcu;
91510- void *module_init;
91511+ void *module_init_rw;
91512+ void *module_init_rx;
91513 };
91514
91515 static void do_free_init(struct rcu_head *head)
91516 {
91517 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
91518- module_memfree(m->module_init);
91519+ module_memfree(m->module_init_rw);
91520+ module_memfree_exec(m->module_init_rx);
91521 kfree(m);
91522 }
91523
91524@@ -3058,7 +3202,8 @@ static noinline int do_init_module(struct module *mod)
91525 ret = -ENOMEM;
91526 goto fail;
91527 }
91528- freeinit->module_init = mod->module_init;
91529+ freeinit->module_init_rw = mod->module_init_rw;
91530+ freeinit->module_init_rx = mod->module_init_rx;
91531
91532 /*
91533 * We want to find out whether @mod uses async during init. Clear
91534@@ -3117,10 +3262,10 @@ static noinline int do_init_module(struct module *mod)
91535 #endif
91536 unset_module_init_ro_nx(mod);
91537 module_arch_freeing_init(mod);
91538- mod->module_init = NULL;
91539- mod->init_size = 0;
91540- mod->init_ro_size = 0;
91541- mod->init_text_size = 0;
91542+ mod->module_init_rw = NULL;
91543+ mod->module_init_rx = NULL;
91544+ mod->init_size_rw = 0;
91545+ mod->init_size_rx = 0;
91546 /*
91547 * We want to free module_init, but be aware that kallsyms may be
91548 * walking this with preempt disabled. In all the failure paths,
91549@@ -3208,16 +3353,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91550 module_bug_finalize(info->hdr, info->sechdrs, mod);
91551
91552 /* Set RO and NX regions for core */
91553- set_section_ro_nx(mod->module_core,
91554- mod->core_text_size,
91555- mod->core_ro_size,
91556- mod->core_size);
91557+ set_section_ro_nx(mod->module_core_rx,
91558+ mod->core_size_rx,
91559+ mod->core_size_rx,
91560+ mod->core_size_rx);
91561
91562 /* Set RO and NX regions for init */
91563- set_section_ro_nx(mod->module_init,
91564- mod->init_text_size,
91565- mod->init_ro_size,
91566- mod->init_size);
91567+ set_section_ro_nx(mod->module_init_rx,
91568+ mod->init_size_rx,
91569+ mod->init_size_rx,
91570+ mod->init_size_rx);
91571
91572 /* Mark state as coming so strong_try_module_get() ignores us,
91573 * but kallsyms etc. can see us. */
91574@@ -3301,9 +3446,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91575 if (err)
91576 goto free_unload;
91577
91578+ /* Now copy in args */
91579+ mod->args = strndup_user(uargs, ~0UL >> 1);
91580+ if (IS_ERR(mod->args)) {
91581+ err = PTR_ERR(mod->args);
91582+ goto free_unload;
91583+ }
91584+
91585 /* Set up MODINFO_ATTR fields */
91586 setup_modinfo(mod, info);
91587
91588+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91589+ {
91590+ char *p, *p2;
91591+
91592+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91593+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
91594+ err = -EPERM;
91595+ goto free_modinfo;
91596+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91597+ p += sizeof("grsec_modharden_normal") - 1;
91598+ p2 = strstr(p, "_");
91599+ if (p2) {
91600+ *p2 = '\0';
91601+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91602+ *p2 = '_';
91603+ }
91604+ err = -EPERM;
91605+ goto free_modinfo;
91606+ }
91607+ }
91608+#endif
91609+
91610 /* Fix up syms, so that st_value is a pointer to location. */
91611 err = simplify_symbols(mod, info);
91612 if (err < 0)
91613@@ -3319,13 +3493,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91614
91615 flush_module_icache(mod);
91616
91617- /* Now copy in args */
91618- mod->args = strndup_user(uargs, ~0UL >> 1);
91619- if (IS_ERR(mod->args)) {
91620- err = PTR_ERR(mod->args);
91621- goto free_arch_cleanup;
91622- }
91623-
91624 dynamic_debug_setup(info->debug, info->num_debug);
91625
91626 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91627@@ -3373,11 +3540,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91628 ddebug_cleanup:
91629 dynamic_debug_remove(info->debug);
91630 synchronize_sched();
91631- kfree(mod->args);
91632- free_arch_cleanup:
91633 module_arch_cleanup(mod);
91634 free_modinfo:
91635 free_modinfo(mod);
91636+ kfree(mod->args);
91637 free_unload:
91638 module_unload_free(mod);
91639 unlink_mod:
91640@@ -3390,7 +3556,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
91641 mutex_unlock(&module_mutex);
91642 free_module:
91643 /* Free lock-classes; relies on the preceding sync_rcu() */
91644- lockdep_free_key_range(mod->module_core, mod->core_size);
91645+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91646+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91647
91648 module_deallocate(mod, info);
91649 free_copy:
91650@@ -3467,10 +3634,16 @@ static const char *get_ksymbol(struct module *mod,
91651 unsigned long nextval;
91652
91653 /* At worst, next value is at end of module */
91654- if (within_module_init(addr, mod))
91655- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91656+ if (within_module_init_rx(addr, mod))
91657+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91658+ else if (within_module_init_rw(addr, mod))
91659+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91660+ else if (within_module_core_rx(addr, mod))
91661+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91662+ else if (within_module_core_rw(addr, mod))
91663+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91664 else
91665- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91666+ return NULL;
91667
91668 /* Scan for closest preceding symbol, and next symbol. (ELF
91669 starts real symbols at 1). */
91670@@ -3718,7 +3891,7 @@ static int m_show(struct seq_file *m, void *p)
91671 return 0;
91672
91673 seq_printf(m, "%s %u",
91674- mod->name, mod->init_size + mod->core_size);
91675+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91676 print_unload_info(m, mod);
91677
91678 /* Informative for users. */
91679@@ -3727,7 +3900,7 @@ static int m_show(struct seq_file *m, void *p)
91680 mod->state == MODULE_STATE_COMING ? "Loading" :
91681 "Live");
91682 /* Used by oprofile and other similar tools. */
91683- seq_printf(m, " 0x%pK", mod->module_core);
91684+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91685
91686 /* Taints info */
91687 if (mod->taints)
91688@@ -3763,7 +3936,17 @@ static const struct file_operations proc_modules_operations = {
91689
91690 static int __init proc_modules_init(void)
91691 {
91692+#ifndef CONFIG_GRKERNSEC_HIDESYM
91693+#ifdef CONFIG_GRKERNSEC_PROC_USER
91694+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91695+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91696+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91697+#else
91698 proc_create("modules", 0, NULL, &proc_modules_operations);
91699+#endif
91700+#else
91701+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91702+#endif
91703 return 0;
91704 }
91705 module_init(proc_modules_init);
91706@@ -3824,7 +4007,8 @@ struct module *__module_address(unsigned long addr)
91707 {
91708 struct module *mod;
91709
91710- if (addr < module_addr_min || addr > module_addr_max)
91711+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91712+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91713 return NULL;
91714
91715 list_for_each_entry_rcu(mod, &modules, list) {
91716@@ -3865,11 +4049,20 @@ bool is_module_text_address(unsigned long addr)
91717 */
91718 struct module *__module_text_address(unsigned long addr)
91719 {
91720- struct module *mod = __module_address(addr);
91721+ struct module *mod;
91722+
91723+#ifdef CONFIG_X86_32
91724+ addr = ktla_ktva(addr);
91725+#endif
91726+
91727+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91728+ return NULL;
91729+
91730+ mod = __module_address(addr);
91731+
91732 if (mod) {
91733 /* Make sure it's within the text section. */
91734- if (!within(addr, mod->module_init, mod->init_text_size)
91735- && !within(addr, mod->module_core, mod->core_text_size))
91736+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91737 mod = NULL;
91738 }
91739 return mod;
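
The kernel/module.c hunks above all hinge on one placement rule: at layout time, a section that is writable (SHF_WRITE) or not resident (lacking SHF_ALLOC) goes into the RW mapping, and everything else, i.e. code and read-only data, goes into the RX mapping, so the RX block can later be mapped read-only and executable as a whole. A minimal user-space sketch of that predicate follows; pick_region() and the sample section headers are illustrative, not part of the patch.

#include <stdio.h>
#include <elf.h>

/* The rule applied at each get_offset() call site above: writable or
 * non-resident sections land in the RW block, the rest in the RX block. */
static const char *pick_region(const Elf64_Shdr *s)
{
        if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
                return "rw";
        return "rx";
}

int main(void)
{
        Elf64_Shdr text   = { .sh_flags = SHF_ALLOC | SHF_EXECINSTR };
        Elf64_Shdr rodata = { .sh_flags = SHF_ALLOC };
        Elf64_Shdr data   = { .sh_flags = SHF_ALLOC | SHF_WRITE };

        printf(".text   -> %s\n", pick_region(&text));   /* rx */
        printf(".rodata -> %s\n", pick_region(&rodata)); /* rx */
        printf(".data   -> %s\n", pick_region(&data));   /* rw */
        return 0;
}
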
91740diff --git a/kernel/notifier.c b/kernel/notifier.c
91741index ae9fc7c..5085fbf 100644
91742--- a/kernel/notifier.c
91743+++ b/kernel/notifier.c
91744@@ -5,6 +5,7 @@
91745 #include <linux/rcupdate.h>
91746 #include <linux/vmalloc.h>
91747 #include <linux/reboot.h>
91748+#include <linux/mm.h>
91749
91750 /*
91751 * Notifier list for kernel code which wants to be called
91752@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91753 while ((*nl) != NULL) {
91754 if (n->priority > (*nl)->priority)
91755 break;
91756- nl = &((*nl)->next);
91757+ nl = (struct notifier_block **)&((*nl)->next);
91758 }
91759- n->next = *nl;
91760+ pax_open_kernel();
91761+ *(const void **)&n->next = *nl;
91762 rcu_assign_pointer(*nl, n);
91763+ pax_close_kernel();
91764 return 0;
91765 }
91766
91767@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91768 return 0;
91769 if (n->priority > (*nl)->priority)
91770 break;
91771- nl = &((*nl)->next);
91772+ nl = (struct notifier_block **)&((*nl)->next);
91773 }
91774- n->next = *nl;
91775+ pax_open_kernel();
91776+ *(const void **)&n->next = *nl;
91777 rcu_assign_pointer(*nl, n);
91778+ pax_close_kernel();
91779 return 0;
91780 }
91781
91782@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91783 {
91784 while ((*nl) != NULL) {
91785 if ((*nl) == n) {
91786+ pax_open_kernel();
91787 rcu_assign_pointer(*nl, n->next);
91788+ pax_close_kernel();
91789 return 0;
91790 }
91791- nl = &((*nl)->next);
91792+ nl = (struct notifier_block **)&((*nl)->next);
91793 }
91794 return -ENOENT;
91795 }
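
The notifier hunks wrap every store to a chain's next pointers in pax_open_kernel()/pax_close_kernel(), because under PaX the registered notifier_block objects are expected to live in read-only memory. A user-space analogue of that bracketing, with mprotect() standing in for the kernel-side mechanism (which flips write protection directly rather than issuing syscalls); purely illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);              /* steady state: read-only */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()" */
        strcpy(p, "updated");                        /* the one sanctioned write */
        mprotect(p, pagesz, PROT_READ);              /* "pax_close_kernel()" */

        puts(p);                                     /* reads stay legal */
        return 0;
}
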
91796diff --git a/kernel/padata.c b/kernel/padata.c
91797index b38bea9..91acfbe 100644
91798--- a/kernel/padata.c
91799+++ b/kernel/padata.c
91800@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91801 * seq_nr mod. number of cpus in use.
91802 */
91803
91804- seq_nr = atomic_inc_return(&pd->seq_nr);
91805+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91806 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91807
91808 return padata_index_to_cpu(pd, cpu_index);
91809@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91810 padata_init_pqueues(pd);
91811 padata_init_squeues(pd);
91812 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91813- atomic_set(&pd->seq_nr, -1);
91814+ atomic_set_unchecked(&pd->seq_nr, -1);
91815 atomic_set(&pd->reorder_objects, 0);
91816 atomic_set(&pd->refcnt, 0);
91817 pd->pinst = pinst;
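
pd->seq_nr is only ever used modulo the number of CPUs in the mask, so wraparound is harmless; converting it to the *_unchecked atomic ops opts it out of PaX REFCOUNT overflow detection, which would otherwise flag the wrap as a refcount bug. A sketch of why the wrap is benign here, in plain C11 atomics rather than the kernel API:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        atomic_uint seq = UINT_MAX - 1;    /* counter about to wrap */
        unsigned int ncpus = 4;
        int i;

        for (i = 0; i < 4; i++) {
                /* atomic_inc_return() equivalent: old value plus one */
                unsigned int s = atomic_fetch_add(&seq, 1) + 1;
                printf("seq %u -> cpu %u\n", s, s % ncpus);  /* index stays valid */
        }
        return 0;
}
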
91818diff --git a/kernel/panic.c b/kernel/panic.c
91819index 8136ad7..15c857b 100644
91820--- a/kernel/panic.c
91821+++ b/kernel/panic.c
91822@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91823 /*
91824 * Stop ourself in panic -- architecture code may override this
91825 * Stop ourselves in panic -- architecture code may override this
91826-void __weak panic_smp_self_stop(void)
91827+void __weak __noreturn panic_smp_self_stop(void)
91828 {
91829 while (1)
91830 cpu_relax();
91831@@ -425,7 +425,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91832 disable_trace_on_warning();
91833
91834 pr_warn("------------[ cut here ]------------\n");
91835- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91836+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91837 raw_smp_processor_id(), current->pid, file, line, caller);
91838
91839 if (args)
91840@@ -490,7 +490,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91841 */
91842 __visible void __stack_chk_fail(void)
91843 {
91844- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91845+ dump_stack();
91846+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91847 __builtin_return_address(0));
91848 }
91849 EXPORT_SYMBOL(__stack_chk_fail);
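
The hardened __stack_chk_fail() above now dumps the stack and formats __builtin_return_address(0) with %pA, the symbolizing specifier this patch uses in place of raw %p. For readers unfamiliar with the builtin, a minimal illustration of what it yields:

#include <stdio.h>

__attribute__((noinline)) static void report(void)
{
        /* The address the current function will return to, i.e. the
         * call site: what the panic message above prints. */
        printf("called from %p\n", __builtin_return_address(0));
}

int main(void)
{
        report();
        return 0;
}
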
91850diff --git a/kernel/pid.c b/kernel/pid.c
91851index cd36a5e..11f185d 100644
91852--- a/kernel/pid.c
91853+++ b/kernel/pid.c
91854@@ -33,6 +33,7 @@
91855 #include <linux/rculist.h>
91856 #include <linux/bootmem.h>
91857 #include <linux/hash.h>
91858+#include <linux/security.h>
91859 #include <linux/pid_namespace.h>
91860 #include <linux/init_task.h>
91861 #include <linux/syscalls.h>
91862@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91863
91864 int pid_max = PID_MAX_DEFAULT;
91865
91866-#define RESERVED_PIDS 300
91867+#define RESERVED_PIDS 500
91868
91869 int pid_max_min = RESERVED_PIDS + 1;
91870 int pid_max_max = PID_MAX_LIMIT;
91871@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91872 */
91873 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91874 {
91875+ struct task_struct *task;
91876+
91877 rcu_lockdep_assert(rcu_read_lock_held(),
91878 "find_task_by_pid_ns() needs rcu_read_lock()"
91879 " protection");
91880- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91881+
91882+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91883+
91884+ if (gr_pid_is_chrooted(task))
91885+ return NULL;
91886+
91887+ return task;
91888 }
91889
91890 struct task_struct *find_task_by_vpid(pid_t vnr)
91891@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91892 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91893 }
91894
91895+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91896+{
91897+ rcu_lockdep_assert(rcu_read_lock_held(),
91898+ "find_task_by_vpid_unrestricted() needs rcu_read_lock()"
91899+ " protection");
91900+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91901+}
91902+
91903 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91904 {
91905 struct pid *pid;
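
find_task_by_pid_ns() now filters its result through gr_pid_is_chrooted(), so a chrooted caller cannot look up tasks outside its jail, while find_task_by_vpid_unrestricted() keeps the old unfiltered behaviour for the few callers that need it. The shape of that lookup-plus-policy-filter split, as a standalone mock in which struct task, raw_lookup() and the hidden flag are all hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool hidden_from_caller; };

static struct task tasks[] = { { 1, false }, { 42, true } };

/* The unrestricted variant: no policy applied. */
static struct task *raw_lookup(int pid)
{
        size_t i;

        for (i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                if (tasks[i].pid == pid)
                        return &tasks[i];
        return NULL;
}

/* The default variant: the result is vetted before it is handed back. */
static struct task *lookup(int pid)
{
        struct task *t = raw_lookup(pid);

        if (t && t->hidden_from_caller)  /* stands in for gr_pid_is_chrooted() */
                return NULL;
        return t;
}

int main(void)
{
        printf("pid 1:  %s\n", lookup(1)  ? "visible" : "hidden");
        printf("pid 42: %s\n", lookup(42) ? "visible" : "hidden");
        return 0;
}
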
91906diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91907index a65ba13..f600dbb 100644
91908--- a/kernel/pid_namespace.c
91909+++ b/kernel/pid_namespace.c
91910@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91911 void __user *buffer, size_t *lenp, loff_t *ppos)
91912 {
91913 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91914- struct ctl_table tmp = *table;
91915+ ctl_table_no_const tmp = *table;
91916
91917 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91918 return -EPERM;
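
ctl_table_no_const belongs to the same constify scheme seen elsewhere in the patch: the registered sysctl table is read-only for its whole lifetime, so a handler that wants a per-call variant copies the entry into a mutable local first. The pattern in miniature; struct ctl_entry and its fields are illustrative:

#include <stdio.h>

struct ctl_entry { const char *name; int maxlen; };

/* The registered table: never written after initialization. */
static const struct ctl_entry pid_max_entry = { "pid_max", 6 };

int main(void)
{
        struct ctl_entry tmp = pid_max_entry;  /* mutable per-call copy */

        tmp.maxlen = 8;                        /* adjust the copy only */
        printf("%s: maxlen=%d (registered entry still %d)\n",
               tmp.name, tmp.maxlen, pid_max_entry.maxlen);
        return 0;
}
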
91919diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91920index 7e01f78..f5da19d 100644
91921--- a/kernel/power/Kconfig
91922+++ b/kernel/power/Kconfig
91923@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91924 config HIBERNATION
91925 bool "Hibernation (aka 'suspend to disk')"
91926 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91927+ depends on !GRKERNSEC_KMEM
91928+ depends on !PAX_MEMORY_SANITIZE
91929 select HIBERNATE_CALLBACKS
91930 select LZO_COMPRESS
91931 select LZO_DECOMPRESS
91932diff --git a/kernel/power/process.c b/kernel/power/process.c
91933index 564f786..361a18e 100644
91934--- a/kernel/power/process.c
91935+++ b/kernel/power/process.c
91936@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91937 unsigned int elapsed_msecs;
91938 bool wakeup = false;
91939 int sleep_usecs = USEC_PER_MSEC;
91940+ bool timedout = false;
91941
91942 do_gettimeofday(&start);
91943
91944@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91945
91946 while (true) {
91947 todo = 0;
91948+ if (time_after(jiffies, end_time))
91949+ timedout = true;
91950 read_lock(&tasklist_lock);
91951 for_each_process_thread(g, p) {
91952 if (p == current || !freeze_task(p))
91953 continue;
91954
91955- if (!freezer_should_skip(p))
91956+ if (!freezer_should_skip(p)) {
91957 todo++;
91958+ if (timedout) {
91959+ printk(KERN_ERR "Task refusing to freeze:\n");
91960+ sched_show_task(p);
91961+ }
91962+ }
91963 }
91964 read_unlock(&tasklist_lock);
91965
91966@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91967 todo += wq_busy;
91968 }
91969
91970- if (!todo || time_after(jiffies, end_time))
91971+ if (!todo || timedout)
91972 break;
91973
91974 if (pm_wakeup_pending()) {
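
The try_to_freeze_tasks() change samples the deadline once at the top of each pass and latches it in timedout, so the final pass still scans the whole task list and names every task that refused to freeze before the loop gives up; previously the time check sat only in the exit condition and the loop broke out silently. A control-flow sketch of that last-pass-reports idiom, with fake task names and an already-expired deadline:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        const char *stuck[] = { "kworker/u8:1", "usb-storage" };
        time_t end_time = time(NULL) - 1;   /* deadline already passed */
        bool timedout = false;
        int todo, i;

        do {
                todo = 0;
                if (time(NULL) > end_time)  /* checked once per pass */
                        timedout = true;
                for (i = 0; i < 2; i++) {
                        todo++;             /* task still not frozen */
                        if (timedout)
                                printf("Task refusing to freeze: %s\n",
                                       stuck[i]);
                }
                /* the old code exited here without the report above */
        } while (todo && !timedout);
        return 0;
}
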
91975diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91976index bb0635b..9aff9f3 100644
91977--- a/kernel/printk/printk.c
91978+++ b/kernel/printk/printk.c
91979@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91980 if (from_file && type != SYSLOG_ACTION_OPEN)
91981 return 0;
91982
91983+#ifdef CONFIG_GRKERNSEC_DMESG
91984+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91985+ return -EPERM;
91986+#endif
91987+
91988 if (syslog_action_restricted(type)) {
91989 if (capable(CAP_SYSLOG))
91990 return 0;
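
The added check in check_syslog_permissions() is a gate layered in front of the stock policy: with the grsec_enable_dmesg toggle on, a caller holding neither CAP_SYSLOG nor CAP_SYS_ADMIN is denied before the normal restriction logic even runs, so the hook can only tighten access, never loosen it. A sketch of that early-deny layering, with hypothetical stand-ins for the capability tests:

#include <stdbool.h>
#include <stdio.h>

static bool grsec_enable_dmesg = true;   /* stand-in for the sysctl */

static int check_dmesg(bool cap_syslog, bool cap_sys_admin)
{
        if (grsec_enable_dmesg && !cap_syslog && !cap_sys_admin)
                return -1;               /* -EPERM: denied up front */
        return 0;                        /* fall through to stock checks */
}

int main(void)
{
        printf("unprivileged caller: %d\n", check_dmesg(false, false));
        printf("CAP_SYSLOG caller:   %d\n", check_dmesg(true, false));
        return 0;
}
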
91991diff --git a/kernel/profile.c b/kernel/profile.c
91992index a7bcd28..5b368fa 100644
91993--- a/kernel/profile.c
91994+++ b/kernel/profile.c
91995@@ -37,7 +37,7 @@ struct profile_hit {
91996 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91997 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91998
91999-static atomic_t *prof_buffer;
92000+static atomic_unchecked_t *prof_buffer;
92001 static unsigned long prof_len, prof_shift;
92002
92003 int prof_on __read_mostly;
92004@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
92005 hits[i].pc = 0;
92006 continue;
92007 }
92008- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92009+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92010 hits[i].hits = hits[i].pc = 0;
92011 }
92012 }
92013@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
92014 * Add the current hit(s) and flush the write-queue out
92015 * to the global buffer:
92016 */
92017- atomic_add(nr_hits, &prof_buffer[pc]);
92018+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
92019 for (i = 0; i < NR_PROFILE_HIT; ++i) {
92020- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92021+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92022 hits[i].pc = hits[i].hits = 0;
92023 }
92024 out:
92025@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
92026 {
92027 unsigned long pc;
92028 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
92029- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92030+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92031 }
92032 #endif /* !CONFIG_SMP */
92033
92034@@ -489,7 +489,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
92035 return -EFAULT;
92036 buf++; p++; count--; read++;
92037 }
92038- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
92039+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
92040 if (copy_to_user(buf, (void *)pnt, count))
92041 return -EFAULT;
92042 read += count;
92043@@ -520,7 +520,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
92044 }
92045 #endif
92046 profile_discard_flip_buffers();
92047- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
92048+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
92049 return count;
92050 }
92051
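
Aside from the *_unchecked conversion, the profile path shows the bucketing these counters feed: a program counter is offset from the start of kernel text, right-shifted by prof_shift, and clamped to the last bucket, which is exactly the min(pc, prof_len - 1) above. A compressed model of that histogram, with made-up geometry:

#include <stdio.h>

#define PROF_LEN   8
#define PROF_SHIFT 4                     /* 16-byte buckets, illustrative */

static unsigned long prof_buffer[PROF_LEN];

static void profile_hit(unsigned long pc, unsigned long stext)
{
        unsigned long idx = (pc - stext) >> PROF_SHIFT;

        if (idx > PROF_LEN - 1)          /* min(pc, prof_len - 1) */
                idx = PROF_LEN - 1;
        prof_buffer[idx]++;
}

int main(void)
{
        unsigned long stext = 0x1000;
        int i;

        profile_hit(0x1004, stext);      /* bucket 0 */
        profile_hit(0x1050, stext);      /* bucket 5 */
        profile_hit(0x9999, stext);      /* clamped into the last bucket */
        for (i = 0; i < PROF_LEN; i++)
                printf("bucket %d: %lu\n", i, prof_buffer[i]);
        return 0;
}
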
92052diff --git a/kernel/ptrace.c b/kernel/ptrace.c
92053index 227fec3..3aea55b 100644
92054--- a/kernel/ptrace.c
92055+++ b/kernel/ptrace.c
92056@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
92057 if (seize)
92058 flags |= PT_SEIZED;
92059 rcu_read_lock();
92060- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92061+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92062 flags |= PT_PTRACE_CAP;
92063 rcu_read_unlock();
92064 task->ptrace = flags;
92065@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
92066 break;
92067 return -EIO;
92068 }
92069- if (copy_to_user(dst, buf, retval))
92070+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
92071 return -EFAULT;
92072 copied += retval;
92073 src += retval;
92074@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
92075 bool seized = child->ptrace & PT_SEIZED;
92076 int ret = -EIO;
92077 siginfo_t siginfo, *si;
92078- void __user *datavp = (void __user *) data;
92079+ void __user *datavp = (__force void __user *) data;
92080 unsigned long __user *datalp = datavp;
92081 unsigned long flags;
92082
92083@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
92084 goto out;
92085 }
92086
92087+ if (gr_handle_ptrace(child, request)) {
92088+ ret = -EPERM;
92089+ goto out_put_task_struct;
92090+ }
92091+
92092 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92093 ret = ptrace_attach(child, request, addr, data);
92094 /*
92095 * Some architectures need to do book-keeping after
92096 * a ptrace attach.
92097 */
92098- if (!ret)
92099+ if (!ret) {
92100 arch_ptrace_attach(child);
92101+ gr_audit_ptrace(child);
92102+ }
92103 goto out_put_task_struct;
92104 }
92105
92106@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
92107 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
92108 if (copied != sizeof(tmp))
92109 return -EIO;
92110- return put_user(tmp, (unsigned long __user *)data);
92111+ return put_user(tmp, (__force unsigned long __user *)data);
92112 }
92113
92114 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
92115@@ -1157,7 +1164,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
92116 }
92117
92118 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92119- compat_long_t, addr, compat_long_t, data)
92120+ compat_ulong_t, addr, compat_ulong_t, data)
92121 {
92122 struct task_struct *child;
92123 long ret;
92124@@ -1173,14 +1180,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92125 goto out;
92126 }
92127
92128+ if (gr_handle_ptrace(child, request)) {
92129+ ret = -EPERM;
92130+ goto out_put_task_struct;
92131+ }
92132+
92133 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92134 ret = ptrace_attach(child, request, addr, data);
92135 /*
92136 * Some architectures need to do book-keeping after
92137 * a ptrace attach.
92138 */
92139- if (!ret)
92140+ if (!ret) {
92141 arch_ptrace_attach(child);
92142+ gr_audit_ptrace(child);
92143+ }
92144 goto out_put_task_struct;
92145 }
92146
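
The ptrace_readdata() change re-validates the chunk length against the stack buffer before copy_to_user(), so even a bogus retval from the inner read can never over-read buf. The defensive shape of that copy-out, reduced to user space:

#include <stdio.h>
#include <string.h>

static int copy_chunk(char *dst, const char *buf, size_t bufsz, size_t retval)
{
        if (retval > bufsz)     /* the added bound: length vs. real buffer */
                return -1;      /* -EFAULT in the patch */
        memcpy(dst, buf, retval);
        return 0;
}

int main(void)
{
        char buf[128] = "chunk", dst[128];

        printf("sane length:  %d\n", copy_chunk(dst, buf, sizeof(buf), 6));
        printf("bogus length: %d\n", copy_chunk(dst, buf, sizeof(buf), 4096));
        return 0;
}
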
92147diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
92148index 30d42aa..cac5d66 100644
92149--- a/kernel/rcu/rcutorture.c
92150+++ b/kernel/rcu/rcutorture.c
92151@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92152 rcu_torture_count) = { 0 };
92153 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92154 rcu_torture_batch) = { 0 };
92155-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92156-static atomic_t n_rcu_torture_alloc;
92157-static atomic_t n_rcu_torture_alloc_fail;
92158-static atomic_t n_rcu_torture_free;
92159-static atomic_t n_rcu_torture_mberror;
92160-static atomic_t n_rcu_torture_error;
92161+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92162+static atomic_unchecked_t n_rcu_torture_alloc;
92163+static atomic_unchecked_t n_rcu_torture_alloc_fail;
92164+static atomic_unchecked_t n_rcu_torture_free;
92165+static atomic_unchecked_t n_rcu_torture_mberror;
92166+static atomic_unchecked_t n_rcu_torture_error;
92167 static long n_rcu_torture_barrier_error;
92168 static long n_rcu_torture_boost_ktrerror;
92169 static long n_rcu_torture_boost_rterror;
92170@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
92171 static long n_rcu_torture_timers;
92172 static long n_barrier_attempts;
92173 static long n_barrier_successes;
92174-static atomic_long_t n_cbfloods;
92175+static atomic_long_unchecked_t n_cbfloods;
92176 static struct list_head rcu_torture_removed;
92177
92178 static int rcu_torture_writer_state;
92179@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
92180
92181 spin_lock_bh(&rcu_torture_lock);
92182 if (list_empty(&rcu_torture_freelist)) {
92183- atomic_inc(&n_rcu_torture_alloc_fail);
92184+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
92185 spin_unlock_bh(&rcu_torture_lock);
92186 return NULL;
92187 }
92188- atomic_inc(&n_rcu_torture_alloc);
92189+ atomic_inc_unchecked(&n_rcu_torture_alloc);
92190 p = rcu_torture_freelist.next;
92191 list_del_init(p);
92192 spin_unlock_bh(&rcu_torture_lock);
92193@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
92194 static void
92195 rcu_torture_free(struct rcu_torture *p)
92196 {
92197- atomic_inc(&n_rcu_torture_free);
92198+ atomic_inc_unchecked(&n_rcu_torture_free);
92199 spin_lock_bh(&rcu_torture_lock);
92200 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
92201 spin_unlock_bh(&rcu_torture_lock);
92202@@ -308,7 +308,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
92203 i = rp->rtort_pipe_count;
92204 if (i > RCU_TORTURE_PIPE_LEN)
92205 i = RCU_TORTURE_PIPE_LEN;
92206- atomic_inc(&rcu_torture_wcount[i]);
92207+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92208 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
92209 rp->rtort_mbtest = 0;
92210 return true;
92211@@ -796,7 +796,7 @@ rcu_torture_cbflood(void *arg)
92212 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
92213 do {
92214 schedule_timeout_interruptible(cbflood_inter_holdoff);
92215- atomic_long_inc(&n_cbfloods);
92216+ atomic_long_inc_unchecked(&n_cbfloods);
92217 WARN_ON(signal_pending(current));
92218 for (i = 0; i < cbflood_n_burst; i++) {
92219 for (j = 0; j < cbflood_n_per_burst; j++) {
92220@@ -915,7 +915,7 @@ rcu_torture_writer(void *arg)
92221 i = old_rp->rtort_pipe_count;
92222 if (i > RCU_TORTURE_PIPE_LEN)
92223 i = RCU_TORTURE_PIPE_LEN;
92224- atomic_inc(&rcu_torture_wcount[i]);
92225+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92226 old_rp->rtort_pipe_count++;
92227 switch (synctype[torture_random(&rand) % nsynctypes]) {
92228 case RTWS_DEF_FREE:
92229@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
92230 return;
92231 }
92232 if (p->rtort_mbtest == 0)
92233- atomic_inc(&n_rcu_torture_mberror);
92234+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92235 spin_lock(&rand_lock);
92236 cur_ops->read_delay(&rand);
92237 n_rcu_torture_timers++;
92238@@ -1111,7 +1111,7 @@ rcu_torture_reader(void *arg)
92239 continue;
92240 }
92241 if (p->rtort_mbtest == 0)
92242- atomic_inc(&n_rcu_torture_mberror);
92243+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92244 cur_ops->read_delay(&rand);
92245 preempt_disable();
92246 pipe_count = p->rtort_pipe_count;
92247@@ -1180,11 +1180,11 @@ rcu_torture_stats_print(void)
92248 rcu_torture_current,
92249 rcu_torture_current_version,
92250 list_empty(&rcu_torture_freelist),
92251- atomic_read(&n_rcu_torture_alloc),
92252- atomic_read(&n_rcu_torture_alloc_fail),
92253- atomic_read(&n_rcu_torture_free));
92254+ atomic_read_unchecked(&n_rcu_torture_alloc),
92255+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
92256+ atomic_read_unchecked(&n_rcu_torture_free));
92257 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
92258- atomic_read(&n_rcu_torture_mberror),
92259+ atomic_read_unchecked(&n_rcu_torture_mberror),
92260 n_rcu_torture_boost_ktrerror,
92261 n_rcu_torture_boost_rterror);
92262 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
92263@@ -1196,17 +1196,17 @@ rcu_torture_stats_print(void)
92264 n_barrier_successes,
92265 n_barrier_attempts,
92266 n_rcu_torture_barrier_error);
92267- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
92268+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
92269
92270 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
92271- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
92272+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
92273 n_rcu_torture_barrier_error != 0 ||
92274 n_rcu_torture_boost_ktrerror != 0 ||
92275 n_rcu_torture_boost_rterror != 0 ||
92276 n_rcu_torture_boost_failure != 0 ||
92277 i > 1) {
92278 pr_cont("%s", "!!! ");
92279- atomic_inc(&n_rcu_torture_error);
92280+ atomic_inc_unchecked(&n_rcu_torture_error);
92281 WARN_ON_ONCE(1);
92282 }
92283 pr_cont("Reader Pipe: ");
92284@@ -1223,7 +1223,7 @@ rcu_torture_stats_print(void)
92285 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
92286 pr_cont("Free-Block Circulation: ");
92287 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92288- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
92289+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
92290 }
92291 pr_cont("\n");
92292
92293@@ -1570,7 +1570,7 @@ rcu_torture_cleanup(void)
92294
92295 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
92296
92297- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92298+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92299 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
92300 else if (torture_onoff_failures())
92301 rcu_torture_print_module_parms(cur_ops,
92302@@ -1695,18 +1695,18 @@ rcu_torture_init(void)
92303
92304 rcu_torture_current = NULL;
92305 rcu_torture_current_version = 0;
92306- atomic_set(&n_rcu_torture_alloc, 0);
92307- atomic_set(&n_rcu_torture_alloc_fail, 0);
92308- atomic_set(&n_rcu_torture_free, 0);
92309- atomic_set(&n_rcu_torture_mberror, 0);
92310- atomic_set(&n_rcu_torture_error, 0);
92311+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
92312+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
92313+ atomic_set_unchecked(&n_rcu_torture_free, 0);
92314+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
92315+ atomic_set_unchecked(&n_rcu_torture_error, 0);
92316 n_rcu_torture_barrier_error = 0;
92317 n_rcu_torture_boost_ktrerror = 0;
92318 n_rcu_torture_boost_rterror = 0;
92319 n_rcu_torture_boost_failure = 0;
92320 n_rcu_torture_boosts = 0;
92321 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
92322- atomic_set(&rcu_torture_wcount[i], 0);
92323+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
92324 for_each_possible_cpu(cpu) {
92325 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92326 per_cpu(rcu_torture_count, cpu)[i] = 0;
92327diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
92328index cc9ceca..ce075a6 100644
92329--- a/kernel/rcu/tiny.c
92330+++ b/kernel/rcu/tiny.c
92331@@ -42,7 +42,7 @@
92332 /* Forward declarations for tiny_plugin.h. */
92333 struct rcu_ctrlblk;
92334 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
92335-static void rcu_process_callbacks(struct softirq_action *unused);
92336+static void rcu_process_callbacks(void);
92337 static void __call_rcu(struct rcu_head *head,
92338 void (*func)(struct rcu_head *rcu),
92339 struct rcu_ctrlblk *rcp);
92340@@ -210,7 +210,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
92341 false));
92342 }
92343
92344-static void rcu_process_callbacks(struct softirq_action *unused)
92345+static __latent_entropy void rcu_process_callbacks(void)
92346 {
92347 __rcu_process_callbacks(&rcu_sched_ctrlblk);
92348 __rcu_process_callbacks(&rcu_bh_ctrlblk);
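
Re-declaring rcu_process_callbacks() to take void (here and in tree.c below) matches a patch-wide change to the softirq handler signature: the struct softirq_action argument was never used, so the dispatch table no longer hands handlers a kernel pointer, and __latent_entropy lets the gcc plugin stir entropy on each invocation. The signature tightening as a tiny mock dispatch table; the handler names are illustrative user-space stand-ins:

#include <stdio.h>

/* was: void (*action)(struct softirq_action *) */
typedef void (*softirq_handler)(void);

static void rcu_cb(void) { puts("rcu callbacks"); }
static void net_cb(void) { puts("net rx"); }

static softirq_handler actions[] = { rcu_cb, net_cb };

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
                actions[i]();   /* nothing to pass, nothing to leak */
        return 0;
}
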
92349diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
92350index f94e209..d2985bd 100644
92351--- a/kernel/rcu/tiny_plugin.h
92352+++ b/kernel/rcu/tiny_plugin.h
92353@@ -150,10 +150,10 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
92354 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
92355 jiffies - rcp->gp_start, rcp->qlen);
92356 dump_stack();
92357- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
92358+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
92359 3 * rcu_jiffies_till_stall_check() + 3;
92360 } else if (ULONG_CMP_GE(j, js)) {
92361- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92362+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92363 }
92364 }
92365
92366@@ -161,7 +161,7 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
92367 {
92368 rcp->ticks_this_gp = 0;
92369 rcp->gp_start = jiffies;
92370- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92371+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92372 }
92373
92374 static void check_cpu_stalls(void)
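
The ACCESS_ONCE to ACCESS_ONCE_RW conversions in this header and throughout kernel/rcu/tree.c below follow from PaX making ACCESS_ONCE yield a const-qualified view, so an unaudited write through it fails to compile; ACCESS_ONCE_RW is the writable variant reserved for deliberate stores. A compilable sketch modeled on, not copied from, those macros:

#include <stdio.h>

/* Read-only view: a store through ACCESS_ONCE() is a compile error. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
/* Writable view: for stores the author has explicitly audited. */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long jiffies_stall;

int main(void)
{
        ACCESS_ONCE_RW(jiffies_stall) = 42;           /* deliberate write */
        printf("%lu\n", ACCESS_ONCE(jiffies_stall));  /* volatile read */
        /* ACCESS_ONCE(jiffies_stall) = 0;  <- would not compile */
        return 0;
}
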
92375diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
92376index 48d640c..9401d30 100644
92377--- a/kernel/rcu/tree.c
92378+++ b/kernel/rcu/tree.c
92379@@ -268,7 +268,7 @@ static void rcu_momentary_dyntick_idle(void)
92380 */
92381 rdtp = this_cpu_ptr(&rcu_dynticks);
92382 smp_mb__before_atomic(); /* Earlier stuff before QS. */
92383- atomic_add(2, &rdtp->dynticks); /* QS. */
92384+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
92385 smp_mb__after_atomic(); /* Later stuff after QS. */
92386 break;
92387 }
92388@@ -580,9 +580,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
92389 rcu_prepare_for_idle();
92390 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92391 smp_mb__before_atomic(); /* See above. */
92392- atomic_inc(&rdtp->dynticks);
92393+ atomic_inc_unchecked(&rdtp->dynticks);
92394 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
92395- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92396+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92397 rcu_dynticks_task_enter();
92398
92399 /*
92400@@ -703,10 +703,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
92401
92402 rcu_dynticks_task_exit();
92403 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
92404- atomic_inc(&rdtp->dynticks);
92405+ atomic_inc_unchecked(&rdtp->dynticks);
92406 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92407 smp_mb__after_atomic(); /* See above. */
92408- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92409+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92410 rcu_cleanup_after_idle();
92411 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
92412 if (!user && !is_idle_task(current)) {
92413@@ -840,12 +840,12 @@ void rcu_nmi_enter(void)
92414 * to be in the outermost NMI handler that interrupted an RCU-idle
92415 * period (observation due to Andy Lutomirski).
92416 */
92417- if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
92418+ if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
92419 smp_mb__before_atomic(); /* Force delay from prior write. */
92420- atomic_inc(&rdtp->dynticks);
92421+ atomic_inc_unchecked(&rdtp->dynticks);
92422 /* atomic_inc() before later RCU read-side crit sects */
92423 smp_mb__after_atomic(); /* See above. */
92424- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92425+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92426 incby = 1;
92427 }
92428 rdtp->dynticks_nmi_nesting += incby;
92429@@ -870,7 +870,7 @@ void rcu_nmi_exit(void)
92430 * to us!)
92431 */
92432 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
92433- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92434+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92435
92436 /*
92437 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
92438@@ -885,9 +885,9 @@ void rcu_nmi_exit(void)
92439 rdtp->dynticks_nmi_nesting = 0;
92440 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92441 smp_mb__before_atomic(); /* See above. */
92442- atomic_inc(&rdtp->dynticks);
92443+ atomic_inc_unchecked(&rdtp->dynticks);
92444 smp_mb__after_atomic(); /* Force delay to next write. */
92445- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92446+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92447 }
92448
92449 /**
92450@@ -900,7 +900,7 @@ void rcu_nmi_exit(void)
92451 */
92452 bool notrace __rcu_is_watching(void)
92453 {
92454- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92455+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92456 }
92457
92458 /**
92459@@ -983,7 +983,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
92460 static int dyntick_save_progress_counter(struct rcu_data *rdp,
92461 bool *isidle, unsigned long *maxj)
92462 {
92463- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
92464+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92465 rcu_sysidle_check_cpu(rdp, isidle, maxj);
92466 if ((rdp->dynticks_snap & 0x1) == 0) {
92467 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
92468@@ -991,7 +991,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
92469 } else {
92470 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
92471 rdp->mynode->gpnum))
92472- ACCESS_ONCE(rdp->gpwrap) = true;
92473+ ACCESS_ONCE_RW(rdp->gpwrap) = true;
92474 return 0;
92475 }
92476 }
92477@@ -1009,7 +1009,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92478 int *rcrmp;
92479 unsigned int snap;
92480
92481- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
92482+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92483 snap = (unsigned int)rdp->dynticks_snap;
92484
92485 /*
92486@@ -1072,10 +1072,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92487 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
92488 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
92489 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
92490- ACCESS_ONCE(rdp->cond_resched_completed) =
92491+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
92492 ACCESS_ONCE(rdp->mynode->completed);
92493 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
92494- ACCESS_ONCE(*rcrmp) =
92495+ ACCESS_ONCE_RW(*rcrmp) =
92496 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
92497 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
92498 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
92499@@ -1097,7 +1097,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
92500 rsp->gp_start = j;
92501 smp_wmb(); /* Record start time before stall time. */
92502 j1 = rcu_jiffies_till_stall_check();
92503- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
92504+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
92505 rsp->jiffies_resched = j + j1 / 2;
92506 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
92507 }
92508@@ -1156,7 +1156,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
92509 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92510 return;
92511 }
92512- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92513+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92514 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92515
92516 /*
92517@@ -1240,7 +1240,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92518
92519 raw_spin_lock_irqsave(&rnp->lock, flags);
92520 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92521- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92522+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92523 3 * rcu_jiffies_till_stall_check() + 3;
92524 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92525
92526@@ -1324,7 +1324,7 @@ void rcu_cpu_stall_reset(void)
92527 struct rcu_state *rsp;
92528
92529 for_each_rcu_flavor(rsp)
92530- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92531+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92532 }
92533
92534 /*
92535@@ -1671,7 +1671,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
92536 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
92537 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
92538 zero_cpu_stall_ticks(rdp);
92539- ACCESS_ONCE(rdp->gpwrap) = false;
92540+ ACCESS_ONCE_RW(rdp->gpwrap) = false;
92541 }
92542 return ret;
92543 }
92544@@ -1706,7 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92545 struct rcu_data *rdp;
92546 struct rcu_node *rnp = rcu_get_root(rsp);
92547
92548- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92549+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92550 rcu_bind_gp_kthread();
92551 raw_spin_lock_irq(&rnp->lock);
92552 smp_mb__after_unlock_lock();
92553@@ -1715,7 +1715,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92554 raw_spin_unlock_irq(&rnp->lock);
92555 return 0;
92556 }
92557- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92558+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92559
92560 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92561 /*
92562@@ -1756,9 +1756,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92563 rdp = this_cpu_ptr(rsp->rda);
92564 rcu_preempt_check_blocked_tasks(rnp);
92565 rnp->qsmask = rnp->qsmaskinit;
92566- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92567+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92568 WARN_ON_ONCE(rnp->completed != rsp->completed);
92569- ACCESS_ONCE(rnp->completed) = rsp->completed;
92570+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92571 if (rnp == rdp->mynode)
92572 (void)__note_gp_changes(rsp, rnp, rdp);
92573 rcu_preempt_boost_start_gp(rnp);
92574@@ -1767,7 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92575 rnp->grphi, rnp->qsmask);
92576 raw_spin_unlock_irq(&rnp->lock);
92577 cond_resched_rcu_qs();
92578- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92579+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92580 }
92581
92582 mutex_unlock(&rsp->onoff_mutex);
92583@@ -1784,7 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92584 unsigned long maxj;
92585 struct rcu_node *rnp = rcu_get_root(rsp);
92586
92587- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92588+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92589 rsp->n_force_qs++;
92590 if (fqs_state == RCU_SAVE_DYNTICK) {
92591 /* Collect dyntick-idle snapshots. */
92592@@ -1805,7 +1805,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92593 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92594 raw_spin_lock_irq(&rnp->lock);
92595 smp_mb__after_unlock_lock();
92596- ACCESS_ONCE(rsp->gp_flags) =
92597+ ACCESS_ONCE_RW(rsp->gp_flags) =
92598 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
92599 raw_spin_unlock_irq(&rnp->lock);
92600 }
92601@@ -1823,7 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92602 struct rcu_data *rdp;
92603 struct rcu_node *rnp = rcu_get_root(rsp);
92604
92605- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92606+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92607 raw_spin_lock_irq(&rnp->lock);
92608 smp_mb__after_unlock_lock();
92609 gp_duration = jiffies - rsp->gp_start;
92610@@ -1852,7 +1852,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92611 rcu_for_each_node_breadth_first(rsp, rnp) {
92612 raw_spin_lock_irq(&rnp->lock);
92613 smp_mb__after_unlock_lock();
92614- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92615+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92616 rdp = this_cpu_ptr(rsp->rda);
92617 if (rnp == rdp->mynode)
92618 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92619@@ -1860,7 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92620 nocb += rcu_future_gp_cleanup(rsp, rnp);
92621 raw_spin_unlock_irq(&rnp->lock);
92622 cond_resched_rcu_qs();
92623- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92624+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92625 }
92626 rnp = rcu_get_root(rsp);
92627 raw_spin_lock_irq(&rnp->lock);
92628@@ -1868,14 +1868,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92629 rcu_nocb_gp_set(rnp, nocb);
92630
92631 /* Declare grace period done. */
92632- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92633+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92634 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92635 rsp->fqs_state = RCU_GP_IDLE;
92636 rdp = this_cpu_ptr(rsp->rda);
92637 /* Advance CBs to reduce false positives below. */
92638 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92639 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92640- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92641+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92642 trace_rcu_grace_period(rsp->name,
92643 ACCESS_ONCE(rsp->gpnum),
92644 TPS("newreq"));
92645@@ -1910,7 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
92646 if (rcu_gp_init(rsp))
92647 break;
92648 cond_resched_rcu_qs();
92649- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92650+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92651 WARN_ON(signal_pending(current));
92652 trace_rcu_grace_period(rsp->name,
92653 ACCESS_ONCE(rsp->gpnum),
92654@@ -1954,11 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
92655 ACCESS_ONCE(rsp->gpnum),
92656 TPS("fqsend"));
92657 cond_resched_rcu_qs();
92658- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92659+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92660 } else {
92661 /* Deal with stray signal. */
92662 cond_resched_rcu_qs();
92663- ACCESS_ONCE(rsp->gp_activity) = jiffies;
92664+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
92665 WARN_ON(signal_pending(current));
92666 trace_rcu_grace_period(rsp->name,
92667 ACCESS_ONCE(rsp->gpnum),
92668@@ -2003,7 +2003,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92669 */
92670 return false;
92671 }
92672- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92673+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92674 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92675 TPS("newreq"));
92676
92677@@ -2228,7 +2228,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92678 rsp->qlen += rdp->qlen;
92679 rdp->n_cbs_orphaned += rdp->qlen;
92680 rdp->qlen_lazy = 0;
92681- ACCESS_ONCE(rdp->qlen) = 0;
92682+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92683 }
92684
92685 /*
92686@@ -2490,7 +2490,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92687 }
92688 smp_mb(); /* List handling before counting for rcu_barrier(). */
92689 rdp->qlen_lazy -= count_lazy;
92690- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92691+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92692 rdp->n_cbs_invoked += count;
92693
92694 /* Reinstate batch limit if we have worked down the excess. */
92695@@ -2647,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92696 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92697 return; /* Someone beat us to it. */
92698 }
92699- ACCESS_ONCE(rsp->gp_flags) =
92700+ ACCESS_ONCE_RW(rsp->gp_flags) =
92701 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
92702 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92703 rcu_gp_kthread_wake(rsp);
92704@@ -2693,7 +2693,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92705 /*
92706 * Do RCU core processing for the current CPU.
92707 */
92708-static void rcu_process_callbacks(struct softirq_action *unused)
92709+static void rcu_process_callbacks(void)
92710 {
92711 struct rcu_state *rsp;
92712
92713@@ -2805,7 +2805,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92714 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92715 if (debug_rcu_head_queue(head)) {
92716 /* Probable double call_rcu(), so leak the callback. */
92717- ACCESS_ONCE(head->func) = rcu_leak_callback;
92718+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92719 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92720 return;
92721 }
92722@@ -2833,7 +2833,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92723 local_irq_restore(flags);
92724 return;
92725 }
92726- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92727+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92728 if (lazy)
92729 rdp->qlen_lazy++;
92730 else
92731@@ -3106,11 +3106,11 @@ void synchronize_sched_expedited(void)
92732 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92733 * course be required on a 64-bit system.
92734 */
92735- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92736+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92737 (ulong)atomic_long_read(&rsp->expedited_done) +
92738 ULONG_MAX / 8)) {
92739 synchronize_sched();
92740- atomic_long_inc(&rsp->expedited_wrap);
92741+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92742 return;
92743 }
92744
92745@@ -3118,12 +3118,12 @@ void synchronize_sched_expedited(void)
92746 * Take a ticket. Note that atomic_inc_return() implies a
92747 * full memory barrier.
92748 */
92749- snap = atomic_long_inc_return(&rsp->expedited_start);
92750+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92751 firstsnap = snap;
92752 if (!try_get_online_cpus()) {
92753 /* CPU hotplug operation in flight, fall back to normal GP. */
92754 wait_rcu_gp(call_rcu_sched);
92755- atomic_long_inc(&rsp->expedited_normal);
92756+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92757 return;
92758 }
92759 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92760@@ -3136,7 +3136,7 @@ void synchronize_sched_expedited(void)
92761 for_each_cpu(cpu, cm) {
92762 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92763
92764- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92765+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92766 cpumask_clear_cpu(cpu, cm);
92767 }
92768 if (cpumask_weight(cm) == 0)
92769@@ -3151,14 +3151,14 @@ void synchronize_sched_expedited(void)
92770 synchronize_sched_expedited_cpu_stop,
92771 NULL) == -EAGAIN) {
92772 put_online_cpus();
92773- atomic_long_inc(&rsp->expedited_tryfail);
92774+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92775
92776 /* Check to see if someone else did our work for us. */
92777 s = atomic_long_read(&rsp->expedited_done);
92778 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92779 /* ensure test happens before caller kfree */
92780 smp_mb__before_atomic(); /* ^^^ */
92781- atomic_long_inc(&rsp->expedited_workdone1);
92782+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92783 free_cpumask_var(cm);
92784 return;
92785 }
92786@@ -3168,7 +3168,7 @@ void synchronize_sched_expedited(void)
92787 udelay(trycount * num_online_cpus());
92788 } else {
92789 wait_rcu_gp(call_rcu_sched);
92790- atomic_long_inc(&rsp->expedited_normal);
92791+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92792 free_cpumask_var(cm);
92793 return;
92794 }
92795@@ -3178,7 +3178,7 @@ void synchronize_sched_expedited(void)
92796 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92797 /* ensure test happens before caller kfree */
92798 smp_mb__before_atomic(); /* ^^^ */
92799- atomic_long_inc(&rsp->expedited_workdone2);
92800+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92801 free_cpumask_var(cm);
92802 return;
92803 }
92804@@ -3193,14 +3193,14 @@ void synchronize_sched_expedited(void)
92805 if (!try_get_online_cpus()) {
92806 /* CPU hotplug operation in flight, use normal GP. */
92807 wait_rcu_gp(call_rcu_sched);
92808- atomic_long_inc(&rsp->expedited_normal);
92809+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92810 free_cpumask_var(cm);
92811 return;
92812 }
92813- snap = atomic_long_read(&rsp->expedited_start);
92814+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92815 smp_mb(); /* ensure read is before try_stop_cpus(). */
92816 }
92817- atomic_long_inc(&rsp->expedited_stoppedcpus);
92818+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92819
92820 all_cpus_idle:
92821 free_cpumask_var(cm);
92822@@ -3212,16 +3212,16 @@ all_cpus_idle:
92823 * than we did already did their update.
92824 */
92825 do {
92826- atomic_long_inc(&rsp->expedited_done_tries);
92827+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92828 s = atomic_long_read(&rsp->expedited_done);
92829 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92830 /* ensure test happens before caller kfree */
92831 smp_mb__before_atomic(); /* ^^^ */
92832- atomic_long_inc(&rsp->expedited_done_lost);
92833+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92834 break;
92835 }
92836 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92837- atomic_long_inc(&rsp->expedited_done_exit);
92838+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92839
92840 put_online_cpus();
92841 }
92842@@ -3431,7 +3431,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92843 * ACCESS_ONCE() to prevent the compiler from speculating
92844 * the increment to precede the early-exit check.
92845 */
92846- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92847+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92848 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92849 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92850 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92851@@ -3487,7 +3487,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92852
92853 /* Increment ->n_barrier_done to prevent duplicate work. */
92854 smp_mb(); /* Keep increment after above mechanism. */
92855- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92856+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92857 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92858 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92859 smp_mb(); /* Keep increment before caller's subsequent code. */
92860@@ -3532,7 +3532,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92861 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92862 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92863 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92864- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92865+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92866 rdp->cpu = cpu;
92867 rdp->rsp = rsp;
92868 rcu_boot_init_nocb_percpu_data(rdp);
92869@@ -3565,8 +3565,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92870 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92871 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92872 rcu_sysidle_init_percpu_data(rdp->dynticks);
92873- atomic_set(&rdp->dynticks->dynticks,
92874- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92875+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92876+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92877 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92878
92879 /* Add CPU to rcu_node bitmasks. */
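
The ACCESS_ONCE -> ACCESS_ONCE_RW conversions above follow a PaX convention used throughout this patch: ACCESS_ONCE() is redefined with a const-qualified cast so that any write through it fails to compile, and ACCESS_ONCE_RW() is the explicit opt-in for intentional writes. A minimal user-space sketch of the split; the macro bodies here are modeled on the usual PaX definitions, not quoted from this patch:

    #include <stdio.h>

    /* reads go through a const volatile lvalue ... */
    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    /* ... writes must say so explicitly */
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned long gp_activity;

    int main(void)
    {
        ACCESS_ONCE_RW(gp_activity) = 42;   /* deliberate write: compiles */
        /* ACCESS_ONCE(gp_activity) = 42;      would be rejected:
           assignment of a read-only location */
        printf("%lu\n", ACCESS_ONCE(gp_activity));
        return 0;
    }

The payoff is that a stray store through the read macro becomes a compile-time error rather than a silent data race.
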
92880diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92881index 119de39..f07d31a 100644
92882--- a/kernel/rcu/tree.h
92883+++ b/kernel/rcu/tree.h
92884@@ -86,11 +86,11 @@ struct rcu_dynticks {
92885 long long dynticks_nesting; /* Track irq/process nesting level. */
92886 /* Process level is worth LLONG_MAX/2. */
92887 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92888- atomic_t dynticks; /* Even value for idle, else odd. */
92889+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92890 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92891 long long dynticks_idle_nesting;
92892 /* irq/process nesting level from idle. */
92893- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92894+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92895 /* "Idle" excludes userspace execution. */
92896 unsigned long dynticks_idle_jiffies;
92897 /* End of last non-NMI non-idle period. */
92898@@ -457,17 +457,17 @@ struct rcu_state {
92899 /* _rcu_barrier(). */
92900 /* End of fields guarded by barrier_mutex. */
92901
92902- atomic_long_t expedited_start; /* Starting ticket. */
92903- atomic_long_t expedited_done; /* Done ticket. */
92904- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92905- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92906- atomic_long_t expedited_workdone1; /* # done by others #1. */
92907- atomic_long_t expedited_workdone2; /* # done by others #2. */
92908- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92909- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92910- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92911- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92912- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92913+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92914+ atomic_long_t expedited_done; /* Done ticket. */
92915+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92916+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92917+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92918+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92919+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92920+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92921+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92922+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92923+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92924
92925 unsigned long jiffies_force_qs; /* Time at which to invoke */
92926 /* force_quiescent_state(). */
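
The atomic_t -> atomic_unchecked_t retyping in this header pairs with the *_unchecked operations used in tree.c above. Under the PaX REFCOUNT feature, ordinary atomic arithmetic is instrumented to trap on overflow; counters whose wraparound is harmless by design, such as these statistics fields and the even/odd dynticks counters, move to the unchecked type and keep plain two's-complement semantics. Note that expedited_done stays checked: it is advanced via atomic_long_cmpxchg() rather than blind increments. A user-space model of the distinction, assuming GCC atomic builtins (the kernel's real per-architecture definitions differ):

    /* model only: under REFCOUNT an atomic_t increment would trap on
     * overflow; atomic_unchecked_t deliberately keeps silent wraparound */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return *(const volatile int *)&v->counter;
    }

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }
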
92927diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92928index 0a571e9..fbfd611 100644
92929--- a/kernel/rcu/tree_plugin.h
92930+++ b/kernel/rcu/tree_plugin.h
92931@@ -619,7 +619,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92932 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92933 {
92934 return !rcu_preempted_readers_exp(rnp) &&
92935- ACCESS_ONCE(rnp->expmask) == 0;
92936+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92937 }
92938
92939 /*
92940@@ -780,7 +780,7 @@ void synchronize_rcu_expedited(void)
92941
92942 /* Clean up and exit. */
92943 smp_mb(); /* ensure expedited GP seen before counter increment. */
92944- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92945+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92946 sync_rcu_preempt_exp_count + 1;
92947 unlock_mb_ret:
92948 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92949@@ -1290,7 +1290,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92950 free_cpumask_var(cm);
92951 }
92952
92953-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92954+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92955 .store = &rcu_cpu_kthread_task,
92956 .thread_should_run = rcu_cpu_kthread_should_run,
92957 .thread_fn = rcu_cpu_kthread,
92958@@ -1761,7 +1761,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92959 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92960 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
92961 cpu, ticks_value, ticks_title,
92962- atomic_read(&rdtp->dynticks) & 0xfff,
92963+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92964 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92965 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92966 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
92967@@ -1906,7 +1906,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92968 return;
92969 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92970 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92971- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92972+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92973 wake_up(&rdp_leader->nocb_wq);
92974 }
92975 }
92976@@ -1978,7 +1978,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92977 atomic_long_add(rhcount, &rdp->nocb_q_count);
92978 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
92979 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92980- ACCESS_ONCE(*old_rhpp) = rhp;
92981+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92982 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92983 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92984
92985@@ -2167,7 +2167,7 @@ wait_again:
92986 continue; /* No CBs here, try next follower. */
92987
92988 /* Move callbacks to wait-for-GP list, which is empty. */
92989- ACCESS_ONCE(rdp->nocb_head) = NULL;
92990+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92991 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92992 gotcbs = true;
92993 }
92994@@ -2288,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
92995 list = ACCESS_ONCE(rdp->nocb_follower_head);
92996 BUG_ON(!list);
92997 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92998- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92999+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
93000 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
93001
93002 /* Each pass through the following loop invokes a callback. */
93003@@ -2338,7 +2338,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
93004 if (!rcu_nocb_need_deferred_wakeup(rdp))
93005 return;
93006 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
93007- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
93008+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
93009 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
93010 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
93011 }
93012@@ -2461,7 +2461,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
93013 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
93014 "rcuo%c/%d", rsp->abbr, cpu);
93015 BUG_ON(IS_ERR(t));
93016- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
93017+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
93018 }
93019
93020 /*
93021@@ -2666,11 +2666,11 @@ static void rcu_sysidle_enter(int irq)
93022
93023 /* Record start of fully idle period. */
93024 j = jiffies;
93025- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
93026+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
93027 smp_mb__before_atomic();
93028- atomic_inc(&rdtp->dynticks_idle);
93029+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93030 smp_mb__after_atomic();
93031- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
93032+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
93033 }
93034
93035 /*
93036@@ -2741,9 +2741,9 @@ static void rcu_sysidle_exit(int irq)
93037
93038 /* Record end of idle period. */
93039 smp_mb__before_atomic();
93040- atomic_inc(&rdtp->dynticks_idle);
93041+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93042 smp_mb__after_atomic();
93043- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
93044+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
93045
93046 /*
93047 * If we are the timekeeping CPU, we are permitted to be non-idle
93048@@ -2788,7 +2788,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
93049 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
93050
93051 /* Pick up current idle and NMI-nesting counter and check. */
93052- cur = atomic_read(&rdtp->dynticks_idle);
93053+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
93054 if (cur & 0x1) {
93055 *isidle = false; /* We are not idle! */
93056 return;
93057@@ -2837,7 +2837,7 @@ static void rcu_sysidle(unsigned long j)
93058 case RCU_SYSIDLE_NOT:
93059
93060 /* First time all are idle, so note a short idle period. */
93061- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93062+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93063 break;
93064
93065 case RCU_SYSIDLE_SHORT:
93066@@ -2875,7 +2875,7 @@ static void rcu_sysidle_cancel(void)
93067 {
93068 smp_mb();
93069 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
93070- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
93071+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
93072 }
93073
93074 /*
93075@@ -2927,7 +2927,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
93076 smp_mb(); /* grace period precedes setting inuse. */
93077
93078 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
93079- ACCESS_ONCE(rshp->inuse) = 0;
93080+ ACCESS_ONCE_RW(rshp->inuse) = 0;
93081 }
93082
93083 /*
93084@@ -3080,7 +3080,7 @@ static void rcu_bind_gp_kthread(void)
93085 static void rcu_dynticks_task_enter(void)
93086 {
93087 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
93088- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
93089+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
93090 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
93091 }
93092
93093@@ -3088,6 +3088,6 @@ static void rcu_dynticks_task_enter(void)
93094 static void rcu_dynticks_task_exit(void)
93095 {
93096 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
93097- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
93098+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
93099 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
93100 }
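
rcu_cpu_thread_spec gaining __read_only is part of the same KERNEXEC/constify scheme seen throughout: objects holding function pointers are sealed against runtime writes once initialization is done, and the rare legitimate update is bracketed by pax_open_kernel()/pax_close_kernel(), as visible later in this patch in kernel/sched/core.c. A runnable user-space analogue using mprotect(), with all names invented for illustration and error handling omitted:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void greet(void) { puts("hello"); }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        void (**table)(void) = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        table[0] = greet;                    /* boot-time registration */
        mprotect(table, pagesz, PROT_READ);  /* seal: the __read_only state */
        table[0]();                          /* calls through it still work */
        /* table[0] = evil;  would now fault -- the overwrite this defeats */
        mprotect(table, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
        table[0] = greet;                                /* sanctioned update  */
        mprotect(table, pagesz, PROT_READ);              /* pax_close_kernel() */
        return 0;
    }
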
93101diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
93102index fbb6240..f6c5097 100644
93103--- a/kernel/rcu/tree_trace.c
93104+++ b/kernel/rcu/tree_trace.c
93105@@ -125,7 +125,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
93106 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
93107 rdp->qs_pending);
93108 seq_printf(m, " dt=%d/%llx/%d df=%lu",
93109- atomic_read(&rdp->dynticks->dynticks),
93110+ atomic_read_unchecked(&rdp->dynticks->dynticks),
93111 rdp->dynticks->dynticks_nesting,
93112 rdp->dynticks->dynticks_nmi_nesting,
93113 rdp->dynticks_fqs);
93114@@ -186,17 +186,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
93115 struct rcu_state *rsp = (struct rcu_state *)m->private;
93116
93117 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
93118- atomic_long_read(&rsp->expedited_start),
93119+ atomic_long_read_unchecked(&rsp->expedited_start),
93120 atomic_long_read(&rsp->expedited_done),
93121- atomic_long_read(&rsp->expedited_wrap),
93122- atomic_long_read(&rsp->expedited_tryfail),
93123- atomic_long_read(&rsp->expedited_workdone1),
93124- atomic_long_read(&rsp->expedited_workdone2),
93125- atomic_long_read(&rsp->expedited_normal),
93126- atomic_long_read(&rsp->expedited_stoppedcpus),
93127- atomic_long_read(&rsp->expedited_done_tries),
93128- atomic_long_read(&rsp->expedited_done_lost),
93129- atomic_long_read(&rsp->expedited_done_exit));
93130+ atomic_long_read_unchecked(&rsp->expedited_wrap),
93131+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
93132+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
93133+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
93134+ atomic_long_read_unchecked(&rsp->expedited_normal),
93135+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
93136+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
93137+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
93138+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
93139 return 0;
93140 }
93141
93142diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
93143index e0d31a3..f4dafe3 100644
93144--- a/kernel/rcu/update.c
93145+++ b/kernel/rcu/update.c
93146@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
93147 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
93148 */
93149 if (till_stall_check < 3) {
93150- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
93151+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
93152 till_stall_check = 3;
93153 } else if (till_stall_check > 300) {
93154- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
93155+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
93156 till_stall_check = 300;
93157 }
93158 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
93159@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
93160 !ACCESS_ONCE(t->on_rq) ||
93161 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
93162 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
93163- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
93164+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
93165 list_del_init(&t->rcu_tasks_holdout_list);
93166 put_task_struct(t);
93167 return;
93168@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
93169 !is_idle_task(t)) {
93170 get_task_struct(t);
93171 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
93172- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
93173+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
93174 list_add(&t->rcu_tasks_holdout_list,
93175 &rcu_tasks_holdouts);
93176 }
93177@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
93178 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
93179 BUG_ON(IS_ERR(t));
93180 smp_mb(); /* Ensure others see full kthread. */
93181- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
93182+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
93183 mutex_unlock(&rcu_tasks_kthread_mutex);
93184 }
93185
93186diff --git a/kernel/resource.c b/kernel/resource.c
93187index 19f2357..ebe7f35 100644
93188--- a/kernel/resource.c
93189+++ b/kernel/resource.c
93190@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
93191
93192 static int __init ioresources_init(void)
93193 {
93194+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93195+#ifdef CONFIG_GRKERNSEC_PROC_USER
93196+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93197+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93198+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93199+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93200+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93201+#endif
93202+#else
93203 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93204 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93205+#endif
93206 return 0;
93207 }
93208 __initcall(ioresources_init);
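
The ioresources_init() hunk narrows who may read /proc/ioports and /proc/iomem: the stock kernel passes mode 0, which proc_create() treats as world-readable 0444, while GRKERNSEC_PROC_ADD makes the files root-only, or root plus a configured group, since the physical memory map is useful to an attacker probing for kernel data. A small sketch of the mode selection made by the #ifdef ladder above, with the helper name invented here:

    #include <sys/stat.h>

    /* hypothetical helper mirroring the CONFIG_GRKERNSEC_PROC_* choices */
    static mode_t restricted_proc_mode(int user_only, int user_group)
    {
        if (user_only)                 /* CONFIG_GRKERNSEC_PROC_USER      */
            return S_IRUSR;            /* 0400 */
        if (user_group)                /* CONFIG_GRKERNSEC_PROC_USERGROUP */
            return S_IRUSR | S_IRGRP;  /* 0440 */
        return 0;                      /* stock: proc_create() widens a 0
                                        * mode to world-readable          */
    }
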
93209diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
93210index eae160d..c9aa22e 100644
93211--- a/kernel/sched/auto_group.c
93212+++ b/kernel/sched/auto_group.c
93213@@ -11,7 +11,7 @@
93214
93215 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
93216 static struct autogroup autogroup_default;
93217-static atomic_t autogroup_seq_nr;
93218+static atomic_unchecked_t autogroup_seq_nr;
93219
93220 void __init autogroup_init(struct task_struct *init_task)
93221 {
93222@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
93223
93224 kref_init(&ag->kref);
93225 init_rwsem(&ag->lock);
93226- ag->id = atomic_inc_return(&autogroup_seq_nr);
93227+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
93228 ag->tg = tg;
93229 #ifdef CONFIG_RT_GROUP_SCHED
93230 /*
93231diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
93232index 8d0f35d..c16360d 100644
93233--- a/kernel/sched/completion.c
93234+++ b/kernel/sched/completion.c
93235@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
93236 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93237 * or number of jiffies left till timeout) if completed.
93238 */
93239-long __sched
93240+long __sched __intentional_overflow(-1)
93241 wait_for_completion_interruptible_timeout(struct completion *x,
93242 unsigned long timeout)
93243 {
93244@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
93245 *
93246 * Return: -ERESTARTSYS if interrupted, 0 if completed.
93247 */
93248-int __sched wait_for_completion_killable(struct completion *x)
93249+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
93250 {
93251 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
93252 if (t == -ERESTARTSYS)
93253@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
93254 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93255 * or number of jiffies left till timeout) if completed.
93256 */
93257-long __sched
93258+long __sched __intentional_overflow(-1)
93259 wait_for_completion_killable_timeout(struct completion *x,
93260 unsigned long timeout)
93261 {
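
__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin: it exempts the marked function's return value from overflow instrumentation, because these completion helpers legitimately carry both negative error codes (-ERESTARTSYS) and remaining-jiffies counts in the same signed long. A sketch of how such an annotation can degrade gracefully when the plugin is absent; the macro body is an assumption, not quoted from this patch:

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)   /* compiles away */
    #endif

    /* mixed-meaning return value: negative errno or remaining jiffies */
    __intentional_overflow(-1)
    long wait_sketch(long timeout)
    {
        return timeout ? timeout - 1 : -512;   /* -512 == -ERESTARTSYS */
    }
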
93262diff --git a/kernel/sched/core.c b/kernel/sched/core.c
93263index 62671f5..7b3505b 100644
93264--- a/kernel/sched/core.c
93265+++ b/kernel/sched/core.c
93266@@ -1847,7 +1847,7 @@ void set_numabalancing_state(bool enabled)
93267 int sysctl_numa_balancing(struct ctl_table *table, int write,
93268 void __user *buffer, size_t *lenp, loff_t *ppos)
93269 {
93270- struct ctl_table t;
93271+ ctl_table_no_const t;
93272 int err;
93273 int state = numabalancing_enabled;
93274
93275@@ -2297,8 +2297,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
93276 next->active_mm = oldmm;
93277 atomic_inc(&oldmm->mm_count);
93278 enter_lazy_tlb(oldmm, next);
93279- } else
93280+ } else {
93281 switch_mm(oldmm, mm, next);
93282+ populate_stack();
93283+ }
93284
93285 if (!prev->mm) {
93286 prev->active_mm = NULL;
93287@@ -3109,6 +3111,8 @@ int can_nice(const struct task_struct *p, const int nice)
93288 /* convert nice value [19,-20] to rlimit style value [1,40] */
93289 int nice_rlim = nice_to_rlimit(nice);
93290
93291+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
93292+
93293 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
93294 capable(CAP_SYS_NICE));
93295 }
93296@@ -3135,7 +3139,8 @@ SYSCALL_DEFINE1(nice, int, increment)
93297 nice = task_nice(current) + increment;
93298
93299 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
93300- if (increment < 0 && !can_nice(current, nice))
93301+ if (increment < 0 && (!can_nice(current, nice) ||
93302+ gr_handle_chroot_nice()))
93303 return -EPERM;
93304
93305 retval = security_task_setnice(current, nice);
93306@@ -3444,6 +3449,7 @@ recheck:
93307 if (policy != p->policy && !rlim_rtprio)
93308 return -EPERM;
93309
93310+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
93311 /* can't increase priority */
93312 if (attr->sched_priority > p->rt_priority &&
93313 attr->sched_priority > rlim_rtprio)
93314@@ -4931,6 +4937,7 @@ void idle_task_exit(void)
93315
93316 if (mm != &init_mm) {
93317 switch_mm(mm, &init_mm, current);
93318+ populate_stack();
93319 finish_arch_post_lock_switch();
93320 }
93321 mmdrop(mm);
93322@@ -5026,7 +5033,7 @@ static void migrate_tasks(unsigned int dead_cpu)
93323
93324 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
93325
93326-static struct ctl_table sd_ctl_dir[] = {
93327+static ctl_table_no_const sd_ctl_dir[] __read_only = {
93328 {
93329 .procname = "sched_domain",
93330 .mode = 0555,
93331@@ -5043,17 +5050,17 @@ static struct ctl_table sd_ctl_root[] = {
93332 {}
93333 };
93334
93335-static struct ctl_table *sd_alloc_ctl_entry(int n)
93336+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
93337 {
93338- struct ctl_table *entry =
93339+ ctl_table_no_const *entry =
93340 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
93341
93342 return entry;
93343 }
93344
93345-static void sd_free_ctl_entry(struct ctl_table **tablep)
93346+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
93347 {
93348- struct ctl_table *entry;
93349+ ctl_table_no_const *entry;
93350
93351 /*
93352 * In the intermediate directories, both the child directory and
93353@@ -5061,22 +5068,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
93354 * will always be set. In the lowest directory the names are
93355 * static strings and all have proc handlers.
93356 */
93357- for (entry = *tablep; entry->mode; entry++) {
93358- if (entry->child)
93359- sd_free_ctl_entry(&entry->child);
93360+ for (entry = tablep; entry->mode; entry++) {
93361+ if (entry->child) {
93362+ sd_free_ctl_entry(entry->child);
93363+ pax_open_kernel();
93364+ entry->child = NULL;
93365+ pax_close_kernel();
93366+ }
93367 if (entry->proc_handler == NULL)
93368 kfree(entry->procname);
93369 }
93370
93371- kfree(*tablep);
93372- *tablep = NULL;
93373+ kfree(tablep);
93374 }
93375
93376 static int min_load_idx = 0;
93377 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
93378
93379 static void
93380-set_table_entry(struct ctl_table *entry,
93381+set_table_entry(ctl_table_no_const *entry,
93382 const char *procname, void *data, int maxlen,
93383 umode_t mode, proc_handler *proc_handler,
93384 bool load_idx)
93385@@ -5096,7 +5106,7 @@ set_table_entry(struct ctl_table *entry,
93386 static struct ctl_table *
93387 sd_alloc_ctl_domain_table(struct sched_domain *sd)
93388 {
93389- struct ctl_table *table = sd_alloc_ctl_entry(14);
93390+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
93391
93392 if (table == NULL)
93393 return NULL;
93394@@ -5134,9 +5144,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
93395 return table;
93396 }
93397
93398-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
93399+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
93400 {
93401- struct ctl_table *entry, *table;
93402+ ctl_table_no_const *entry, *table;
93403 struct sched_domain *sd;
93404 int domain_num = 0, i;
93405 char buf[32];
93406@@ -5163,11 +5173,13 @@ static struct ctl_table_header *sd_sysctl_header;
93407 static void register_sched_domain_sysctl(void)
93408 {
93409 int i, cpu_num = num_possible_cpus();
93410- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
93411+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
93412 char buf[32];
93413
93414 WARN_ON(sd_ctl_dir[0].child);
93415+ pax_open_kernel();
93416 sd_ctl_dir[0].child = entry;
93417+ pax_close_kernel();
93418
93419 if (entry == NULL)
93420 return;
93421@@ -5190,8 +5202,12 @@ static void unregister_sched_domain_sysctl(void)
93422 if (sd_sysctl_header)
93423 unregister_sysctl_table(sd_sysctl_header);
93424 sd_sysctl_header = NULL;
93425- if (sd_ctl_dir[0].child)
93426- sd_free_ctl_entry(&sd_ctl_dir[0].child);
93427+ if (sd_ctl_dir[0].child) {
93428+ sd_free_ctl_entry(sd_ctl_dir[0].child);
93429+ pax_open_kernel();
93430+ sd_ctl_dir[0].child = NULL;
93431+ pax_close_kernel();
93432+ }
93433 }
93434 #else
93435 static void register_sched_domain_sysctl(void)
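
The struct ctl_table -> ctl_table_no_const substitutions follow from the constify plugin: struct ctl_table is made const by default so that static sysctl tables land in read-only memory, which means tables built on the heap at runtime, as the scheduler-domain code does, need an explicitly non-const twin of the same layout. That is also why sd_free_ctl_entry() changes calling convention above: clearing sd_ctl_dir[0].child now happens at the caller, inside a pax_open_kernel() bracket. A user-space analogue of the two-type pattern, all names invented:

    #include <stdlib.h>

    struct entry { const char *procname; int mode; }; /* stand-in for the
                                                       * constified struct */
    typedef struct entry entry_no_const;              /* runtime-built twin */

    /* cf. sd_alloc_ctl_entry(): heap tables must use the writable type */
    static entry_no_const *alloc_entries(int n)
    {
        return calloc(n, sizeof(struct entry));
    }

    int main(void)
    {
        entry_no_const *t = alloc_entries(14);
        t[0].procname = "min_interval";
        t[0].mode = 0644;
        free(t);
        return 0;
    }
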
93436diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
93437index 241213b..6a64c91 100644
93438--- a/kernel/sched/fair.c
93439+++ b/kernel/sched/fair.c
93440@@ -2092,7 +2092,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
93441
93442 static void reset_ptenuma_scan(struct task_struct *p)
93443 {
93444- ACCESS_ONCE(p->mm->numa_scan_seq)++;
93445+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
93446 p->mm->numa_scan_offset = 0;
93447 }
93448
93449@@ -7656,7 +7656,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
93450 * run_rebalance_domains is triggered when needed from the scheduler tick.
93451 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
93452 */
93453-static void run_rebalance_domains(struct softirq_action *h)
93454+static __latent_entropy void run_rebalance_domains(void)
93455 {
93456 struct rq *this_rq = this_rq();
93457 enum cpu_idle_type idle = this_rq->idle_balance ?
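
run_rebalance_domains() picks up __latent_entropy along with the void signature: the latent_entropy GCC plugin instruments marked functions so that each call mixes compile-time-random constants, keyed by the control-flow path actually taken, into a global pool, harvesting cheap boot-time entropy from softirq timing. A crude user-space model of the effect; the constants here are arbitrary placeholders:

    /* a per-function and per-branch constant folded into a global
     * pool on every invocation */
    static unsigned long latent_entropy_pool;

    #define MIX(v) \
        (latent_entropy_pool = latent_entropy_pool * 0x9e3779b97f4a7c15UL + (v))

    static void rebalance_sketch(int idle)
    {
        MIX(0x8f1bbcdcUL);         /* constant for this function    */
        if (idle)
            MIX(0x5be0cd19UL);     /* constant for this basic block */
        /* ... the actual rebalancing work would go here ... */
    }
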
93458diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
93459index dc0f435..ae2e085 100644
93460--- a/kernel/sched/sched.h
93461+++ b/kernel/sched/sched.h
93462@@ -1200,7 +1200,7 @@ struct sched_class {
93463 #ifdef CONFIG_FAIR_GROUP_SCHED
93464 void (*task_move_group) (struct task_struct *p, int on_rq);
93465 #endif
93466-};
93467+} __do_const;
93468
93469 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
93470 {
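
Appending __do_const to struct sched_class makes every instance of the scheduler's ops table const under the constify plugin, moving its function pointers into read-only memory. The idea, minus the plugin, is what a hand-written const already does; a minimal sketch with invented names:

    #include <stdio.h>

    struct class_ops { void (*enqueue)(void); };

    static void enqueue_fair(void) { puts("enqueue"); }

    /* lands in .rodata: a kernel-write primitive cannot retarget ->enqueue */
    static const struct class_ops fair_class = { .enqueue = enqueue_fair };

    int main(void)
    {
        fair_class.enqueue();
        return 0;
    }
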
93471diff --git a/kernel/signal.c b/kernel/signal.c
93472index a390499..ebe9a21 100644
93473--- a/kernel/signal.c
93474+++ b/kernel/signal.c
93475@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
93476
93477 int print_fatal_signals __read_mostly;
93478
93479-static void __user *sig_handler(struct task_struct *t, int sig)
93480+static __sighandler_t sig_handler(struct task_struct *t, int sig)
93481 {
93482 return t->sighand->action[sig - 1].sa.sa_handler;
93483 }
93484
93485-static int sig_handler_ignored(void __user *handler, int sig)
93486+static int sig_handler_ignored(__sighandler_t handler, int sig)
93487 {
93488 /* Is it explicitly or implicitly ignored? */
93489 return handler == SIG_IGN ||
93490@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
93491
93492 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
93493 {
93494- void __user *handler;
93495+ __sighandler_t handler;
93496
93497 handler = sig_handler(t, sig);
93498
93499@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
93500 atomic_inc(&user->sigpending);
93501 rcu_read_unlock();
93502
93503+ if (!override_rlimit)
93504+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
93505+
93506 if (override_rlimit ||
93507 atomic_read(&user->sigpending) <=
93508 task_rlimit(t, RLIMIT_SIGPENDING)) {
93509@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
93510
93511 int unhandled_signal(struct task_struct *tsk, int sig)
93512 {
93513- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
93514+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
93515 if (is_global_init(tsk))
93516 return 1;
93517 if (handler != SIG_IGN && handler != SIG_DFL)
93518@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
93519 }
93520 }
93521
93522+ /* allow glibc communication via tgkill to other threads in our
93523+ thread group */
93524+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
93525+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
93526+ && gr_handle_signal(t, sig))
93527+ return -EPERM;
93528+
93529 return security_task_kill(t, info, sig, 0);
93530 }
93531
93532@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93533 return send_signal(sig, info, p, 1);
93534 }
93535
93536-static int
93537+int
93538 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93539 {
93540 return send_signal(sig, info, t, 0);
93541@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93542 unsigned long int flags;
93543 int ret, blocked, ignored;
93544 struct k_sigaction *action;
93545+ int is_unhandled = 0;
93546
93547 spin_lock_irqsave(&t->sighand->siglock, flags);
93548 action = &t->sighand->action[sig-1];
93549@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93550 }
93551 if (action->sa.sa_handler == SIG_DFL)
93552 t->signal->flags &= ~SIGNAL_UNKILLABLE;
93553+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
93554+ is_unhandled = 1;
93555 ret = specific_send_sig_info(sig, info, t);
93556 spin_unlock_irqrestore(&t->sighand->siglock, flags);
93557
93558+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
93559+	   normal operation */
93560+ if (is_unhandled) {
93561+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
93562+ gr_handle_crash(t, sig);
93563+ }
93564+
93565 return ret;
93566 }
93567
93568@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93569 ret = check_kill_permission(sig, info, p);
93570 rcu_read_unlock();
93571
93572- if (!ret && sig)
93573+ if (!ret && sig) {
93574 ret = do_send_sig_info(sig, info, p, true);
93575+ if (!ret)
93576+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
93577+ }
93578
93579 return ret;
93580 }
93581@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
93582 int error = -ESRCH;
93583
93584 rcu_read_lock();
93585- p = find_task_by_vpid(pid);
93586+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93587+ /* allow glibc communication via tgkill to other threads in our
93588+ thread group */
93589+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93590+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93591+ p = find_task_by_vpid_unrestricted(pid);
93592+ else
93593+#endif
93594+ p = find_task_by_vpid(pid);
93595 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93596 error = check_kill_permission(sig, info, p);
93597 /*
93598@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93599 }
93600 seg = get_fs();
93601 set_fs(KERNEL_DS);
93602- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93603- (stack_t __force __user *) &uoss,
93604+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93605+ (stack_t __force_user *) &uoss,
93606 compat_user_stack_pointer());
93607 set_fs(seg);
93608 if (ret >= 0 && uoss_ptr) {
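
Two independent things happen in the signal.c hunks: the grsecurity hooks (gr_handle_signal(), gr_log_signal(), gr_handle_crash(), and the chroot tgkill carve-out), and a type cleanup in which handler values move from void __user * to __sighandler_t, the proper function-pointer typedef for userland handler entry points, so the SIG_IGN/SIG_DFL comparisons are type-checked. A user-space echo of the typed comparison; simplified, since the kernel's test also consults sig_kernel_ignore():

    #include <signal.h>
    #include <stdio.h>

    typedef void (*handler_fn)(int);   /* stand-in for __sighandler_t */

    static int handler_ignored(handler_fn h, int sig)
    {
        return h == SIG_IGN || (sig == SIGCHLD && h == SIG_DFL);
    }

    int main(void)
    {
        printf("%d\n", handler_ignored(SIG_IGN, SIGTERM));   /* 1 */
        printf("%d\n", handler_ignored(SIG_DFL, SIGTERM));   /* 0 */
        return 0;
    }
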
93609diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93610index 40190f2..8861d40 100644
93611--- a/kernel/smpboot.c
93612+++ b/kernel/smpboot.c
93613@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93614 }
93615 smpboot_unpark_thread(plug_thread, cpu);
93616 }
93617- list_add(&plug_thread->list, &hotplug_threads);
93618+ pax_list_add(&plug_thread->list, &hotplug_threads);
93619 out:
93620 mutex_unlock(&smpboot_threads_lock);
93621 put_online_cpus();
93622@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93623 {
93624 get_online_cpus();
93625 mutex_lock(&smpboot_threads_lock);
93626- list_del(&plug_thread->list);
93627+ pax_list_del(&plug_thread->list);
93628 smpboot_destroy_threads(plug_thread);
93629 mutex_unlock(&smpboot_threads_lock);
93630 put_online_cpus();
93631diff --git a/kernel/softirq.c b/kernel/softirq.c
93632index 479e443..66d845e1 100644
93633--- a/kernel/softirq.c
93634+++ b/kernel/softirq.c
93635@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93636 EXPORT_SYMBOL(irq_stat);
93637 #endif
93638
93639-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93640+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93641
93642 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93643
93644@@ -270,7 +270,7 @@ restart:
93645 kstat_incr_softirqs_this_cpu(vec_nr);
93646
93647 trace_softirq_entry(vec_nr);
93648- h->action(h);
93649+ h->action();
93650 trace_softirq_exit(vec_nr);
93651 if (unlikely(prev_count != preempt_count())) {
93652 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93653@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93654 or_softirq_pending(1UL << nr);
93655 }
93656
93657-void open_softirq(int nr, void (*action)(struct softirq_action *))
93658+void __init open_softirq(int nr, void (*action)(void))
93659 {
93660 softirq_vec[nr].action = action;
93661 }
93662@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93663 }
93664 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93665
93666-static void tasklet_action(struct softirq_action *a)
93667+static void tasklet_action(void)
93668 {
93669 struct tasklet_struct *list;
93670
93671@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
93672 }
93673 }
93674
93675-static void tasklet_hi_action(struct softirq_action *a)
93676+static __latent_entropy void tasklet_hi_action(void)
93677 {
93678 struct tasklet_struct *list;
93679
93680@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
93681 .notifier_call = cpu_callback
93682 };
93683
93684-static struct smp_hotplug_thread softirq_threads = {
93685+static struct smp_hotplug_thread softirq_threads __read_only = {
93686 .store = &ksoftirqd,
93687 .thread_should_run = ksoftirqd_should_run,
93688 .thread_fn = run_ksoftirqd,
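
The softirq changes are all one mechanism: every handler drops its never-used struct softirq_action * argument so the whole vector shares a single void (*)(void) prototype, open_softirq() becomes __init-only, and softirq_vec itself is page-aligned and __read_only, so after boot the function-pointer table can no longer serve as a write target for hijacking control flow. A reduced, runnable sketch of the unified registration, names shortened:

    typedef void (*softirq_fn)(void); /* was void (*)(struct softirq_action *) */

    #define NR_SOFTIRQS_SKETCH 10

    static softirq_fn vec[NR_SOFTIRQS_SKETCH];   /* __read_only after init */

    /* __init in the patch: registration is only legal while the
     * table is still writable */
    static void open_softirq_sketch(int nr, softirq_fn action)
    {
        vec[nr] = action;
    }

    static void tasklet_action_sketch(void) { /* drain the tasklet list */ }

    static void do_softirq_sketch(int nr)
    {
        vec[nr]();    /* h->action() with no dummy argument */
    }

    int main(void)
    {
        open_softirq_sketch(0, tasklet_action_sketch);
        do_softirq_sketch(0);
        return 0;
    }
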
93689diff --git a/kernel/sys.c b/kernel/sys.c
93690index a03d9cd..55dbe9c 100644
93691--- a/kernel/sys.c
93692+++ b/kernel/sys.c
93693@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93694 error = -EACCES;
93695 goto out;
93696 }
93697+
93698+ if (gr_handle_chroot_setpriority(p, niceval)) {
93699+ error = -EACCES;
93700+ goto out;
93701+ }
93702+
93703 no_nice = security_task_setnice(p, niceval);
93704 if (no_nice) {
93705 error = no_nice;
93706@@ -365,6 +371,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93707 goto error;
93708 }
93709
93710+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93711+ goto error;
93712+
93713+ if (!gid_eq(new->gid, old->gid)) {
93714+ /* make sure we generate a learn log for what will
93715+		/* make sure we generate a learn log for what will
93716+		   end up being a role transition after a full-learning
93717+		   policy is generated.
93718+		   CAP_SETGID is required to perform a transition;
93719+		   we may not log a CAP_SETGID check above, e.g.
93720+		   in the case where new rgid = old egid.
93721+ gr_learn_cap(current, new, CAP_SETGID);
93722+ }
93723+
93724 if (rgid != (gid_t) -1 ||
93725 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93726 new->sgid = new->egid;
93727@@ -400,6 +420,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93728 old = current_cred();
93729
93730 retval = -EPERM;
93731+
93732+ if (gr_check_group_change(kgid, kgid, kgid))
93733+ goto error;
93734+
93735 if (ns_capable(old->user_ns, CAP_SETGID))
93736 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93737 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93738@@ -417,7 +441,7 @@ error:
93739 /*
93740 * change the user struct in a credentials set to match the new UID
93741 */
93742-static int set_user(struct cred *new)
93743+int set_user(struct cred *new)
93744 {
93745 struct user_struct *new_user;
93746
93747@@ -497,7 +521,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93748 goto error;
93749 }
93750
93751+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93752+ goto error;
93753+
93754 if (!uid_eq(new->uid, old->uid)) {
93755+		/* make sure we generate a learn log for what will
93756+		   end up being a role transition after a full-learning
93757+		   policy is generated.
93758+		   CAP_SETUID is required to perform a transition;
93759+		   we may not log a CAP_SETUID check above, e.g.
93760+		   in the case where new ruid = old euid.
93761+ */
93762+ gr_learn_cap(current, new, CAP_SETUID);
93763 retval = set_user(new);
93764 if (retval < 0)
93765 goto error;
93766@@ -547,6 +582,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93767 old = current_cred();
93768
93769 retval = -EPERM;
93770+
93771+ if (gr_check_crash_uid(kuid))
93772+ goto error;
93773+ if (gr_check_user_change(kuid, kuid, kuid))
93774+ goto error;
93775+
93776 if (ns_capable(old->user_ns, CAP_SETUID)) {
93777 new->suid = new->uid = kuid;
93778 if (!uid_eq(kuid, old->uid)) {
93779@@ -616,6 +657,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93780 goto error;
93781 }
93782
93783+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93784+ goto error;
93785+
93786 if (ruid != (uid_t) -1) {
93787 new->uid = kruid;
93788 if (!uid_eq(kruid, old->uid)) {
93789@@ -700,6 +744,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93790 goto error;
93791 }
93792
93793+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93794+ goto error;
93795+
93796 if (rgid != (gid_t) -1)
93797 new->gid = krgid;
93798 if (egid != (gid_t) -1)
93799@@ -764,12 +811,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93800 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93801 ns_capable(old->user_ns, CAP_SETUID)) {
93802 if (!uid_eq(kuid, old->fsuid)) {
93803+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93804+ goto error;
93805+
93806 new->fsuid = kuid;
93807 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93808 goto change_okay;
93809 }
93810 }
93811
93812+error:
93813 abort_creds(new);
93814 return old_fsuid;
93815
93816@@ -802,12 +853,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93817 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93818 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93819 ns_capable(old->user_ns, CAP_SETGID)) {
93820+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93821+ goto error;
93822+
93823 if (!gid_eq(kgid, old->fsgid)) {
93824 new->fsgid = kgid;
93825 goto change_okay;
93826 }
93827 }
93828
93829+error:
93830 abort_creds(new);
93831 return old_fsgid;
93832
93833@@ -1185,19 +1240,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93834 return -EFAULT;
93835
93836 down_read(&uts_sem);
93837- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93838+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93839 __OLD_UTS_LEN);
93840 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93841- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93842+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93843 __OLD_UTS_LEN);
93844 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93845- error |= __copy_to_user(&name->release, &utsname()->release,
93846+ error |= __copy_to_user(name->release, &utsname()->release,
93847 __OLD_UTS_LEN);
93848 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93849- error |= __copy_to_user(&name->version, &utsname()->version,
93850+ error |= __copy_to_user(name->version, &utsname()->version,
93851 __OLD_UTS_LEN);
93852 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93853- error |= __copy_to_user(&name->machine, &utsname()->machine,
93854+ error |= __copy_to_user(name->machine, &utsname()->machine,
93855 __OLD_UTS_LEN);
93856 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93857 up_read(&uts_sem);
93858@@ -1398,6 +1453,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93859 */
93860 new_rlim->rlim_cur = 1;
93861 }
93862+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93863+ is changed to a lower value. Since tasks can be created by the same
93864+ user in between this limit change and an execve by this task, force
93865+	   a recheck only for this task by setting PF_NPROC_EXCEEDED.
93866+ */
93867+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93868+ tsk->flags |= PF_NPROC_EXCEEDED;
93869 }
93870 if (!retval) {
93871 if (old_rlim)
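
Besides the gr_check_* credential hooks, two fixes in kernel/sys.c deserve a note. The olduname() hunk drops the & from &name->sysname and friends: the fields are character arrays, so the runtime value is unchanged, but the pointer now has the plain char * type that the __user checking machinery expects. The RLIMIT_NPROC hunk closes a time-of-check gap: the process-count limit is only enforced at fork time, so a task that forks, drops privileges, and then has its limit lowered would never be re-tested; setting PF_NPROC_EXCEEDED makes the next execve() repeat the check. The consumer lives in fs/exec.c and looks approximately like this, paraphrased from memory of this era's do_execve path rather than quoted from the patch:

    /* in do_execve_common(), before any work is committed */
    if ((current->flags & PF_NPROC_EXCEEDED) &&
        atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
            retval = -EAGAIN;    /* over the limit: refuse the exec */
            goto out_ret;
    }
    current->flags &= ~PF_NPROC_EXCEEDED;    /* one-shot flag */
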
93872diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93873index ce410bb..cd276f0 100644
93874--- a/kernel/sysctl.c
93875+++ b/kernel/sysctl.c
93876@@ -94,7 +94,6 @@
93877
93878
93879 #if defined(CONFIG_SYSCTL)
93880-
93881 /* External variables not in a header file. */
93882 extern int max_threads;
93883 extern int suid_dumpable;
93884@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93885
93886 /* Constants used for minimum and maximum */
93887 #ifdef CONFIG_LOCKUP_DETECTOR
93888-static int sixty = 60;
93889+static int sixty __read_only = 60;
93890 #endif
93891
93892-static int __maybe_unused neg_one = -1;
93893+static int __maybe_unused neg_one __read_only = -1;
93894
93895-static int zero;
93896-static int __maybe_unused one = 1;
93897-static int __maybe_unused two = 2;
93898-static int __maybe_unused four = 4;
93899-static unsigned long one_ul = 1;
93900-static int one_hundred = 100;
93901+static int zero __read_only = 0;
93902+static int __maybe_unused one __read_only = 1;
93903+static int __maybe_unused two __read_only = 2;
93904+static int __maybe_unused three __read_only = 3;
93905+static int __maybe_unused four __read_only = 4;
93906+static unsigned long one_ul __read_only = 1;
93907+static int one_hundred __read_only = 100;
93908 #ifdef CONFIG_PRINTK
93909-static int ten_thousand = 10000;
93910+static int ten_thousand __read_only = 10000;
93911 #endif
93912
93913 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93914@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93915 void __user *buffer, size_t *lenp, loff_t *ppos);
93916 #endif
93917
93918-#ifdef CONFIG_PRINTK
93919 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93920 void __user *buffer, size_t *lenp, loff_t *ppos);
93921-#endif
93922
93923 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93924 void __user *buffer, size_t *lenp, loff_t *ppos);
93925@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93926
93927 #endif
93928
93929+extern struct ctl_table grsecurity_table[];
93930+
93931 static struct ctl_table kern_table[];
93932 static struct ctl_table vm_table[];
93933 static struct ctl_table fs_table[];
93934@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93935 int sysctl_legacy_va_layout;
93936 #endif
93937
93938+#ifdef CONFIG_PAX_SOFTMODE
93939+static struct ctl_table pax_table[] = {
93940+ {
93941+ .procname = "softmode",
93942+ .data = &pax_softmode,
93943+ .maxlen = sizeof(unsigned int),
93944+ .mode = 0600,
93945+ .proc_handler = &proc_dointvec,
93946+ },
93947+
93948+ { }
93949+};
93950+#endif
93951+
93952 /* The default sysctl tables: */
93953
93954 static struct ctl_table sysctl_base_table[] = {
93955@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93956 #endif
93957
93958 static struct ctl_table kern_table[] = {
93959+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93960+ {
93961+ .procname = "grsecurity",
93962+ .mode = 0500,
93963+ .child = grsecurity_table,
93964+ },
93965+#endif
93966+
93967+#ifdef CONFIG_PAX_SOFTMODE
93968+ {
93969+ .procname = "pax",
93970+ .mode = 0500,
93971+ .child = pax_table,
93972+ },
93973+#endif
93974+
93975 {
93976 .procname = "sched_child_runs_first",
93977 .data = &sysctl_sched_child_runs_first,
93978@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93979 .data = &modprobe_path,
93980 .maxlen = KMOD_PATH_LEN,
93981 .mode = 0644,
93982- .proc_handler = proc_dostring,
93983+ .proc_handler = proc_dostring_modpriv,
93984 },
93985 {
93986 .procname = "modules_disabled",
93987@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93988 .extra1 = &zero,
93989 .extra2 = &one,
93990 },
93991+#endif
93992 {
93993 .procname = "kptr_restrict",
93994 .data = &kptr_restrict,
93995 .maxlen = sizeof(int),
93996 .mode = 0644,
93997 .proc_handler = proc_dointvec_minmax_sysadmin,
93998+#ifdef CONFIG_GRKERNSEC_HIDESYM
93999+ .extra1 = &two,
94000+#else
94001 .extra1 = &zero,
94002+#endif
94003 .extra2 = &two,
94004 },
94005-#endif
94006 {
94007 .procname = "ngroups_max",
94008 .data = &ngroups_max,
94009@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
94010 */
94011 {
94012 .procname = "perf_event_paranoid",
94013- .data = &sysctl_perf_event_paranoid,
94014- .maxlen = sizeof(sysctl_perf_event_paranoid),
94015+ .data = &sysctl_perf_event_legitimately_concerned,
94016+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
94017 .mode = 0644,
94018- .proc_handler = proc_dointvec,
94019+ /* go ahead, be a hero */
94020+ .proc_handler = proc_dointvec_minmax_sysadmin,
94021+ .extra1 = &neg_one,
94022+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
94023+ .extra2 = &three,
94024+#else
94025+ .extra2 = &two,
94026+#endif
94027 },
94028 {
94029 .procname = "perf_event_mlock_kb",
94030@@ -1348,6 +1389,13 @@ static struct ctl_table vm_table[] = {
94031 .proc_handler = proc_dointvec_minmax,
94032 .extra1 = &zero,
94033 },
94034+ {
94035+ .procname = "heap_stack_gap",
94036+ .data = &sysctl_heap_stack_gap,
94037+ .maxlen = sizeof(sysctl_heap_stack_gap),
94038+ .mode = 0644,
94039+ .proc_handler = proc_doulongvec_minmax,
94040+ },
94041 #else
94042 {
94043 .procname = "nr_trim_pages",
94044@@ -1830,6 +1878,16 @@ int proc_dostring(struct ctl_table *table, int write,
94045 (char __user *)buffer, lenp, ppos);
94046 }
94047
94048+int proc_dostring_modpriv(struct ctl_table *table, int write,
94049+ void __user *buffer, size_t *lenp, loff_t *ppos)
94050+{
94051+ if (write && !capable(CAP_SYS_MODULE))
94052+ return -EPERM;
94053+
94054+ return _proc_do_string(table->data, table->maxlen, write,
94055+ buffer, lenp, ppos);
94056+}
94057+
94058 static size_t proc_skip_spaces(char **buf)
94059 {
94060 size_t ret;
94061@@ -1935,6 +1993,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
94062 len = strlen(tmp);
94063 if (len > *size)
94064 len = *size;
94065+ if (len > sizeof(tmp))
94066+ len = sizeof(tmp);
94067 if (copy_to_user(*buf, tmp, len))
94068 return -EFAULT;
94069 *size -= len;
94070@@ -2112,7 +2172,7 @@ int proc_dointvec(struct ctl_table *table, int write,
94071 static int proc_taint(struct ctl_table *table, int write,
94072 void __user *buffer, size_t *lenp, loff_t *ppos)
94073 {
94074- struct ctl_table t;
94075+ ctl_table_no_const t;
94076 unsigned long tmptaint = get_taint();
94077 int err;
94078
94079@@ -2140,7 +2200,6 @@ static int proc_taint(struct ctl_table *table, int write,
94080 return err;
94081 }
94082
94083-#ifdef CONFIG_PRINTK
94084 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94085 void __user *buffer, size_t *lenp, loff_t *ppos)
94086 {
94087@@ -2149,7 +2208,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94088
94089 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
94090 }
94091-#endif
94092
94093 struct do_proc_dointvec_minmax_conv_param {
94094 int *min;
94095@@ -2709,6 +2767,12 @@ int proc_dostring(struct ctl_table *table, int write,
94096 return -ENOSYS;
94097 }
94098
94099+int proc_dostring_modpriv(struct ctl_table *table, int write,
94100+ void __user *buffer, size_t *lenp, loff_t *ppos)
94101+{
94102+ return -ENOSYS;
94103+}
94104+
94105 int proc_dointvec(struct ctl_table *table, int write,
94106 void __user *buffer, size_t *lenp, loff_t *ppos)
94107 {
94108@@ -2765,5 +2829,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94109 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94110 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94111 EXPORT_SYMBOL(proc_dostring);
94112+EXPORT_SYMBOL(proc_dostring_modpriv);
94113 EXPORT_SYMBOL(proc_doulongvec_minmax);
94114 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
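
The sysctl.c hunks bundle several defenses: the min/max bound variables become __read_only (an attacker who could write them would quietly widen every clamped sysctl); kptr_restrict gets a floor of 2 under GRKERNSEC_HIDESYM; perf_event_paranoid grows an extra level 3 under PERF_HARDEN, denying unprivileged perf entirely; writes to modprobe_path now require CAP_SYS_MODULE via proc_dostring_modpriv(); and proc_put_long() gains a clamp so a caller-supplied size can never read past the local formatting buffer. A runnable demonstration of that last clamp, with the buffer size and names invented:

    #include <stdio.h>
    #include <string.h>

    static size_t put_long_sketch(char *dst, size_t size, unsigned long val)
    {
        char tmp[8];
        /* snprintf returns the would-be length, which can exceed
         * sizeof(tmp) on truncation -- exactly the case the added
         * clamp guards against */
        size_t len = snprintf(tmp, sizeof(tmp), "%lu", val);
        if (len > size)
            len = size;
        if (len > sizeof(tmp))
            len = sizeof(tmp);    /* the clamp added by the patch */
        memcpy(dst, tmp, len);
        return len;
    }

    int main(void)
    {
        char out[32];
        /* 12 digits into an 8-byte tmp: copies 8, never reads past tmp */
        printf("%zu\n", put_long_sketch(out, sizeof(out), 123456789012UL));
        return 0;
    }
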
94115diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94116index 21f82c2..c1984e5 100644
94117--- a/kernel/taskstats.c
94118+++ b/kernel/taskstats.c
94119@@ -28,9 +28,12 @@
94120 #include <linux/fs.h>
94121 #include <linux/file.h>
94122 #include <linux/pid_namespace.h>
94123+#include <linux/grsecurity.h>
94124 #include <net/genetlink.h>
94125 #include <linux/atomic.h>
94126
94127+extern int gr_is_taskstats_denied(int pid);
94128+
94129 /*
94130 * Maximum length of a cpumask that can be specified in
94131 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94132@@ -567,6 +570,9 @@ err:
94133
94134 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94135 {
94136+ if (gr_is_taskstats_denied(current->pid))
94137+ return -EACCES;
94138+
94139 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
94140 return cmd_attr_register_cpumask(info);
94141 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
94142diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
94143index 1b001ed..55ef9e4 100644
94144--- a/kernel/time/alarmtimer.c
94145+++ b/kernel/time/alarmtimer.c
94146@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
94147 struct platform_device *pdev;
94148 int error = 0;
94149 int i;
94150- struct k_clock alarm_clock = {
94151+ static struct k_clock alarm_clock = {
94152 .clock_getres = alarm_clock_getres,
94153 .clock_get = alarm_clock_get,
94154 .timer_create = alarm_timer_create,
94155diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
94156index bee0c1f..a23fe2d 100644
94157--- a/kernel/time/hrtimer.c
94158+++ b/kernel/time/hrtimer.c
94159@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
94160 local_irq_restore(flags);
94161 }
94162
94163-static void run_hrtimer_softirq(struct softirq_action *h)
94164+static __latent_entropy void run_hrtimer_softirq(void)
94165 {
94166 hrtimer_peek_ahead_timers();
94167 }
94168diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
94169index 0075da7..63cc872 100644
94170--- a/kernel/time/posix-cpu-timers.c
94171+++ b/kernel/time/posix-cpu-timers.c
94172@@ -1449,14 +1449,14 @@ struct k_clock clock_posix_cpu = {
94173
94174 static __init int init_posix_cpu_timers(void)
94175 {
94176- struct k_clock process = {
94177+ static struct k_clock process = {
94178 .clock_getres = process_cpu_clock_getres,
94179 .clock_get = process_cpu_clock_get,
94180 .timer_create = process_cpu_timer_create,
94181 .nsleep = process_cpu_nsleep,
94182 .nsleep_restart = process_cpu_nsleep_restart,
94183 };
94184- struct k_clock thread = {
94185+ static struct k_clock thread = {
94186 .clock_getres = thread_cpu_clock_getres,
94187 .clock_get = thread_cpu_clock_get,
94188 .timer_create = thread_cpu_timer_create,
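
The k_clock conversions in alarmtimer.c and posix-cpu-timers.c only make sense together with the posix-timers.c hunk that follows: posix_clocks[] changes from an array of struct k_clock copies to an array of pointers, so every structure handed to posix_timers_register_clock() must now outlive its init function, hence static. With constify this also lets the ops end up read-only, and an unregistered slot becomes an explicit NULL that clockid_to_kclock() checks; the sigevent_t event = { } initialization in the same file separately plugs a potential leak of uninitialized stack bytes. A reduced, runnable illustration of why static is load-bearing, with all names simplified:

    #include <stdio.h>

    struct kclock { int (*getres)(void); };

    static struct kclock *clocks[4];    /* pointers, not copies */

    static void register_clock(int id, struct kclock *kc)
    {
        clocks[id] = kc;                /* stores the pointer itself */
    }

    static int realtime_getres(void) { return 1; }

    static void init_clocks(void)
    {
        /* must be static: an automatic variable would dangle the
         * moment init_clocks() returned */
        static struct kclock clock_realtime = { realtime_getres };
        register_clock(0, &clock_realtime);
    }

    int main(void)
    {
        init_clocks();
        if (clocks[0] && clocks[0]->getres)   /* cf. clockid_to_kclock() */
            printf("%d\n", clocks[0]->getres());
        return 0;
    }
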
94189diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
94190index 31ea01f..7fc61ef 100644
94191--- a/kernel/time/posix-timers.c
94192+++ b/kernel/time/posix-timers.c
94193@@ -43,6 +43,7 @@
94194 #include <linux/hash.h>
94195 #include <linux/posix-clock.h>
94196 #include <linux/posix-timers.h>
94197+#include <linux/grsecurity.h>
94198 #include <linux/syscalls.h>
94199 #include <linux/wait.h>
94200 #include <linux/workqueue.h>
94201@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
94202 * which we beg off on and pass to do_sys_settimeofday().
94203 */
94204
94205-static struct k_clock posix_clocks[MAX_CLOCKS];
94206+static struct k_clock *posix_clocks[MAX_CLOCKS];
94207
94208 /*
94209 * These ones are defined below.
94210@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
94211 */
94212 static __init int init_posix_timers(void)
94213 {
94214- struct k_clock clock_realtime = {
94215+ static struct k_clock clock_realtime = {
94216 .clock_getres = hrtimer_get_res,
94217 .clock_get = posix_clock_realtime_get,
94218 .clock_set = posix_clock_realtime_set,
94219@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
94220 .timer_get = common_timer_get,
94221 .timer_del = common_timer_del,
94222 };
94223- struct k_clock clock_monotonic = {
94224+ static struct k_clock clock_monotonic = {
94225 .clock_getres = hrtimer_get_res,
94226 .clock_get = posix_ktime_get_ts,
94227 .nsleep = common_nsleep,
94228@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
94229 .timer_get = common_timer_get,
94230 .timer_del = common_timer_del,
94231 };
94232- struct k_clock clock_monotonic_raw = {
94233+ static struct k_clock clock_monotonic_raw = {
94234 .clock_getres = hrtimer_get_res,
94235 .clock_get = posix_get_monotonic_raw,
94236 };
94237- struct k_clock clock_realtime_coarse = {
94238+ static struct k_clock clock_realtime_coarse = {
94239 .clock_getres = posix_get_coarse_res,
94240 .clock_get = posix_get_realtime_coarse,
94241 };
94242- struct k_clock clock_monotonic_coarse = {
94243+ static struct k_clock clock_monotonic_coarse = {
94244 .clock_getres = posix_get_coarse_res,
94245 .clock_get = posix_get_monotonic_coarse,
94246 };
94247- struct k_clock clock_tai = {
94248+ static struct k_clock clock_tai = {
94249 .clock_getres = hrtimer_get_res,
94250 .clock_get = posix_get_tai,
94251 .nsleep = common_nsleep,
94252@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
94253 .timer_get = common_timer_get,
94254 .timer_del = common_timer_del,
94255 };
94256- struct k_clock clock_boottime = {
94257+ static struct k_clock clock_boottime = {
94258 .clock_getres = hrtimer_get_res,
94259 .clock_get = posix_get_boottime,
94260 .nsleep = common_nsleep,
94261@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
94262 return;
94263 }
94264
94265- posix_clocks[clock_id] = *new_clock;
94266+ posix_clocks[clock_id] = new_clock;
94267 }
94268 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
94269
94270@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
94271 return (id & CLOCKFD_MASK) == CLOCKFD ?
94272 &clock_posix_dynamic : &clock_posix_cpu;
94273
94274- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
94275+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
94276 return NULL;
94277- return &posix_clocks[id];
94278+ return posix_clocks[id];
94279 }
94280
94281 static int common_timer_create(struct k_itimer *new_timer)
94282@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
94283 struct k_clock *kc = clockid_to_kclock(which_clock);
94284 struct k_itimer *new_timer;
94285 int error, new_timer_id;
94286- sigevent_t event;
94287+ sigevent_t event = { };
94288 int it_id_set = IT_ID_NOT_SET;
94289
94290 if (!kc)
94291@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
94292 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
94293 return -EFAULT;
94294
94295+ /* Only the CLOCK_REALTIME clock can be set; every other clock
94296+ has its clock_set fptr pointed at a nosettime dummy function.
94297+ CLOCK_REALTIME's clock_set handler (posix_clock_realtime_set,
94298+ registered in init_posix_timers above) ends up in
94299+ do_sys_settimeofday, which we hook.
94300+ */
94301+
94302 return kc->clock_set(which_clock, &new_tp);
94303 }
94304
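All of the k_clock changes in this file (and the matching ones in alarmtimer.c and posix-cpu-timers.c above) serve one conversion: posix_clocks[] becomes an array of pointers, registration stores the caller's pointer instead of copying the structure, and clockid_to_kclock() therefore gains a NULL-slot check. That is also why every struct k_clock defined inside an init function had to become static: a registered pointer to an automatic variable would dangle as soon as the function returned. A userspace model of the lookup side (names illustrative):

#include <stddef.h>
#include <stdio.h>

struct k_clock { int (*clock_getres)(void); };

static int realtime_getres(void) { return 1; }

/* Registered by reference, so it must have static storage duration. */
static struct k_clock clock_realtime = { .clock_getres = realtime_getres };

#define MAX_CLOCKS 16
static struct k_clock *posix_clocks[MAX_CLOCKS];

static struct k_clock *clockid_to_kclock(int id)
{
	/* the pointer table needs the extra NULL-slot check the patch adds */
	if (id < 0 || id >= MAX_CLOCKS || !posix_clocks[id] ||
	    !posix_clocks[id]->clock_getres)
		return NULL;
	return posix_clocks[id];
}

int main(void)
{
	posix_clocks[0] = &clock_realtime; /* register by reference */
	printf("slot 0: %s\n", clockid_to_kclock(0) ? "found" : "empty");
	printf("slot 5: %s\n", clockid_to_kclock(5) ? "found" : "empty");
	return 0;
}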
94305diff --git a/kernel/time/time.c b/kernel/time/time.c
94306index 2c85b77..6530536 100644
94307--- a/kernel/time/time.c
94308+++ b/kernel/time/time.c
94309@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
94310 return error;
94311
94312 if (tz) {
94313+ /* do_settimeofday(), called below when tv is set, already logs;
94314+ log here only for a timezone-only change, so we never log twice */
94315+ if (!tv)
94316+ gr_log_timechange();
94317+
94318 sys_tz = *tz;
94319 update_vsyscall_tz();
94320 if (firsttime) {
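The tv/tz split explains the guard above: sys_settimeofday() may carry a time value, a timezone, or both. When tv is present, do_settimeofday64() (hooked in timekeeping.c below) emits the audit entry, so logging here as well would record the same change twice; only the timezone-only case needs its own gr_log_timechange(). The decision, compressed into a runnable model:

#include <stdbool.h>
#include <stdio.h>

static void gr_log_timechange(void) { puts("time change logged"); }

static void set_time(bool has_tv, bool has_tz)
{
	if (has_tz && !has_tv)
		gr_log_timechange(); /* timezone-only change logs here */
	if (has_tv)
		gr_log_timechange(); /* stands in for do_settimeofday()'s log */
}

int main(void)
{
	set_time(true, true);  /* one log line, not two */
	set_time(false, true); /* one log line */
	return 0;
}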
94321diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94322index 91db941..a371671 100644
94323--- a/kernel/time/timekeeping.c
94324+++ b/kernel/time/timekeeping.c
94325@@ -15,6 +15,7 @@
94326 #include <linux/init.h>
94327 #include <linux/mm.h>
94328 #include <linux/sched.h>
94329+#include <linux/grsecurity.h>
94330 #include <linux/syscore_ops.h>
94331 #include <linux/clocksource.h>
94332 #include <linux/jiffies.h>
94333@@ -802,6 +803,8 @@ int do_settimeofday64(const struct timespec64 *ts)
94334 if (!timespec64_valid_strict(ts))
94335 return -EINVAL;
94336
94337+ gr_log_timechange();
94338+
94339 raw_spin_lock_irqsave(&timekeeper_lock, flags);
94340 write_seqcount_begin(&tk_core.seq);
94341
94342diff --git a/kernel/time/timer.c b/kernel/time/timer.c
94343index 2d3f5c5..7ed7dc5 100644
94344--- a/kernel/time/timer.c
94345+++ b/kernel/time/timer.c
94346@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
94347 /*
94348 * This function runs timers and the timer-tq in bottom half context.
94349 */
94350-static void run_timer_softirq(struct softirq_action *h)
94351+static __latent_entropy void run_timer_softirq(void)
94352 {
94353 struct tvec_base *base = __this_cpu_read(tvec_bases);
94354
94355@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
94356 *
94357 * In all cases the return value is guaranteed to be non-negative.
94358 */
94359-signed long __sched schedule_timeout(signed long timeout)
94360+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
94361 {
94362 struct timer_list timer;
94363 unsigned long expire;
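__intentional_overflow(-1) above is an annotation for the PaX size_overflow gcc plugin, which otherwise instruments integer arithmetic and traps on unexpected wraparound; the -1 argument, by the plugin's convention, opts the whole function out, which schedule_timeout() needs for its deliberate MAX_SCHEDULE_TIMEOUT arithmetic. An assumed fallback shape for builds without the plugin:

/* Assumed no-op fallback when the size_overflow plugin is not in use. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

/* Example: wraparound here is by design, so opt the function out. */
static long __intentional_overflow(-1) bump_timeout(long timeout)
{
	return timeout + 1; /* may wrap for LONG_MAX-style sentinels */
}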
94364diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94365index 61ed862..3b52c65 100644
94366--- a/kernel/time/timer_list.c
94367+++ b/kernel/time/timer_list.c
94368@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94369
94370 static void print_name_offset(struct seq_file *m, void *sym)
94371 {
94372+#ifdef CONFIG_GRKERNSEC_HIDESYM
94373+ SEQ_printf(m, "<%p>", NULL);
94374+#else
94375 char symname[KSYM_NAME_LEN];
94376
94377 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94378 SEQ_printf(m, "<%pK>", sym);
94379 else
94380 SEQ_printf(m, "%s", symname);
94381+#endif
94382 }
94383
94384 static void
94385@@ -119,7 +123,11 @@ next_one:
94386 static void
94387 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94388 {
94389+#ifdef CONFIG_GRKERNSEC_HIDESYM
94390+ SEQ_printf(m, " .base: %p\n", NULL);
94391+#else
94392 SEQ_printf(m, " .base: %pK\n", base);
94393+#endif
94394 SEQ_printf(m, " .index: %d\n",
94395 base->index);
94396 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94397@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
94398 {
94399 struct proc_dir_entry *pe;
94400
94401+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94402+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94403+#else
94404 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94405+#endif
94406 if (!pe)
94407 return -ENOMEM;
94408 return 0;
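Both timer_list.c hunks apply the GRKERNSEC_HIDESYM rule: kernel addresses and symbol placements must not leak through /proc at all, so instead of relying on %pK (which only zeroes the pointer for unprivileged readers) the hardened path prints a literal NULL unconditionally, and the companion hunk drops the file mode from 0444 to 0400. A userspace model of the two print modes (build with -DHIDESYM for the hardened one):

#include <stdio.h>

static void print_name_offset(const void *sym)
{
#ifdef HIDESYM
	printf("<%p>", (void *)0); /* hide the address entirely */
#else
	printf("<%p>", sym);       /* the kernel would try a symbol lookup */
#endif
}

int main(void)
{
	int object;
	print_name_offset(&object);
	putchar('\n');
	return 0;
}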
94409diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94410index 1fb08f2..ca4bb1e 100644
94411--- a/kernel/time/timer_stats.c
94412+++ b/kernel/time/timer_stats.c
94413@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94414 static unsigned long nr_entries;
94415 static struct entry entries[MAX_ENTRIES];
94416
94417-static atomic_t overflow_count;
94418+static atomic_unchecked_t overflow_count;
94419
94420 /*
94421 * The entries are in a hash-table, for fast lookup:
94422@@ -140,7 +140,7 @@ static void reset_entries(void)
94423 nr_entries = 0;
94424 memset(entries, 0, sizeof(entries));
94425 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94426- atomic_set(&overflow_count, 0);
94427+ atomic_set_unchecked(&overflow_count, 0);
94428 }
94429
94430 static struct entry *alloc_entry(void)
94431@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94432 if (likely(entry))
94433 entry->count++;
94434 else
94435- atomic_inc(&overflow_count);
94436+ atomic_inc_unchecked(&overflow_count);
94437
94438 out_unlock:
94439 raw_spin_unlock_irqrestore(lock, flags);
94440@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94441
94442 static void print_name_offset(struct seq_file *m, unsigned long addr)
94443 {
94444+#ifdef CONFIG_GRKERNSEC_HIDESYM
94445+ seq_printf(m, "<%p>", NULL);
94446+#else
94447 char symname[KSYM_NAME_LEN];
94448
94449 if (lookup_symbol_name(addr, symname) < 0)
94450- seq_printf(m, "<%p>", (void *)addr);
94451+ seq_printf(m, "<%pK>", (void *)addr);
94452 else
94453 seq_printf(m, "%s", symname);
94454+#endif
94455 }
94456
94457 static int tstats_show(struct seq_file *m, void *v)
94458@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
94459
94460 seq_puts(m, "Timer Stats Version: v0.3\n");
94461 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94462- if (atomic_read(&overflow_count))
94463- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
94464+ if (atomic_read_unchecked(&overflow_count))
94465+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
94466 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
94467
94468 for (i = 0; i < nr_entries; i++) {
94469@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
94470 {
94471 struct proc_dir_entry *pe;
94472
94473+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94474+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
94475+#else
94476 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
94477+#endif
94478 if (!pe)
94479 return -ENOMEM;
94480 return 0;
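The atomic_t to atomic_unchecked_t swap here recurs all through the patch wherever a counter is a statistic rather than a reference count: PaX's REFCOUNT feature instruments the ordinary atomic ops to trap on overflow (defeating refcount-overflow exploits), so counters that may legitimately saturate or wrap, like overflow_count, must use the unchecked variants. When REFCOUNT is disabled the unchecked API is assumed to collapse onto the plain one, roughly:

/* Assumed no-op mapping when CONFIG_PAX_REFCOUNT is off. */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_xchg_unchecked(v, n)	atomic_xchg((v), (n))
#endif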
94481diff --git a/kernel/torture.c b/kernel/torture.c
94482index dd70993..0bf694b 100644
94483--- a/kernel/torture.c
94484+++ b/kernel/torture.c
94485@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
94486 mutex_lock(&fullstop_mutex);
94487 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
94488 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
94489- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
94490+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
94491 } else {
94492 pr_warn("Concurrent rmmod and shutdown illegal!\n");
94493 }
94494@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
94495 if (!torture_must_stop()) {
94496 if (stutter > 1) {
94497 schedule_timeout_interruptible(stutter - 1);
94498- ACCESS_ONCE(stutter_pause_test) = 2;
94499+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
94500 }
94501 schedule_timeout_interruptible(1);
94502- ACCESS_ONCE(stutter_pause_test) = 1;
94503+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
94504 }
94505 if (!torture_must_stop())
94506 schedule_timeout_interruptible(stutter);
94507- ACCESS_ONCE(stutter_pause_test) = 0;
94508+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
94509 torture_shutdown_absorb("torture_stutter");
94510 } while (!torture_must_stop());
94511 torture_kthread_stopping("torture_stutter");
94512@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
94513 schedule_timeout_uninterruptible(10);
94514 return true;
94515 }
94516- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
94517+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
94518 mutex_unlock(&fullstop_mutex);
94519 torture_shutdown_cleanup();
94520 torture_shuffle_cleanup();
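ACCESS_ONCE_RW exists because this patch redefines ACCESS_ONCE with a const-qualified volatile cast, turning it into a read-only accessor so constified data cannot be stored to by accident; sites that genuinely want a racy write, like the torture-test flags above, opt in with the _RW spelling. The assumed pair of definitions (gcc typeof):

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

/* ACCESS_ONCE(flag) = 1;    no longer compiles: assignment to const */
/* ACCESS_ONCE_RW(flag) = 1; compiles: the RW variant keeps the write */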
94521diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
94522index 483cecf..ac46091 100644
94523--- a/kernel/trace/blktrace.c
94524+++ b/kernel/trace/blktrace.c
94525@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
94526 struct blk_trace *bt = filp->private_data;
94527 char buf[16];
94528
94529- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
94530+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
94531
94532 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
94533 }
94534@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
94535 return 1;
94536
94537 bt = buf->chan->private_data;
94538- atomic_inc(&bt->dropped);
94539+ atomic_inc_unchecked(&bt->dropped);
94540 return 0;
94541 }
94542
94543@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
94544
94545 bt->dir = dir;
94546 bt->dev = dev;
94547- atomic_set(&bt->dropped, 0);
94548+ atomic_set_unchecked(&bt->dropped, 0);
94549 INIT_LIST_HEAD(&bt->running_list);
94550
94551 ret = -EIO;
94552diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
94553index 4f22802..bd268b1 100644
94554--- a/kernel/trace/ftrace.c
94555+++ b/kernel/trace/ftrace.c
94556@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94557 if (unlikely(ftrace_disabled))
94558 return 0;
94559
94560+ ret = ftrace_arch_code_modify_prepare();
94561+ FTRACE_WARN_ON(ret);
94562+ if (ret)
94563+ return 0;
94564+
94565 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
94566+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
94567 if (ret) {
94568 ftrace_bug(ret, rec);
94569- return 0;
94570 }
94571- return 1;
94572+ return ret ? 0 : 1;
94573 }
94574
94575 /*
94576@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
94577 if (!count)
94578 return 0;
94579
94580+ pax_open_kernel();
94581 sort(start, count, sizeof(*start),
94582 ftrace_cmp_ips, ftrace_swap_ips);
94583+ pax_close_kernel();
94584
94585 start_pg = ftrace_allocate_pages(count);
94586 if (!start_pg)
94587@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
94588
94589 if (t->ret_stack == NULL) {
94590 atomic_set(&t->tracing_graph_pause, 0);
94591- atomic_set(&t->trace_overrun, 0);
94592+ atomic_set_unchecked(&t->trace_overrun, 0);
94593 t->curr_ret_stack = -1;
94594 /* Make sure the tasks see the -1 first: */
94595 smp_wmb();
94596@@ -5876,7 +5883,7 @@ static void
94597 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
94598 {
94599 atomic_set(&t->tracing_graph_pause, 0);
94600- atomic_set(&t->trace_overrun, 0);
94601+ atomic_set_unchecked(&t->trace_overrun, 0);
94602 t->ftrace_timestamp = 0;
94603 /* make curr_ret_stack visible before we add the ret_stack */
94604 smp_wmb();
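The pax_open_kernel()/pax_close_kernel() bracket around sort() is needed because, under KERNEXEC/constify, the mcount location table being sorted sits in write-protected memory; the pair opens a brief write window and closes it again. The kernel primitive is architecture-specific, but a userspace mprotect() analogy shows the same discipline (an illustration only, not the real mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "protected table");
	mprotect(p, pagesz, PROT_READ);              /* now read-only      */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
	p[0] = 'P';                                  /* the guarded write  */
	mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

	puts(p);
	munmap(p, pagesz);
	return 0;
}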
94605diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94606index 5040d44..d43b2b9 100644
94607--- a/kernel/trace/ring_buffer.c
94608+++ b/kernel/trace/ring_buffer.c
94609@@ -348,9 +348,9 @@ struct buffer_data_page {
94610 */
94611 struct buffer_page {
94612 struct list_head list; /* list of buffer pages */
94613- local_t write; /* index for next write */
94614+ local_unchecked_t write; /* index for next write */
94615 unsigned read; /* index for next read */
94616- local_t entries; /* entries on this page */
94617+ local_unchecked_t entries; /* entries on this page */
94618 unsigned long real_end; /* real end of data */
94619 struct buffer_data_page *page; /* Actual data page */
94620 };
94621@@ -471,11 +471,11 @@ struct ring_buffer_per_cpu {
94622 unsigned long last_overrun;
94623 local_t entries_bytes;
94624 local_t entries;
94625- local_t overrun;
94626- local_t commit_overrun;
94627- local_t dropped_events;
94628+ local_unchecked_t overrun;
94629+ local_unchecked_t commit_overrun;
94630+ local_unchecked_t dropped_events;
94631 local_t committing;
94632- local_t commits;
94633+ local_unchecked_t commits;
94634 unsigned long read;
94635 unsigned long read_bytes;
94636 u64 write_stamp;
94637@@ -1045,8 +1045,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94638 *
94639 * We add a counter to the write field to denote this.
94640 */
94641- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94642- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94643+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94644+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94645
94646 /*
94647 * Just make sure we have seen our old_write and synchronize
94648@@ -1074,8 +1074,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94649 * cmpxchg to only update if an interrupt did not already
94650 * do it for us. If the cmpxchg fails, we don't care.
94651 */
94652- (void)local_cmpxchg(&next_page->write, old_write, val);
94653- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94654+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94655+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94656
94657 /*
94658 * No need to worry about races with clearing out the commit.
94659@@ -1443,12 +1443,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94660
94661 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94662 {
94663- return local_read(&bpage->entries) & RB_WRITE_MASK;
94664+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94665 }
94666
94667 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94668 {
94669- return local_read(&bpage->write) & RB_WRITE_MASK;
94670+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94671 }
94672
94673 static int
94674@@ -1543,7 +1543,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94675 * bytes consumed in ring buffer from here.
94676 * Increment overrun to account for the lost events.
94677 */
94678- local_add(page_entries, &cpu_buffer->overrun);
94679+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94680 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94681 }
94682
94683@@ -2105,7 +2105,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94684 * it is our responsibility to update
94685 * the counters.
94686 */
94687- local_add(entries, &cpu_buffer->overrun);
94688+ local_add_unchecked(entries, &cpu_buffer->overrun);
94689 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94690
94691 /*
94692@@ -2255,7 +2255,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94693 if (tail == BUF_PAGE_SIZE)
94694 tail_page->real_end = 0;
94695
94696- local_sub(length, &tail_page->write);
94697+ local_sub_unchecked(length, &tail_page->write);
94698 return;
94699 }
94700
94701@@ -2290,7 +2290,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94702 rb_event_set_padding(event);
94703
94704 /* Set the write back to the previous setting */
94705- local_sub(length, &tail_page->write);
94706+ local_sub_unchecked(length, &tail_page->write);
94707 return;
94708 }
94709
94710@@ -2302,7 +2302,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94711
94712 /* Set write to end of buffer */
94713 length = (tail + length) - BUF_PAGE_SIZE;
94714- local_sub(length, &tail_page->write);
94715+ local_sub_unchecked(length, &tail_page->write);
94716 }
94717
94718 /*
94719@@ -2328,7 +2328,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94720 * about it.
94721 */
94722 if (unlikely(next_page == commit_page)) {
94723- local_inc(&cpu_buffer->commit_overrun);
94724+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94725 goto out_reset;
94726 }
94727
94728@@ -2358,7 +2358,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94729 * this is easy, just stop here.
94730 */
94731 if (!(buffer->flags & RB_FL_OVERWRITE)) {
94732- local_inc(&cpu_buffer->dropped_events);
94733+ local_inc_unchecked(&cpu_buffer->dropped_events);
94734 goto out_reset;
94735 }
94736
94737@@ -2384,7 +2384,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94738 cpu_buffer->tail_page) &&
94739 (cpu_buffer->commit_page ==
94740 cpu_buffer->reader_page))) {
94741- local_inc(&cpu_buffer->commit_overrun);
94742+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94743 goto out_reset;
94744 }
94745 }
94746@@ -2432,7 +2432,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94747 length += RB_LEN_TIME_EXTEND;
94748
94749 tail_page = cpu_buffer->tail_page;
94750- write = local_add_return(length, &tail_page->write);
94751+ write = local_add_return_unchecked(length, &tail_page->write);
94752
94753 /* set write to only the index of the write */
94754 write &= RB_WRITE_MASK;
94755@@ -2456,7 +2456,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94756 kmemcheck_annotate_bitfield(event, bitfield);
94757 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94758
94759- local_inc(&tail_page->entries);
94760+ local_inc_unchecked(&tail_page->entries);
94761
94762 /*
94763 * If this is the first commit on the page, then update
94764@@ -2489,7 +2489,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94765
94766 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94767 unsigned long write_mask =
94768- local_read(&bpage->write) & ~RB_WRITE_MASK;
94769+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94770 unsigned long event_length = rb_event_length(event);
94771 /*
94772 * This is on the tail page. It is possible that
94773@@ -2499,7 +2499,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94774 */
94775 old_index += write_mask;
94776 new_index += write_mask;
94777- index = local_cmpxchg(&bpage->write, old_index, new_index);
94778+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94779 if (index == old_index) {
94780 /* update counters */
94781 local_sub(event_length, &cpu_buffer->entries_bytes);
94782@@ -2514,7 +2514,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94783 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
94784 {
94785 local_inc(&cpu_buffer->committing);
94786- local_inc(&cpu_buffer->commits);
94787+ local_inc_unchecked(&cpu_buffer->commits);
94788 }
94789
94790 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94791@@ -2526,7 +2526,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94792 return;
94793
94794 again:
94795- commits = local_read(&cpu_buffer->commits);
94796+ commits = local_read_unchecked(&cpu_buffer->commits);
94797 /* synchronize with interrupts */
94798 barrier();
94799 if (local_read(&cpu_buffer->committing) == 1)
94800@@ -2542,7 +2542,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94801 * updating of the commit page and the clearing of the
94802 * committing counter.
94803 */
94804- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
94805+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
94806 !local_read(&cpu_buffer->committing)) {
94807 local_inc(&cpu_buffer->committing);
94808 goto again;
94809@@ -2572,7 +2572,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
94810 barrier();
94811 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
94812 local_dec(&cpu_buffer->committing);
94813- local_dec(&cpu_buffer->commits);
94814+ local_dec_unchecked(&cpu_buffer->commits);
94815 return NULL;
94816 }
94817 #endif
94818@@ -2902,7 +2902,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94819
94820 /* Do the likely case first */
94821 if (likely(bpage->page == (void *)addr)) {
94822- local_dec(&bpage->entries);
94823+ local_dec_unchecked(&bpage->entries);
94824 return;
94825 }
94826
94827@@ -2914,7 +2914,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94828 start = bpage;
94829 do {
94830 if (bpage->page == (void *)addr) {
94831- local_dec(&bpage->entries);
94832+ local_dec_unchecked(&bpage->entries);
94833 return;
94834 }
94835 rb_inc_page(cpu_buffer, &bpage);
94836@@ -3198,7 +3198,7 @@ static inline unsigned long
94837 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94838 {
94839 return local_read(&cpu_buffer->entries) -
94840- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94841+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94842 }
94843
94844 /**
94845@@ -3287,7 +3287,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94846 return 0;
94847
94848 cpu_buffer = buffer->buffers[cpu];
94849- ret = local_read(&cpu_buffer->overrun);
94850+ ret = local_read_unchecked(&cpu_buffer->overrun);
94851
94852 return ret;
94853 }
94854@@ -3310,7 +3310,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94855 return 0;
94856
94857 cpu_buffer = buffer->buffers[cpu];
94858- ret = local_read(&cpu_buffer->commit_overrun);
94859+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94860
94861 return ret;
94862 }
94863@@ -3332,7 +3332,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
94864 return 0;
94865
94866 cpu_buffer = buffer->buffers[cpu];
94867- ret = local_read(&cpu_buffer->dropped_events);
94868+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
94869
94870 return ret;
94871 }
94872@@ -3395,7 +3395,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94873 /* if you care about this being correct, lock the buffer */
94874 for_each_buffer_cpu(buffer, cpu) {
94875 cpu_buffer = buffer->buffers[cpu];
94876- overruns += local_read(&cpu_buffer->overrun);
94877+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94878 }
94879
94880 return overruns;
94881@@ -3566,8 +3566,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94882 /*
94883 * Reset the reader page to size zero.
94884 */
94885- local_set(&cpu_buffer->reader_page->write, 0);
94886- local_set(&cpu_buffer->reader_page->entries, 0);
94887+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94888+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94889 local_set(&cpu_buffer->reader_page->page->commit, 0);
94890 cpu_buffer->reader_page->real_end = 0;
94891
94892@@ -3601,7 +3601,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94893 * want to compare with the last_overrun.
94894 */
94895 smp_mb();
94896- overwrite = local_read(&(cpu_buffer->overrun));
94897+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94898
94899 /*
94900 * Here's the tricky part.
94901@@ -4173,8 +4173,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94902
94903 cpu_buffer->head_page
94904 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94905- local_set(&cpu_buffer->head_page->write, 0);
94906- local_set(&cpu_buffer->head_page->entries, 0);
94907+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94908+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94909 local_set(&cpu_buffer->head_page->page->commit, 0);
94910
94911 cpu_buffer->head_page->read = 0;
94912@@ -4184,18 +4184,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94913
94914 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94915 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94916- local_set(&cpu_buffer->reader_page->write, 0);
94917- local_set(&cpu_buffer->reader_page->entries, 0);
94918+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94919+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94920 local_set(&cpu_buffer->reader_page->page->commit, 0);
94921 cpu_buffer->reader_page->read = 0;
94922
94923 local_set(&cpu_buffer->entries_bytes, 0);
94924- local_set(&cpu_buffer->overrun, 0);
94925- local_set(&cpu_buffer->commit_overrun, 0);
94926- local_set(&cpu_buffer->dropped_events, 0);
94927+ local_set_unchecked(&cpu_buffer->overrun, 0);
94928+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94929+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
94930 local_set(&cpu_buffer->entries, 0);
94931 local_set(&cpu_buffer->committing, 0);
94932- local_set(&cpu_buffer->commits, 0);
94933+ local_set_unchecked(&cpu_buffer->commits, 0);
94934 cpu_buffer->read = 0;
94935 cpu_buffer->read_bytes = 0;
94936
94937@@ -4596,8 +4596,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94938 rb_init_page(bpage);
94939 bpage = reader->page;
94940 reader->page = *data_page;
94941- local_set(&reader->write, 0);
94942- local_set(&reader->entries, 0);
94943+ local_set_unchecked(&reader->write, 0);
94944+ local_set_unchecked(&reader->entries, 0);
94945 reader->read = 0;
94946 *data_page = bpage;
94947
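The ring buffer is the heaviest user of local_unchecked_t (the local_t analogue of atomic_unchecked_t) because its per-page write and entries words overload their upper bits on purpose: local_add_return(RB_WRITE_INTCNT, ...) bumps an interrupt-nesting count stored above bit 20, while masking with RB_WRITE_MASK recovers the low-bit index, so the raw value is expected to wrap and must not trip REFCOUNT's overflow trap. A userspace demo of the bit split (constants mirror ring_buffer.c):

#include <stdio.h>

#define RB_WRITE_MASK   0xfffffUL   /* low 20 bits: write index     */
#define RB_WRITE_INTCNT (1UL << 20) /* upper bits: interrupt count  */

int main(void)
{
	unsigned long write = 0;

	write += 123;             /* a data write advances the index   */
	write += RB_WRITE_INTCNT; /* interruptions bump the upper bits */
	write += RB_WRITE_INTCNT;

	printf("index  = %lu\n", write & RB_WRITE_MASK); /* 123 */
	printf("intcnt = %lu\n", write >> 20);           /* 2   */
	return 0;
}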
94948diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94949index 62c6506..5c25989 100644
94950--- a/kernel/trace/trace.c
94951+++ b/kernel/trace/trace.c
94952@@ -3500,7 +3500,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94953 return 0;
94954 }
94955
94956-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94957+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94958 {
94959 /* do nothing if flag is already set */
94960 if (!!(trace_flags & mask) == !!enabled)
94961diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94962index dd8205a..1aae87a 100644
94963--- a/kernel/trace/trace.h
94964+++ b/kernel/trace/trace.h
94965@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94966 void trace_printk_init_buffers(void);
94967 void trace_printk_start_comm(void);
94968 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94969-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94970+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94971
94972 /*
94973 * Normal trace_printk() and friends allocates special buffers
94974diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94975index 57b67b1..66082a9 100644
94976--- a/kernel/trace/trace_clock.c
94977+++ b/kernel/trace/trace_clock.c
94978@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94979 return now;
94980 }
94981
94982-static atomic64_t trace_counter;
94983+static atomic64_unchecked_t trace_counter;
94984
94985 /*
94986 * trace_clock_counter(): simply an atomic counter.
94987@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94988 */
94989 u64 notrace trace_clock_counter(void)
94990 {
94991- return atomic64_add_return(1, &trace_counter);
94992+ return atomic64_inc_return_unchecked(&trace_counter);
94993 }
94994diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94995index db54dda..b9e4f03 100644
94996--- a/kernel/trace/trace_events.c
94997+++ b/kernel/trace/trace_events.c
94998@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94999 return 0;
95000 }
95001
95002-struct ftrace_module_file_ops;
95003 static void __add_event_to_tracers(struct ftrace_event_call *call);
95004
95005 /* Add an additional event_call dynamically */
95006diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
95007index 2d25ad1..5bfd931 100644
95008--- a/kernel/trace/trace_functions_graph.c
95009+++ b/kernel/trace/trace_functions_graph.c
95010@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
95011
95012 /* The return trace stack is full */
95013 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
95014- atomic_inc(&current->trace_overrun);
95015+ atomic_inc_unchecked(&current->trace_overrun);
95016 return -EBUSY;
95017 }
95018
95019@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
95020 *ret = current->ret_stack[index].ret;
95021 trace->func = current->ret_stack[index].func;
95022 trace->calltime = current->ret_stack[index].calltime;
95023- trace->overrun = atomic_read(&current->trace_overrun);
95024+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
95025 trace->depth = index;
95026 }
95027
95028diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95029index 7a9ba62..2e0e4a1 100644
95030--- a/kernel/trace/trace_mmiotrace.c
95031+++ b/kernel/trace/trace_mmiotrace.c
95032@@ -24,7 +24,7 @@ struct header_iter {
95033 static struct trace_array *mmio_trace_array;
95034 static bool overrun_detected;
95035 static unsigned long prev_overruns;
95036-static atomic_t dropped_count;
95037+static atomic_unchecked_t dropped_count;
95038
95039 static void mmio_reset_data(struct trace_array *tr)
95040 {
95041@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
95042
95043 static unsigned long count_overruns(struct trace_iterator *iter)
95044 {
95045- unsigned long cnt = atomic_xchg(&dropped_count, 0);
95046+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95047 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
95048
95049 if (over > prev_overruns)
95050@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95051 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95052 sizeof(*entry), 0, pc);
95053 if (!event) {
95054- atomic_inc(&dropped_count);
95055+ atomic_inc_unchecked(&dropped_count);
95056 return;
95057 }
95058 entry = ring_buffer_event_data(event);
95059@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95060 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95061 sizeof(*entry), 0, pc);
95062 if (!event) {
95063- atomic_inc(&dropped_count);
95064+ atomic_inc_unchecked(&dropped_count);
95065 return;
95066 }
95067 entry = ring_buffer_event_data(event);
95068diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95069index 692bf71..6d9a9cd 100644
95070--- a/kernel/trace/trace_output.c
95071+++ b/kernel/trace/trace_output.c
95072@@ -751,14 +751,16 @@ int register_ftrace_event(struct trace_event *event)
95073 goto out;
95074 }
95075
95076+ pax_open_kernel();
95077 if (event->funcs->trace == NULL)
95078- event->funcs->trace = trace_nop_print;
95079+ *(void **)&event->funcs->trace = trace_nop_print;
95080 if (event->funcs->raw == NULL)
95081- event->funcs->raw = trace_nop_print;
95082+ *(void **)&event->funcs->raw = trace_nop_print;
95083 if (event->funcs->hex == NULL)
95084- event->funcs->hex = trace_nop_print;
95085+ *(void **)&event->funcs->hex = trace_nop_print;
95086 if (event->funcs->binary == NULL)
95087- event->funcs->binary = trace_nop_print;
95088+ *(void **)&event->funcs->binary = trace_nop_print;
95089+ pax_close_kernel();
95090
95091 key = event->type & (EVENT_HASHSIZE - 1);
95092
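In register_ftrace_event() the new *(void **)& casts pair with the pax_open_kernel() window: under the constify plugin the ops structure behind event->funcs is const, so a plain member assignment would not even compile; taking the member's address and casting gets the store past the type system, and the open/close pair makes the page writable for it. A userspace illustration of just the cast, using writable storage viewed through a pointer-to-const (all names invented):

#include <stdio.h>
#include <stdlib.h>

struct ops { int (*hook)(void); };

static int nop_hook(void) { return 0; }

int main(void)
{
	struct ops *storage = calloc(1, sizeof(*storage));
	const struct ops *table = storage; /* const view, as constify gives */

	if (!storage)
		return 1;
	/* table->hook = nop_hook;  -> error: read-only member */
	if (table->hook == NULL)
		*(int (**)(void))&table->hook = nop_hook; /* cast defeats const */

	printf("hook() = %d\n", table->hook());
	free(storage);
	return 0;
}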
95093diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
95094index e694c9f..6775a38 100644
95095--- a/kernel/trace/trace_seq.c
95096+++ b/kernel/trace/trace_seq.c
95097@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
95098 return 0;
95099 }
95100
95101- seq_buf_path(&s->seq, path, "\n");
95102+ seq_buf_path(&s->seq, path, "\n\\");
95103
95104 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
95105 s->seq.len = save_len;
95106diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95107index c3e4fcf..ef6cc43 100644
95108--- a/kernel/trace/trace_stack.c
95109+++ b/kernel/trace/trace_stack.c
95110@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
95111 return;
95112
95113 /* we do not handle interrupt stacks yet */
95114- if (!object_is_on_stack(stack))
95115+ if (!object_starts_on_stack(stack))
95116 return;
95117
95118 local_irq_save(flags);
95119diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
95120index f97f6e3..d367b48 100644
95121--- a/kernel/trace/trace_syscalls.c
95122+++ b/kernel/trace/trace_syscalls.c
95123@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
95124 int num;
95125
95126 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95127+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95128+ return -EINVAL;
95129
95130 mutex_lock(&syscall_trace_lock);
95131 if (!sys_perf_refcount_enter)
95132@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
95133 int num;
95134
95135 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95136+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95137+ return;
95138
95139 mutex_lock(&syscall_trace_lock);
95140 sys_perf_refcount_enter--;
95141@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
95142 int num;
95143
95144 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95145+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95146+ return -EINVAL;
95147
95148 mutex_lock(&syscall_trace_lock);
95149 if (!sys_perf_refcount_exit)
95150@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
95151 int num;
95152
95153 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95154+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95155+ return;
95156
95157 mutex_lock(&syscall_trace_lock);
95158 sys_perf_refcount_exit--;
95159diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
95160index 4109f83..fe1f830 100644
95161--- a/kernel/user_namespace.c
95162+++ b/kernel/user_namespace.c
95163@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
95164 !kgid_has_mapping(parent_ns, group))
95165 return -EPERM;
95166
95167+#ifdef CONFIG_GRKERNSEC
95168+ /*
95169+ * This doesn't really inspire confidence:
95170+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
95171+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
95172+ * Increases kernel attack surface in areas developers
95173+ * previously cared little about ("low importance due
95174+ * to requiring 'root' capability")
95175+ * To be removed when this code receives *proper* review
95176+ */
95177+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
95178+ !capable(CAP_SETGID))
95179+ return -EPERM;
95180+#endif
95181+
95182 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
95183 if (!ns)
95184 return -ENOMEM;
95185@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
95186 if (atomic_read(&current->mm->mm_users) > 1)
95187 return -EINVAL;
95188
95189- if (current->fs->users != 1)
95190+ if (atomic_read(&current->fs->users) != 1)
95191 return -EINVAL;
95192
95193 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
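The GRKERNSEC block turns unprivileged user namespace creation off wholesale: without CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID the clone()/unshare() path now fails with -EPERM, closing the attack surface the two linked lkml threads describe. A small probe reports which policy the running kernel enforces:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER) == 0)
		puts("unprivileged user namespaces: allowed");
	else
		perror("unshare(CLONE_NEWUSER)"); /* EPERM under this policy */
	return 0;
}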
95194diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
95195index c8eac43..4b5f08f 100644
95196--- a/kernel/utsname_sysctl.c
95197+++ b/kernel/utsname_sysctl.c
95198@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
95199 static int proc_do_uts_string(struct ctl_table *table, int write,
95200 void __user *buffer, size_t *lenp, loff_t *ppos)
95201 {
95202- struct ctl_table uts_table;
95203+ ctl_table_no_const uts_table;
95204 int r;
95205 memcpy(&uts_table, table, sizeof(uts_table));
95206 uts_table.data = get_uts(table, write);
95207diff --git a/kernel/watchdog.c b/kernel/watchdog.c
95208index 3174bf8..3553520 100644
95209--- a/kernel/watchdog.c
95210+++ b/kernel/watchdog.c
95211@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
95212 static void watchdog_nmi_disable(unsigned int cpu) { return; }
95213 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
95214
95215-static struct smp_hotplug_thread watchdog_threads = {
95216+static struct smp_hotplug_thread watchdog_threads __read_only = {
95217 .store = &softlockup_watchdog,
95218 .thread_should_run = watchdog_should_run,
95219 .thread_fn = watchdog,
95220diff --git a/kernel/workqueue.c b/kernel/workqueue.c
95221index 41ff75b..5ad683a 100644
95222--- a/kernel/workqueue.c
95223+++ b/kernel/workqueue.c
95224@@ -4564,7 +4564,7 @@ static void rebind_workers(struct worker_pool *pool)
95225 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
95226 worker_flags |= WORKER_REBOUND;
95227 worker_flags &= ~WORKER_UNBOUND;
95228- ACCESS_ONCE(worker->flags) = worker_flags;
95229+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
95230 }
95231
95232 spin_unlock_irq(&pool->lock);
95233diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95234index c5cefb3..a4241e3 100644
95235--- a/lib/Kconfig.debug
95236+++ b/lib/Kconfig.debug
95237@@ -923,7 +923,7 @@ config DEBUG_MUTEXES
95238
95239 config DEBUG_WW_MUTEX_SLOWPATH
95240 bool "Wait/wound mutex debugging: Slowpath testing"
95241- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95242+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95243 select DEBUG_LOCK_ALLOC
95244 select DEBUG_SPINLOCK
95245 select DEBUG_MUTEXES
95246@@ -940,7 +940,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
95247
95248 config DEBUG_LOCK_ALLOC
95249 bool "Lock debugging: detect incorrect freeing of live locks"
95250- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95251+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95252 select DEBUG_SPINLOCK
95253 select DEBUG_MUTEXES
95254 select LOCKDEP
95255@@ -954,7 +954,7 @@ config DEBUG_LOCK_ALLOC
95256
95257 config PROVE_LOCKING
95258 bool "Lock debugging: prove locking correctness"
95259- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95260+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95261 select LOCKDEP
95262 select DEBUG_SPINLOCK
95263 select DEBUG_MUTEXES
95264@@ -1005,7 +1005,7 @@ config LOCKDEP
95265
95266 config LOCK_STAT
95267 bool "Lock usage statistics"
95268- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95269+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95270 select LOCKDEP
95271 select DEBUG_SPINLOCK
95272 select DEBUG_MUTEXES
95273@@ -1467,6 +1467,7 @@ config LATENCYTOP
95274 depends on DEBUG_KERNEL
95275 depends on STACKTRACE_SUPPORT
95276 depends on PROC_FS
95277+ depends on !GRKERNSEC_HIDESYM
95278 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
95279 select KALLSYMS
95280 select KALLSYMS_ALL
95281@@ -1483,7 +1484,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95282 config DEBUG_STRICT_USER_COPY_CHECKS
95283 bool "Strict user copy size checks"
95284 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95285- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
95286+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
95287 help
95288 Enabling this option turns a certain set of sanity checks for user
95289 copy operations into compile time failures.
95290@@ -1614,7 +1615,7 @@ endmenu # runtime tests
95291
95292 config PROVIDE_OHCI1394_DMA_INIT
95293 bool "Remote debugging over FireWire early on boot"
95294- depends on PCI && X86
95295+ depends on PCI && X86 && !GRKERNSEC
95296 help
95297 If you want to debug problems which hang or crash the kernel early
95298 on boot and the crashing machine has a FireWire port, you can use
95299diff --git a/lib/Makefile b/lib/Makefile
95300index 58f74d2..08e011f 100644
95301--- a/lib/Makefile
95302+++ b/lib/Makefile
95303@@ -59,7 +59,7 @@ obj-$(CONFIG_BTREE) += btree.o
95304 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
95305 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
95306 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
95307-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
95308+obj-y += list_debug.o
95309 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
95310
95311 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
95312diff --git a/lib/average.c b/lib/average.c
95313index 114d1be..ab0350c 100644
95314--- a/lib/average.c
95315+++ b/lib/average.c
95316@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
95317 {
95318 unsigned long internal = ACCESS_ONCE(avg->internal);
95319
95320- ACCESS_ONCE(avg->internal) = internal ?
95321+ ACCESS_ONCE_RW(avg->internal) = internal ?
95322 (((internal << avg->weight) - internal) +
95323 (val << avg->factor)) >> avg->weight :
95324 (val << avg->factor);
95325diff --git a/lib/bitmap.c b/lib/bitmap.c
95326index d456f4c1..29a0308 100644
95327--- a/lib/bitmap.c
95328+++ b/lib/bitmap.c
95329@@ -264,7 +264,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
95330 }
95331 EXPORT_SYMBOL(__bitmap_subset);
95332
95333-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
95334+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
95335 {
95336 unsigned int k, lim = bits/BITS_PER_LONG;
95337 int w = 0;
95338@@ -391,7 +391,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95339 {
95340 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95341 u32 chunk;
95342- const char __user __force *ubuf = (const char __user __force *)buf;
95343+ const char __user *ubuf = (const char __force_user *)buf;
95344
95345 bitmap_zero(maskp, nmaskbits);
95346
95347@@ -476,7 +476,7 @@ int bitmap_parse_user(const char __user *ubuf,
95348 {
95349 if (!access_ok(VERIFY_READ, ubuf, ulen))
95350 return -EFAULT;
95351- return __bitmap_parse((const char __force *)ubuf,
95352+ return __bitmap_parse((const char __force_kernel *)ubuf,
95353 ulen, 1, maskp, nmaskbits);
95354
95355 }
95356@@ -535,7 +535,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
95357 {
95358 unsigned a, b;
95359 int c, old_c, totaldigits;
95360- const char __user __force *ubuf = (const char __user __force *)buf;
95361+ const char __user *ubuf = (const char __force_user *)buf;
95362 int exp_digit, in_range;
95363
95364 totaldigits = c = 0;
95365@@ -630,7 +630,7 @@ int bitmap_parselist_user(const char __user *ubuf,
95366 {
95367 if (!access_ok(VERIFY_READ, ubuf, ulen))
95368 return -EFAULT;
95369- return __bitmap_parselist((const char __force *)ubuf,
95370+ return __bitmap_parselist((const char __force_kernel *)ubuf,
95371 ulen, 1, maskp, nmaskbits);
95372 }
95373 EXPORT_SYMBOL(bitmap_parselist_user);
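__force_user and __force_kernel are sparse address-space annotations: with PaX UDEREF/USERCOPY the patch keeps user and kernel pointers in distinct sparse address spaces, and these combined "force" casts document which direction a deliberate reinterpretation goes; here a kernel buffer is briefly relabelled __user so one parser can serve both callers. Their assumed definitions, which only have teeth under sparse (__CHECKER__):

/* Assumed shape; only sparse (__CHECKER__) checks these. */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __kernel __attribute__((address_space(0)))
# define __force  __attribute__((force))
#else
# define __user
# define __kernel
# define __force
#endif
#define __force_user   __force __user
#define __force_kernel __force __kernel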
95374diff --git a/lib/bug.c b/lib/bug.c
95375index 0c3bd95..5a615a1 100644
95376--- a/lib/bug.c
95377+++ b/lib/bug.c
95378@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95379 return BUG_TRAP_TYPE_NONE;
95380
95381 bug = find_bug(bugaddr);
95382+ if (!bug)
95383+ return BUG_TRAP_TYPE_NONE;
95384
95385 file = NULL;
95386 line = 0;
95387diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95388index 547f7f9..a6d4ba0 100644
95389--- a/lib/debugobjects.c
95390+++ b/lib/debugobjects.c
95391@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95392 if (limit > 4)
95393 return;
95394
95395- is_on_stack = object_is_on_stack(addr);
95396+ is_on_stack = object_starts_on_stack(addr);
95397 if (is_on_stack == onstack)
95398 return;
95399
95400diff --git a/lib/div64.c b/lib/div64.c
95401index 4382ad7..08aa558 100644
95402--- a/lib/div64.c
95403+++ b/lib/div64.c
95404@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
95405 EXPORT_SYMBOL(__div64_32);
95406
95407 #ifndef div_s64_rem
95408-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95409+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95410 {
95411 u64 quotient;
95412
95413@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
95414 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
95415 */
95416 #ifndef div64_u64
95417-u64 div64_u64(u64 dividend, u64 divisor)
95418+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
95419 {
95420 u32 high = divisor >> 32;
95421 u64 quot;
95422diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95423index 9722bd2..0d826f4 100644
95424--- a/lib/dma-debug.c
95425+++ b/lib/dma-debug.c
95426@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
95427
95428 void dma_debug_add_bus(struct bus_type *bus)
95429 {
95430- struct notifier_block *nb;
95431+ notifier_block_no_const *nb;
95432
95433 if (dma_debug_disabled())
95434 return;
95435@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
95436
95437 static void check_for_stack(struct device *dev, void *addr)
95438 {
95439- if (object_is_on_stack(addr))
95440+ if (object_starts_on_stack(addr))
95441 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
95442 "stack [addr=%p]\n", addr);
95443 }
95444diff --git a/lib/inflate.c b/lib/inflate.c
95445index 013a761..c28f3fc 100644
95446--- a/lib/inflate.c
95447+++ b/lib/inflate.c
95448@@ -269,7 +269,7 @@ static void free(void *where)
95449 malloc_ptr = free_mem_ptr;
95450 }
95451 #else
95452-#define malloc(a) kmalloc(a, GFP_KERNEL)
95453+#define malloc(a) kmalloc((a), GFP_KERNEL)
95454 #define free(a) kfree(a)
95455 #endif
95456
95457diff --git a/lib/ioremap.c b/lib/ioremap.c
95458index 0c9216c..863bd89 100644
95459--- a/lib/ioremap.c
95460+++ b/lib/ioremap.c
95461@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
95462 unsigned long next;
95463
95464 phys_addr -= addr;
95465- pmd = pmd_alloc(&init_mm, pud, addr);
95466+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
95467 if (!pmd)
95468 return -ENOMEM;
95469 do {
95470@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
95471 unsigned long next;
95472
95473 phys_addr -= addr;
95474- pud = pud_alloc(&init_mm, pgd, addr);
95475+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
95476 if (!pud)
95477 return -ENOMEM;
95478 do {
95479diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95480index bd2bea9..6b3c95e 100644
95481--- a/lib/is_single_threaded.c
95482+++ b/lib/is_single_threaded.c
95483@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95484 struct task_struct *p, *t;
95485 bool ret;
95486
95487+ if (!mm)
95488+ return true;
95489+
95490 if (atomic_read(&task->signal->live) != 1)
95491 return false;
95492
95493diff --git a/lib/kobject.c b/lib/kobject.c
95494index 03d4ab3..46f6374 100644
95495--- a/lib/kobject.c
95496+++ b/lib/kobject.c
95497@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
95498
95499
95500 static DEFINE_SPINLOCK(kobj_ns_type_lock);
95501-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
95502+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
95503
95504-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95505+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95506 {
95507 enum kobj_ns_type type = ops->type;
95508 int error;
95509diff --git a/lib/list_debug.c b/lib/list_debug.c
95510index c24c2f7..f0296f4 100644
95511--- a/lib/list_debug.c
95512+++ b/lib/list_debug.c
95513@@ -11,7 +11,9 @@
95514 #include <linux/bug.h>
95515 #include <linux/kernel.h>
95516 #include <linux/rculist.h>
95517+#include <linux/mm.h>
95518
95519+#ifdef CONFIG_DEBUG_LIST
95520 /*
95521 * Insert a new entry between two known consecutive entries.
95522 *
95523@@ -19,21 +21,40 @@
95524 * the prev/next entries already!
95525 */
95526
95527+static bool __list_add_debug(struct list_head *new,
95528+ struct list_head *prev,
95529+ struct list_head *next)
95530+{
95531+ if (unlikely(next->prev != prev)) {
95532+ printk(KERN_ERR "list_add corruption. next->prev should be "
95533+ "prev (%p), but was %p. (next=%p).\n",
95534+ prev, next->prev, next);
95535+ BUG();
95536+ return false;
95537+ }
95538+ if (unlikely(prev->next != next)) {
95539+ printk(KERN_ERR "list_add corruption. prev->next should be "
95540+ "next (%p), but was %p. (prev=%p).\n",
95541+ next, prev->next, prev);
95542+ BUG();
95543+ return false;
95544+ }
95545+ if (unlikely(new == prev || new == next)) {
95546+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
95547+ new, prev, next);
95548+ BUG();
95549+ return false;
95550+ }
95551+ return true;
95552+}
95553+
95554 void __list_add(struct list_head *new,
95555- struct list_head *prev,
95556- struct list_head *next)
95557+ struct list_head *prev,
95558+ struct list_head *next)
95559 {
95560- WARN(next->prev != prev,
95561- "list_add corruption. next->prev should be "
95562- "prev (%p), but was %p. (next=%p).\n",
95563- prev, next->prev, next);
95564- WARN(prev->next != next,
95565- "list_add corruption. prev->next should be "
95566- "next (%p), but was %p. (prev=%p).\n",
95567- next, prev->next, prev);
95568- WARN(new == prev || new == next,
95569- "list_add double add: new=%p, prev=%p, next=%p.\n",
95570- new, prev, next);
95571+ if (!__list_add_debug(new, prev, next))
95572+ return;
95573+
95574 next->prev = new;
95575 new->next = next;
95576 new->prev = prev;
95577@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
95578 }
95579 EXPORT_SYMBOL(__list_add);
95580
95581-void __list_del_entry(struct list_head *entry)
95582+static bool __list_del_entry_debug(struct list_head *entry)
95583 {
95584 struct list_head *prev, *next;
95585
95586 prev = entry->prev;
95587 next = entry->next;
95588
95589- if (WARN(next == LIST_POISON1,
95590- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95591- entry, LIST_POISON1) ||
95592- WARN(prev == LIST_POISON2,
95593- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95594- entry, LIST_POISON2) ||
95595- WARN(prev->next != entry,
95596- "list_del corruption. prev->next should be %p, "
95597- "but was %p\n", entry, prev->next) ||
95598- WARN(next->prev != entry,
95599- "list_del corruption. next->prev should be %p, "
95600- "but was %p\n", entry, next->prev))
95601+ if (unlikely(next == LIST_POISON1)) {
95602+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95603+ entry, LIST_POISON1);
95604+ BUG();
95605+ return false;
95606+ }
95607+ if (unlikely(prev == LIST_POISON2)) {
95608+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95609+ entry, LIST_POISON2);
95610+ BUG();
95611+ return false;
95612+ }
95613+ if (unlikely(entry->prev->next != entry)) {
95614+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
95615+ "but was %p\n", entry, prev->next);
95616+ BUG();
95617+ return false;
95618+ }
95619+ if (unlikely(entry->next->prev != entry)) {
95620+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
95621+ "but was %p\n", entry, next->prev);
95622+ BUG();
95623+ return false;
95624+ }
95625+ return true;
95626+}
95627+
95628+void __list_del_entry(struct list_head *entry)
95629+{
95630+ if (!__list_del_entry_debug(entry))
95631 return;
95632
95633- __list_del(prev, next);
95634+ __list_del(entry->prev, entry->next);
95635 }
95636 EXPORT_SYMBOL(__list_del_entry);
95637
95638@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
95639 void __list_add_rcu(struct list_head *new,
95640 struct list_head *prev, struct list_head *next)
95641 {
95642- WARN(next->prev != prev,
95643- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
95644- prev, next->prev, next);
95645- WARN(prev->next != next,
95646- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
95647- next, prev->next, prev);
95648+ if (!__list_add_debug(new, prev, next))
95649+ return;
95650+
95651 new->next = next;
95652 new->prev = prev;
95653 rcu_assign_pointer(list_next_rcu(prev), new);
95654 next->prev = new;
95655 }
95656 EXPORT_SYMBOL(__list_add_rcu);
95657+#endif
95658+
95659+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
95660+{
95661+#ifdef CONFIG_DEBUG_LIST
95662+ if (!__list_add_debug(new, prev, next))
95663+ return;
95664+#endif
95665+
95666+ pax_open_kernel();
95667+ next->prev = new;
95668+ new->next = next;
95669+ new->prev = prev;
95670+ prev->next = new;
95671+ pax_close_kernel();
95672+}
95673+EXPORT_SYMBOL(__pax_list_add);
95674+
95675+void pax_list_del(struct list_head *entry)
95676+{
95677+#ifdef CONFIG_DEBUG_LIST
95678+ if (!__list_del_entry_debug(entry))
95679+ return;
95680+#endif
95681+
95682+ pax_open_kernel();
95683+ __list_del(entry->prev, entry->next);
95684+ entry->next = LIST_POISON1;
95685+ entry->prev = LIST_POISON2;
95686+ pax_close_kernel();
95687+}
95688+EXPORT_SYMBOL(pax_list_del);
95689+
95690+void pax_list_del_init(struct list_head *entry)
95691+{
95692+ pax_open_kernel();
95693+ __list_del(entry->prev, entry->next);
95694+ INIT_LIST_HEAD(entry);
95695+ pax_close_kernel();
95696+}
95697+EXPORT_SYMBOL(pax_list_del_init);
95698+
95699+void __pax_list_add_rcu(struct list_head *new,
95700+ struct list_head *prev, struct list_head *next)
95701+{
95702+#ifdef CONFIG_DEBUG_LIST
95703+ if (!__list_add_debug(new, prev, next))
95704+ return;
95705+#endif
95706+
95707+ pax_open_kernel();
95708+ new->next = next;
95709+ new->prev = prev;
95710+ rcu_assign_pointer(list_next_rcu(prev), new);
95711+ next->prev = new;
95712+ pax_close_kernel();
95713+}
95714+EXPORT_SYMBOL(__pax_list_add_rcu);
95715+
95716+void pax_list_del_rcu(struct list_head *entry)
95717+{
95718+#ifdef CONFIG_DEBUG_LIST
95719+ if (!__list_del_entry_debug(entry))
95720+ return;
95721+#endif
95722+
95723+ pax_open_kernel();
95724+ __list_del(entry->prev, entry->next);
95725+ entry->next = LIST_POISON1;
95726+ entry->prev = LIST_POISON2;
95727+ pax_close_kernel();
95728+}
95729+EXPORT_SYMBOL(pax_list_del_rcu);
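The rewritten list_debug.c does two things: the DEBUG_LIST sanity checks change from WARN-and-continue to BUG-and-stop, since a corrupted list is a classic exploitation primitive, and the new pax_list_* siblings perform the same checked operations inside a pax_open_kernel() window so lists kept in read-only data can still be maintained. A hypothetical consumer sketch (kernel-style, not standalone; the __read_only placement is the point):

static struct list_head hardened_handlers __read_only =
	LIST_HEAD_INIT(hardened_handlers);
static struct list_head my_node __read_only;

static void register_handler(void)
{
	/* list_add() equivalent through the checked write-window variant */
	__pax_list_add(&my_node, &hardened_handlers, hardened_handlers.next);
}

static void unregister_handler(void)
{
	pax_list_del(&my_node); /* poisons next/prev like list_del() */
}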
95730diff --git a/lib/lockref.c b/lib/lockref.c
95731index ecb9a66..a044fc5 100644
95732--- a/lib/lockref.c
95733+++ b/lib/lockref.c
95734@@ -48,13 +48,13 @@
95735 void lockref_get(struct lockref *lockref)
95736 {
95737 CMPXCHG_LOOP(
95738- new.count++;
95739+ __lockref_inc(&new);
95740 ,
95741 return;
95742 );
95743
95744 spin_lock(&lockref->lock);
95745- lockref->count++;
95746+ __lockref_inc(lockref);
95747 spin_unlock(&lockref->lock);
95748 }
95749 EXPORT_SYMBOL(lockref_get);
95750@@ -69,8 +69,8 @@ int lockref_get_not_zero(struct lockref *lockref)
95751 int retval;
95752
95753 CMPXCHG_LOOP(
95754- new.count++;
95755- if (old.count <= 0)
95756+ __lockref_inc(&new);
95757+ if (__lockref_read(&old) <= 0)
95758 return 0;
95759 ,
95760 return 1;
95761@@ -78,8 +78,8 @@ int lockref_get_not_zero(struct lockref *lockref)
95762
95763 spin_lock(&lockref->lock);
95764 retval = 0;
95765- if (lockref->count > 0) {
95766- lockref->count++;
95767+ if (__lockref_read(lockref) > 0) {
95768+ __lockref_inc(lockref);
95769 retval = 1;
95770 }
95771 spin_unlock(&lockref->lock);
95772@@ -96,17 +96,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95773 int lockref_get_or_lock(struct lockref *lockref)
95774 {
95775 CMPXCHG_LOOP(
95776- new.count++;
95777- if (old.count <= 0)
95778+ __lockref_inc(&new);
95779+ if (__lockref_read(&old) <= 0)
95780 break;
95781 ,
95782 return 1;
95783 );
95784
95785 spin_lock(&lockref->lock);
95786- if (lockref->count <= 0)
95787+ if (__lockref_read(lockref) <= 0)
95788 return 0;
95789- lockref->count++;
95790+ __lockref_inc(lockref);
95791 spin_unlock(&lockref->lock);
95792 return 1;
95793 }
95794@@ -122,11 +122,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95795 int lockref_put_return(struct lockref *lockref)
95796 {
95797 CMPXCHG_LOOP(
95798- new.count--;
95799- if (old.count <= 0)
95800+ __lockref_dec(&new);
95801+ if (__lockref_read(&old) <= 0)
95802 return -1;
95803 ,
95804- return new.count;
95805+ return __lockref_read(&new);
95806 );
95807 return -1;
95808 }
95809@@ -140,17 +140,17 @@ EXPORT_SYMBOL(lockref_put_return);
95810 int lockref_put_or_lock(struct lockref *lockref)
95811 {
95812 CMPXCHG_LOOP(
95813- new.count--;
95814- if (old.count <= 1)
95815+ __lockref_dec(&new);
95816+ if (__lockref_read(&old) <= 1)
95817 break;
95818 ,
95819 return 1;
95820 );
95821
95822 spin_lock(&lockref->lock);
95823- if (lockref->count <= 1)
95824+ if (__lockref_read(lockref) <= 1)
95825 return 0;
95826- lockref->count--;
95827+ __lockref_dec(lockref);
95828 spin_unlock(&lockref->lock);
95829 return 1;
95830 }
95831@@ -163,7 +163,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
95832 void lockref_mark_dead(struct lockref *lockref)
95833 {
95834 assert_spin_locked(&lockref->lock);
95835- lockref->count = -128;
95836+ __lockref_set(lockref, -128);
95837 }
95838 EXPORT_SYMBOL(lockref_mark_dead);
95839
95840@@ -177,8 +177,8 @@ int lockref_get_not_dead(struct lockref *lockref)
95841 int retval;
95842
95843 CMPXCHG_LOOP(
95844- new.count++;
95845- if (old.count < 0)
95846+ __lockref_inc(&new);
95847+ if (__lockref_read(&old) < 0)
95848 return 0;
95849 ,
95850 return 1;
95851@@ -186,8 +186,8 @@ int lockref_get_not_dead(struct lockref *lockref)
95852
95853 spin_lock(&lockref->lock);
95854 retval = 0;
95855- if (lockref->count >= 0) {
95856- lockref->count++;
95857+ if (__lockref_read(lockref) >= 0) {
95858+ __lockref_inc(lockref);
95859 retval = 1;
95860 }
95861 spin_unlock(&lockref->lock);
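
The lockref conversion routes every access to the embedded count through __lockref_read()/__lockref_inc()/__lockref_dec()/__lockref_set(), so PaX's REFCOUNT feature can substitute an overflow-checked atomic type without touching the call sites. The surrounding CMPXCHG_LOOP attempts the update locklessly and retries on contention; a minimal user-space sketch of that retry pattern with C11 atomics (fast path only, no spinlock fallback):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long count = 0;

static int get_not_zero(void)
{
        long old = atomic_load(&count);

        while (old > 0) {
                /* on success count == old + 1; on failure old is reloaded */
                if (atomic_compare_exchange_weak(&count, &old, old + 1))
                        return 1;
        }
        return 0;       /* count was zero (or negative, i.e. dead) */
}

int main(void)
{
        printf("%d\n", get_not_zero());  /* 0: count started at 0 */
        atomic_store(&count, 1);
        printf("%d\n", get_not_zero());  /* 1: count is now 2 */
        return 0;
}
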
95862diff --git a/lib/nlattr.c b/lib/nlattr.c
95863index f5907d2..36072be 100644
95864--- a/lib/nlattr.c
95865+++ b/lib/nlattr.c
95866@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
95867 {
95868 int minlen = min_t(int, count, nla_len(src));
95869
95870+ BUG_ON(minlen < 0);
95871+
95872 memcpy(dest, nla_data(src), minlen);
95873 if (count > minlen)
95874 memset(dest + minlen, 0, count - minlen);
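
nla_len() returns an int, so a malformed attribute whose declared length is smaller than its header can drive minlen negative; memcpy() takes a size_t, and the implicit conversion would turn that negative value into an enormous copy length. The added BUG_ON() converts the latent wild copy into an immediate, diagnosable oops. A two-line illustration of the conversion hazard:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        int minlen = -1;
        /* prints 18446744073709551615 on LP64: a "copy everything" length */
        printf("%zu\n", (size_t)minlen);
        return 0;
}
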
95875diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95876index 6111bcb..02e816b 100644
95877--- a/lib/percpu-refcount.c
95878+++ b/lib/percpu-refcount.c
95879@@ -31,7 +31,7 @@
95880 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95881 */
95882
95883-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95884+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95885
95886 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95887
95888diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95889index 3d2aa27..a472f20 100644
95890--- a/lib/radix-tree.c
95891+++ b/lib/radix-tree.c
95892@@ -67,7 +67,7 @@ struct radix_tree_preload {
95893 int nr;
95894 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95895 };
95896-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95897+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95898
95899 static inline void *ptr_to_indirect(void *ptr)
95900 {
95901diff --git a/lib/random32.c b/lib/random32.c
95902index 0bee183..526f12f 100644
95903--- a/lib/random32.c
95904+++ b/lib/random32.c
95905@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95906 }
95907 #endif
95908
95909-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95910+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95911
95912 /**
95913 * prandom_u32_state - seeded pseudo-random number generator.
95914diff --git a/lib/rbtree.c b/lib/rbtree.c
95915index c16c81a..4dcbda1 100644
95916--- a/lib/rbtree.c
95917+++ b/lib/rbtree.c
95918@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95919 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95920
95921 static const struct rb_augment_callbacks dummy_callbacks = {
95922- dummy_propagate, dummy_copy, dummy_rotate
95923+ .propagate = dummy_propagate,
95924+ .copy = dummy_copy,
95925+ .rotate = dummy_rotate
95926 };
95927
95928 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95929diff --git a/lib/show_mem.c b/lib/show_mem.c
95930index adc98e18..0ce83c2 100644
95931--- a/lib/show_mem.c
95932+++ b/lib/show_mem.c
95933@@ -49,6 +49,6 @@ void show_mem(unsigned int filter)
95934 quicklist_total_size());
95935 #endif
95936 #ifdef CONFIG_MEMORY_FAILURE
95937- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95938+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95939 #endif
95940 }
95941diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95942index e0af6ff..fcc9f15 100644
95943--- a/lib/strncpy_from_user.c
95944+++ b/lib/strncpy_from_user.c
95945@@ -22,7 +22,7 @@
95946 */
95947 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95948 {
95949- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95950+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95951 long res = 0;
95952
95953 /*
95954diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95955index a28df52..3d55877 100644
95956--- a/lib/strnlen_user.c
95957+++ b/lib/strnlen_user.c
95958@@ -26,7 +26,7 @@
95959 */
95960 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95961 {
95962- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95963+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95964 long align, res = 0;
95965 unsigned long c;
95966
95967diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95968index 4abda07..b9d3765 100644
95969--- a/lib/swiotlb.c
95970+++ b/lib/swiotlb.c
95971@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95972
95973 void
95974 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95975- dma_addr_t dev_addr)
95976+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95977 {
95978 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95979
95980diff --git a/lib/usercopy.c b/lib/usercopy.c
95981index 4f5b1dd..7cab418 100644
95982--- a/lib/usercopy.c
95983+++ b/lib/usercopy.c
95984@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95985 WARN(1, "Buffer overflow detected!\n");
95986 }
95987 EXPORT_SYMBOL(copy_from_user_overflow);
95988+
95989+void copy_to_user_overflow(void)
95990+{
95991+ WARN(1, "Buffer overflow detected!\n");
95992+}
95993+EXPORT_SYMBOL(copy_to_user_overflow);
95994diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95995index b235c96..343ffc1 100644
95996--- a/lib/vsprintf.c
95997+++ b/lib/vsprintf.c
95998@@ -16,6 +16,9 @@
95999 * - scnprintf and vscnprintf
96000 */
96001
96002+#ifdef CONFIG_GRKERNSEC_HIDESYM
96003+#define __INCLUDED_BY_HIDESYM 1
96004+#endif
96005 #include <stdarg.h>
96006 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
96007 #include <linux/types.h>
96008@@ -626,7 +629,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
96009 #ifdef CONFIG_KALLSYMS
96010 if (*fmt == 'B')
96011 sprint_backtrace(sym, value);
96012- else if (*fmt != 'f' && *fmt != 's')
96013+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
96014 sprint_symbol(sym, value);
96015 else
96016 sprint_symbol_no_offset(sym, value);
96017@@ -1322,7 +1325,11 @@ char *address_val(char *buf, char *end, const void *addr,
96018 return number(buf, end, num, spec);
96019 }
96020
96021+#ifdef CONFIG_GRKERNSEC_HIDESYM
96022+int kptr_restrict __read_mostly = 2;
96023+#else
96024 int kptr_restrict __read_mostly;
96025+#endif
96026
96027 /*
96028 * Show a '%p' thing. A kernel extension is that the '%p' is followed
96029@@ -1333,8 +1340,10 @@ int kptr_restrict __read_mostly;
96030 *
96031 * - 'F' For symbolic function descriptor pointers with offset
96032 * - 'f' For simple symbolic function names without offset
96033+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
96034 * - 'S' For symbolic direct pointers with offset
96035 * - 's' For symbolic direct pointers without offset
96036+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
96037 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
96038 * - 'B' For backtraced symbolic direct pointers with offset
96039 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
96040@@ -1417,12 +1426,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96041
96042 if (!ptr && *fmt != 'K') {
96043 /*
96044- * Print (null) with the same width as a pointer so it makes
96045+ * Print (nil) with the same width as a pointer so it makes
96046 * tabular output look nice.
96047 */
96048 if (spec.field_width == -1)
96049 spec.field_width = default_width;
96050- return string(buf, end, "(null)", spec);
96051+ return string(buf, end, "(nil)", spec);
96052 }
96053
96054 switch (*fmt) {
96055@@ -1432,6 +1441,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96056 /* Fallthrough */
96057 case 'S':
96058 case 's':
96059+#ifdef CONFIG_GRKERNSEC_HIDESYM
96060+ break;
96061+#else
96062+ return symbol_string(buf, end, ptr, spec, fmt);
96063+#endif
96064+ case 'X':
96065+ ptr = dereference_function_descriptor(ptr);
96066+ case 'A':
96067 case 'B':
96068 return symbol_string(buf, end, ptr, spec, fmt);
96069 case 'R':
96070@@ -1496,6 +1513,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96071 va_end(va);
96072 return buf;
96073 }
96074+ case 'P':
96075+ break;
96076 case 'K':
96077 /*
96078 * %pK cannot be used in IRQ context because its test
96079@@ -1553,6 +1572,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96080 ((const struct file *)ptr)->f_path.dentry,
96081 spec, fmt);
96082 }
96083+
96084+#ifdef CONFIG_GRKERNSEC_HIDESYM
96085+	/* 'P' = pointers approved for copying to userland,
96086+	   as in the /proc/kallsyms case: we make it display nothing
96087+	   for non-root users, and the real contents for root users.
96088+	   'X' = approved simple symbols.
96089+	   'K' pointers are likewise ignored, since we already force
96090+	   their NULLing for non-root users above.
96091+	*/
96092+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
96093+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
96094+ dump_stack();
96095+ ptr = NULL;
96096+ }
96097+#endif
96098+
96099 spec.flags |= SMALL;
96100 if (spec.field_width == -1) {
96101 spec.field_width = default_width;
96102@@ -2254,11 +2289,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96103 typeof(type) value; \
96104 if (sizeof(type) == 8) { \
96105 args = PTR_ALIGN(args, sizeof(u32)); \
96106- *(u32 *)&value = *(u32 *)args; \
96107- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
96108+ *(u32 *)&value = *(const u32 *)args; \
96109+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
96110 } else { \
96111 args = PTR_ALIGN(args, sizeof(type)); \
96112- value = *(typeof(type) *)args; \
96113+ value = *(const typeof(type) *)args; \
96114 } \
96115 args += sizeof(type); \
96116 value; \
96117@@ -2321,7 +2356,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96118 case FORMAT_TYPE_STR: {
96119 const char *str_arg = args;
96120 args += strlen(str_arg) + 1;
96121- str = string(str, end, (char *)str_arg, spec);
96122+ str = string(str, end, str_arg, spec);
96123 break;
96124 }
96125
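
Taken together, the vsprintf changes default kptr_restrict to 2 under GRKERNSEC_HIDESYM, print "(nil)" for NULL pointers, introduce 'X' and 'A' as HIDESYM-approved siblings of 'f' and 'S', and finally NULL out any %p argument that looks like a kernel pointer headed for a userland-visible buffer ('K' is exempt because it is already censored for non-root). A toy sketch of the whitelisting idea; 'whitelisted' and 'emit_pointer' are hypothetical names, the real logic lives in pointer() above:

#include <stdio.h>

static int whitelisted(char f)
{
        return f == 'P' || f == 'X' || f == 'A';
}

static void emit_pointer(char fmt, const void *ptr)
{
        if (whitelisted(fmt))
                printf("%%p%c -> %p\n", fmt, ptr);
        else
                printf("%%p%c -> (censored)\n", fmt);   /* models ptr = NULL */
}

int main(void)
{
        int x;

        emit_pointer('S', &x);  /* ordinary symbol pointer: censored */
        emit_pointer('A', &x);  /* HIDESYM-approved: shown */
        return 0;
}
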
96126diff --git a/localversion-grsec b/localversion-grsec
96127new file mode 100644
96128index 0000000..7cd6065
96129--- /dev/null
96130+++ b/localversion-grsec
96131@@ -0,0 +1 @@
96132+-grsec
96133diff --git a/mm/Kconfig b/mm/Kconfig
96134index a03131b..1b1bafb 100644
96135--- a/mm/Kconfig
96136+++ b/mm/Kconfig
96137@@ -342,10 +342,11 @@ config KSM
96138 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
96139
96140 config DEFAULT_MMAP_MIN_ADDR
96141- int "Low address space to protect from user allocation"
96142+ int "Low address space to protect from user allocation"
96143 depends on MMU
96144- default 4096
96145- help
96146+ default 32768 if ALPHA || ARM || PARISC || SPARC32
96147+ default 65536
96148+ help
96149 This is the portion of low virtual memory which should be protected
96150 from userspace allocation. Keeping a user from writing to low pages
96151 can help reduce the impact of kernel NULL pointer bugs.
96152@@ -376,7 +377,7 @@ config MEMORY_FAILURE
96153
96154 config HWPOISON_INJECT
96155 tristate "HWPoison pages injector"
96156- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
96157+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
96158 select PROC_PAGE_MONITOR
96159
96160 config NOMMU_INITIAL_TRIM_EXCESS
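
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 (32768 on ALPHA/ARM/PARISC/SPARC32) widens the unmappable region at the bottom of the address space, so a kernel NULL-pointer dereference lands on an unmapped page rather than on attacker-staged data. A small program showing the knob's effect; expect EPERM or EACCES on any sane configuration:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (p == MAP_FAILED)
                printf("mmap at 0 failed: %s\n", strerror(errno));
        else
                printf("mapped page zero at %p (mmap_min_addr is 0?)\n", p);
        return 0;
}
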
96161diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
96162index 957d3da..1d34e20 100644
96163--- a/mm/Kconfig.debug
96164+++ b/mm/Kconfig.debug
96165@@ -10,6 +10,7 @@ config PAGE_EXTENSION
96166 config DEBUG_PAGEALLOC
96167 bool "Debug page memory allocations"
96168 depends on DEBUG_KERNEL
96169+ depends on !PAX_MEMORY_SANITIZE
96170 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
96171 depends on !KMEMCHECK
96172 select PAGE_EXTENSION
96173diff --git a/mm/backing-dev.c b/mm/backing-dev.c
96174index 6dc4580..e031ec1 100644
96175--- a/mm/backing-dev.c
96176+++ b/mm/backing-dev.c
96177@@ -12,7 +12,7 @@
96178 #include <linux/device.h>
96179 #include <trace/events/writeback.h>
96180
96181-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
96182+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
96183
96184 struct backing_dev_info noop_backing_dev_info = {
96185 .name = "noop",
96186@@ -474,7 +474,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
96187 return err;
96188
96189 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
96190- atomic_long_inc_return(&bdi_seq));
96191+ atomic_long_inc_return_unchecked(&bdi_seq));
96192 if (err) {
96193 bdi_destroy(bdi);
96194 return err;
96195diff --git a/mm/filemap.c b/mm/filemap.c
96196index ad72420..0a20ef2 100644
96197--- a/mm/filemap.c
96198+++ b/mm/filemap.c
96199@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
96200 struct address_space *mapping = file->f_mapping;
96201
96202 if (!mapping->a_ops->readpage)
96203- return -ENOEXEC;
96204+ return -ENODEV;
96205 file_accessed(file);
96206 vma->vm_ops = &generic_file_vm_ops;
96207 return 0;
96208@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
96209 *pos = i_size_read(inode);
96210
96211 if (limit != RLIM_INFINITY) {
96212+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
96213 if (*pos >= limit) {
96214 send_sig(SIGXFSZ, current, 0);
96215 return -EFBIG;
96216diff --git a/mm/gup.c b/mm/gup.c
96217index a6e24e2..72dd2cf 100644
96218--- a/mm/gup.c
96219+++ b/mm/gup.c
96220@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
96221 unsigned int fault_flags = 0;
96222 int ret;
96223
96224- /* For mlock, just skip the stack guard page. */
96225- if ((*flags & FOLL_MLOCK) &&
96226- (stack_guard_page_start(vma, address) ||
96227- stack_guard_page_end(vma, address + PAGE_SIZE)))
96228- return -ENOENT;
96229 if (*flags & FOLL_WRITE)
96230 fault_flags |= FAULT_FLAG_WRITE;
96231 if (nonblocking)
96232@@ -435,14 +430,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96233 if (!(gup_flags & FOLL_FORCE))
96234 gup_flags |= FOLL_NUMA;
96235
96236- do {
96237+ while (nr_pages) {
96238 struct page *page;
96239 unsigned int foll_flags = gup_flags;
96240 unsigned int page_increm;
96241
96242 /* first iteration or cross vma bound */
96243 if (!vma || start >= vma->vm_end) {
96244- vma = find_extend_vma(mm, start);
96245+ vma = find_vma(mm, start);
96246 if (!vma && in_gate_area(mm, start)) {
96247 int ret;
96248 ret = get_gate_page(mm, start & PAGE_MASK,
96249@@ -454,7 +449,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96250 goto next_page;
96251 }
96252
96253- if (!vma || check_vma_flags(vma, gup_flags))
96254+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
96255 return i ? : -EFAULT;
96256 if (is_vm_hugetlb_page(vma)) {
96257 i = follow_hugetlb_page(mm, vma, pages, vmas,
96258@@ -509,7 +504,7 @@ next_page:
96259 i += page_increm;
96260 start += page_increm * PAGE_SIZE;
96261 nr_pages -= page_increm;
96262- } while (nr_pages);
96263+ }
96264 return i;
96265 }
96266 EXPORT_SYMBOL(__get_user_pages);
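
Replacing find_extend_vma() with plain find_vma() plus an explicit start < vma->vm_start test means get_user_pages() no longer grows a stack VMA implicitly, which in turn makes the removed FOLL_MLOCK guard-page special case unnecessary: an address in the hole below a VMA now simply faults. A compact model of the lookup semantics, with a hypothetical struct for illustration only:

#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

/* v models find_vma(mm, start): v->vm_end > start is guaranteed */
static const struct vma *lookup(const struct vma *v, unsigned long start)
{
        if (!v || start < v->vm_start)
                return NULL;    /* hole below the VMA: fault, don't extend */
        return v;
}

int main(void)
{
        struct vma stack = { 0x7f0000000000UL, 0x7f0000021000UL };

        printf("%s\n", lookup(&stack, 0x7effffffff00UL) ? "ok" : "fault");
        printf("%s\n", lookup(&stack, 0x7f0000000100UL) ? "ok" : "fault");
        return 0;
}
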
96267diff --git a/mm/highmem.c b/mm/highmem.c
96268index 123bcd3..0de52ba 100644
96269--- a/mm/highmem.c
96270+++ b/mm/highmem.c
96271@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
96272 * So no dangers, even with speculative execution.
96273 */
96274 page = pte_page(pkmap_page_table[i]);
96275+ pax_open_kernel();
96276 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
96277-
96278+ pax_close_kernel();
96279 set_page_address(page, NULL);
96280 need_flush = 1;
96281 }
96282@@ -259,9 +260,11 @@ start:
96283 }
96284 }
96285 vaddr = PKMAP_ADDR(last_pkmap_nr);
96286+
96287+ pax_open_kernel();
96288 set_pte_at(&init_mm, vaddr,
96289 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
96290-
96291+ pax_close_kernel();
96292 pkmap_count[last_pkmap_nr] = 1;
96293 set_page_address(page, (void *)vaddr);
96294
96295diff --git a/mm/hugetlb.c b/mm/hugetlb.c
96296index c41b2a0..100cf92 100644
96297--- a/mm/hugetlb.c
96298+++ b/mm/hugetlb.c
96299@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96300 struct ctl_table *table, int write,
96301 void __user *buffer, size_t *length, loff_t *ppos)
96302 {
96303+ ctl_table_no_const t;
96304 struct hstate *h = &default_hstate;
96305 unsigned long tmp = h->max_huge_pages;
96306 int ret;
96307@@ -2267,9 +2268,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96308 if (!hugepages_supported())
96309 return -ENOTSUPP;
96310
96311- table->data = &tmp;
96312- table->maxlen = sizeof(unsigned long);
96313- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96314+ t = *table;
96315+ t.data = &tmp;
96316+ t.maxlen = sizeof(unsigned long);
96317+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
96318 if (ret)
96319 goto out;
96320
96321@@ -2304,6 +2306,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96322 struct hstate *h = &default_hstate;
96323 unsigned long tmp;
96324 int ret;
96325+ ctl_table_no_const hugetlb_table;
96326
96327 if (!hugepages_supported())
96328 return -ENOTSUPP;
96329@@ -2313,9 +2316,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96330 if (write && hstate_is_gigantic(h))
96331 return -EINVAL;
96332
96333- table->data = &tmp;
96334- table->maxlen = sizeof(unsigned long);
96335- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96336+ hugetlb_table = *table;
96337+ hugetlb_table.data = &tmp;
96338+ hugetlb_table.maxlen = sizeof(unsigned long);
96339+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
96340 if (ret)
96341 goto out;
96342
96343@@ -2800,6 +2804,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
96344 i_mmap_unlock_write(mapping);
96345 }
96346
96347+#ifdef CONFIG_PAX_SEGMEXEC
96348+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
96349+{
96350+ struct mm_struct *mm = vma->vm_mm;
96351+ struct vm_area_struct *vma_m;
96352+ unsigned long address_m;
96353+ pte_t *ptep_m;
96354+
96355+ vma_m = pax_find_mirror_vma(vma);
96356+ if (!vma_m)
96357+ return;
96358+
96359+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96360+ address_m = address + SEGMEXEC_TASK_SIZE;
96361+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
96362+ get_page(page_m);
96363+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
96364+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
96365+}
96366+#endif
96367+
96368 /*
96369 * Hugetlb_cow() should be called with page lock of the original hugepage held.
96370 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
96371@@ -2912,6 +2937,11 @@ retry_avoidcopy:
96372 make_huge_pte(vma, new_page, 1));
96373 page_remove_rmap(old_page);
96374 hugepage_add_new_anon_rmap(new_page, vma, address);
96375+
96376+#ifdef CONFIG_PAX_SEGMEXEC
96377+ pax_mirror_huge_pte(vma, address, new_page);
96378+#endif
96379+
96380 /* Make the old page be freed below */
96381 new_page = old_page;
96382 }
96383@@ -3072,6 +3102,10 @@ retry:
96384 && (vma->vm_flags & VM_SHARED)));
96385 set_huge_pte_at(mm, address, ptep, new_pte);
96386
96387+#ifdef CONFIG_PAX_SEGMEXEC
96388+ pax_mirror_huge_pte(vma, address, page);
96389+#endif
96390+
96391 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
96392 /* Optimization, do the COW without a second fault */
96393 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
96394@@ -3139,6 +3173,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96395 struct address_space *mapping;
96396 int need_wait_lock = 0;
96397
96398+#ifdef CONFIG_PAX_SEGMEXEC
96399+ struct vm_area_struct *vma_m;
96400+#endif
96401+
96402 address &= huge_page_mask(h);
96403
96404 ptep = huge_pte_offset(mm, address);
96405@@ -3152,6 +3190,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96406 VM_FAULT_SET_HINDEX(hstate_index(h));
96407 }
96408
96409+#ifdef CONFIG_PAX_SEGMEXEC
96410+ vma_m = pax_find_mirror_vma(vma);
96411+ if (vma_m) {
96412+ unsigned long address_m;
96413+
96414+ if (vma->vm_start > vma_m->vm_start) {
96415+ address_m = address;
96416+ address -= SEGMEXEC_TASK_SIZE;
96417+ vma = vma_m;
96418+ h = hstate_vma(vma);
96419+ } else
96420+ address_m = address + SEGMEXEC_TASK_SIZE;
96421+
96422+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
96423+ return VM_FAULT_OOM;
96424+ address_m &= HPAGE_MASK;
96425+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
96426+ }
96427+#endif
96428+
96429 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
96430 if (!ptep)
96431 return VM_FAULT_OOM;
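
Both hugetlb sysctl handlers previously wrote through the shared ctl_table (table->data = &tmp), which forbids making the registered tables const. The patch instead copies the entry into a stack-local ctl_table_no_const and mutates only the copy. The shape of that pattern, with a hypothetical stand-in struct rather than the real struct ctl_table:

#include <stdio.h>
#include <stddef.h>

struct ctl_entry {
        const char *name;
        void *data;
        size_t maxlen;
};

static const struct ctl_entry nr_hugepages = {
        .name = "nr_hugepages", .data = NULL, .maxlen = 0,
};

static void handler(const struct ctl_entry *table)
{
        unsigned long tmp = 0;
        struct ctl_entry t = *table;    /* mutate the copy, not the table */

        t.data = &tmp;
        t.maxlen = sizeof(tmp);
        printf("%s: maxlen=%zu\n", t.name, t.maxlen);
}

int main(void)
{
        handler(&nr_hugepages);         /* the registered entry stays const */
        return 0;
}
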
96432diff --git a/mm/internal.h b/mm/internal.h
96433index a96da5b..42ebd54 100644
96434--- a/mm/internal.h
96435+++ b/mm/internal.h
96436@@ -156,6 +156,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
96437
96438 extern int __isolate_free_page(struct page *page, unsigned int order);
96439 extern void __free_pages_bootmem(struct page *page, unsigned int order);
96440+extern void free_compound_page(struct page *page);
96441 extern void prep_compound_page(struct page *page, unsigned long order);
96442 #ifdef CONFIG_MEMORY_FAILURE
96443 extern bool is_free_buddy_page(struct page *page);
96444@@ -411,7 +412,7 @@ extern u32 hwpoison_filter_enable;
96445
96446 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
96447 unsigned long, unsigned long,
96448- unsigned long, unsigned long);
96449+ unsigned long, unsigned long) __intentional_overflow(-1);
96450
96451 extern void set_pageblock_order(void);
96452 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
96453diff --git a/mm/kmemleak.c b/mm/kmemleak.c
96454index 5405aff..483406d 100644
96455--- a/mm/kmemleak.c
96456+++ b/mm/kmemleak.c
96457@@ -365,7 +365,7 @@ static void print_unreferenced(struct seq_file *seq,
96458
96459 for (i = 0; i < object->trace_len; i++) {
96460 void *ptr = (void *)object->trace[i];
96461- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
96462+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
96463 }
96464 }
96465
96466@@ -1911,7 +1911,7 @@ static int __init kmemleak_late_init(void)
96467 return -ENOMEM;
96468 }
96469
96470- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
96471+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
96472 &kmemleak_fops);
96473 if (!dentry)
96474 pr_warning("Failed to create the debugfs kmemleak file\n");
96475diff --git a/mm/maccess.c b/mm/maccess.c
96476index d53adf9..03a24bf 100644
96477--- a/mm/maccess.c
96478+++ b/mm/maccess.c
96479@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
96480 set_fs(KERNEL_DS);
96481 pagefault_disable();
96482 ret = __copy_from_user_inatomic(dst,
96483- (__force const void __user *)src, size);
96484+ (const void __force_user *)src, size);
96485 pagefault_enable();
96486 set_fs(old_fs);
96487
96488@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
96489
96490 set_fs(KERNEL_DS);
96491 pagefault_disable();
96492- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
96493+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
96494 pagefault_enable();
96495 set_fs(old_fs);
96496
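
The maccess change is a sparse-annotation fix: the cast moves from (__force const void __user *) to (const void __force_user *), grsecurity's combined annotation. With UDEREF the kernel and user address spaces are genuinely disjoint, so sparse must see the cast target placed in the user address space. A sketch of how such annotations behave; the attributes only exist under __CHECKER__, i.e. when the file is run through sparse:

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
#else
# define __user
# define __force
#endif

#include <stdio.h>

/* stub standing in for copy_from_user(); illustration only */
static long fake_copy(void *dst, const void __user *src, unsigned long n)
{
        (void)dst; (void)src;
        return (long)n;
}

int main(void)
{
        char buf[8];
        const void *kptr = buf;

        /* without __force, sparse would warn about the address-space cast */
        fake_copy(buf, (__force const void __user *)kptr, sizeof(buf));
        printf("ok\n");
        return 0;
}
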
96497diff --git a/mm/madvise.c b/mm/madvise.c
96498index d551475..8fdd7f3 100644
96499--- a/mm/madvise.c
96500+++ b/mm/madvise.c
96501@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
96502 pgoff_t pgoff;
96503 unsigned long new_flags = vma->vm_flags;
96504
96505+#ifdef CONFIG_PAX_SEGMEXEC
96506+ struct vm_area_struct *vma_m;
96507+#endif
96508+
96509 switch (behavior) {
96510 case MADV_NORMAL:
96511 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
96512@@ -126,6 +130,13 @@ success:
96513 /*
96514 * vm_flags is protected by the mmap_sem held in write mode.
96515 */
96516+
96517+#ifdef CONFIG_PAX_SEGMEXEC
96518+ vma_m = pax_find_mirror_vma(vma);
96519+ if (vma_m)
96520+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
96521+#endif
96522+
96523 vma->vm_flags = new_flags;
96524
96525 out:
96526@@ -277,11 +288,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96527 struct vm_area_struct **prev,
96528 unsigned long start, unsigned long end)
96529 {
96530+
96531+#ifdef CONFIG_PAX_SEGMEXEC
96532+ struct vm_area_struct *vma_m;
96533+#endif
96534+
96535 *prev = vma;
96536 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96537 return -EINVAL;
96538
96539 zap_page_range(vma, start, end - start, NULL);
96540+
96541+#ifdef CONFIG_PAX_SEGMEXEC
96542+ vma_m = pax_find_mirror_vma(vma);
96543+ if (vma_m) {
96544+ if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96545+ return -EINVAL;
96546+
96547+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
96548+ }
96549+#endif
96550+
96551 return 0;
96552 }
96553
96554@@ -484,6 +511,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
96555 if (end < start)
96556 return error;
96557
96558+#ifdef CONFIG_PAX_SEGMEXEC
96559+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
96560+ if (end > SEGMEXEC_TASK_SIZE)
96561+ return error;
96562+ } else
96563+#endif
96564+
96565+ if (end > TASK_SIZE)
96566+ return error;
96567+
96568 error = 0;
96569 if (end == start)
96570 return error;
96571diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96572index d487f8d..39ebbf6 100644
96573--- a/mm/memory-failure.c
96574+++ b/mm/memory-failure.c
96575@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96576
96577 int sysctl_memory_failure_recovery __read_mostly = 1;
96578
96579-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96580+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96581
96582 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
96583
96584@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
96585 pfn, t->comm, t->pid);
96586 si.si_signo = SIGBUS;
96587 si.si_errno = 0;
96588- si.si_addr = (void *)addr;
96589+ si.si_addr = (void __user *)addr;
96590 #ifdef __ARCH_SI_TRAPNO
96591 si.si_trapno = trapno;
96592 #endif
96593@@ -779,7 +779,7 @@ static struct page_state {
96594 unsigned long res;
96595 char *msg;
96596 int (*action)(struct page *p, unsigned long pfn);
96597-} error_states[] = {
96598+} __do_const error_states[] = {
96599 { reserved, reserved, "reserved kernel", me_kernel },
96600 /*
96601 * free pages are specially detected outside this table:
96602@@ -1087,7 +1087,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96603 nr_pages = 1 << compound_order(hpage);
96604 else /* normal page or thp */
96605 nr_pages = 1;
96606- atomic_long_add(nr_pages, &num_poisoned_pages);
96607+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
96608
96609 /*
96610 * We need/can do nothing about count=0 pages.
96611@@ -1116,7 +1116,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96612 if (PageHWPoison(hpage)) {
96613 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
96614 || (p != hpage && TestSetPageHWPoison(hpage))) {
96615- atomic_long_sub(nr_pages, &num_poisoned_pages);
96616+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96617 unlock_page(hpage);
96618 return 0;
96619 }
96620@@ -1184,14 +1184,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96621 */
96622 if (!PageHWPoison(p)) {
96623 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
96624- atomic_long_sub(nr_pages, &num_poisoned_pages);
96625+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96626 put_page(hpage);
96627 res = 0;
96628 goto out;
96629 }
96630 if (hwpoison_filter(p)) {
96631 if (TestClearPageHWPoison(p))
96632- atomic_long_sub(nr_pages, &num_poisoned_pages);
96633+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96634 unlock_page(hpage);
96635 put_page(hpage);
96636 return 0;
96637@@ -1421,7 +1421,7 @@ int unpoison_memory(unsigned long pfn)
96638 return 0;
96639 }
96640 if (TestClearPageHWPoison(p))
96641- atomic_long_dec(&num_poisoned_pages);
96642+ atomic_long_dec_unchecked(&num_poisoned_pages);
96643 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
96644 return 0;
96645 }
96646@@ -1435,7 +1435,7 @@ int unpoison_memory(unsigned long pfn)
96647 */
96648 if (TestClearPageHWPoison(page)) {
96649 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
96650- atomic_long_sub(nr_pages, &num_poisoned_pages);
96651+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96652 freeit = 1;
96653 if (PageHuge(page))
96654 clear_page_hwpoison_huge_page(page);
96655@@ -1560,11 +1560,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
96656 if (PageHuge(page)) {
96657 set_page_hwpoison_huge_page(hpage);
96658 dequeue_hwpoisoned_huge_page(hpage);
96659- atomic_long_add(1 << compound_order(hpage),
96660+ atomic_long_add_unchecked(1 << compound_order(hpage),
96661 &num_poisoned_pages);
96662 } else {
96663 SetPageHWPoison(page);
96664- atomic_long_inc(&num_poisoned_pages);
96665+ atomic_long_inc_unchecked(&num_poisoned_pages);
96666 }
96667 }
96668 return ret;
96669@@ -1603,7 +1603,7 @@ static int __soft_offline_page(struct page *page, int flags)
96670 put_page(page);
96671 pr_info("soft_offline: %#lx: invalidated\n", pfn);
96672 SetPageHWPoison(page);
96673- atomic_long_inc(&num_poisoned_pages);
96674+ atomic_long_inc_unchecked(&num_poisoned_pages);
96675 return 0;
96676 }
96677
96678@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
96679 if (!is_free_buddy_page(page))
96680 pr_info("soft offline: %#lx: page leaked\n",
96681 pfn);
96682- atomic_long_inc(&num_poisoned_pages);
96683+ atomic_long_inc_unchecked(&num_poisoned_pages);
96684 }
96685 } else {
96686 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96687@@ -1722,11 +1722,11 @@ int soft_offline_page(struct page *page, int flags)
96688 if (PageHuge(page)) {
96689 set_page_hwpoison_huge_page(hpage);
96690 dequeue_hwpoisoned_huge_page(hpage);
96691- atomic_long_add(1 << compound_order(hpage),
96692+ atomic_long_add_unchecked(1 << compound_order(hpage),
96693 &num_poisoned_pages);
96694 } else {
96695 SetPageHWPoison(page);
96696- atomic_long_inc(&num_poisoned_pages);
96697+ atomic_long_inc_unchecked(&num_poisoned_pages);
96698 }
96699 }
96700 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
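
num_poisoned_pages is a pure statistics counter, so converting it to atomic_long_unchecked_t deliberately opts it out of PaX REFCOUNT's overflow trap; the trap is reserved for real reference counts, where a wrap to zero can trigger a premature free and a use-after-free. A user-space sketch of the checked/unchecked distinction (PaX implements the check in inline assembly, not like this):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static long refcount_inc_checked(long v)
{
        long r;

        if (__builtin_add_overflow(v, 1, &r)) {
                fprintf(stderr, "refcount overflow, aborting\n");
                abort();        /* models the REFCOUNT trap */
        }
        return r;
}

static long stat_inc_unchecked(long v)
{
        return (long)((unsigned long)v + 1);    /* wrap is harmless here */
}

int main(void)
{
        printf("%ld\n", stat_inc_unchecked(LONG_MAX));  /* wraps quietly */
        refcount_inc_checked(LONG_MAX);                 /* aborts */
        return 0;
}
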
96701diff --git a/mm/memory.c b/mm/memory.c
96702index 97839f5..4bc5530 100644
96703--- a/mm/memory.c
96704+++ b/mm/memory.c
96705@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96706 free_pte_range(tlb, pmd, addr);
96707 } while (pmd++, addr = next, addr != end);
96708
96709+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96710 start &= PUD_MASK;
96711 if (start < floor)
96712 return;
96713@@ -429,6 +430,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96714 pud_clear(pud);
96715 pmd_free_tlb(tlb, pmd, start);
96716 mm_dec_nr_pmds(tlb->mm);
96717+#endif
96718 }
96719
96720 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96721@@ -448,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96722 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96723 } while (pud++, addr = next, addr != end);
96724
96725+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96726 start &= PGDIR_MASK;
96727 if (start < floor)
96728 return;
96729@@ -462,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96730 pud = pud_offset(pgd, start);
96731 pgd_clear(pgd);
96732 pud_free_tlb(tlb, pud, start);
96733+#endif
96734+
96735 }
96736
96737 /*
96738@@ -691,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96739 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96740 */
96741 if (vma->vm_ops)
96742- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96743+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96744 vma->vm_ops->fault);
96745 if (vma->vm_file)
96746- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96747+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96748 vma->vm_file->f_op->mmap);
96749 dump_stack();
96750 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96751@@ -1464,6 +1469,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96752 page_add_file_rmap(page);
96753 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96754
96755+#ifdef CONFIG_PAX_SEGMEXEC
96756+ pax_mirror_file_pte(vma, addr, page, ptl);
96757+#endif
96758+
96759 retval = 0;
96760 pte_unmap_unlock(pte, ptl);
96761 return retval;
96762@@ -1508,9 +1517,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96763 if (!page_count(page))
96764 return -EINVAL;
96765 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96766+
96767+#ifdef CONFIG_PAX_SEGMEXEC
96768+ struct vm_area_struct *vma_m;
96769+#endif
96770+
96771 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96772 BUG_ON(vma->vm_flags & VM_PFNMAP);
96773 vma->vm_flags |= VM_MIXEDMAP;
96774+
96775+#ifdef CONFIG_PAX_SEGMEXEC
96776+ vma_m = pax_find_mirror_vma(vma);
96777+ if (vma_m)
96778+ vma_m->vm_flags |= VM_MIXEDMAP;
96779+#endif
96780+
96781 }
96782 return insert_page(vma, addr, page, vma->vm_page_prot);
96783 }
96784@@ -1593,6 +1614,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96785 unsigned long pfn)
96786 {
96787 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96788+ BUG_ON(vma->vm_mirror);
96789
96790 if (addr < vma->vm_start || addr >= vma->vm_end)
96791 return -EFAULT;
96792@@ -1840,7 +1862,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96793
96794 BUG_ON(pud_huge(*pud));
96795
96796- pmd = pmd_alloc(mm, pud, addr);
96797+ pmd = (mm == &init_mm) ?
96798+ pmd_alloc_kernel(mm, pud, addr) :
96799+ pmd_alloc(mm, pud, addr);
96800 if (!pmd)
96801 return -ENOMEM;
96802 do {
96803@@ -1860,7 +1884,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96804 unsigned long next;
96805 int err;
96806
96807- pud = pud_alloc(mm, pgd, addr);
96808+ pud = (mm == &init_mm) ?
96809+ pud_alloc_kernel(mm, pgd, addr) :
96810+ pud_alloc(mm, pgd, addr);
96811 if (!pud)
96812 return -ENOMEM;
96813 do {
96814@@ -1982,6 +2008,185 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96815 return ret;
96816 }
96817
96818+#ifdef CONFIG_PAX_SEGMEXEC
96819+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96820+{
96821+ struct mm_struct *mm = vma->vm_mm;
96822+ spinlock_t *ptl;
96823+ pte_t *pte, entry;
96824+
96825+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96826+ entry = *pte;
96827+ if (!pte_present(entry)) {
96828+ if (!pte_none(entry)) {
96829+ free_swap_and_cache(pte_to_swp_entry(entry));
96830+ pte_clear_not_present_full(mm, address, pte, 0);
96831+ }
96832+ } else {
96833+ struct page *page;
96834+
96835+ flush_cache_page(vma, address, pte_pfn(entry));
96836+ entry = ptep_clear_flush(vma, address, pte);
96837+ BUG_ON(pte_dirty(entry));
96838+ page = vm_normal_page(vma, address, entry);
96839+ if (page) {
96840+ update_hiwater_rss(mm);
96841+ if (PageAnon(page))
96842+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96843+ else
96844+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96845+ page_remove_rmap(page);
96846+ page_cache_release(page);
96847+ }
96848+ }
96849+ pte_unmap_unlock(pte, ptl);
96850+}
96851+
96852+/* PaX: if the vma is mirrored, synchronize the mirror's PTE
96853+ *
96854+ * the ptl of the lower mapped page is held on entry and is not released
96855+ * on exit or inside, to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96856+ */
96857+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96858+{
96859+ struct mm_struct *mm = vma->vm_mm;
96860+ unsigned long address_m;
96861+ spinlock_t *ptl_m;
96862+ struct vm_area_struct *vma_m;
96863+ pmd_t *pmd_m;
96864+ pte_t *pte_m, entry_m;
96865+
96866+ BUG_ON(!page_m || !PageAnon(page_m));
96867+
96868+ vma_m = pax_find_mirror_vma(vma);
96869+ if (!vma_m)
96870+ return;
96871+
96872+ BUG_ON(!PageLocked(page_m));
96873+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96874+ address_m = address + SEGMEXEC_TASK_SIZE;
96875+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96876+ pte_m = pte_offset_map(pmd_m, address_m);
96877+ ptl_m = pte_lockptr(mm, pmd_m);
96878+ if (ptl != ptl_m) {
96879+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96880+ if (!pte_none(*pte_m))
96881+ goto out;
96882+ }
96883+
96884+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96885+ page_cache_get(page_m);
96886+ page_add_anon_rmap(page_m, vma_m, address_m);
96887+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96888+ set_pte_at(mm, address_m, pte_m, entry_m);
96889+ update_mmu_cache(vma_m, address_m, pte_m);
96890+out:
96891+ if (ptl != ptl_m)
96892+ spin_unlock(ptl_m);
96893+ pte_unmap(pte_m);
96894+ unlock_page(page_m);
96895+}
96896+
96897+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96898+{
96899+ struct mm_struct *mm = vma->vm_mm;
96900+ unsigned long address_m;
96901+ spinlock_t *ptl_m;
96902+ struct vm_area_struct *vma_m;
96903+ pmd_t *pmd_m;
96904+ pte_t *pte_m, entry_m;
96905+
96906+ BUG_ON(!page_m || PageAnon(page_m));
96907+
96908+ vma_m = pax_find_mirror_vma(vma);
96909+ if (!vma_m)
96910+ return;
96911+
96912+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96913+ address_m = address + SEGMEXEC_TASK_SIZE;
96914+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96915+ pte_m = pte_offset_map(pmd_m, address_m);
96916+ ptl_m = pte_lockptr(mm, pmd_m);
96917+ if (ptl != ptl_m) {
96918+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96919+ if (!pte_none(*pte_m))
96920+ goto out;
96921+ }
96922+
96923+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96924+ page_cache_get(page_m);
96925+ page_add_file_rmap(page_m);
96926+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96927+ set_pte_at(mm, address_m, pte_m, entry_m);
96928+ update_mmu_cache(vma_m, address_m, pte_m);
96929+out:
96930+ if (ptl != ptl_m)
96931+ spin_unlock(ptl_m);
96932+ pte_unmap(pte_m);
96933+}
96934+
96935+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96936+{
96937+ struct mm_struct *mm = vma->vm_mm;
96938+ unsigned long address_m;
96939+ spinlock_t *ptl_m;
96940+ struct vm_area_struct *vma_m;
96941+ pmd_t *pmd_m;
96942+ pte_t *pte_m, entry_m;
96943+
96944+ vma_m = pax_find_mirror_vma(vma);
96945+ if (!vma_m)
96946+ return;
96947+
96948+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96949+ address_m = address + SEGMEXEC_TASK_SIZE;
96950+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96951+ pte_m = pte_offset_map(pmd_m, address_m);
96952+ ptl_m = pte_lockptr(mm, pmd_m);
96953+ if (ptl != ptl_m) {
96954+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96955+ if (!pte_none(*pte_m))
96956+ goto out;
96957+ }
96958+
96959+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96960+ set_pte_at(mm, address_m, pte_m, entry_m);
96961+out:
96962+ if (ptl != ptl_m)
96963+ spin_unlock(ptl_m);
96964+ pte_unmap(pte_m);
96965+}
96966+
96967+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96968+{
96969+ struct page *page_m;
96970+ pte_t entry;
96971+
96972+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96973+ goto out;
96974+
96975+ entry = *pte;
96976+ page_m = vm_normal_page(vma, address, entry);
96977+ if (!page_m)
96978+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96979+ else if (PageAnon(page_m)) {
96980+ if (pax_find_mirror_vma(vma)) {
96981+ pte_unmap_unlock(pte, ptl);
96982+ lock_page(page_m);
96983+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96984+ if (pte_same(entry, *pte))
96985+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96986+ else
96987+ unlock_page(page_m);
96988+ }
96989+ } else
96990+ pax_mirror_file_pte(vma, address, page_m, ptl);
96991+
96992+out:
96993+ pte_unmap_unlock(pte, ptl);
96994+}
96995+#endif
96996+
96997 /*
96998 * This routine handles present pages, when users try to write
96999 * to a shared page. It is done by copying the page to a new address
97000@@ -2172,6 +2377,12 @@ gotten:
97001 */
97002 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
97003 if (likely(pte_same(*page_table, orig_pte))) {
97004+
97005+#ifdef CONFIG_PAX_SEGMEXEC
97006+ if (pax_find_mirror_vma(vma))
97007+ BUG_ON(!trylock_page(new_page));
97008+#endif
97009+
97010 if (old_page) {
97011 if (!PageAnon(old_page)) {
97012 dec_mm_counter_fast(mm, MM_FILEPAGES);
97013@@ -2225,6 +2436,10 @@ gotten:
97014 page_remove_rmap(old_page);
97015 }
97016
97017+#ifdef CONFIG_PAX_SEGMEXEC
97018+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97019+#endif
97020+
97021 /* Free the old page.. */
97022 new_page = old_page;
97023 ret |= VM_FAULT_WRITE;
97024@@ -2483,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97025 swap_free(entry);
97026 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
97027 try_to_free_swap(page);
97028+
97029+#ifdef CONFIG_PAX_SEGMEXEC
97030+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
97031+#endif
97032+
97033 unlock_page(page);
97034 if (page != swapcache) {
97035 /*
97036@@ -2506,6 +2726,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97037
97038 /* No need to invalidate - it was non-present before */
97039 update_mmu_cache(vma, address, page_table);
97040+
97041+#ifdef CONFIG_PAX_SEGMEXEC
97042+ pax_mirror_anon_pte(vma, address, page, ptl);
97043+#endif
97044+
97045 unlock:
97046 pte_unmap_unlock(page_table, ptl);
97047 out:
97048@@ -2525,40 +2750,6 @@ out_release:
97049 }
97050
97051 /*
97052- * This is like a special single-page "expand_{down|up}wards()",
97053- * except we must first make sure that 'address{-|+}PAGE_SIZE'
97054- * doesn't hit another vma.
97055- */
97056-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
97057-{
97058- address &= PAGE_MASK;
97059- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
97060- struct vm_area_struct *prev = vma->vm_prev;
97061-
97062- /*
97063- * Is there a mapping abutting this one below?
97064- *
97065- * That's only ok if it's the same stack mapping
97066- * that has gotten split..
97067- */
97068- if (prev && prev->vm_end == address)
97069- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
97070-
97071- return expand_downwards(vma, address - PAGE_SIZE);
97072- }
97073- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
97074- struct vm_area_struct *next = vma->vm_next;
97075-
97076- /* As VM_GROWSDOWN but s/below/above/ */
97077- if (next && next->vm_start == address + PAGE_SIZE)
97078- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
97079-
97080- return expand_upwards(vma, address + PAGE_SIZE);
97081- }
97082- return 0;
97083-}
97084-
97085-/*
97086 * We enter with non-exclusive mmap_sem (to exclude vma changes,
97087 * but allow concurrent faults), and pte mapped but not yet locked.
97088 * We return with mmap_sem still held, but pte unmapped and unlocked.
97089@@ -2568,27 +2759,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97090 unsigned int flags)
97091 {
97092 struct mem_cgroup *memcg;
97093- struct page *page;
97094+ struct page *page = NULL;
97095 spinlock_t *ptl;
97096 pte_t entry;
97097
97098- pte_unmap(page_table);
97099-
97100- /* Check if we need to add a guard page to the stack */
97101- if (check_stack_guard_page(vma, address) < 0)
97102- return VM_FAULT_SIGSEGV;
97103-
97104- /* Use the zero-page for reads */
97105 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
97106 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
97107 vma->vm_page_prot));
97108- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
97109+ ptl = pte_lockptr(mm, pmd);
97110+ spin_lock(ptl);
97111 if (!pte_none(*page_table))
97112 goto unlock;
97113 goto setpte;
97114 }
97115
97116 /* Allocate our own private page. */
97117+ pte_unmap(page_table);
97118+
97119 if (unlikely(anon_vma_prepare(vma)))
97120 goto oom;
97121 page = alloc_zeroed_user_highpage_movable(vma, address);
97122@@ -2612,6 +2799,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97123 if (!pte_none(*page_table))
97124 goto release;
97125
97126+#ifdef CONFIG_PAX_SEGMEXEC
97127+ if (pax_find_mirror_vma(vma))
97128+ BUG_ON(!trylock_page(page));
97129+#endif
97130+
97131 inc_mm_counter_fast(mm, MM_ANONPAGES);
97132 page_add_new_anon_rmap(page, vma, address);
97133 mem_cgroup_commit_charge(page, memcg, false);
97134@@ -2621,6 +2813,12 @@ setpte:
97135
97136 /* No need to invalidate - it was non-present before */
97137 update_mmu_cache(vma, address, page_table);
97138+
97139+#ifdef CONFIG_PAX_SEGMEXEC
97140+ if (page)
97141+ pax_mirror_anon_pte(vma, address, page, ptl);
97142+#endif
97143+
97144 unlock:
97145 pte_unmap_unlock(page_table, ptl);
97146 return 0;
97147@@ -2853,6 +3051,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97148 return ret;
97149 }
97150 do_set_pte(vma, address, fault_page, pte, false, false);
97151+
97152+#ifdef CONFIG_PAX_SEGMEXEC
97153+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97154+#endif
97155+
97156 unlock_page(fault_page);
97157 unlock_out:
97158 pte_unmap_unlock(pte, ptl);
97159@@ -2904,7 +3107,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97160 }
97161 goto uncharge_out;
97162 }
97163+
97164+#ifdef CONFIG_PAX_SEGMEXEC
97165+ if (pax_find_mirror_vma(vma))
97166+ BUG_ON(!trylock_page(new_page));
97167+#endif
97168+
97169 do_set_pte(vma, address, new_page, pte, true, true);
97170+
97171+#ifdef CONFIG_PAX_SEGMEXEC
97172+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97173+#endif
97174+
97175 mem_cgroup_commit_charge(new_page, memcg, false);
97176 lru_cache_add_active_or_unevictable(new_page, vma);
97177 pte_unmap_unlock(pte, ptl);
97178@@ -2962,6 +3176,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97179 return ret;
97180 }
97181 do_set_pte(vma, address, fault_page, pte, true, false);
97182+
97183+#ifdef CONFIG_PAX_SEGMEXEC
97184+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97185+#endif
97186+
97187 pte_unmap_unlock(pte, ptl);
97188
97189 if (set_page_dirty(fault_page))
97190@@ -3185,6 +3404,12 @@ static int handle_pte_fault(struct mm_struct *mm,
97191 if (flags & FAULT_FLAG_WRITE)
97192 flush_tlb_fix_spurious_fault(vma, address);
97193 }
97194+
97195+#ifdef CONFIG_PAX_SEGMEXEC
97196+ pax_mirror_pte(vma, address, pte, pmd, ptl);
97197+ return 0;
97198+#endif
97199+
97200 unlock:
97201 pte_unmap_unlock(pte, ptl);
97202 return 0;
97203@@ -3204,9 +3429,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97204 pmd_t *pmd;
97205 pte_t *pte;
97206
97207+#ifdef CONFIG_PAX_SEGMEXEC
97208+ struct vm_area_struct *vma_m;
97209+#endif
97210+
97211 if (unlikely(is_vm_hugetlb_page(vma)))
97212 return hugetlb_fault(mm, vma, address, flags);
97213
97214+#ifdef CONFIG_PAX_SEGMEXEC
97215+ vma_m = pax_find_mirror_vma(vma);
97216+ if (vma_m) {
97217+ unsigned long address_m;
97218+ pgd_t *pgd_m;
97219+ pud_t *pud_m;
97220+ pmd_t *pmd_m;
97221+
97222+ if (vma->vm_start > vma_m->vm_start) {
97223+ address_m = address;
97224+ address -= SEGMEXEC_TASK_SIZE;
97225+ vma = vma_m;
97226+ } else
97227+ address_m = address + SEGMEXEC_TASK_SIZE;
97228+
97229+ pgd_m = pgd_offset(mm, address_m);
97230+ pud_m = pud_alloc(mm, pgd_m, address_m);
97231+ if (!pud_m)
97232+ return VM_FAULT_OOM;
97233+ pmd_m = pmd_alloc(mm, pud_m, address_m);
97234+ if (!pmd_m)
97235+ return VM_FAULT_OOM;
97236+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
97237+ return VM_FAULT_OOM;
97238+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
97239+ }
97240+#endif
97241+
97242 pgd = pgd_offset(mm, address);
97243 pud = pud_alloc(mm, pgd, address);
97244 if (!pud)
97245@@ -3341,6 +3598,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97246 spin_unlock(&mm->page_table_lock);
97247 return 0;
97248 }
97249+
97250+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97251+{
97252+ pud_t *new = pud_alloc_one(mm, address);
97253+ if (!new)
97254+ return -ENOMEM;
97255+
97256+ smp_wmb(); /* See comment in __pte_alloc */
97257+
97258+ spin_lock(&mm->page_table_lock);
97259+ if (pgd_present(*pgd)) /* Another has populated it */
97260+ pud_free(mm, new);
97261+ else
97262+ pgd_populate_kernel(mm, pgd, new);
97263+ spin_unlock(&mm->page_table_lock);
97264+ return 0;
97265+}
97266 #endif /* __PAGETABLE_PUD_FOLDED */
97267
97268 #ifndef __PAGETABLE_PMD_FOLDED
97269@@ -3373,6 +3647,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
97270 spin_unlock(&mm->page_table_lock);
97271 return 0;
97272 }
97273+
97274+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
97275+{
97276+ pmd_t *new = pmd_alloc_one(mm, address);
97277+ if (!new)
97278+ return -ENOMEM;
97279+
97280+ smp_wmb(); /* See comment in __pte_alloc */
97281+
97282+ spin_lock(&mm->page_table_lock);
97283+#ifndef __ARCH_HAS_4LEVEL_HACK
97284+ if (!pud_present(*pud)) {
97285+ mm_inc_nr_pmds(mm);
97286+ pud_populate_kernel(mm, pud, new);
97287+ } else /* Another has populated it */
97288+ pmd_free(mm, new);
97289+#else
97290+ if (!pgd_present(*pud)) {
97291+ mm_inc_nr_pmds(mm);
97292+ pgd_populate_kernel(mm, pud, new);
97293+ } else /* Another has populated it */
97294+ pmd_free(mm, new);
97295+#endif /* __ARCH_HAS_4LEVEL_HACK */
97296+ spin_unlock(&mm->page_table_lock);
97297+ return 0;
97298+}
97299 #endif /* __PAGETABLE_PMD_FOLDED */
97300
97301 static int __follow_pte(struct mm_struct *mm, unsigned long address,
97302@@ -3482,8 +3782,8 @@ out:
97303 return ret;
97304 }
97305
97306-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97307- void *buf, int len, int write)
97308+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97309+ void *buf, size_t len, int write)
97310 {
97311 resource_size_t phys_addr;
97312 unsigned long prot = 0;
97313@@ -3509,8 +3809,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
97314 * Access another process' address space as given in mm. If non-NULL, use the
97315 * given task for page fault accounting.
97316 */
97317-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97318- unsigned long addr, void *buf, int len, int write)
97319+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97320+ unsigned long addr, void *buf, size_t len, int write)
97321 {
97322 struct vm_area_struct *vma;
97323 void *old_buf = buf;
97324@@ -3518,7 +3818,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97325 down_read(&mm->mmap_sem);
97326 /* ignore errors, just check how much was successfully transferred */
97327 while (len) {
97328- int bytes, ret, offset;
97329+ ssize_t bytes, ret, offset;
97330 void *maddr;
97331 struct page *page = NULL;
97332
97333@@ -3579,8 +3879,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97334 *
97335 * The caller must hold a reference on @mm.
97336 */
97337-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97338- void *buf, int len, int write)
97339+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
97340+ void *buf, size_t len, int write)
97341 {
97342 return __access_remote_vm(NULL, mm, addr, buf, len, write);
97343 }
97344@@ -3590,11 +3890,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97345 * Source/target buffer must be kernel space,
97346 * Do not walk the page table directly, use get_user_pages
97347 */
97348-int access_process_vm(struct task_struct *tsk, unsigned long addr,
97349- void *buf, int len, int write)
97350+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
97351+ void *buf, size_t len, int write)
97352 {
97353 struct mm_struct *mm;
97354- int ret;
97355+ ssize_t ret;
97356
97357 mm = get_task_mm(tsk);
97358 if (!mm)
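
All of the SEGMEXEC hunks in this file enforce the same invariant: PaX splits the i386 user address space into two halves, mirrors every executable mapping into the upper half, and the pax_mirror_*_pte() helpers keep the mirror's PTE in lock-step on each fault path. The address arithmetic, assuming the conventional i386 value of 0x60000000 (1.5 GiB) for SEGMEXEC_TASK_SIZE, which the patch uses but does not define here:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL   /* assumed i386 layout */

int main(void)
{
        unsigned long address   = 0x08048000UL;   /* classic i386 text base */
        unsigned long address_m = address + SEGMEXEC_TASK_SIZE;

        printf("data view: %#lx, exec mirror: %#lx\n", address, address_m);
        return 0;
}
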
97359diff --git a/mm/mempolicy.c b/mm/mempolicy.c
97360index 4721046..6ae2056 100644
97361--- a/mm/mempolicy.c
97362+++ b/mm/mempolicy.c
97363@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97364 unsigned long vmstart;
97365 unsigned long vmend;
97366
97367+#ifdef CONFIG_PAX_SEGMEXEC
97368+ struct vm_area_struct *vma_m;
97369+#endif
97370+
97371 vma = find_vma(mm, start);
97372 if (!vma || vma->vm_start > start)
97373 return -EFAULT;
97374@@ -746,6 +750,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97375 err = vma_replace_policy(vma, new_pol);
97376 if (err)
97377 goto out;
97378+
97379+#ifdef CONFIG_PAX_SEGMEXEC
97380+ vma_m = pax_find_mirror_vma(vma);
97381+ if (vma_m) {
97382+ err = vma_replace_policy(vma_m, new_pol);
97383+ if (err)
97384+ goto out;
97385+ }
97386+#endif
97387+
97388 }
97389
97390 out:
97391@@ -1160,6 +1174,17 @@ static long do_mbind(unsigned long start, unsigned long len,
97392
97393 if (end < start)
97394 return -EINVAL;
97395+
97396+#ifdef CONFIG_PAX_SEGMEXEC
97397+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97398+ if (end > SEGMEXEC_TASK_SIZE)
97399+ return -EINVAL;
97400+ } else
97401+#endif
97402+
97403+ if (end > TASK_SIZE)
97404+ return -EINVAL;
97405+
97406 if (end == start)
97407 return 0;
97408
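
Under SEGMEXEC the upper half of the user address space holds the exec mirrors, so the check added to do_mbind() validates user ranges against SEGMEXEC_TASK_SIZE instead of TASK_SIZE. A hedged standalone model of that bound (the constants below are illustrative, roughly an i386 3 GiB split):

#include <assert.h>
#include <stdbool.h>

#define TASK_SIZE          0xC0000000UL    /* illustrative: i386 3 GiB split */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) /* exec mirrors occupy the upper half */

static bool range_ok(unsigned long end, bool segmexec)
{
	return end <= (segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE);
}

int main(void)
{
	assert(range_ok(SEGMEXEC_TASK_SIZE, true));
	assert(!range_ok(SEGMEXEC_TASK_SIZE + 1, true)); /* -EINVAL in the patch */
	assert(range_ok(TASK_SIZE, false));
	return 0;
}
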
97409@@ -1385,8 +1410,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97410 */
97411 tcred = __task_cred(task);
97412 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97413- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97414- !capable(CAP_SYS_NICE)) {
97415+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97416 rcu_read_unlock();
97417 err = -EPERM;
97418 goto out_put;
97419@@ -1417,6 +1441,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97420 goto out;
97421 }
97422
97423+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97424+ if (mm != current->mm &&
97425+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
97426+ mmput(mm);
97427+ err = -EPERM;
97428+ goto out;
97429+ }
97430+#endif
97431+
97432 err = do_migrate_pages(mm, old, new,
97433 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
97434
97435diff --git a/mm/migrate.c b/mm/migrate.c
97436index 85e0426..be49beb 100644
97437--- a/mm/migrate.c
97438+++ b/mm/migrate.c
97439@@ -1472,8 +1472,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
97440 */
97441 tcred = __task_cred(task);
97442 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97443- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97444- !capable(CAP_SYS_NICE)) {
97445+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97446 rcu_read_unlock();
97447 err = -EPERM;
97448 goto out;
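
Both this hunk and the matching one in mm/mempolicy.c drop the `!uid_eq(cred->uid, tcred->uid)` term, so a bare real-UID match no longer grants access to another task's pages. De Morgan'd into an allow-predicate, the retained logic looks roughly like the model below (a sketch, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <sys/types.h>

static bool may_move_pages(uid_t euid, uid_t uid,     /* caller */
			   uid_t t_suid, uid_t t_uid, /* target */
			   bool cap_sys_nice)
{
	/* Allowed if any of these hold; uid == t_uid alone no longer suffices. */
	return euid == t_suid || euid == t_uid || uid == t_suid || cap_sys_nice;
}

int main(void)
{
	assert(may_move_pages(1000, 1000, 0, 1000, false));  /* euid matches target uid */
	assert(!may_move_pages(1001, 1000, 0, 1000, false)); /* only real UIDs match: denied */
	return 0;
}
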
97449diff --git a/mm/mlock.c b/mm/mlock.c
97450index 8a54cd2..92f1747 100644
97451--- a/mm/mlock.c
97452+++ b/mm/mlock.c
97453@@ -14,6 +14,7 @@
97454 #include <linux/pagevec.h>
97455 #include <linux/mempolicy.h>
97456 #include <linux/syscalls.h>
97457+#include <linux/security.h>
97458 #include <linux/sched.h>
97459 #include <linux/export.h>
97460 #include <linux/rmap.h>
97461@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
97462 {
97463 unsigned long nstart, end, tmp;
97464 struct vm_area_struct * vma, * prev;
97465- int error;
97466+ int error = 0;
97467
97468 VM_BUG_ON(start & ~PAGE_MASK);
97469 VM_BUG_ON(len != PAGE_ALIGN(len));
97470@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
97471 return -EINVAL;
97472 if (end == start)
97473 return 0;
97474+ if (end > TASK_SIZE)
97475+ return -EINVAL;
97476+
97477 vma = find_vma(current->mm, start);
97478 if (!vma || vma->vm_start > start)
97479 return -ENOMEM;
97480@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
97481 for (nstart = start ; ; ) {
97482 vm_flags_t newflags;
97483
97484+#ifdef CONFIG_PAX_SEGMEXEC
97485+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97486+ break;
97487+#endif
97488+
97489 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
97490
97491 newflags = vma->vm_flags & ~VM_LOCKED;
97492@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
97493 locked += current->mm->locked_vm;
97494
97495 /* check against resource limits */
97496+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
97497 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
97498 error = do_mlock(start, len, 1);
97499
97500@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
97501 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
97502 vm_flags_t newflags;
97503
97504+#ifdef CONFIG_PAX_SEGMEXEC
97505+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97506+ break;
97507+#endif
97508+
97509 newflags = vma->vm_flags & ~VM_LOCKED;
97510 if (flags & MCL_CURRENT)
97511 newflags |= VM_LOCKED;
97512@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
97513 lock_limit >>= PAGE_SHIFT;
97514
97515 ret = -ENOMEM;
97516+
97517+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
97518+
97519 down_write(&current->mm->mmap_sem);
97520-
97521 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
97522 capable(CAP_IPC_LOCK))
97523 ret = do_mlockall(flags);
97524diff --git a/mm/mm_init.c b/mm/mm_init.c
97525index 5f420f7..dd42fb1b 100644
97526--- a/mm/mm_init.c
97527+++ b/mm/mm_init.c
97528@@ -177,7 +177,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
97529 return NOTIFY_OK;
97530 }
97531
97532-static struct notifier_block compute_batch_nb __meminitdata = {
97533+static struct notifier_block compute_batch_nb __meminitconst = {
97534 .notifier_call = mm_compute_batch_notifier,
97535 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
97536 };
97537diff --git a/mm/mmap.c b/mm/mmap.c
97538index 9ec50a3..0476e2d 100644
97539--- a/mm/mmap.c
97540+++ b/mm/mmap.c
97541@@ -41,6 +41,7 @@
97542 #include <linux/notifier.h>
97543 #include <linux/memory.h>
97544 #include <linux/printk.h>
97545+#include <linux/random.h>
97546
97547 #include <asm/uaccess.h>
97548 #include <asm/cacheflush.h>
97549@@ -57,6 +58,16 @@
97550 #define arch_rebalance_pgtables(addr, len) (addr)
97551 #endif
97552
97553+static inline void verify_mm_writelocked(struct mm_struct *mm)
97554+{
97555+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
97556+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97557+ up_read(&mm->mmap_sem);
97558+ BUG();
97559+ }
97560+#endif
97561+}
97562+
97563 static void unmap_region(struct mm_struct *mm,
97564 struct vm_area_struct *vma, struct vm_area_struct *prev,
97565 unsigned long start, unsigned long end);
97566@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
97567 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
97568 *
97569 */
97570-pgprot_t protection_map[16] = {
97571+pgprot_t protection_map[16] __read_only = {
97572 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
97573 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
97574 };
97575
97576-pgprot_t vm_get_page_prot(unsigned long vm_flags)
97577+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
97578 {
97579- return __pgprot(pgprot_val(protection_map[vm_flags &
97580+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
97581 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
97582 pgprot_val(arch_vm_get_page_prot(vm_flags)));
97583+
97584+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97585+ if (!(__supported_pte_mask & _PAGE_NX) &&
97586+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
97587+ (vm_flags & (VM_READ | VM_WRITE)))
97588+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
97589+#endif
97590+
97591+ return prot;
97592 }
97593 EXPORT_SYMBOL(vm_get_page_prot);
97594
97595@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
97596 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
97597 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
97598 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
97599+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
97600 /*
97601 * Make sure vm_committed_as in one cacheline and not cacheline shared with
97602 * other variables. It can be updated by several CPUs frequently.
97603@@ -271,6 +292,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
97604 struct vm_area_struct *next = vma->vm_next;
97605
97606 might_sleep();
97607+ BUG_ON(vma->vm_mirror);
97608 if (vma->vm_ops && vma->vm_ops->close)
97609 vma->vm_ops->close(vma);
97610 if (vma->vm_file)
97611@@ -284,6 +306,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
97612
97613 SYSCALL_DEFINE1(brk, unsigned long, brk)
97614 {
97615+ unsigned long rlim;
97616 unsigned long retval;
97617 unsigned long newbrk, oldbrk;
97618 struct mm_struct *mm = current->mm;
97619@@ -314,7 +337,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97620 * segment grow beyond its set limit in the case where the limit is
97621 * not page aligned -Ram Gupta
97622 */
97623- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
97624+ rlim = rlimit(RLIMIT_DATA);
97625+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97626+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
97627+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
97628+ rlim = 4096 * PAGE_SIZE;
97629+#endif
97630+ if (check_data_rlimit(rlim, brk, mm->start_brk,
97631 mm->end_data, mm->start_data))
97632 goto out;
97633
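
With 4 KiB pages the floor forced above is 4096 * PAGE_SIZE = 16 MiB, matching the comment. A quick arithmetic check (the page size is an assumption, outside the patch):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;        /* assumed, as on x86 */
	unsigned long rlim = 4096 * page_size; /* the forced brk floor */

	printf("%lu MiB\n", rlim >> 20);       /* prints 16 */
	return 0;
}
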
97634@@ -967,6 +996,12 @@ static int
97635 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
97636 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97637 {
97638+
97639+#ifdef CONFIG_PAX_SEGMEXEC
97640+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
97641+ return 0;
97642+#endif
97643+
97644 if (is_mergeable_vma(vma, file, vm_flags) &&
97645 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97646 if (vma->vm_pgoff == vm_pgoff)
97647@@ -986,6 +1021,12 @@ static int
97648 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97649 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97650 {
97651+
97652+#ifdef CONFIG_PAX_SEGMEXEC
97653+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
97654+ return 0;
97655+#endif
97656+
97657 if (is_mergeable_vma(vma, file, vm_flags) &&
97658 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97659 pgoff_t vm_pglen;
97660@@ -1035,6 +1076,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97661 struct vm_area_struct *area, *next;
97662 int err;
97663
97664+#ifdef CONFIG_PAX_SEGMEXEC
97665+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97666+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97667+
97668+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97669+#endif
97670+
97671 /*
97672 * We later require that vma->vm_flags == vm_flags,
97673 * so this tests vma->vm_flags & VM_SPECIAL, too.
97674@@ -1050,6 +1098,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97675 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97676 next = next->vm_next;
97677
97678+#ifdef CONFIG_PAX_SEGMEXEC
97679+ if (prev)
97680+ prev_m = pax_find_mirror_vma(prev);
97681+ if (area)
97682+ area_m = pax_find_mirror_vma(area);
97683+ if (next)
97684+ next_m = pax_find_mirror_vma(next);
97685+#endif
97686+
97687 /*
97688 * Can it merge with the predecessor?
97689 */
97690@@ -1069,9 +1126,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97691 /* cases 1, 6 */
97692 err = vma_adjust(prev, prev->vm_start,
97693 next->vm_end, prev->vm_pgoff, NULL);
97694- } else /* cases 2, 5, 7 */
97695+
97696+#ifdef CONFIG_PAX_SEGMEXEC
97697+ if (!err && prev_m)
97698+ err = vma_adjust(prev_m, prev_m->vm_start,
97699+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97700+#endif
97701+
97702+ } else { /* cases 2, 5, 7 */
97703 err = vma_adjust(prev, prev->vm_start,
97704 end, prev->vm_pgoff, NULL);
97705+
97706+#ifdef CONFIG_PAX_SEGMEXEC
97707+ if (!err && prev_m)
97708+ err = vma_adjust(prev_m, prev_m->vm_start,
97709+ end_m, prev_m->vm_pgoff, NULL);
97710+#endif
97711+
97712+ }
97713 if (err)
97714 return NULL;
97715 khugepaged_enter_vma_merge(prev, vm_flags);
97716@@ -1085,12 +1157,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97717 mpol_equal(policy, vma_policy(next)) &&
97718 can_vma_merge_before(next, vm_flags,
97719 anon_vma, file, pgoff+pglen)) {
97720- if (prev && addr < prev->vm_end) /* case 4 */
97721+ if (prev && addr < prev->vm_end) { /* case 4 */
97722 err = vma_adjust(prev, prev->vm_start,
97723 addr, prev->vm_pgoff, NULL);
97724- else /* cases 3, 8 */
97725+
97726+#ifdef CONFIG_PAX_SEGMEXEC
97727+ if (!err && prev_m)
97728+ err = vma_adjust(prev_m, prev_m->vm_start,
97729+ addr_m, prev_m->vm_pgoff, NULL);
97730+#endif
97731+
97732+ } else { /* cases 3, 8 */
97733 err = vma_adjust(area, addr, next->vm_end,
97734 next->vm_pgoff - pglen, NULL);
97735+
97736+#ifdef CONFIG_PAX_SEGMEXEC
97737+ if (!err && area_m)
97738+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97739+ next_m->vm_pgoff - pglen, NULL);
97740+#endif
97741+
97742+ }
97743 if (err)
97744 return NULL;
97745 khugepaged_enter_vma_merge(area, vm_flags);
97746@@ -1199,8 +1286,10 @@ none:
97747 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97748 struct file *file, long pages)
97749 {
97750- const unsigned long stack_flags
97751- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97752+
97753+#ifdef CONFIG_PAX_RANDMMAP
97754+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97755+#endif
97756
97757 mm->total_vm += pages;
97758
97759@@ -1208,7 +1297,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97760 mm->shared_vm += pages;
97761 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97762 mm->exec_vm += pages;
97763- } else if (flags & stack_flags)
97764+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97765 mm->stack_vm += pages;
97766 }
97767 #endif /* CONFIG_PROC_FS */
97768@@ -1238,6 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97769 locked += mm->locked_vm;
97770 lock_limit = rlimit(RLIMIT_MEMLOCK);
97771 lock_limit >>= PAGE_SHIFT;
97772+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97773 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97774 return -EAGAIN;
97775 }
97776@@ -1264,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97777 * (the exception is when the underlying filesystem is noexec
97778 * mounted, in which case we don't add PROT_EXEC.)
97779 */
97780- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97781+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97782 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97783 prot |= PROT_EXEC;
97784
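
Stock kernels widen prot only when PROT_READ is requested under the READ_IMPLIES_EXEC personality; the hunk above extends the trigger to PROT_WRITE as well, presumably because on x86 a writable mapping is readable in practice. A sketch of the before/after predicate (illustrative only, not the patch's code):

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

static int widen_prot(int prot, bool read_implies_exec, bool patched)
{
	int trigger = patched ? (PROT_READ | PROT_WRITE) : PROT_READ;

	if ((prot & trigger) && read_implies_exec)
		prot |= PROT_EXEC;
	return prot;
}

int main(void)
{
	printf("stock:   %#x\n", widen_prot(PROT_WRITE, true, false)); /* stays W */
	printf("patched: %#x\n", widen_prot(PROT_WRITE, true, true));  /* gains X */
	return 0;
}
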
97785@@ -1290,7 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97786 /* Obtain the address to map to. we verify (or select) it and ensure
97787 * that it represents a valid section of the address space.
97788 */
97789- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97790+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97791 if (addr & ~PAGE_MASK)
97792 return addr;
97793
97794@@ -1301,6 +1391,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97795 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97796 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97797
97798+#ifdef CONFIG_PAX_MPROTECT
97799+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97800+
97801+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97802+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97803+ mm->binfmt->handle_mmap)
97804+ mm->binfmt->handle_mmap(file);
97805+#endif
97806+
97807+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97808+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97809+ gr_log_rwxmmap(file);
97810+
97811+#ifdef CONFIG_PAX_EMUPLT
97812+ vm_flags &= ~VM_EXEC;
97813+#else
97814+ return -EPERM;
97815+#endif
97816+
97817+ }
97818+
97819+ if (!(vm_flags & VM_EXEC))
97820+ vm_flags &= ~VM_MAYEXEC;
97821+#else
97822+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97823+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97824+#endif
97825+ else
97826+ vm_flags &= ~VM_MAYWRITE;
97827+ }
97828+#endif
97829+
97830+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97831+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97832+ vm_flags &= ~VM_PAGEEXEC;
97833+#endif
97834+
97835 if (flags & MAP_LOCKED)
97836 if (!can_do_mlock())
97837 return -EPERM;
97838@@ -1388,6 +1515,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97839 vm_flags |= VM_NORESERVE;
97840 }
97841
97842+ if (!gr_acl_handle_mmap(file, prot))
97843+ return -EACCES;
97844+
97845 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97846 if (!IS_ERR_VALUE(addr) &&
97847 ((vm_flags & VM_LOCKED) ||
97848@@ -1481,7 +1611,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97849 vm_flags_t vm_flags = vma->vm_flags;
97850
97851 /* If it was private or non-writable, the write bit is already clear */
97852- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97853+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97854 return 0;
97855
97856 /* The backer wishes to know when pages are first written to? */
97857@@ -1532,7 +1662,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97858 struct rb_node **rb_link, *rb_parent;
97859 unsigned long charged = 0;
97860
97861+#ifdef CONFIG_PAX_SEGMEXEC
97862+ struct vm_area_struct *vma_m = NULL;
97863+#endif
97864+
97865+ /*
97866+ * mm->mmap_sem is required to protect against another thread
97867+ * changing the mappings in case we sleep.
97868+ */
97869+ verify_mm_writelocked(mm);
97870+
97871 /* Check against address space limit. */
97872+
97873+#ifdef CONFIG_PAX_RANDMMAP
97874+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97875+#endif
97876+
97877 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97878 unsigned long nr_pages;
97879
97880@@ -1551,11 +1696,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97881
97882 /* Clear old maps */
97883 error = -ENOMEM;
97884-munmap_back:
97885 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97886 if (do_munmap(mm, addr, len))
97887 return -ENOMEM;
97888- goto munmap_back;
97889+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97890 }
97891
97892 /*
97893@@ -1586,6 +1730,16 @@ munmap_back:
97894 goto unacct_error;
97895 }
97896
97897+#ifdef CONFIG_PAX_SEGMEXEC
97898+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97899+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97900+ if (!vma_m) {
97901+ error = -ENOMEM;
97902+ goto free_vma;
97903+ }
97904+ }
97905+#endif
97906+
97907 vma->vm_mm = mm;
97908 vma->vm_start = addr;
97909 vma->vm_end = addr + len;
97910@@ -1616,6 +1770,13 @@ munmap_back:
97911 if (error)
97912 goto unmap_and_free_vma;
97913
97914+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97915+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97916+ vma->vm_flags |= VM_PAGEEXEC;
97917+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97918+ }
97919+#endif
97920+
97921 /* Can addr have changed??
97922 *
97923 * Answer: Yes, several device drivers can do it in their
97924@@ -1634,6 +1795,12 @@ munmap_back:
97925 }
97926
97927 vma_link(mm, vma, prev, rb_link, rb_parent);
97928+
97929+#ifdef CONFIG_PAX_SEGMEXEC
97930+ if (vma_m)
97931+ BUG_ON(pax_mirror_vma(vma_m, vma));
97932+#endif
97933+
97934 /* Once vma denies write, undo our temporary denial count */
97935 if (file) {
97936 if (vm_flags & VM_SHARED)
97937@@ -1646,6 +1813,7 @@ out:
97938 perf_event_mmap(vma);
97939
97940 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97941+ track_exec_limit(mm, addr, addr + len, vm_flags);
97942 if (vm_flags & VM_LOCKED) {
97943 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97944 vma == get_gate_vma(current->mm)))
97945@@ -1683,6 +1851,12 @@ allow_write_and_free_vma:
97946 if (vm_flags & VM_DENYWRITE)
97947 allow_write_access(file);
97948 free_vma:
97949+
97950+#ifdef CONFIG_PAX_SEGMEXEC
97951+ if (vma_m)
97952+ kmem_cache_free(vm_area_cachep, vma_m);
97953+#endif
97954+
97955 kmem_cache_free(vm_area_cachep, vma);
97956 unacct_error:
97957 if (charged)
97958@@ -1690,7 +1864,63 @@ unacct_error:
97959 return error;
97960 }
97961
97962-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97963+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97964+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97965+{
97966+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97967+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97968+
97969+ return 0;
97970+}
97971+#endif
97972+
97973+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97974+{
97975+ if (!vma) {
97976+#ifdef CONFIG_STACK_GROWSUP
97977+ if (addr > sysctl_heap_stack_gap)
97978+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97979+ else
97980+ vma = find_vma(current->mm, 0);
97981+ if (vma && (vma->vm_flags & VM_GROWSUP))
97982+ return false;
97983+#endif
97984+ return true;
97985+ }
97986+
97987+ if (addr + len > vma->vm_start)
97988+ return false;
97989+
97990+ if (vma->vm_flags & VM_GROWSDOWN)
97991+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97992+#ifdef CONFIG_STACK_GROWSUP
97993+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97994+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97995+#endif
97996+ else if (offset)
97997+ return offset <= vma->vm_start - addr - len;
97998+
97999+ return true;
98000+}
98001+
98002+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
98003+{
98004+ if (vma->vm_start < len)
98005+ return -ENOMEM;
98006+
98007+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
98008+ if (offset <= vma->vm_start - len)
98009+ return vma->vm_start - len - offset;
98010+ else
98011+ return -ENOMEM;
98012+ }
98013+
98014+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
98015+ return vma->vm_start - len - sysctl_heap_stack_gap;
98016+ return -ENOMEM;
98017+}
98018+
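
A worked example of the check_heap_stack_gap() logic above, modeled in userspace with the default sysctl_heap_stack_gap of 64 KiB: a candidate range ending within the gap below a VM_GROWSDOWN stack is rejected. The helper and addresses below are illustrative, covering only the grows-down branch:

#include <stdbool.h>
#include <stdio.h>

#define GAP (64UL * 1024) /* default sysctl_heap_stack_gap */

/* Model: the next VMA starts at vma_start and is a grows-down stack. */
static bool gap_ok(unsigned long addr, unsigned long len, unsigned long vma_start)
{
	if (addr + len > vma_start)
		return false;                   /* overlaps outright */
	return GAP <= vma_start - addr - len;   /* must leave >= 64 KiB */
}

int main(void)
{
	unsigned long stack = 0x7f0000000000UL;

	printf("%d\n", gap_ok(stack - 0x20000, 0x10000, stack)); /* 1: 64 KiB left */
	printf("%d\n", gap_ok(stack - 0x18000, 0x10000, stack)); /* 0: only 32 KiB */
	return 0;
}
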
98019+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
98020 {
98021 /*
98022 * We implement the search by looking for an rbtree node that
98023@@ -1738,11 +1968,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
98024 }
98025 }
98026
98027- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
98028+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
98029 check_current:
98030 /* Check if current node has a suitable gap */
98031 if (gap_start > high_limit)
98032 return -ENOMEM;
98033+
98034+ if (gap_end - gap_start > info->threadstack_offset)
98035+ gap_start += info->threadstack_offset;
98036+ else
98037+ gap_start = gap_end;
98038+
98039+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98040+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98041+ gap_start += sysctl_heap_stack_gap;
98042+ else
98043+ gap_start = gap_end;
98044+ }
98045+ if (vma->vm_flags & VM_GROWSDOWN) {
98046+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98047+ gap_end -= sysctl_heap_stack_gap;
98048+ else
98049+ gap_end = gap_start;
98050+ }
98051 if (gap_end >= low_limit && gap_end - gap_start >= length)
98052 goto found;
98053
98054@@ -1792,7 +2040,7 @@ found:
98055 return gap_start;
98056 }
98057
98058-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
98059+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
98060 {
98061 struct mm_struct *mm = current->mm;
98062 struct vm_area_struct *vma;
98063@@ -1846,6 +2094,24 @@ check_current:
98064 gap_end = vma->vm_start;
98065 if (gap_end < low_limit)
98066 return -ENOMEM;
98067+
98068+ if (gap_end - gap_start > info->threadstack_offset)
98069+ gap_end -= info->threadstack_offset;
98070+ else
98071+ gap_end = gap_start;
98072+
98073+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98074+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98075+ gap_start += sysctl_heap_stack_gap;
98076+ else
98077+ gap_start = gap_end;
98078+ }
98079+ if (vma->vm_flags & VM_GROWSDOWN) {
98080+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98081+ gap_end -= sysctl_heap_stack_gap;
98082+ else
98083+ gap_end = gap_start;
98084+ }
98085 if (gap_start <= high_limit && gap_end - gap_start >= length)
98086 goto found;
98087
98088@@ -1909,6 +2175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98089 struct mm_struct *mm = current->mm;
98090 struct vm_area_struct *vma;
98091 struct vm_unmapped_area_info info;
98092+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98093
98094 if (len > TASK_SIZE - mmap_min_addr)
98095 return -ENOMEM;
98096@@ -1916,11 +2183,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98097 if (flags & MAP_FIXED)
98098 return addr;
98099
98100+#ifdef CONFIG_PAX_RANDMMAP
98101+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98102+#endif
98103+
98104 if (addr) {
98105 addr = PAGE_ALIGN(addr);
98106 vma = find_vma(mm, addr);
98107 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98108- (!vma || addr + len <= vma->vm_start))
98109+ check_heap_stack_gap(vma, addr, len, offset))
98110 return addr;
98111 }
98112
98113@@ -1929,6 +2200,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98114 info.low_limit = mm->mmap_base;
98115 info.high_limit = TASK_SIZE;
98116 info.align_mask = 0;
98117+ info.threadstack_offset = offset;
98118 return vm_unmapped_area(&info);
98119 }
98120 #endif
98121@@ -1947,6 +2219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98122 struct mm_struct *mm = current->mm;
98123 unsigned long addr = addr0;
98124 struct vm_unmapped_area_info info;
98125+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98126
98127 /* requested length too big for entire address space */
98128 if (len > TASK_SIZE - mmap_min_addr)
98129@@ -1955,12 +2228,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98130 if (flags & MAP_FIXED)
98131 return addr;
98132
98133+#ifdef CONFIG_PAX_RANDMMAP
98134+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98135+#endif
98136+
98137 /* requesting a specific address */
98138 if (addr) {
98139 addr = PAGE_ALIGN(addr);
98140 vma = find_vma(mm, addr);
98141 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98142- (!vma || addr + len <= vma->vm_start))
98143+ check_heap_stack_gap(vma, addr, len, offset))
98144 return addr;
98145 }
98146
98147@@ -1969,6 +2246,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98148 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
98149 info.high_limit = mm->mmap_base;
98150 info.align_mask = 0;
98151+ info.threadstack_offset = offset;
98152 addr = vm_unmapped_area(&info);
98153
98154 /*
98155@@ -1981,6 +2259,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98156 VM_BUG_ON(addr != -ENOMEM);
98157 info.flags = 0;
98158 info.low_limit = TASK_UNMAPPED_BASE;
98159+
98160+#ifdef CONFIG_PAX_RANDMMAP
98161+ if (mm->pax_flags & MF_PAX_RANDMMAP)
98162+ info.low_limit += mm->delta_mmap;
98163+#endif
98164+
98165 info.high_limit = TASK_SIZE;
98166 addr = vm_unmapped_area(&info);
98167 }
98168@@ -2081,6 +2365,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
98169 return vma;
98170 }
98171
98172+#ifdef CONFIG_PAX_SEGMEXEC
98173+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
98174+{
98175+ struct vm_area_struct *vma_m;
98176+
98177+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
98178+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
98179+ BUG_ON(vma->vm_mirror);
98180+ return NULL;
98181+ }
98182+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
98183+ vma_m = vma->vm_mirror;
98184+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
98185+ BUG_ON(vma->vm_file != vma_m->vm_file);
98186+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
98187+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
98188+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
98189+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
98190+ return vma_m;
98191+}
98192+#endif
98193+
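
pax_find_mirror_vma() encodes the SEGMEXEC invariants: every executable VMA in the lower half has a same-size, same-file, non-writable twin displaced by exactly SEGMEXEC_TASK_SIZE, and the two point at each other through vm_mirror. A toy model of that pairing (the structure, helper name, and constant are illustrative, not the kernel's):

#include <assert.h>
#include <stddef.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL /* illustrative: half of a 3 GiB split */

struct toy_vma {
	unsigned long start, end;
	struct toy_vma *mirror;
};

static void make_mirror(struct toy_vma *m, struct toy_vma *v)
{
	m->start = v->start + SEGMEXEC_TASK_SIZE;
	m->end = v->end + SEGMEXEC_TASK_SIZE;
	m->mirror = v;
	v->mirror = m;
}

int main(void)
{
	struct toy_vma text = { 0x08048000UL, 0x08050000UL, NULL }, m;

	make_mirror(&m, &text);
	assert(m.end - m.start == text.end - text.start); /* same size */
	assert(m.mirror == &text && text.mirror == &m);   /* linked both ways */
	return 0;
}
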
98194 /*
98195 * Verify that the stack growth is acceptable and
98196 * update accounting. This is shared with both the
98197@@ -2098,8 +2404,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98198
98199 /* Stack limit test */
98200 actual_size = size;
98201- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
98202- actual_size -= PAGE_SIZE;
98203+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
98204 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
98205 return -ENOMEM;
98206
98207@@ -2110,6 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98208 locked = mm->locked_vm + grow;
98209 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
98210 limit >>= PAGE_SHIFT;
98211+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
98212 if (locked > limit && !capable(CAP_IPC_LOCK))
98213 return -ENOMEM;
98214 }
98215@@ -2139,37 +2445,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98216 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
98217 * vma is the last one with address > vma->vm_end. Have to extend vma.
98218 */
98219+#ifndef CONFIG_IA64
98220+static
98221+#endif
98222 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98223 {
98224 int error;
98225+ bool locknext;
98226
98227 if (!(vma->vm_flags & VM_GROWSUP))
98228 return -EFAULT;
98229
98230+ /* Also guard against wrapping around to address 0. */
98231+ if (address < PAGE_ALIGN(address+1))
98232+ address = PAGE_ALIGN(address+1);
98233+ else
98234+ return -ENOMEM;
98235+
98236 /*
98237 * We must make sure the anon_vma is allocated
98238 * so that the anon_vma locking is not a noop.
98239 */
98240 if (unlikely(anon_vma_prepare(vma)))
98241 return -ENOMEM;
98242+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
98243+ if (locknext && anon_vma_prepare(vma->vm_next))
98244+ return -ENOMEM;
98245 vma_lock_anon_vma(vma);
98246+ if (locknext)
98247+ vma_lock_anon_vma(vma->vm_next);
98248
98249 /*
98250 * vma->vm_start/vm_end cannot change under us because the caller
98251 * is required to hold the mmap_sem in read mode. We need the
98252- * anon_vma lock to serialize against concurrent expand_stacks.
98253- * Also guard against wrapping around to address 0.
98254+ * anon_vma locks to serialize against concurrent expand_stacks
98255+ * and expand_upwards.
98256 */
98257- if (address < PAGE_ALIGN(address+4))
98258- address = PAGE_ALIGN(address+4);
98259- else {
98260- vma_unlock_anon_vma(vma);
98261- return -ENOMEM;
98262- }
98263 error = 0;
98264
98265 /* Somebody else might have raced and expanded it already */
98266- if (address > vma->vm_end) {
98267+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
98268+ error = -ENOMEM;
98269+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
98270 unsigned long size, grow;
98271
98272 size = address - vma->vm_start;
98273@@ -2204,6 +2521,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98274 }
98275 }
98276 }
98277+ if (locknext)
98278+ vma_unlock_anon_vma(vma->vm_next);
98279 vma_unlock_anon_vma(vma);
98280 khugepaged_enter_vma_merge(vma, vma->vm_flags);
98281 validate_mm(vma->vm_mm);
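
The relocated wrap-around guard in expand_upwards() works because PAGE_ALIGN(address + 1) overflows to a value no larger than the input when address sits in the top page of the address space. That property can be checked standalone (a sketch assuming a 4 KiB page size):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ok = 0x7ffff000UL; /* normal case */
	unsigned long top = ~0UL - 100;  /* inside the last page */

	/* address < PAGE_ALIGN(address + 1) holds for ok, fails at the top */
	printf("%d\n", ok < PAGE_ALIGN(ok + 1));   /* 1 */
	printf("%d\n", top < PAGE_ALIGN(top + 1)); /* 0: alignment wrapped to 0 */
	return 0;
}
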
98282@@ -2218,6 +2537,8 @@ int expand_downwards(struct vm_area_struct *vma,
98283 unsigned long address)
98284 {
98285 int error;
98286+ bool lockprev = false;
98287+ struct vm_area_struct *prev;
98288
98289 /*
98290 * We must make sure the anon_vma is allocated
98291@@ -2231,6 +2552,15 @@ int expand_downwards(struct vm_area_struct *vma,
98292 if (error)
98293 return error;
98294
98295+ prev = vma->vm_prev;
98296+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
98297+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
98298+#endif
98299+ if (lockprev && anon_vma_prepare(prev))
98300+ return -ENOMEM;
98301+ if (lockprev)
98302+ vma_lock_anon_vma(prev);
98303+
98304 vma_lock_anon_vma(vma);
98305
98306 /*
98307@@ -2240,9 +2570,17 @@ int expand_downwards(struct vm_area_struct *vma,
98308 */
98309
98310 /* Somebody else might have raced and expanded it already */
98311- if (address < vma->vm_start) {
98312+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
98313+ error = -ENOMEM;
98314+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
98315 unsigned long size, grow;
98316
98317+#ifdef CONFIG_PAX_SEGMEXEC
98318+ struct vm_area_struct *vma_m;
98319+
98320+ vma_m = pax_find_mirror_vma(vma);
98321+#endif
98322+
98323 size = vma->vm_end - address;
98324 grow = (vma->vm_start - address) >> PAGE_SHIFT;
98325
98326@@ -2267,13 +2605,27 @@ int expand_downwards(struct vm_area_struct *vma,
98327 vma->vm_pgoff -= grow;
98328 anon_vma_interval_tree_post_update_vma(vma);
98329 vma_gap_update(vma);
98330+
98331+#ifdef CONFIG_PAX_SEGMEXEC
98332+ if (vma_m) {
98333+ anon_vma_interval_tree_pre_update_vma(vma_m);
98334+ vma_m->vm_start -= grow << PAGE_SHIFT;
98335+ vma_m->vm_pgoff -= grow;
98336+ anon_vma_interval_tree_post_update_vma(vma_m);
98337+ vma_gap_update(vma_m);
98338+ }
98339+#endif
98340+
98341 spin_unlock(&vma->vm_mm->page_table_lock);
98342
98343+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
98344 perf_event_mmap(vma);
98345 }
98346 }
98347 }
98348 vma_unlock_anon_vma(vma);
98349+ if (lockprev)
98350+ vma_unlock_anon_vma(prev);
98351 khugepaged_enter_vma_merge(vma, vma->vm_flags);
98352 validate_mm(vma->vm_mm);
98353 return error;
98354@@ -2373,6 +2725,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
98355 do {
98356 long nrpages = vma_pages(vma);
98357
98358+#ifdef CONFIG_PAX_SEGMEXEC
98359+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
98360+ vma = remove_vma(vma);
98361+ continue;
98362+ }
98363+#endif
98364+
98365 if (vma->vm_flags & VM_ACCOUNT)
98366 nr_accounted += nrpages;
98367 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
98368@@ -2417,6 +2776,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
98369 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
98370 vma->vm_prev = NULL;
98371 do {
98372+
98373+#ifdef CONFIG_PAX_SEGMEXEC
98374+ if (vma->vm_mirror) {
98375+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
98376+ vma->vm_mirror->vm_mirror = NULL;
98377+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
98378+ vma->vm_mirror = NULL;
98379+ }
98380+#endif
98381+
98382 vma_rb_erase(vma, &mm->mm_rb);
98383 mm->map_count--;
98384 tail_vma = vma;
98385@@ -2444,14 +2813,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98386 struct vm_area_struct *new;
98387 int err = -ENOMEM;
98388
98389+#ifdef CONFIG_PAX_SEGMEXEC
98390+ struct vm_area_struct *vma_m, *new_m = NULL;
98391+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
98392+#endif
98393+
98394 if (is_vm_hugetlb_page(vma) && (addr &
98395 ~(huge_page_mask(hstate_vma(vma)))))
98396 return -EINVAL;
98397
98398+#ifdef CONFIG_PAX_SEGMEXEC
98399+ vma_m = pax_find_mirror_vma(vma);
98400+#endif
98401+
98402 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98403 if (!new)
98404 goto out_err;
98405
98406+#ifdef CONFIG_PAX_SEGMEXEC
98407+ if (vma_m) {
98408+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98409+ if (!new_m) {
98410+ kmem_cache_free(vm_area_cachep, new);
98411+ goto out_err;
98412+ }
98413+ }
98414+#endif
98415+
98416 /* most fields are the same, copy all, and then fixup */
98417 *new = *vma;
98418
98419@@ -2464,6 +2852,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98420 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
98421 }
98422
98423+#ifdef CONFIG_PAX_SEGMEXEC
98424+ if (vma_m) {
98425+ *new_m = *vma_m;
98426+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
98427+ new_m->vm_mirror = new;
98428+ new->vm_mirror = new_m;
98429+
98430+ if (new_below)
98431+ new_m->vm_end = addr_m;
98432+ else {
98433+ new_m->vm_start = addr_m;
98434+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
98435+ }
98436+ }
98437+#endif
98438+
98439 err = vma_dup_policy(vma, new);
98440 if (err)
98441 goto out_free_vma;
98442@@ -2484,6 +2888,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98443 else
98444 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
98445
98446+#ifdef CONFIG_PAX_SEGMEXEC
98447+ if (!err && vma_m) {
98448+ struct mempolicy *pol = vma_policy(new);
98449+
98450+ if (anon_vma_clone(new_m, vma_m))
98451+ goto out_free_mpol;
98452+
98453+ mpol_get(pol);
98454+ set_vma_policy(new_m, pol);
98455+
98456+ if (new_m->vm_file)
98457+ get_file(new_m->vm_file);
98458+
98459+ if (new_m->vm_ops && new_m->vm_ops->open)
98460+ new_m->vm_ops->open(new_m);
98461+
98462+ if (new_below)
98463+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
98464+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
98465+ else
98466+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
98467+
98468+ if (err) {
98469+ if (new_m->vm_ops && new_m->vm_ops->close)
98470+ new_m->vm_ops->close(new_m);
98471+ if (new_m->vm_file)
98472+ fput(new_m->vm_file);
98473+ mpol_put(pol);
98474+ }
98475+ }
98476+#endif
98477+
98478 /* Success. */
98479 if (!err)
98480 return 0;
98481@@ -2493,10 +2929,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98482 new->vm_ops->close(new);
98483 if (new->vm_file)
98484 fput(new->vm_file);
98485- unlink_anon_vmas(new);
98486 out_free_mpol:
98487 mpol_put(vma_policy(new));
98488 out_free_vma:
98489+
98490+#ifdef CONFIG_PAX_SEGMEXEC
98491+ if (new_m) {
98492+ unlink_anon_vmas(new_m);
98493+ kmem_cache_free(vm_area_cachep, new_m);
98494+ }
98495+#endif
98496+
98497+ unlink_anon_vmas(new);
98498 kmem_cache_free(vm_area_cachep, new);
98499 out_err:
98500 return err;
98501@@ -2509,6 +2953,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98502 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98503 unsigned long addr, int new_below)
98504 {
98505+
98506+#ifdef CONFIG_PAX_SEGMEXEC
98507+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
98508+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
98509+ if (mm->map_count >= sysctl_max_map_count-1)
98510+ return -ENOMEM;
98511+ } else
98512+#endif
98513+
98514 if (mm->map_count >= sysctl_max_map_count)
98515 return -ENOMEM;
98516
98517@@ -2520,11 +2973,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98518 * work. This now handles partial unmappings.
98519 * Jeremy Fitzhardinge <jeremy@goop.org>
98520 */
98521+#ifdef CONFIG_PAX_SEGMEXEC
98522 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98523 {
98524+ int ret = __do_munmap(mm, start, len);
98525+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
98526+ return ret;
98527+
98528+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
98529+}
98530+
98531+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98532+#else
98533+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98534+#endif
98535+{
98536 unsigned long end;
98537 struct vm_area_struct *vma, *prev, *last;
98538
98539+ /*
98540+ * mm->mmap_sem is required to protect against another thread
98541+ * changing the mappings in case we sleep.
98542+ */
98543+ verify_mm_writelocked(mm);
98544+
98545 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
98546 return -EINVAL;
98547
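
The wrapper above is the unmap side of mirroring: a successful unmap in the lower half is replayed at start + SEGMEXEC_TASK_SIZE so the exec mirror cannot outlive its original. A hedged sketch of the wrapper's shape (all names and the constant are illustrative; unmap_one() stands in for the real __do_munmap()):

#include <stddef.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL /* illustrative */

static int unmap_one(unsigned long start, size_t len)
{
	printf("unmap [%#lx, %#lx)\n", start, start + len);
	return 0;
}

static int toy_do_munmap(unsigned long start, size_t len, int segmexec)
{
	int ret = unmap_one(start, len); /* real work */

	if (ret || !segmexec)
		return ret;
	/* replay in the mirror half so no stale exec mapping survives */
	return unmap_one(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
	return toy_do_munmap(0x08048000UL, 0x1000, 1);
}
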
98548@@ -2602,6 +3074,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98549 /* Fix up all other VM information */
98550 remove_vma_list(mm, vma);
98551
98552+ track_exec_limit(mm, start, end, 0UL);
98553+
98554 return 0;
98555 }
98556
98557@@ -2610,6 +3084,13 @@ int vm_munmap(unsigned long start, size_t len)
98558 int ret;
98559 struct mm_struct *mm = current->mm;
98560
98561+
98562+#ifdef CONFIG_PAX_SEGMEXEC
98563+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
98564+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
98565+ return -EINVAL;
98566+#endif
98567+
98568 down_write(&mm->mmap_sem);
98569 ret = do_munmap(mm, start, len);
98570 up_write(&mm->mmap_sem);
98571@@ -2656,6 +3137,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
98572 down_write(&mm->mmap_sem);
98573 vma = find_vma(mm, start);
98574
98575+#ifdef CONFIG_PAX_SEGMEXEC
98576+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
98577+ goto out;
98578+#endif
98579+
98580 if (!vma || !(vma->vm_flags & VM_SHARED))
98581 goto out;
98582
98583@@ -2692,16 +3178,6 @@ out:
98584 return ret;
98585 }
98586
98587-static inline void verify_mm_writelocked(struct mm_struct *mm)
98588-{
98589-#ifdef CONFIG_DEBUG_VM
98590- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98591- WARN_ON(1);
98592- up_read(&mm->mmap_sem);
98593- }
98594-#endif
98595-}
98596-
98597 /*
98598 * this is really a simplified "do_mmap". it only handles
98599 * anonymous maps. eventually we may be able to do some
98600@@ -2715,6 +3191,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98601 struct rb_node **rb_link, *rb_parent;
98602 pgoff_t pgoff = addr >> PAGE_SHIFT;
98603 int error;
98604+ unsigned long charged;
98605
98606 len = PAGE_ALIGN(len);
98607 if (!len)
98608@@ -2722,10 +3199,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98609
98610 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
98611
98612+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
98613+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
98614+ flags &= ~VM_EXEC;
98615+
98616+#ifdef CONFIG_PAX_MPROTECT
98617+ if (mm->pax_flags & MF_PAX_MPROTECT)
98618+ flags &= ~VM_MAYEXEC;
98619+#endif
98620+
98621+ }
98622+#endif
98623+
98624 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
98625 if (error & ~PAGE_MASK)
98626 return error;
98627
98628+ charged = len >> PAGE_SHIFT;
98629+
98630 error = mlock_future_check(mm, mm->def_flags, len);
98631 if (error)
98632 return error;
98633@@ -2739,21 +3230,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98634 /*
98635 * Clear old maps. this also does some error checking for us
98636 */
98637- munmap_back:
98638 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98639 if (do_munmap(mm, addr, len))
98640 return -ENOMEM;
98641- goto munmap_back;
98642+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98643 }
98644
98645 /* Check against address space limits *after* clearing old maps... */
98646- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
98647+ if (!may_expand_vm(mm, charged))
98648 return -ENOMEM;
98649
98650 if (mm->map_count > sysctl_max_map_count)
98651 return -ENOMEM;
98652
98653- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
98654+ if (security_vm_enough_memory_mm(mm, charged))
98655 return -ENOMEM;
98656
98657 /* Can we just expand an old private anonymous mapping? */
98658@@ -2767,7 +3257,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98659 */
98660 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98661 if (!vma) {
98662- vm_unacct_memory(len >> PAGE_SHIFT);
98663+ vm_unacct_memory(charged);
98664 return -ENOMEM;
98665 }
98666
98667@@ -2781,10 +3271,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98668 vma_link(mm, vma, prev, rb_link, rb_parent);
98669 out:
98670 perf_event_mmap(vma);
98671- mm->total_vm += len >> PAGE_SHIFT;
98672+ mm->total_vm += charged;
98673 if (flags & VM_LOCKED)
98674- mm->locked_vm += (len >> PAGE_SHIFT);
98675+ mm->locked_vm += charged;
98676 vma->vm_flags |= VM_SOFTDIRTY;
98677+ track_exec_limit(mm, addr, addr + len, flags);
98678 return addr;
98679 }
98680
98681@@ -2846,6 +3337,7 @@ void exit_mmap(struct mm_struct *mm)
98682 while (vma) {
98683 if (vma->vm_flags & VM_ACCOUNT)
98684 nr_accounted += vma_pages(vma);
98685+ vma->vm_mirror = NULL;
98686 vma = remove_vma(vma);
98687 }
98688 vm_unacct_memory(nr_accounted);
98689@@ -2860,6 +3352,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98690 struct vm_area_struct *prev;
98691 struct rb_node **rb_link, *rb_parent;
98692
98693+#ifdef CONFIG_PAX_SEGMEXEC
98694+ struct vm_area_struct *vma_m = NULL;
98695+#endif
98696+
98697+ if (security_mmap_addr(vma->vm_start))
98698+ return -EPERM;
98699+
98700 /*
98701 * The vm_pgoff of a purely anonymous vma should be irrelevant
98702 * until its first write fault, when page's anon_vma and index
98703@@ -2883,7 +3382,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98704 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98705 return -ENOMEM;
98706
98707+#ifdef CONFIG_PAX_SEGMEXEC
98708+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98709+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98710+ if (!vma_m)
98711+ return -ENOMEM;
98712+ }
98713+#endif
98714+
98715 vma_link(mm, vma, prev, rb_link, rb_parent);
98716+
98717+#ifdef CONFIG_PAX_SEGMEXEC
98718+ if (vma_m)
98719+ BUG_ON(pax_mirror_vma(vma_m, vma));
98720+#endif
98721+
98722 return 0;
98723 }
98724
98725@@ -2902,6 +3415,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98726 struct rb_node **rb_link, *rb_parent;
98727 bool faulted_in_anon_vma = true;
98728
98729+ BUG_ON(vma->vm_mirror);
98730+
98731 /*
98732 * If anonymous vma has not yet been faulted, update new pgoff
98733 * to match new location, to increase its chance of merging.
98734@@ -2966,6 +3481,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98735 return NULL;
98736 }
98737
98738+#ifdef CONFIG_PAX_SEGMEXEC
98739+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98740+{
98741+ struct vm_area_struct *prev_m;
98742+ struct rb_node **rb_link_m, *rb_parent_m;
98743+ struct mempolicy *pol_m;
98744+
98745+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98746+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98747+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98748+ *vma_m = *vma;
98749+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98750+ if (anon_vma_clone(vma_m, vma))
98751+ return -ENOMEM;
98752+ pol_m = vma_policy(vma_m);
98753+ mpol_get(pol_m);
98754+ set_vma_policy(vma_m, pol_m);
98755+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98756+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98757+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98758+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98759+ if (vma_m->vm_file)
98760+ get_file(vma_m->vm_file);
98761+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98762+ vma_m->vm_ops->open(vma_m);
98763+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98764+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98765+ vma_m->vm_mirror = vma;
98766+ vma->vm_mirror = vma_m;
98767+ return 0;
98768+}
98769+#endif
98770+
98771 /*
98772 * Return true if the calling process may expand its vm space by the passed
98773 * number of pages
98774@@ -2977,6 +3525,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98775
98776 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98777
98778+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98779 if (cur + npages > lim)
98780 return 0;
98781 return 1;
98782@@ -3059,6 +3608,22 @@ static struct vm_area_struct *__install_special_mapping(
98783 vma->vm_start = addr;
98784 vma->vm_end = addr + len;
98785
98786+#ifdef CONFIG_PAX_MPROTECT
98787+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98788+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98789+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98790+ return ERR_PTR(-EPERM);
98791+ if (!(vm_flags & VM_EXEC))
98792+ vm_flags &= ~VM_MAYEXEC;
98793+#else
98794+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98795+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98796+#endif
98797+ else
98798+ vm_flags &= ~VM_MAYWRITE;
98799+ }
98800+#endif
98801+
98802 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98803 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98804
98805diff --git a/mm/mprotect.c b/mm/mprotect.c
98806index 8858483..8145fa5 100644
98807--- a/mm/mprotect.c
98808+++ b/mm/mprotect.c
98809@@ -24,10 +24,18 @@
98810 #include <linux/migrate.h>
98811 #include <linux/perf_event.h>
98812 #include <linux/ksm.h>
98813+#include <linux/sched/sysctl.h>
98814+
98815+#ifdef CONFIG_PAX_MPROTECT
98816+#include <linux/elf.h>
98817+#include <linux/binfmts.h>
98818+#endif
98819+
98820 #include <asm/uaccess.h>
98821 #include <asm/pgtable.h>
98822 #include <asm/cacheflush.h>
98823 #include <asm/tlbflush.h>
98824+#include <asm/mmu_context.h>
98825
98826 /*
98827 * For a prot_numa update we only hold mmap_sem for read so there is a
98828@@ -252,6 +260,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98829 return pages;
98830 }
98831
98832+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98833+/* called while holding the mmap semaphore for writing, except during stack expansion */
98834+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98835+{
98836+ unsigned long oldlimit, newlimit = 0UL;
98837+
98838+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98839+ return;
98840+
98841+ spin_lock(&mm->page_table_lock);
98842+ oldlimit = mm->context.user_cs_limit;
98843+ if ((prot & VM_EXEC) && oldlimit < end)
98844+ /* USER_CS limit moved up */
98845+ newlimit = end;
98846+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98847+ /* USER_CS limit moved down */
98848+ newlimit = start;
98849+
98850+ if (newlimit) {
98851+ mm->context.user_cs_limit = newlimit;
98852+
98853+#ifdef CONFIG_SMP
98854+ wmb();
98855+ cpus_clear(mm->context.cpu_user_cs_mask);
98856+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98857+#endif
98858+
98859+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98860+ }
98861+ spin_unlock(&mm->page_table_lock);
98862+ if (newlimit == end) {
98863+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98864+
98865+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98866+ if (is_vm_hugetlb_page(vma))
98867+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98868+ else
98869+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98870+ }
98871+}
98872+#endif
98873+
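
track_exec_limit() drives the segmentation-based NX emulation: on CPUs without hardware NX, USER_CS is given a limit just past the highest executable mapping, so the limit only moves up when an exec mapping extends beyond it and only moves down when a range covering the old limit loses exec. A standalone model of that decision table (a sketch; 0 stands for "leave the limit alone"):

#include <assert.h>

static unsigned long new_cs_limit(unsigned long oldlimit, unsigned long start,
				  unsigned long end, int exec)
{
	if (exec && oldlimit < end)
		return end;   /* USER_CS limit moves up */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start; /* USER_CS limit moves down */
	return 0;             /* unchanged */
}

int main(void)
{
	assert(new_cs_limit(0x08050000UL, 0x08050000UL, 0x08060000UL, 1) == 0x08060000UL);
	assert(new_cs_limit(0x08060000UL, 0x08050000UL, 0x08060000UL, 0) == 0x08050000UL);
	assert(new_cs_limit(0x08060000UL, 0x00001000UL, 0x00002000UL, 0) == 0);
	return 0;
}
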
98874 int
98875 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98876 unsigned long start, unsigned long end, unsigned long newflags)
98877@@ -264,11 +314,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98878 int error;
98879 int dirty_accountable = 0;
98880
98881+#ifdef CONFIG_PAX_SEGMEXEC
98882+ struct vm_area_struct *vma_m = NULL;
98883+ unsigned long start_m, end_m;
98884+
98885+ start_m = start + SEGMEXEC_TASK_SIZE;
98886+ end_m = end + SEGMEXEC_TASK_SIZE;
98887+#endif
98888+
98889 if (newflags == oldflags) {
98890 *pprev = vma;
98891 return 0;
98892 }
98893
98894+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98895+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98896+
98897+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98898+ return -ENOMEM;
98899+
98900+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98901+ return -ENOMEM;
98902+ }
98903+
98904 /*
98905 * If we make a private mapping writable we increase our commit;
98906 * but (without finer accounting) cannot reduce our commit if we
98907@@ -285,6 +353,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98908 }
98909 }
98910
98911+#ifdef CONFIG_PAX_SEGMEXEC
98912+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98913+ if (start != vma->vm_start) {
98914+ error = split_vma(mm, vma, start, 1);
98915+ if (error)
98916+ goto fail;
98917+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98918+ *pprev = (*pprev)->vm_next;
98919+ }
98920+
98921+ if (end != vma->vm_end) {
98922+ error = split_vma(mm, vma, end, 0);
98923+ if (error)
98924+ goto fail;
98925+ }
98926+
98927+ if (pax_find_mirror_vma(vma)) {
98928+ error = __do_munmap(mm, start_m, end_m - start_m);
98929+ if (error)
98930+ goto fail;
98931+ } else {
98932+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98933+ if (!vma_m) {
98934+ error = -ENOMEM;
98935+ goto fail;
98936+ }
98937+ vma->vm_flags = newflags;
98938+ error = pax_mirror_vma(vma_m, vma);
98939+ if (error) {
98940+ vma->vm_flags = oldflags;
98941+ goto fail;
98942+ }
98943+ }
98944+ }
98945+#endif
98946+
98947 /*
98948 * First try to merge with previous and/or next vma.
98949 */
98950@@ -315,7 +419,19 @@ success:
98951 * vm_flags and vm_page_prot are protected by the mmap_sem
98952 * held in write mode.
98953 */
98954+
98955+#ifdef CONFIG_PAX_SEGMEXEC
98956+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98957+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98958+#endif
98959+
98960 vma->vm_flags = newflags;
98961+
98962+#ifdef CONFIG_PAX_MPROTECT
98963+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98964+ mm->binfmt->handle_mprotect(vma, newflags);
98965+#endif
98966+
98967 dirty_accountable = vma_wants_writenotify(vma);
98968 vma_set_page_prot(vma);
98969
98970@@ -351,6 +467,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98971 end = start + len;
98972 if (end <= start)
98973 return -ENOMEM;
98974+
98975+#ifdef CONFIG_PAX_SEGMEXEC
98976+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98977+ if (end > SEGMEXEC_TASK_SIZE)
98978+ return -EINVAL;
98979+ } else
98980+#endif
98981+
98982+ if (end > TASK_SIZE)
98983+ return -EINVAL;
98984+
98985 if (!arch_validate_prot(prot))
98986 return -EINVAL;
98987
98988@@ -358,7 +485,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98989 /*
98990 * Does the application expect PROT_READ to imply PROT_EXEC:
98991 */
98992- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98993+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98994 prot |= PROT_EXEC;
98995
98996 vm_flags = calc_vm_prot_bits(prot);
98997@@ -390,6 +517,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98998 if (start > vma->vm_start)
98999 prev = vma;
99000
99001+#ifdef CONFIG_PAX_MPROTECT
99002+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
99003+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
99004+#endif
99005+
99006 for (nstart = start ; ; ) {
99007 unsigned long newflags;
99008
99009@@ -400,6 +532,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99010
99011 /* newflags >> 4 shift VM_MAY% in place of VM_% */
99012 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
99013+ if (prot & (PROT_WRITE | PROT_EXEC))
99014+ gr_log_rwxmprotect(vma);
99015+
99016+ error = -EACCES;
99017+ goto out;
99018+ }
99019+
99020+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
99021 error = -EACCES;
99022 goto out;
99023 }
99024@@ -414,6 +554,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99025 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
99026 if (error)
99027 goto out;
99028+
99029+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
99030+
99031 nstart = tmp;
99032
99033 if (nstart < prev->vm_end)
99034diff --git a/mm/mremap.c b/mm/mremap.c
99035index 2dc44b1..caa1819 100644
99036--- a/mm/mremap.c
99037+++ b/mm/mremap.c
99038@@ -142,6 +142,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
99039 continue;
99040 pte = ptep_get_and_clear(mm, old_addr, old_pte);
99041 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
99042+
99043+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
99044+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
99045+ pte = pte_exprotect(pte);
99046+#endif
99047+
99048 pte = move_soft_dirty_pte(pte);
99049 set_pte_at(mm, new_addr, new_pte, pte);
99050 }
99051@@ -350,6 +356,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
99052 if (is_vm_hugetlb_page(vma))
99053 goto Einval;
99054
99055+#ifdef CONFIG_PAX_SEGMEXEC
99056+ if (pax_find_mirror_vma(vma))
99057+ goto Einval;
99058+#endif
99059+
99060 /* We can't remap across vm area boundaries */
99061 if (old_len > vma->vm_end - addr)
99062 goto Efault;
99063@@ -405,20 +416,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
99064 unsigned long ret = -EINVAL;
99065 unsigned long charged = 0;
99066 unsigned long map_flags;
99067+ unsigned long pax_task_size = TASK_SIZE;
99068
99069 if (new_addr & ~PAGE_MASK)
99070 goto out;
99071
99072- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
99073+#ifdef CONFIG_PAX_SEGMEXEC
99074+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99075+ pax_task_size = SEGMEXEC_TASK_SIZE;
99076+#endif
99077+
99078+ pax_task_size -= PAGE_SIZE;
99079+
99080+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
99081 goto out;
99082
99083 /* Check if the location we're moving into overlaps the
99084 * old location at all, and fail if it does.
99085 */
99086- if ((new_addr <= addr) && (new_addr+new_len) > addr)
99087- goto out;
99088-
99089- if ((addr <= new_addr) && (addr+old_len) > new_addr)
99090+ if (addr + old_len > new_addr && new_addr + new_len > addr)
99091 goto out;
99092
99093 ret = do_munmap(mm, new_addr, new_len);
99094@@ -487,6 +503,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99095 unsigned long ret = -EINVAL;
99096 unsigned long charged = 0;
99097 bool locked = false;
99098+ unsigned long pax_task_size = TASK_SIZE;
99099
99100 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
99101 return ret;
99102@@ -508,6 +525,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99103 if (!new_len)
99104 return ret;
99105
99106+#ifdef CONFIG_PAX_SEGMEXEC
99107+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99108+ pax_task_size = SEGMEXEC_TASK_SIZE;
99109+#endif
99110+
99111+ pax_task_size -= PAGE_SIZE;
99112+
99113+	if (new_len > pax_task_size || addr > pax_task_size - new_len ||
99114+	    old_len > pax_task_size || addr > pax_task_size - old_len)
99115+ return ret;
99116+
99117 down_write(&current->mm->mmap_sem);
99118
99119 if (flags & MREMAP_FIXED) {
99120@@ -558,6 +586,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99121 new_addr = addr;
99122 }
99123 ret = addr;
99124+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
99125 goto out;
99126 }
99127 }
99128@@ -581,7 +610,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99129 goto out;
99130 }
99131
99132+ map_flags = vma->vm_flags;
99133 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
99134+ if (!(ret & ~PAGE_MASK)) {
99135+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
99136+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
99137+ }
99138 }
99139 out:
99140 if (ret & ~PAGE_MASK)
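
The mremap_to() hunk replaces the two one-sided range tests with the standard half-open interval intersection: [addr, addr+old_len) and [new_addr, new_addr+new_len) overlap exactly when addr + old_len > new_addr and new_addr + new_len > addr. A small brute-force sketch of the equivalence (plain unsigned arithmetic; the kernel's earlier length checks rule out wraparound):

#include <assert.h>
#include <stdio.h>

/* Old form: two one-sided containment tests. */
static int overlaps_old(unsigned long a, unsigned long alen,
			unsigned long b, unsigned long blen)
{
	if (b <= a && b + blen > a)
		return 1;
	if (a <= b && a + alen > b)
		return 1;
	return 0;
}

/* New form from the hunk: a single half-open interval intersection. */
static int overlaps_new(unsigned long a, unsigned long alen,
			unsigned long b, unsigned long blen)
{
	return a + alen > b && b + blen > a;
}

int main(void)
{
	unsigned long a, alen, b, blen;

	for (a = 0; a < 8; a++)
		for (alen = 1; alen < 8; alen++)
			for (b = 0; b < 8; b++)
				for (blen = 1; blen < 8; blen++)
					assert(overlaps_old(a, alen, b, blen) ==
					       overlaps_new(a, alen, b, blen));
	puts("equivalent for all tested cases");
	return 0;
}
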
99141diff --git a/mm/nommu.c b/mm/nommu.c
99142index 3fba2dc..fdad748 100644
99143--- a/mm/nommu.c
99144+++ b/mm/nommu.c
99145@@ -72,7 +72,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
99146 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
99147 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
99148 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
99149-int heap_stack_gap = 0;
99150
99151 atomic_long_t mmap_pages_allocated;
99152
99153@@ -892,15 +891,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
99154 EXPORT_SYMBOL(find_vma);
99155
99156 /*
99157- * find a VMA
99158- * - we don't extend stack VMAs under NOMMU conditions
99159- */
99160-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
99161-{
99162- return find_vma(mm, addr);
99163-}
99164-
99165-/*
99166 * expand a stack to a given address
99167 * - not supported under NOMMU conditions
99168 */
99169@@ -1585,6 +1575,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99170
99171 /* most fields are the same, copy all, and then fixup */
99172 *new = *vma;
99173+ INIT_LIST_HEAD(&new->anon_vma_chain);
99174 *region = *vma->vm_region;
99175 new->vm_region = region;
99176
99177@@ -2007,8 +1998,8 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
99178 }
99179 EXPORT_SYMBOL(filemap_map_pages);
99180
99181-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99182- unsigned long addr, void *buf, int len, int write)
99183+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99184+ unsigned long addr, void *buf, size_t len, int write)
99185 {
99186 struct vm_area_struct *vma;
99187
99188@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99189 *
99190 * The caller must hold a reference on @mm.
99191 */
99192-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99193- void *buf, int len, int write)
99194+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
99195+ void *buf, size_t len, int write)
99196 {
99197 return __access_remote_vm(NULL, mm, addr, buf, len, write);
99198 }
99199@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99200 * Access another process' address space.
99201 * - source/target buffer must be kernel space
99202 */
99203-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
99204+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
99205 {
99206 struct mm_struct *mm;
99207
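
The nommu hunks widen the length parameters of __access_remote_vm() and friends from int to size_t/ssize_t, matching the MMU build. The point is that a length above INT_MAX handed to an int parameter is silently converted, typically to a negative value, before any bounds check in the callee runs. A tiny sketch of the hazard (the converted value is implementation-defined, hence the hedged comment):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t len = (size_t)1 << 31;	/* a 2 GiB request */
	int narrow = (int)len;		/* what an 'int len' parameter receives */

	/* On LP64 this prints a negative number (the exact converted value
	 * is implementation-defined), so any 'len < 0' or 'len > limit'
	 * check in the callee misfires -- hence the size_t/ssize_t
	 * prototypes in the hunk. */
	printf("requested %zu, int parameter sees %d\n", len, narrow);
	return 0;
}
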
99208diff --git a/mm/page-writeback.c b/mm/page-writeback.c
99209index 644bcb6..444a2c4 100644
99210--- a/mm/page-writeback.c
99211+++ b/mm/page-writeback.c
99212@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
99213 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
99214 * - the bdi dirty thresh drops quickly due to change of JBOD workload
99215 */
99216-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
99217+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
99218 unsigned long thresh,
99219 unsigned long bg_thresh,
99220 unsigned long dirty,
99221diff --git a/mm/page_alloc.c b/mm/page_alloc.c
99222index 40e2942..0eb29a2 100644
99223--- a/mm/page_alloc.c
99224+++ b/mm/page_alloc.c
99225@@ -61,6 +61,7 @@
99226 #include <linux/hugetlb.h>
99227 #include <linux/sched/rt.h>
99228 #include <linux/page_owner.h>
99229+#include <linux/random.h>
99230
99231 #include <asm/sections.h>
99232 #include <asm/tlbflush.h>
99233@@ -357,7 +358,7 @@ out:
99234 * This usage means that zero-order pages may not be compound.
99235 */
99236
99237-static void free_compound_page(struct page *page)
99238+void free_compound_page(struct page *page)
99239 {
99240 __free_pages_ok(page, compound_order(page));
99241 }
99242@@ -480,7 +481,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
99243 __mod_zone_freepage_state(zone, (1 << order), migratetype);
99244 }
99245 #else
99246-struct page_ext_operations debug_guardpage_ops = { NULL, };
99247+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
99248 static inline void set_page_guard(struct zone *zone, struct page *page,
99249 unsigned int order, int migratetype) {}
99250 static inline void clear_page_guard(struct zone *zone, struct page *page,
99251@@ -783,6 +784,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99252 bool compound = PageCompound(page);
99253 int i, bad = 0;
99254
99255+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99256+ unsigned long index = 1UL << order;
99257+#endif
99258+
99259 VM_BUG_ON_PAGE(PageTail(page), page);
99260 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
99261
99262@@ -809,6 +814,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99263 debug_check_no_obj_freed(page_address(page),
99264 PAGE_SIZE << order);
99265 }
99266+
99267+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99268+ for (; index; --index)
99269+ sanitize_highpage(page + index - 1);
99270+#endif
99271+
99272 arch_free_page(page, order);
99273 kernel_map_pages(page, 1 << order, 0);
99274
99275@@ -832,6 +843,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
99276 local_irq_restore(flags);
99277 }
99278
99279+#ifdef CONFIG_PAX_LATENT_ENTROPY
99280+bool __meminitdata extra_latent_entropy;
99281+
99282+static int __init setup_pax_extra_latent_entropy(char *str)
99283+{
99284+ extra_latent_entropy = true;
99285+ return 0;
99286+}
99287+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
99288+
99289+volatile u64 latent_entropy __latent_entropy;
99290+EXPORT_SYMBOL(latent_entropy);
99291+#endif
99292+
99293 void __init __free_pages_bootmem(struct page *page, unsigned int order)
99294 {
99295 unsigned int nr_pages = 1 << order;
99296@@ -847,6 +872,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
99297 __ClearPageReserved(p);
99298 set_page_count(p, 0);
99299
99300+#ifdef CONFIG_PAX_LATENT_ENTROPY
99301+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
99302+ u64 hash = 0;
99303+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
99304+ const u64 *data = lowmem_page_address(page);
99305+
99306+ for (index = 0; index < end; index++)
99307+ hash ^= hash + data[index];
99308+ latent_entropy ^= hash;
99309+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
99310+ }
99311+#endif
99312+
99313 page_zone(page)->managed_pages += nr_pages;
99314 set_page_refcounted(page);
99315 __free_pages(page, order);
99316@@ -974,8 +1012,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
99317 kernel_map_pages(page, 1 << order, 1);
99318 kasan_alloc_pages(page, order);
99319
99320+#ifndef CONFIG_PAX_MEMORY_SANITIZE
99321 if (gfp_flags & __GFP_ZERO)
99322 prep_zero_page(page, order, gfp_flags);
99323+#endif
99324
99325 if (order && (gfp_flags & __GFP_COMP))
99326 prep_compound_page(page, order);
99327@@ -1699,7 +1739,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
99328 }
99329
99330 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
99331- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99332+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99333 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
99334 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
99335
99336@@ -2018,7 +2058,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
99337 do {
99338 mod_zone_page_state(zone, NR_ALLOC_BATCH,
99339 high_wmark_pages(zone) - low_wmark_pages(zone) -
99340- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99341+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99342 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
99343 } while (zone++ != preferred_zone);
99344 }
99345@@ -5738,7 +5778,7 @@ static void __setup_per_zone_wmarks(void)
99346
99347 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
99348 high_wmark_pages(zone) - low_wmark_pages(zone) -
99349- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99350+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99351
99352 setup_zone_migrate_reserve(zone);
99353 spin_unlock_irqrestore(&zone->lock, flags);
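
The PAX_LATENT_ENTROPY hunk folds the contents of every low-memory page freed at boot into a running value with hash ^= hash + data[index] and feeds the result to add_device_randomness(). A userspace sketch of the fold; note this is a cheap mixing step whose output is merely credited as unpredictable input, not a cryptographic hash:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Fold one page of u64 words, as the hunk does per freed boot page. */
static uint64_t fold_page(const uint64_t *data)
{
	uint64_t hash = 0;
	size_t end = PAGE_SIZE / sizeof(uint64_t);
	size_t index;

	for (index = 0; index < end; index++)
		hash ^= hash + data[index];
	return hash;
}

int main(void)
{
	uint64_t page[PAGE_SIZE / sizeof(uint64_t)];

	memset(page, 0xa5, sizeof(page));
	printf("fold = %#llx\n", (unsigned long long)fold_page(page));
	return 0;
}
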
99354diff --git a/mm/percpu.c b/mm/percpu.c
99355index 73c97a5..508ee25 100644
99356--- a/mm/percpu.c
99357+++ b/mm/percpu.c
99358@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
99359 static unsigned int pcpu_high_unit_cpu __read_mostly;
99360
99361 /* the address of the first chunk which starts with the kernel static area */
99362-void *pcpu_base_addr __read_mostly;
99363+void *pcpu_base_addr __read_only;
99364 EXPORT_SYMBOL_GPL(pcpu_base_addr);
99365
99366 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
99367diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
99368index b159769..d07037f 100644
99369--- a/mm/process_vm_access.c
99370+++ b/mm/process_vm_access.c
99371@@ -13,6 +13,7 @@
99372 #include <linux/uio.h>
99373 #include <linux/sched.h>
99374 #include <linux/highmem.h>
99375+#include <linux/security.h>
99376 #include <linux/ptrace.h>
99377 #include <linux/slab.h>
99378 #include <linux/syscalls.h>
99379@@ -154,19 +155,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99380 ssize_t iov_len;
99381 size_t total_len = iov_iter_count(iter);
99382
99383+ return -ENOSYS; // PaX: until properly audited
99384+
99385 /*
99386 * Work out how many pages of struct pages we're going to need
99387 * when eventually calling get_user_pages
99388 */
99389 for (i = 0; i < riovcnt; i++) {
99390 iov_len = rvec[i].iov_len;
99391- if (iov_len > 0) {
99392- nr_pages_iov = ((unsigned long)rvec[i].iov_base
99393- + iov_len)
99394- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
99395- / PAGE_SIZE + 1;
99396- nr_pages = max(nr_pages, nr_pages_iov);
99397- }
99398+ if (iov_len <= 0)
99399+ continue;
99400+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
99401+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
99402+ nr_pages = max(nr_pages, nr_pages_iov);
99403 }
99404
99405 if (nr_pages == 0)
99406@@ -194,6 +195,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99407 goto free_proc_pages;
99408 }
99409
99410+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
99411+ rc = -EPERM;
99412+ goto put_task_struct;
99413+ }
99414+
99415 mm = mm_access(task, PTRACE_MODE_ATTACH);
99416 if (!mm || IS_ERR(mm)) {
99417 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
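
The reworked iovec loop sizes the page array with (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1, an upper bound on the pages the buffer [base, base + len) touches. For a buffer ending exactly on a page boundary it overshoots by one, which is harmless when the result only feeds a max()-based sizing pass before get_user_pages(). A sketch of the arithmetic:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Upper bound on pages touched by [base, base + len), per the loop. */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	printf("%lu\n", pages_spanned(0x1000, 1));	/* 1 page */
	printf("%lu\n", pages_spanned(0x1800, 0x2000));	/* straddles 3 pages */
	printf("%lu\n", pages_spanned(0x1000, 0x1000));	/* exact page: bound of 2 */
	return 0;
}
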
99418diff --git a/mm/rmap.c b/mm/rmap.c
99419index c161a14..8a069bb 100644
99420--- a/mm/rmap.c
99421+++ b/mm/rmap.c
99422@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99423 struct anon_vma *anon_vma = vma->anon_vma;
99424 struct anon_vma_chain *avc;
99425
99426+#ifdef CONFIG_PAX_SEGMEXEC
99427+ struct anon_vma_chain *avc_m = NULL;
99428+#endif
99429+
99430 might_sleep();
99431 if (unlikely(!anon_vma)) {
99432 struct mm_struct *mm = vma->vm_mm;
99433@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99434 if (!avc)
99435 goto out_enomem;
99436
99437+#ifdef CONFIG_PAX_SEGMEXEC
99438+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
99439+ if (!avc_m)
99440+ goto out_enomem_free_avc;
99441+#endif
99442+
99443 anon_vma = find_mergeable_anon_vma(vma);
99444 allocated = NULL;
99445 if (!anon_vma) {
99446@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99447 /* page_table_lock to protect against threads */
99448 spin_lock(&mm->page_table_lock);
99449 if (likely(!vma->anon_vma)) {
99450+
99451+#ifdef CONFIG_PAX_SEGMEXEC
99452+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
99453+
99454+ if (vma_m) {
99455+ BUG_ON(vma_m->anon_vma);
99456+ vma_m->anon_vma = anon_vma;
99457+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
99458+ anon_vma->degree++;
99459+ avc_m = NULL;
99460+ }
99461+#endif
99462+
99463 vma->anon_vma = anon_vma;
99464 anon_vma_chain_link(vma, avc, anon_vma);
99465 /* vma reference or self-parent link for new root */
99466@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99467
99468 if (unlikely(allocated))
99469 put_anon_vma(allocated);
99470+
99471+#ifdef CONFIG_PAX_SEGMEXEC
99472+ if (unlikely(avc_m))
99473+ anon_vma_chain_free(avc_m);
99474+#endif
99475+
99476 if (unlikely(avc))
99477 anon_vma_chain_free(avc);
99478 }
99479 return 0;
99480
99481 out_enomem_free_avc:
99482+
99483+#ifdef CONFIG_PAX_SEGMEXEC
99484+ if (avc_m)
99485+ anon_vma_chain_free(avc_m);
99486+#endif
99487+
99488 anon_vma_chain_free(avc);
99489 out_enomem:
99490 return -ENOMEM;
99491@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
99492 * good chance of avoiding scanning the whole hierarchy when it searches where
99493 * page is mapped.
99494 */
99495-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99496+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
99497 {
99498 struct anon_vma_chain *avc, *pavc;
99499 struct anon_vma *root = NULL;
99500@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99501 * the corresponding VMA in the parent process is attached to.
99502 * Returns 0 on success, non-zero on failure.
99503 */
99504-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
99505+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
99506 {
99507 struct anon_vma_chain *avc;
99508 struct anon_vma *anon_vma;
99509@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
99510 void __init anon_vma_init(void)
99511 {
99512 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
99513- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
99514- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
99515+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
99516+ anon_vma_ctor);
99517+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
99518+ SLAB_PANIC|SLAB_NO_SANITIZE);
99519 }
99520
99521 /*
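
anon_vma_prepare() does its final linking under mm->page_table_lock, a spinlock, so the SEGMEXEC hunks allocate the mirror VMA's anon_vma_chain with GFP_KERNEL before taking the lock and free it afterwards if it turned out not to be needed. A generic userspace sketch of that preallocate-outside-the-lock pattern (a pthread mutex stands in for the spinlock; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_slot;	/* stands in for vma_m->anon_vma linkage */

static int prepare(int need_mirror)
{
	void *pre = NULL;

	if (need_mirror) {
		pre = malloc(64);	/* may block: done before locking */
		if (!pre)
			return -1;
	}

	pthread_mutex_lock(&lock);	/* no blocking allocation past here */
	if (need_mirror && !shared_slot) {
		shared_slot = pre;	/* preallocation consumed */
		pre = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(pre);	/* freeing an unused (or NULL) preallocation is safe */
	return 0;
}

int main(void)
{
	prepare(1);
	prepare(1);	/* second preallocation goes unused and is freed */
	printf("slot=%p\n", shared_slot);
	free(shared_slot);
	return 0;
}
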
99522diff --git a/mm/shmem.c b/mm/shmem.c
99523index cf2d0ca..ec06b8b 100644
99524--- a/mm/shmem.c
99525+++ b/mm/shmem.c
99526@@ -33,7 +33,7 @@
99527 #include <linux/swap.h>
99528 #include <linux/aio.h>
99529
99530-static struct vfsmount *shm_mnt;
99531+struct vfsmount *shm_mnt;
99532
99533 #ifdef CONFIG_SHMEM
99534 /*
99535@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
99536 #define BOGO_DIRENT_SIZE 20
99537
99538 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
99539-#define SHORT_SYMLINK_LEN 128
99540+#define SHORT_SYMLINK_LEN 64
99541
99542 /*
99543 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99544@@ -2555,6 +2555,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
99545 static int shmem_xattr_validate(const char *name)
99546 {
99547 struct { const char *prefix; size_t len; } arr[] = {
99548+
99549+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99550+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
99551+#endif
99552+
99553 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
99554 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
99555 };
99556@@ -2610,6 +2615,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
99557 if (err)
99558 return err;
99559
99560+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99561+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
99562+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
99563+ return -EOPNOTSUPP;
99564+ if (size > 8)
99565+ return -EINVAL;
99566+ }
99567+#endif
99568+
99569 return simple_xattr_set(&info->xattrs, name, value, size, flags);
99570 }
99571
99572@@ -2993,8 +3007,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
99573 int err = -ENOMEM;
99574
99575 /* Round up to L1_CACHE_BYTES to resist false sharing */
99576- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
99577- L1_CACHE_BYTES), GFP_KERNEL);
99578+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
99579 if (!sbinfo)
99580 return -ENOMEM;
99581
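
With PAX_XATTR_PAX_FLAGS, shmem_xattr_validate() admits the user.* namespace, and shmem_setxattr() then narrows it to exactly XATTR_NAME_PAX_FLAGS ("user.pax.flags") with a value of at most 8 bytes. A userspace sketch of the filter (prefix constants reproduced from the uapi xattr header of that era):

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <errno.h>

#define XATTR_USER_PREFIX "user."
#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS "user.pax.flags"

/* Mirrors the hunk: inside user.*, only user.pax.flags (<= 8 bytes) passes. */
static int pax_xattr_filter(const char *name, size_t size)
{
	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
			return -EOPNOTSUPP;
		if (size > 8)
			return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", pax_xattr_filter("user.pax.flags", 4));	/* 0 */
	printf("%d\n", pax_xattr_filter("user.mime_type", 4));	/* -EOPNOTSUPP */
	printf("%d\n", pax_xattr_filter("user.pax.flags", 16));	/* -EINVAL */
	return 0;
}
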
99582diff --git a/mm/slab.c b/mm/slab.c
99583index c4b89ea..20990be 100644
99584--- a/mm/slab.c
99585+++ b/mm/slab.c
99586@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99587 if ((x)->max_freeable < i) \
99588 (x)->max_freeable = i; \
99589 } while (0)
99590-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
99591-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
99592-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
99593-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
99594+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
99595+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
99596+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
99597+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
99598+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
99599+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
99600 #else
99601 #define STATS_INC_ACTIVE(x) do { } while (0)
99602 #define STATS_DEC_ACTIVE(x) do { } while (0)
99603@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99604 #define STATS_INC_ALLOCMISS(x) do { } while (0)
99605 #define STATS_INC_FREEHIT(x) do { } while (0)
99606 #define STATS_INC_FREEMISS(x) do { } while (0)
99607+#define STATS_INC_SANITIZED(x) do { } while (0)
99608+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
99609 #endif
99610
99611 #if DEBUG
99612@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
99613 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
99614 */
99615 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
99616- const struct page *page, void *obj)
99617+ const struct page *page, const void *obj)
99618 {
99619 u32 offset = (obj - page->s_mem);
99620 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
99621@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
99622 * structures first. Without this, further allocations will bug.
99623 */
99624 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
99625- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
99626+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99627 slab_state = PARTIAL_NODE;
99628
99629 slab_early_init = 0;
99630@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99631
99632 cachep = find_mergeable(size, align, flags, name, ctor);
99633 if (cachep) {
99634- cachep->refcount++;
99635+ atomic_inc(&cachep->refcount);
99636
99637 /*
99638 * Adjust the object sizes so that we clear
99639@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
99640 struct array_cache *ac = cpu_cache_get(cachep);
99641
99642 check_irq_off();
99643+
99644+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99645+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
99646+ STATS_INC_NOT_SANITIZED(cachep);
99647+ else {
99648+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
99649+
99650+ if (cachep->ctor)
99651+ cachep->ctor(objp);
99652+
99653+ STATS_INC_SANITIZED(cachep);
99654+ }
99655+#endif
99656+
99657 kmemleak_free_recursive(objp, cachep->flags);
99658 objp = cache_free_debugcheck(cachep, objp, caller);
99659
99660@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
99661 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
99662 }
99663
99664-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99665+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99666 {
99667 return __do_kmalloc_node(size, flags, node, _RET_IP_);
99668 }
99669@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
99670 * @flags: the type of memory to allocate (see kmalloc).
99671 * @caller: function caller for debug tracking of the caller
99672 */
99673-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
99674+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
99675 unsigned long caller)
99676 {
99677 struct kmem_cache *cachep;
99678@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
99679
99680 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99681 return;
99682+ VM_BUG_ON(!virt_addr_valid(objp));
99683 local_irq_save(flags);
99684 kfree_debugcheck(objp);
99685 c = virt_to_cache(objp);
99686@@ -3981,14 +4000,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99687 }
99688 /* cpu stats */
99689 {
99690- unsigned long allochit = atomic_read(&cachep->allochit);
99691- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99692- unsigned long freehit = atomic_read(&cachep->freehit);
99693- unsigned long freemiss = atomic_read(&cachep->freemiss);
99694+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99695+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99696+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99697+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99698
99699 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99700 allochit, allocmiss, freehit, freemiss);
99701 }
99702+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99703+ {
99704+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99705+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99706+
99707+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99708+ }
99709+#endif
99710 #endif
99711 }
99712
99713@@ -4196,13 +4223,69 @@ static const struct file_operations proc_slabstats_operations = {
99714 static int __init slab_proc_init(void)
99715 {
99716 #ifdef CONFIG_DEBUG_SLAB_LEAK
99717- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99718+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99719 #endif
99720 return 0;
99721 }
99722 module_init(slab_proc_init);
99723 #endif
99724
99725+bool is_usercopy_object(const void *ptr)
99726+{
99727+ struct page *page;
99728+ struct kmem_cache *cachep;
99729+
99730+ if (ZERO_OR_NULL_PTR(ptr))
99731+ return false;
99732+
99733+ if (!slab_is_available())
99734+ return false;
99735+
99736+ if (!virt_addr_valid(ptr))
99737+ return false;
99738+
99739+ page = virt_to_head_page(ptr);
99740+
99741+ if (!PageSlab(page))
99742+ return false;
99743+
99744+ cachep = page->slab_cache;
99745+ return cachep->flags & SLAB_USERCOPY;
99746+}
99747+
99748+#ifdef CONFIG_PAX_USERCOPY
99749+const char *check_heap_object(const void *ptr, unsigned long n)
99750+{
99751+ struct page *page;
99752+ struct kmem_cache *cachep;
99753+ unsigned int objnr;
99754+ unsigned long offset;
99755+
99756+ if (ZERO_OR_NULL_PTR(ptr))
99757+ return "<null>";
99758+
99759+ if (!virt_addr_valid(ptr))
99760+ return NULL;
99761+
99762+ page = virt_to_head_page(ptr);
99763+
99764+ if (!PageSlab(page))
99765+ return NULL;
99766+
99767+ cachep = page->slab_cache;
99768+ if (!(cachep->flags & SLAB_USERCOPY))
99769+ return cachep->name;
99770+
99771+ objnr = obj_to_index(cachep, page, ptr);
99772+ BUG_ON(objnr >= cachep->num);
99773+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99774+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99775+ return NULL;
99776+
99777+ return cachep->name;
99778+}
99779+#endif
99780+
99781 /**
99782 * ksize - get the actual amount of memory allocated for a given object
99783 * @objp: Pointer to the object
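
check_heap_object() for SLAB recovers the object index from the pointer, derives the offset inside the object, and accepts the copy only when offset <= object_size && n <= object_size - offset. Keeping the subtraction on the right-hand side is what makes the test overflow-safe: a naive offset + n <= object_size can wrap for attacker-supplied n. A sketch contrasting the two forms:

#include <assert.h>
#include <stdio.h>

/* Overflow-safe: size - offset never wraps once offset <= size. */
static int span_ok(unsigned long offset, unsigned long n, unsigned long size)
{
	return offset <= size && n <= size - offset;
}

/* Naive version: offset + n can wrap to a small value and pass. */
static int span_naive(unsigned long offset, unsigned long n, unsigned long size)
{
	return offset + n <= size;
}

int main(void)
{
	unsigned long size = 128;

	assert(span_ok(0, 128, size));
	assert(!span_ok(64, 65, size));
	/* a huge n wraps the naive sum past zero and is wrongly accepted */
	assert(span_naive(64, ~0UL, size) && !span_ok(64, ~0UL, size));
	puts("ok");
	return 0;
}
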
99784diff --git a/mm/slab.h b/mm/slab.h
99785index 4c3ac12..7b2e470 100644
99786--- a/mm/slab.h
99787+++ b/mm/slab.h
99788@@ -22,7 +22,7 @@ struct kmem_cache {
99789 unsigned int align; /* Alignment as calculated */
99790 unsigned long flags; /* Active flags on the slab */
99791 const char *name; /* Slab name for sysfs */
99792- int refcount; /* Use counter */
99793+ atomic_t refcount; /* Use counter */
99794 void (*ctor)(void *); /* Called on object slot creation */
99795 struct list_head list; /* List of all slab caches on the system */
99796 };
99797@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
99798 /* The slab cache that manages slab cache information */
99799 extern struct kmem_cache *kmem_cache;
99800
99801+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99802+#ifdef CONFIG_X86_64
99803+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99804+#else
99805+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99806+#endif
99807+enum pax_sanitize_mode {
99808+ PAX_SANITIZE_SLAB_OFF = 0,
99809+ PAX_SANITIZE_SLAB_FAST,
99810+ PAX_SANITIZE_SLAB_FULL,
99811+};
99812+extern enum pax_sanitize_mode pax_sanitize_slab;
99813+#endif
99814+
99815 unsigned long calculate_alignment(unsigned long flags,
99816 unsigned long align, unsigned long size);
99817
99818@@ -114,7 +128,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
99819
99820 /* Legal flag mask for kmem_cache_create(), for various configurations */
99821 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99822- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99823+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99824+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99825
99826 #if defined(CONFIG_DEBUG_SLAB)
99827 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99828@@ -315,6 +330,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99829 return s;
99830
99831 page = virt_to_head_page(x);
99832+
99833+ BUG_ON(!PageSlab(page));
99834+
99835 cachep = page->slab_cache;
99836 if (slab_equal_or_root(cachep, s))
99837 return cachep;
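
The sanitize machinery declared here poisons freed objects with PAX_MEMORY_SANITIZE_VALUE (0xfe on x86_64, 0xff elsewhere), so freed memory holds a recognizable pattern instead of stale data; caches with a constructor re-run it afterwards so the object still satisfies the ctor invariant (see the __cache_free hunk above and the slab_free hunk in mm/slub.c below). A toy userspace demonstration of the effect, sanitizing just before free:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define POISON 0xfe	/* PAX_MEMORY_SANITIZE_VALUE on x86_64 */

int main(void)
{
	char *obj = malloc(16);

	if (!obj)
		return 1;
	strcpy(obj, "hunter2");		/* pretend this held something sensitive */
	memset(obj, POISON, 16);	/* what sanitization does at free time */
	printf("byte 0 after sanitize: %#x\n", (unsigned char)obj[0]);
	free(obj);
	return 0;
}
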
99838diff --git a/mm/slab_common.c b/mm/slab_common.c
99839index 999bb34..9843aea 100644
99840--- a/mm/slab_common.c
99841+++ b/mm/slab_common.c
99842@@ -25,11 +25,35 @@
99843
99844 #include "slab.h"
99845
99846-enum slab_state slab_state;
99847+enum slab_state slab_state __read_only;
99848 LIST_HEAD(slab_caches);
99849 DEFINE_MUTEX(slab_mutex);
99850 struct kmem_cache *kmem_cache;
99851
99852+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99853+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99854+static int __init pax_sanitize_slab_setup(char *str)
99855+{
99856+ if (!str)
99857+ return 0;
99858+
99859+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99860+ pr_info("PaX slab sanitization: %s\n", "disabled");
99861+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99862+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99863+ pr_info("PaX slab sanitization: %s\n", "fast");
99864+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99865+ } else if (!strcmp(str, "full")) {
99866+ pr_info("PaX slab sanitization: %s\n", "full");
99867+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99868+ } else
99869+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99870+
99871+ return 0;
99872+}
99873+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99874+#endif
99875+
99876 /*
99877 * Set of flags that will prevent slab merging
99878 */
99879@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99880 * Merge control. If this is set then no merging of slab caches will occur.
99881 * (Could be removed. This was introduced to pacify the merge skeptics.)
99882 */
99883-static int slab_nomerge;
99884+static int slab_nomerge = 1;
99885
99886 static int __init setup_slab_nomerge(char *str)
99887 {
99888@@ -217,7 +241,7 @@ int slab_unmergeable(struct kmem_cache *s)
99889 /*
99890 * We may have set a slab to be unmergeable during bootstrap.
99891 */
99892- if (s->refcount < 0)
99893+ if (atomic_read(&s->refcount) < 0)
99894 return 1;
99895
99896 return 0;
99897@@ -321,7 +345,7 @@ do_kmem_cache_create(const char *name, size_t object_size, size_t size,
99898 if (err)
99899 goto out_free_cache;
99900
99901- s->refcount = 1;
99902+ atomic_set(&s->refcount, 1);
99903 list_add(&s->list, &slab_caches);
99904 out:
99905 if (err)
99906@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99907 */
99908 flags &= CACHE_CREATE_MASK;
99909
99910+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99911+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99912+ flags |= SLAB_NO_SANITIZE;
99913+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99914+ flags &= ~SLAB_NO_SANITIZE;
99915+#endif
99916+
99917 s = __kmem_cache_alias(name, size, align, flags, ctor);
99918 if (s)
99919 goto out_unlock;
99920@@ -456,7 +487,7 @@ static void do_kmem_cache_release(struct list_head *release,
99921 rcu_barrier();
99922
99923 list_for_each_entry_safe(s, s2, release, list) {
99924-#ifdef SLAB_SUPPORTS_SYSFS
99925+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99926 sysfs_slab_remove(s);
99927 #else
99928 slab_kmem_cache_release(s);
99929@@ -625,8 +656,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99930
99931 mutex_lock(&slab_mutex);
99932
99933- s->refcount--;
99934- if (s->refcount)
99935+ if (!atomic_dec_and_test(&s->refcount))
99936 goto out_unlock;
99937
99938 for_each_memcg_cache_safe(c, c2, s) {
99939@@ -691,7 +721,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99940 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99941 name, size, err);
99942
99943- s->refcount = -1; /* Exempt from merging for now */
99944+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99945 }
99946
99947 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99948@@ -704,7 +734,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99949
99950 create_boot_cache(s, name, size, flags);
99951 list_add(&s->list, &slab_caches);
99952- s->refcount = 1;
99953+ atomic_set(&s->refcount, 1);
99954 return s;
99955 }
99956
99957@@ -716,6 +746,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99958 EXPORT_SYMBOL(kmalloc_dma_caches);
99959 #endif
99960
99961+#ifdef CONFIG_PAX_USERCOPY_SLABS
99962+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99963+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99964+#endif
99965+
99966 /*
99967 * Conversion table for small slabs sizes / 8 to the index in the
99968 * kmalloc array. This is necessary for slabs < 192 since we have non power
99969@@ -780,6 +815,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99970 return kmalloc_dma_caches[index];
99971
99972 #endif
99973+
99974+#ifdef CONFIG_PAX_USERCOPY_SLABS
99975+ if (unlikely((flags & GFP_USERCOPY)))
99976+ return kmalloc_usercopy_caches[index];
99977+
99978+#endif
99979+
99980 return kmalloc_caches[index];
99981 }
99982
99983@@ -836,7 +878,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99984 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99985 if (!kmalloc_caches[i]) {
99986 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99987- 1 << i, flags);
99988+ 1 << i, SLAB_USERCOPY | flags);
99989 }
99990
99991 /*
99992@@ -845,10 +887,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99993 * earlier power of two caches
99994 */
99995 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99996- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99997+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99998
99999 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
100000- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
100001+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
100002 }
100003
100004 /* Kmalloc array is now usable */
100005@@ -881,6 +923,23 @@ void __init create_kmalloc_caches(unsigned long flags)
100006 }
100007 }
100008 #endif
100009+
100010+#ifdef CONFIG_PAX_USERCOPY_SLABS
100011+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
100012+ struct kmem_cache *s = kmalloc_caches[i];
100013+
100014+ if (s) {
100015+ int size = kmalloc_size(i);
100016+ char *n = kasprintf(GFP_NOWAIT,
100017+ "usercopy-kmalloc-%d", size);
100018+
100019+ BUG_ON(!n);
100020+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
100021+ size, SLAB_USERCOPY | flags);
100022+ }
100023+ }
100024+#endif
100025+
100026 }
100027 #endif /* !CONFIG_SLOB */
100028
100029@@ -940,6 +999,9 @@ static void print_slabinfo_header(struct seq_file *m)
100030 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
100031 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
100032 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
100033+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100034+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
100035+#endif
100036 #endif
100037 seq_putc(m, '\n');
100038 }
100039@@ -1069,7 +1131,7 @@ static int __init slab_proc_init(void)
100040 module_init(slab_proc_init);
100041 #endif /* CONFIG_SLABINFO */
100042
100043-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
100044+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
100045 gfp_t flags)
100046 {
100047 void *ret;
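
The pax_sanitize_slab= early param maps "0"/"off", "1"/"fast" and "full" onto the three-mode enum, leaving the compiled-in default (fast) in place for anything else; kmem_cache_create() then turns the mode into per-cache SLAB_NO_SANITIZE decisions. A standalone sketch of the parsing logic:

#include <stdio.h>
#include <string.h>

enum pax_sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };

/* Mirrors the early_param handler: unknown strings keep the default
 * (fast) and just complain. */
static enum pax_sanitize_mode parse_sanitize(const char *str)
{
	enum pax_sanitize_mode mode = SANITIZE_FAST;

	if (!str)
		return mode;
	if (!strcmp(str, "0") || !strcmp(str, "off"))
		mode = SANITIZE_OFF;
	else if (!strcmp(str, "1") || !strcmp(str, "fast"))
		mode = SANITIZE_FAST;
	else if (!strcmp(str, "full"))
		mode = SANITIZE_FULL;
	else
		fprintf(stderr, "unsupported option '%s'\n", str);
	return mode;
}

int main(void)
{
	printf("%d %d %d\n", parse_sanitize("off"),
	       parse_sanitize("full"), parse_sanitize("bogus"));
	return 0;
}
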
100048diff --git a/mm/slob.c b/mm/slob.c
100049index 94a7fed..cf3fb1a 100644
100050--- a/mm/slob.c
100051+++ b/mm/slob.c
100052@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
100053 /*
100054 * Return the size of a slob block.
100055 */
100056-static slobidx_t slob_units(slob_t *s)
100057+static slobidx_t slob_units(const slob_t *s)
100058 {
100059 if (s->units > 0)
100060 return s->units;
100061@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
100062 /*
100063 * Return the next free slob block pointer after this one.
100064 */
100065-static slob_t *slob_next(slob_t *s)
100066+static slob_t *slob_next(const slob_t *s)
100067 {
100068 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
100069 slobidx_t next;
100070@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
100071 /*
100072 * Returns true if s is the last free block in its page.
100073 */
100074-static int slob_last(slob_t *s)
100075+static int slob_last(const slob_t *s)
100076 {
100077 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
100078 }
100079
100080-static void *slob_new_pages(gfp_t gfp, int order, int node)
100081+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
100082 {
100083- void *page;
100084+ struct page *page;
100085
100086 #ifdef CONFIG_NUMA
100087 if (node != NUMA_NO_NODE)
100088@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
100089 if (!page)
100090 return NULL;
100091
100092- return page_address(page);
100093+ __SetPageSlab(page);
100094+ return page;
100095 }
100096
100097-static void slob_free_pages(void *b, int order)
100098+static void slob_free_pages(struct page *sp, int order)
100099 {
100100 if (current->reclaim_state)
100101 current->reclaim_state->reclaimed_slab += 1 << order;
100102- free_pages((unsigned long)b, order);
100103+ __ClearPageSlab(sp);
100104+ page_mapcount_reset(sp);
100105+ sp->private = 0;
100106+ __free_pages(sp, order);
100107 }
100108
100109 /*
100110@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100111
100112 /* Not enough space: must allocate a new page */
100113 if (!b) {
100114- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100115- if (!b)
100116+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100117+ if (!sp)
100118 return NULL;
100119- sp = virt_to_page(b);
100120- __SetPageSlab(sp);
100121+ b = page_address(sp);
100122
100123 spin_lock_irqsave(&slob_lock, flags);
100124 sp->units = SLOB_UNITS(PAGE_SIZE);
100125 sp->freelist = b;
100126+ sp->private = 0;
100127 INIT_LIST_HEAD(&sp->lru);
100128 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
100129 set_slob_page_free(sp, slob_list);
100130@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100131 /*
100132 * slob_free: entry point into the slob allocator.
100133 */
100134-static void slob_free(void *block, int size)
100135+static void slob_free(struct kmem_cache *c, void *block, int size)
100136 {
100137 struct page *sp;
100138 slob_t *prev, *next, *b = (slob_t *)block;
100139@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
100140 if (slob_page_free(sp))
100141 clear_slob_page_free(sp);
100142 spin_unlock_irqrestore(&slob_lock, flags);
100143- __ClearPageSlab(sp);
100144- page_mapcount_reset(sp);
100145- slob_free_pages(b, 0);
100146+ slob_free_pages(sp, 0);
100147 return;
100148 }
100149
100150+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100151+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
100152+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
100153+#endif
100154+
100155 if (!slob_page_free(sp)) {
100156 /* This slob page is about to become partially free. Easy! */
100157 sp->units = units;
100158@@ -424,11 +431,10 @@ out:
100159 */
100160
100161 static __always_inline void *
100162-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100163+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
100164 {
100165- unsigned int *m;
100166- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100167- void *ret;
100168+ slob_t *m;
100169+ void *ret = NULL;
100170
100171 gfp &= gfp_allowed_mask;
100172
100173@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100174
100175 if (!m)
100176 return NULL;
100177- *m = size;
100178+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
100179+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
100180+ m[0].units = size;
100181+ m[1].units = align;
100182 ret = (void *)m + align;
100183
100184 trace_kmalloc_node(caller, ret,
100185 size, size + align, gfp, node);
100186 } else {
100187 unsigned int order = get_order(size);
100188+ struct page *page;
100189
100190 if (likely(order))
100191 gfp |= __GFP_COMP;
100192- ret = slob_new_pages(gfp, order, node);
100193+ page = slob_new_pages(gfp, order, node);
100194+ if (page) {
100195+ ret = page_address(page);
100196+ page->private = size;
100197+ }
100198
100199 trace_kmalloc_node(caller, ret,
100200 size, PAGE_SIZE << order, gfp, node);
100201 }
100202
100203- kmemleak_alloc(ret, size, 1, gfp);
100204 return ret;
100205 }
100206
100207-void *__kmalloc(size_t size, gfp_t gfp)
100208+static __always_inline void *
100209+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100210+{
100211+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100212+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
100213+
100214+ if (!ZERO_OR_NULL_PTR(ret))
100215+ kmemleak_alloc(ret, size, 1, gfp);
100216+ return ret;
100217+}
100218+
100219+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
100220 {
100221 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
100222 }
100223@@ -491,34 +515,112 @@ void kfree(const void *block)
100224 return;
100225 kmemleak_free(block);
100226
100227+ VM_BUG_ON(!virt_addr_valid(block));
100228 sp = virt_to_page(block);
100229- if (PageSlab(sp)) {
100230+ VM_BUG_ON(!PageSlab(sp));
100231+ if (!sp->private) {
100232 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100233- unsigned int *m = (unsigned int *)(block - align);
100234- slob_free(m, *m + align);
100235- } else
100236+ slob_t *m = (slob_t *)(block - align);
100237+ slob_free(NULL, m, m[0].units + align);
100238+ } else {
100239+ __ClearPageSlab(sp);
100240+ page_mapcount_reset(sp);
100241+ sp->private = 0;
100242 __free_pages(sp, compound_order(sp));
100243+ }
100244 }
100245 EXPORT_SYMBOL(kfree);
100246
100247+bool is_usercopy_object(const void *ptr)
100248+{
100249+ if (!slab_is_available())
100250+ return false;
100251+
100252+ // PAX: TODO
100253+
100254+ return false;
100255+}
100256+
100257+#ifdef CONFIG_PAX_USERCOPY
100258+const char *check_heap_object(const void *ptr, unsigned long n)
100259+{
100260+ struct page *page;
100261+ const slob_t *free;
100262+ const void *base;
100263+ unsigned long flags;
100264+
100265+ if (ZERO_OR_NULL_PTR(ptr))
100266+ return "<null>";
100267+
100268+ if (!virt_addr_valid(ptr))
100269+ return NULL;
100270+
100271+ page = virt_to_head_page(ptr);
100272+ if (!PageSlab(page))
100273+ return NULL;
100274+
100275+ if (page->private) {
100276+ base = page;
100277+ if (base <= ptr && n <= page->private - (ptr - base))
100278+ return NULL;
100279+ return "<slob>";
100280+ }
100281+
100282+ /* some tricky double walking to find the chunk */
100283+ spin_lock_irqsave(&slob_lock, flags);
100284+ base = (void *)((unsigned long)ptr & PAGE_MASK);
100285+ free = page->freelist;
100286+
100287+ while (!slob_last(free) && (void *)free <= ptr) {
100288+ base = free + slob_units(free);
100289+ free = slob_next(free);
100290+ }
100291+
100292+ while (base < (void *)free) {
100293+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
100294+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
100295+ int offset;
100296+
100297+ if (ptr < base + align)
100298+ break;
100299+
100300+ offset = ptr - base - align;
100301+ if (offset >= m) {
100302+ base += size;
100303+ continue;
100304+ }
100305+
100306+ if (n > m - offset)
100307+ break;
100308+
100309+ spin_unlock_irqrestore(&slob_lock, flags);
100310+ return NULL;
100311+ }
100312+
100313+ spin_unlock_irqrestore(&slob_lock, flags);
100314+ return "<slob>";
100315+}
100316+#endif
100317+
100318 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
100319 size_t ksize(const void *block)
100320 {
100321 struct page *sp;
100322 int align;
100323- unsigned int *m;
100324+ slob_t *m;
100325
100326 BUG_ON(!block);
100327 if (unlikely(block == ZERO_SIZE_PTR))
100328 return 0;
100329
100330 sp = virt_to_page(block);
100331- if (unlikely(!PageSlab(sp)))
100332- return PAGE_SIZE << compound_order(sp);
100333+ VM_BUG_ON(!PageSlab(sp));
100334+ if (sp->private)
100335+ return sp->private;
100336
100337 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100338- m = (unsigned int *)(block - align);
100339- return SLOB_UNITS(*m) * SLOB_UNIT;
100340+ m = (slob_t *)(block - align);
100341+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
100342 }
100343 EXPORT_SYMBOL(ksize);
100344
100345@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
100346
100347 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
100348 {
100349- void *b;
100350+ void *b = NULL;
100351
100352 flags &= gfp_allowed_mask;
100353
100354 lockdep_trace_alloc(flags);
100355
100356+#ifdef CONFIG_PAX_USERCOPY_SLABS
100357+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
100358+#else
100359 if (c->size < PAGE_SIZE) {
100360 b = slob_alloc(c->size, flags, c->align, node);
100361 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100362 SLOB_UNITS(c->size) * SLOB_UNIT,
100363 flags, node);
100364 } else {
100365- b = slob_new_pages(flags, get_order(c->size), node);
100366+ struct page *sp;
100367+
100368+ sp = slob_new_pages(flags, get_order(c->size), node);
100369+ if (sp) {
100370+ b = page_address(sp);
100371+ sp->private = c->size;
100372+ }
100373 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100374 PAGE_SIZE << get_order(c->size),
100375 flags, node);
100376 }
100377+#endif
100378
100379 if (b && c->ctor)
100380 c->ctor(b);
100381@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
100382 EXPORT_SYMBOL(kmem_cache_alloc);
100383
100384 #ifdef CONFIG_NUMA
100385-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
100386+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
100387 {
100388 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
100389 }
100390@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
100391 EXPORT_SYMBOL(kmem_cache_alloc_node);
100392 #endif
100393
100394-static void __kmem_cache_free(void *b, int size)
100395+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
100396 {
100397- if (size < PAGE_SIZE)
100398- slob_free(b, size);
100399+ struct page *sp;
100400+
100401+ sp = virt_to_page(b);
100402+ BUG_ON(!PageSlab(sp));
100403+ if (!sp->private)
100404+ slob_free(c, b, size);
100405 else
100406- slob_free_pages(b, get_order(size));
100407+ slob_free_pages(sp, get_order(size));
100408 }
100409
100410 static void kmem_rcu_free(struct rcu_head *head)
100411@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
100412 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
100413 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
100414
100415- __kmem_cache_free(b, slob_rcu->size);
100416+ __kmem_cache_free(NULL, b, slob_rcu->size);
100417 }
100418
100419 void kmem_cache_free(struct kmem_cache *c, void *b)
100420 {
100421+ int size = c->size;
100422+
100423+#ifdef CONFIG_PAX_USERCOPY_SLABS
100424+ if (size + c->align < PAGE_SIZE) {
100425+ size += c->align;
100426+ b -= c->align;
100427+ }
100428+#endif
100429+
100430 kmemleak_free_recursive(b, c->flags);
100431 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
100432 struct slob_rcu *slob_rcu;
100433- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
100434- slob_rcu->size = c->size;
100435+ slob_rcu = b + (size - sizeof(struct slob_rcu));
100436+ slob_rcu->size = size;
100437 call_rcu(&slob_rcu->head, kmem_rcu_free);
100438 } else {
100439- __kmem_cache_free(b, c->size);
100440+ __kmem_cache_free(c, b, size);
100441 }
100442
100443+#ifdef CONFIG_PAX_USERCOPY_SLABS
100444+ trace_kfree(_RET_IP_, b);
100445+#else
100446 trace_kmem_cache_free(_RET_IP_, b);
100447+#endif
100448+
100449 }
100450 EXPORT_SYMBOL(kmem_cache_free);
100451
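
The slob rework reserves two slob_t header units in front of each small allocation -- m[0].units carries the requested size and m[1].units the alignment -- so kfree() and ksize() can recover both, and check_heap_object() can walk a page's chunks. A toy userspace allocator illustrating size-plus-padding headers stored in front of the returned pointer (it leans on malloc()'s natural alignment, which real slob does not; purely illustrative):

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct header { size_t size, pad; };	/* plays the role of m[0]/m[1]; slob
					 * stores the alignment here, since its
					 * header padding equals the alignment */

static void *toy_alloc(size_t size, size_t align)
{
	/* header lives in the padding in front of the payload; this toy
	 * assumes align <= malloc()'s natural alignment (16 on glibc/x86-64) */
	size_t pad = ALIGN_UP(sizeof(struct header), align);
	char *raw = malloc(pad + size);
	struct header *h;

	if (!raw)
		return NULL;
	h = (struct header *)(raw + pad) - 1;
	h->size = size;
	h->pad = pad;
	return raw + pad;
}

static size_t toy_ksize(const void *p)
{
	return ((const struct header *)p - 1)->size;
}

static void toy_free(void *p)
{
	struct header *h = (struct header *)p - 1;

	free((char *)p - h->pad);
}

int main(void)
{
	void *p = toy_alloc(100, 16);

	printf("ksize = %zu\n", toy_ksize(p));	/* 100 */
	toy_free(p);
	return 0;
}
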
100452diff --git a/mm/slub.c b/mm/slub.c
100453index 82c4737..55c316a 100644
100454--- a/mm/slub.c
100455+++ b/mm/slub.c
100456@@ -198,7 +198,7 @@ struct track {
100457
100458 enum track_item { TRACK_ALLOC, TRACK_FREE };
100459
100460-#ifdef CONFIG_SYSFS
100461+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100462 static int sysfs_slab_add(struct kmem_cache *);
100463 static int sysfs_slab_alias(struct kmem_cache *, const char *);
100464 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
100465@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
100466 if (!t->addr)
100467 return;
100468
100469- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
100470+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
100471 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
100472 #ifdef CONFIG_STACKTRACE
100473 {
100474@@ -2709,6 +2709,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
100475
100476 slab_free_hook(s, x);
100477
100478+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100479+ if (!(s->flags & SLAB_NO_SANITIZE)) {
100480+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
100481+ if (s->ctor)
100482+ s->ctor(x);
100483+ }
100484+#endif
100485+
100486 redo:
100487 /*
100488 * Determine the currently cpus per cpu slab.
100489@@ -3050,6 +3058,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
100490 s->inuse = size;
100491
100492 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
100493+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100494+ (!(flags & SLAB_NO_SANITIZE)) ||
100495+#endif
100496 s->ctor)) {
100497 /*
100498 * Relocate free pointer after the object if it is not
100499@@ -3304,7 +3315,7 @@ static int __init setup_slub_min_objects(char *str)
100500
100501 __setup("slub_min_objects=", setup_slub_min_objects);
100502
100503-void *__kmalloc(size_t size, gfp_t flags)
100504+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
100505 {
100506 struct kmem_cache *s;
100507 void *ret;
100508@@ -3342,7 +3353,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
100509 return ptr;
100510 }
100511
100512-void *__kmalloc_node(size_t size, gfp_t flags, int node)
100513+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
100514 {
100515 struct kmem_cache *s;
100516 void *ret;
100517@@ -3390,6 +3401,59 @@ static size_t __ksize(const void *object)
100518 return slab_ksize(page->slab_cache);
100519 }
100520
100521+bool is_usercopy_object(const void *ptr)
100522+{
100523+ struct page *page;
100524+ struct kmem_cache *s;
100525+
100526+ if (ZERO_OR_NULL_PTR(ptr))
100527+ return false;
100528+
100529+ if (!slab_is_available())
100530+ return false;
100531+
100532+ if (!virt_addr_valid(ptr))
100533+ return false;
100534+
100535+ page = virt_to_head_page(ptr);
100536+
100537+ if (!PageSlab(page))
100538+ return false;
100539+
100540+ s = page->slab_cache;
100541+ return s->flags & SLAB_USERCOPY;
100542+}
100543+
100544+#ifdef CONFIG_PAX_USERCOPY
100545+const char *check_heap_object(const void *ptr, unsigned long n)
100546+{
100547+ struct page *page;
100548+ struct kmem_cache *s;
100549+ unsigned long offset;
100550+
100551+ if (ZERO_OR_NULL_PTR(ptr))
100552+ return "<null>";
100553+
100554+ if (!virt_addr_valid(ptr))
100555+ return NULL;
100556+
100557+ page = virt_to_head_page(ptr);
100558+
100559+ if (!PageSlab(page))
100560+ return NULL;
100561+
100562+ s = page->slab_cache;
100563+ if (!(s->flags & SLAB_USERCOPY))
100564+ return s->name;
100565+
100566+ offset = (ptr - page_address(page)) % s->size;
100567+ if (offset <= s->object_size && n <= s->object_size - offset)
100568+ return NULL;
100569+
100570+ return s->name;
100571+}
100572+#endif
100573+
100574 size_t ksize(const void *object)
100575 {
100576 size_t size = __ksize(object);
100577@@ -3410,6 +3474,7 @@ void kfree(const void *x)
100578 if (unlikely(ZERO_OR_NULL_PTR(x)))
100579 return;
100580
100581+ VM_BUG_ON(!virt_addr_valid(x));
100582 page = virt_to_head_page(x);
100583 if (unlikely(!PageSlab(page))) {
100584 BUG_ON(!PageCompound(page));
100585@@ -3726,7 +3791,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100586
100587 s = find_mergeable(size, align, flags, name, ctor);
100588 if (s) {
100589- s->refcount++;
100590+ atomic_inc(&s->refcount);
100591
100592 /*
100593 * Adjust the object sizes so that we clear
100594@@ -3742,7 +3807,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100595 }
100596
100597 if (sysfs_slab_alias(s, name)) {
100598- s->refcount--;
100599+ atomic_dec(&s->refcount);
100600 s = NULL;
100601 }
100602 }
100603@@ -3859,7 +3924,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
100604 }
100605 #endif
100606
100607-#ifdef CONFIG_SYSFS
100608+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100609 static int count_inuse(struct page *page)
100610 {
100611 return page->inuse;
100612@@ -4140,7 +4205,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
100613 len += sprintf(buf + len, "%7ld ", l->count);
100614
100615 if (l->addr)
100616+#ifdef CONFIG_GRKERNSEC_HIDESYM
100617+ len += sprintf(buf + len, "%pS", NULL);
100618+#else
100619 len += sprintf(buf + len, "%pS", (void *)l->addr);
100620+#endif
100621 else
100622 len += sprintf(buf + len, "<not-available>");
100623
100624@@ -4238,12 +4307,12 @@ static void __init resiliency_test(void)
100625 validate_slab_cache(kmalloc_caches[9]);
100626 }
100627 #else
100628-#ifdef CONFIG_SYSFS
100629+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100630 static void resiliency_test(void) {};
100631 #endif
100632 #endif
100633
100634-#ifdef CONFIG_SYSFS
100635+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100636 enum slab_stat_type {
100637 SL_ALL, /* All slabs */
100638 SL_PARTIAL, /* Only partially allocated slabs */
100639@@ -4480,13 +4549,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
100640 {
100641 if (!s->ctor)
100642 return 0;
100643+#ifdef CONFIG_GRKERNSEC_HIDESYM
100644+ return sprintf(buf, "%pS\n", NULL);
100645+#else
100646 return sprintf(buf, "%pS\n", s->ctor);
100647+#endif
100648 }
100649 SLAB_ATTR_RO(ctor);
100650
100651 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
100652 {
100653- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
100654+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
100655 }
100656 SLAB_ATTR_RO(aliases);
100657
100658@@ -4574,6 +4647,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
100659 SLAB_ATTR_RO(cache_dma);
100660 #endif
100661
100662+#ifdef CONFIG_PAX_USERCOPY_SLABS
100663+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
100664+{
100665+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
100666+}
100667+SLAB_ATTR_RO(usercopy);
100668+#endif
100669+
100670+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100671+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
100672+{
100673+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
100674+}
100675+SLAB_ATTR_RO(sanitize);
100676+#endif
100677+
100678 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
100679 {
100680 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
100681@@ -4629,7 +4718,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
100682 * as well as cause other issues like converting a mergeable
100683 * cache into an umergeable one.
100684 */
100685- if (s->refcount > 1)
100686+ if (atomic_read(&s->refcount) > 1)
100687 return -EINVAL;
100688
100689 s->flags &= ~SLAB_TRACE;
100690@@ -4749,7 +4838,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
100691 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
100692 size_t length)
100693 {
100694- if (s->refcount > 1)
100695+ if (atomic_read(&s->refcount) > 1)
100696 return -EINVAL;
100697
100698 s->flags &= ~SLAB_FAILSLAB;
100699@@ -4916,6 +5005,12 @@ static struct attribute *slab_attrs[] = {
100700 #ifdef CONFIG_ZONE_DMA
100701 &cache_dma_attr.attr,
100702 #endif
100703+#ifdef CONFIG_PAX_USERCOPY_SLABS
100704+ &usercopy_attr.attr,
100705+#endif
100706+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100707+ &sanitize_attr.attr,
100708+#endif
100709 #ifdef CONFIG_NUMA
100710 &remote_node_defrag_ratio_attr.attr,
100711 #endif
100712@@ -5157,6 +5252,7 @@ static char *create_unique_id(struct kmem_cache *s)
100713 return name;
100714 }
100715
100716+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100717 static int sysfs_slab_add(struct kmem_cache *s)
100718 {
100719 int err;
100720@@ -5230,6 +5326,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100721 kobject_del(&s->kobj);
100722 kobject_put(&s->kobj);
100723 }
100724+#endif
100725
100726 /*
100727 * Need to buffer aliases during bootup until sysfs becomes
100728@@ -5243,6 +5340,7 @@ struct saved_alias {
100729
100730 static struct saved_alias *alias_list;
100731
100732+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100733 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100734 {
100735 struct saved_alias *al;
100736@@ -5265,6 +5363,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100737 alias_list = al;
100738 return 0;
100739 }
100740+#endif
100741
100742 static int __init slab_sysfs_init(void)
100743 {
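
The slub.c hunks above convert the cache's plain `s->refcount` into an atomic counter (`atomic_dec`, `atomic_read`) and gate destructive sysfs stores on the count being 1. A minimal userspace C11 sketch of that pattern follows; `struct cache` and `cache_disable_feature()` are illustrative stand-ins, not kernel API.

/* Userspace sketch of the refcount pattern above: an atomic counter
 * lets concurrent readers skip locking, and destructive operations
 * are refused while aliases still hold references. */
#include <stdatomic.h>
#include <stdio.h>

struct cache {
    atomic_int refcount;   /* 1 == sole owner, >1 == aliased */
    unsigned long flags;
};

static int cache_disable_feature(struct cache *c, unsigned long bit)
{
    /* Mirrors trace_store()/failslab_store(): refuse to mutate a
     * cache that other users may still be merged with. */
    if (atomic_load(&c->refcount) > 1)
        return -1;               /* -EINVAL in the kernel code */
    c->flags &= ~bit;
    return 0;
}

int main(void)
{
    struct cache c = { .flags = 0x4 };
    atomic_init(&c.refcount, 2);           /* owner plus one alias */
    printf("disable while aliased: %d\n", cache_disable_feature(&c, 0x4));
    atomic_fetch_sub(&c.refcount, 1);      /* drop the alias */
    printf("disable as sole owner: %d\n", cache_disable_feature(&c, 0x4));
    return 0;
}
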
100744diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100745index 4cba9c2..b4f9fcc 100644
100746--- a/mm/sparse-vmemmap.c
100747+++ b/mm/sparse-vmemmap.c
100748@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100749 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100750 if (!p)
100751 return NULL;
100752- pud_populate(&init_mm, pud, p);
100753+ pud_populate_kernel(&init_mm, pud, p);
100754 }
100755 return pud;
100756 }
100757@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100758 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100759 if (!p)
100760 return NULL;
100761- pgd_populate(&init_mm, pgd, p);
100762+ pgd_populate_kernel(&init_mm, pgd, p);
100763 }
100764 return pgd;
100765 }
100766diff --git a/mm/sparse.c b/mm/sparse.c
100767index d1b48b6..6e8590e 100644
100768--- a/mm/sparse.c
100769+++ b/mm/sparse.c
100770@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100771
100772 for (i = 0; i < PAGES_PER_SECTION; i++) {
100773 if (PageHWPoison(&memmap[i])) {
100774- atomic_long_sub(1, &num_poisoned_pages);
100775+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100776 ClearPageHWPoison(&memmap[i]);
100777 }
100778 }
100779diff --git a/mm/swap.c b/mm/swap.c
100780index cd3a5e6..40c0c8f 100644
100781--- a/mm/swap.c
100782+++ b/mm/swap.c
100783@@ -31,6 +31,7 @@
100784 #include <linux/memcontrol.h>
100785 #include <linux/gfp.h>
100786 #include <linux/uio.h>
100787+#include <linux/hugetlb.h>
100788
100789 #include "internal.h"
100790
100791@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100792
100793 __page_cache_release(page);
100794 dtor = get_compound_page_dtor(page);
100795+ if (!PageHuge(page))
100796+ BUG_ON(dtor != free_compound_page);
100797 (*dtor)(page);
100798 }
100799
100800diff --git a/mm/swapfile.c b/mm/swapfile.c
100801index 63f55cc..31874e6 100644
100802--- a/mm/swapfile.c
100803+++ b/mm/swapfile.c
100804@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100805
100806 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100807 /* Activity counter to indicate that a swapon or swapoff has occurred */
100808-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100809+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100810
100811 static inline unsigned char swap_count(unsigned char ent)
100812 {
100813@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100814 spin_unlock(&swap_lock);
100815
100816 err = 0;
100817- atomic_inc(&proc_poll_event);
100818+ atomic_inc_unchecked(&proc_poll_event);
100819 wake_up_interruptible(&proc_poll_wait);
100820
100821 out_dput:
100822@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100823
100824 poll_wait(file, &proc_poll_wait, wait);
100825
100826- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100827- seq->poll_event = atomic_read(&proc_poll_event);
100828+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100829+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100830 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100831 }
100832
100833@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100834 return ret;
100835
100836 seq = file->private_data;
100837- seq->poll_event = atomic_read(&proc_poll_event);
100838+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100839 return 0;
100840 }
100841
100842@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100843 (frontswap_map) ? "FS" : "");
100844
100845 mutex_unlock(&swapon_mutex);
100846- atomic_inc(&proc_poll_event);
100847+ atomic_inc_unchecked(&proc_poll_event);
100848 wake_up_interruptible(&proc_poll_wait);
100849
100850 if (S_ISREG(inode->i_mode))
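
The swapfile.c hunks switch `proc_poll_event` to `atomic_unchecked_t`, which under PAX_REFCOUNT marks it as a counter where wraparound is intentional and overflow detection should not fire. A hedged userspace analogue of such an event counter, with illustrative names:

/* Sketch of an event counter in the style of proc_poll_event: the
 * value only signals "something changed since you last looked", so
 * silent wraparound is by design. Not kernel API. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint poll_event;

struct poll_state { unsigned int seen; };

static void note_swap_change(void)
{
    atomic_fetch_add(&poll_event, 1);   /* wraps silently, harmlessly */
}

static int poll_changed(struct poll_state *st)
{
    unsigned int cur = atomic_load(&poll_event);
    if (st->seen != cur) {
        st->seen = cur;                 /* as in swaps_poll() */
        return 1;
    }
    return 0;
}

int main(void)
{
    struct poll_state st = { .seen = atomic_load(&poll_event) };
    note_swap_change();
    printf("changed: %d, again: %d\n", poll_changed(&st), poll_changed(&st));
    return 0;
}
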
100851diff --git a/mm/util.c b/mm/util.c
100852index 3981ae9..28b585b 100644
100853--- a/mm/util.c
100854+++ b/mm/util.c
100855@@ -233,6 +233,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100856 void arch_pick_mmap_layout(struct mm_struct *mm)
100857 {
100858 mm->mmap_base = TASK_UNMAPPED_BASE;
100859+
100860+#ifdef CONFIG_PAX_RANDMMAP
100861+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100862+ mm->mmap_base += mm->delta_mmap;
100863+#endif
100864+
100865 mm->get_unmapped_area = arch_get_unmapped_area;
100866 }
100867 #endif
100868@@ -403,6 +409,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100869 if (!mm->arg_end)
100870 goto out_mm; /* Shh! No looking before we're done */
100871
100872+ if (gr_acl_handle_procpidmem(task))
100873+ goto out_mm;
100874+
100875 len = mm->arg_end - mm->arg_start;
100876
100877 if (len > buflen)
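
The arch_pick_mmap_layout() hunk above offsets `mm->mmap_base` by `mm->delta_mmap` when PAX_RANDMMAP is active. A minimal sketch of the idea, assuming a page-aligned random delta; the constants and entropy source here are illustrative assumptions, not the kernel's values.

/* Shift the mmap base by a page-aligned random delta chosen once
 * per process, as the hunk above does with mm->delta_mmap. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT         12
#define TASK_UNMAPPED_BASE 0x7f0000000000UL
#define RAND_BITS          28   /* assumed amount of mmap entropy */

static uintptr_t pick_mmap_base(void)
{
    uintptr_t delta = ((uintptr_t)rand() & ((1UL << RAND_BITS) - 1))
                          << PAGE_SHIFT;
    return TASK_UNMAPPED_BASE + delta;  /* mmap_base + delta_mmap */
}

int main(void)
{
    printf("mmap base: %#lx\n", (unsigned long)pick_mmap_base());
    return 0;
}
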
100878diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100879index 49abccf..7bd1931 100644
100880--- a/mm/vmalloc.c
100881+++ b/mm/vmalloc.c
100882@@ -39,20 +39,65 @@ struct vfree_deferred {
100883 struct work_struct wq;
100884 };
100885 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100886+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100887+
100888+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100889+struct stack_deferred_llist {
100890+ struct llist_head list;
100891+ void *stack;
100892+ void *lowmem_stack;
100893+};
100894+
100895+struct stack_deferred {
100896+ struct stack_deferred_llist list;
100897+ struct work_struct wq;
100898+};
100899+
100900+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100901+#endif
100902
100903 static void __vunmap(const void *, int);
100904
100905-static void free_work(struct work_struct *w)
100906+static void vfree_work(struct work_struct *w)
100907 {
100908 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100909 struct llist_node *llnode = llist_del_all(&p->list);
100910 while (llnode) {
100911- void *p = llnode;
100912+ void *x = llnode;
100913 llnode = llist_next(llnode);
100914- __vunmap(p, 1);
100915+ __vunmap(x, 1);
100916 }
100917 }
100918
100919+static void vunmap_work(struct work_struct *w)
100920+{
100921+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100922+ struct llist_node *llnode = llist_del_all(&p->list);
100923+ while (llnode) {
100924+ void *x = llnode;
100925+ llnode = llist_next(llnode);
100926+ __vunmap(x, 0);
100927+ }
100928+}
100929+
100930+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100931+static void unmap_work(struct work_struct *w)
100932+{
100933+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100934+ struct llist_node *llnode = llist_del_all(&p->list.list);
100935+ while (llnode) {
100936+ struct stack_deferred_llist *x =
100937+ llist_entry((struct llist_head *)llnode,
100938+ struct stack_deferred_llist, list);
100939+ void *stack = ACCESS_ONCE(x->stack);
100940+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100941+ llnode = llist_next(llnode);
100942+ __vunmap(stack, 0);
100943+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100944+ }
100945+}
100946+#endif
100947+
100948 /*** Page table manipulation functions ***/
100949
100950 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100951@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100952
100953 pte = pte_offset_kernel(pmd, addr);
100954 do {
100955- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100956- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100957+
100958+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100959+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100960+ BUG_ON(!pte_exec(*pte));
100961+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100962+ continue;
100963+ }
100964+#endif
100965+
100966+ {
100967+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100968+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100969+ }
100970 } while (pte++, addr += PAGE_SIZE, addr != end);
100971 }
100972
100973@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100974 pte = pte_alloc_kernel(pmd, addr);
100975 if (!pte)
100976 return -ENOMEM;
100977+
100978+ pax_open_kernel();
100979 do {
100980 struct page *page = pages[*nr];
100981
100982- if (WARN_ON(!pte_none(*pte)))
100983+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100984+ if (pgprot_val(prot) & _PAGE_NX)
100985+#endif
100986+
100987+ if (!pte_none(*pte)) {
100988+ pax_close_kernel();
100989+ WARN_ON(1);
100990 return -EBUSY;
100991- if (WARN_ON(!page))
100992+ }
100993+ if (!page) {
100994+ pax_close_kernel();
100995+ WARN_ON(1);
100996 return -ENOMEM;
100997+ }
100998 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100999 (*nr)++;
101000 } while (pte++, addr += PAGE_SIZE, addr != end);
101001+ pax_close_kernel();
101002 return 0;
101003 }
101004
101005@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
101006 pmd_t *pmd;
101007 unsigned long next;
101008
101009- pmd = pmd_alloc(&init_mm, pud, addr);
101010+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
101011 if (!pmd)
101012 return -ENOMEM;
101013 do {
101014@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
101015 pud_t *pud;
101016 unsigned long next;
101017
101018- pud = pud_alloc(&init_mm, pgd, addr);
101019+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
101020 if (!pud)
101021 return -ENOMEM;
101022 do {
101023@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
101024 if (addr >= MODULES_VADDR && addr < MODULES_END)
101025 return 1;
101026 #endif
101027+
101028+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
101029+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
101030+ return 1;
101031+#endif
101032+
101033 return is_vmalloc_addr(x);
101034 }
101035
101036@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
101037
101038 if (!pgd_none(*pgd)) {
101039 pud_t *pud = pud_offset(pgd, addr);
101040+#ifdef CONFIG_X86
101041+ if (!pud_large(*pud))
101042+#endif
101043 if (!pud_none(*pud)) {
101044 pmd_t *pmd = pmd_offset(pud, addr);
101045+#ifdef CONFIG_X86
101046+ if (!pmd_large(*pmd))
101047+#endif
101048 if (!pmd_none(*pmd)) {
101049 pte_t *ptep, pte;
101050
101051@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
101052 * Allocate a region of KVA of the specified size and alignment, within the
101053 * vstart and vend.
101054 */
101055-static struct vmap_area *alloc_vmap_area(unsigned long size,
101056+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
101057 unsigned long align,
101058 unsigned long vstart, unsigned long vend,
101059 int node, gfp_t gfp_mask)
101060@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
101061 for_each_possible_cpu(i) {
101062 struct vmap_block_queue *vbq;
101063 struct vfree_deferred *p;
101064+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101065+ struct stack_deferred *p2;
101066+#endif
101067
101068 vbq = &per_cpu(vmap_block_queue, i);
101069 spin_lock_init(&vbq->lock);
101070 INIT_LIST_HEAD(&vbq->free);
101071+
101072 p = &per_cpu(vfree_deferred, i);
101073 init_llist_head(&p->list);
101074- INIT_WORK(&p->wq, free_work);
101075+ INIT_WORK(&p->wq, vfree_work);
101076+
101077+ p = &per_cpu(vunmap_deferred, i);
101078+ init_llist_head(&p->list);
101079+ INIT_WORK(&p->wq, vunmap_work);
101080+
101081+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101082+ p2 = &per_cpu(stack_deferred, i);
101083+ init_llist_head(&p2->list.list);
101084+ INIT_WORK(&p2->wq, unmap_work);
101085+#endif
101086 }
101087
101088 /* Import existing vmlist entries. */
101089@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
101090 struct vm_struct *area;
101091
101092 BUG_ON(in_interrupt());
101093+
101094+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101095+ if (flags & VM_KERNEXEC) {
101096+ if (start != VMALLOC_START || end != VMALLOC_END)
101097+ return NULL;
101098+ start = (unsigned long)MODULES_EXEC_VADDR;
101099+ end = (unsigned long)MODULES_EXEC_END;
101100+ }
101101+#endif
101102+
101103 if (flags & VM_IOREMAP)
101104 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
101105
101106@@ -1510,13 +1615,36 @@ EXPORT_SYMBOL(vfree);
101107 */
101108 void vunmap(const void *addr)
101109 {
101110- BUG_ON(in_interrupt());
101111- might_sleep();
101112- if (addr)
101113+ if (!addr)
101114+ return;
101115+ if (unlikely(in_interrupt())) {
101116+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
101117+ if (llist_add((struct llist_node *)addr, &p->list))
101118+ schedule_work(&p->wq);
101119+ } else {
101120+ might_sleep();
101121 __vunmap(addr, 0);
101122+ }
101123 }
101124 EXPORT_SYMBOL(vunmap);
101125
101126+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101127+void unmap_process_stacks(struct task_struct *task)
101128+{
101129+ if (unlikely(in_interrupt())) {
101130+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
101131+ struct stack_deferred_llist *list = task->stack;
101132+ list->stack = task->stack;
101133+ list->lowmem_stack = task->lowmem_stack;
101134+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
101135+ schedule_work(&p->wq);
101136+ } else {
101137+ __vunmap(task->stack, 0);
101138+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
101139+ }
101140+}
101141+#endif
101142+
101143 /**
101144 * vmap - map an array of pages into virtually contiguous space
101145 * @pages: array of page pointers
101146@@ -1537,6 +1665,11 @@ void *vmap(struct page **pages, unsigned int count,
101147 if (count > totalram_pages)
101148 return NULL;
101149
101150+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101151+ if (!(pgprot_val(prot) & _PAGE_NX))
101152+ flags |= VM_KERNEXEC;
101153+#endif
101154+
101155 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
101156 __builtin_return_address(0));
101157 if (!area)
101158@@ -1641,6 +1774,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
101159 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
101160 goto fail;
101161
101162+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101163+ if (!(pgprot_val(prot) & _PAGE_NX)) {
101164+ vm_flags |= VM_KERNEXEC;
101165+ start = VMALLOC_START;
101166+ end = VMALLOC_END;
101167+ }
101168+#endif
101169+
101170 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
101171 vm_flags, start, end, node, gfp_mask, caller);
101172 if (!area)
101173@@ -1817,10 +1958,9 @@ EXPORT_SYMBOL(vzalloc_node);
101174 * For tight control over page level allocator and protection flags
101175 * use __vmalloc() instead.
101176 */
101177-
101178 void *vmalloc_exec(unsigned long size)
101179 {
101180- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
101181+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
101182 NUMA_NO_NODE, __builtin_return_address(0));
101183 }
101184
101185@@ -2127,6 +2267,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
101186 {
101187 struct vm_struct *area;
101188
101189+ BUG_ON(vma->vm_mirror);
101190+
101191 size = PAGE_ALIGN(size);
101192
101193 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
101194@@ -2609,7 +2751,11 @@ static int s_show(struct seq_file *m, void *p)
101195 v->addr, v->addr + v->size, v->size);
101196
101197 if (v->caller)
101198+#ifdef CONFIG_GRKERNSEC_HIDESYM
101199+ seq_printf(m, " %pK", v->caller);
101200+#else
101201 seq_printf(m, " %pS", v->caller);
101202+#endif
101203
101204 if (v->nr_pages)
101205 seq_printf(m, " pages=%d", v->nr_pages);
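
The vmalloc.c rework above makes vunmap() safe from interrupt context: instead of calling the possibly-sleeping __vunmap() directly, it pushes the address onto a per-CPU lock-free list (`llist_add`) and schedules a worker (`schedule_work`) to drain it. The following standalone C sketch models the same deferred-free scheme with a Treiber stack and a pthread standing in for the workqueue; all names are illustrative.

/* A context that must not block pushes nodes onto a lock-free list;
 * a worker thread drains the list and does the slow free. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; void *payload; };

static _Atomic(struct node *) deferred_head;

static void defer_free(void *payload)        /* like vunmap() in IRQ */
{
    struct node *n = malloc(sizeof(*n));
    n->payload = payload;
    n->next = atomic_load(&deferred_head);
    while (!atomic_compare_exchange_weak(&deferred_head, &n->next, n))
        ;                                    /* llist_add() equivalent */
}

static void *worker(void *arg)               /* like vunmap_work() */
{
    (void)arg;
    struct node *n = atomic_exchange(&deferred_head, NULL); /* llist_del_all */
    while (n) {
        struct node *next = n->next;
        printf("freeing %p\n", n->payload);  /* the real __vunmap(x, 0) */
        free(n->payload);
        free(n);
        n = next;
    }
    return NULL;
}

int main(void)
{
    defer_free(malloc(64));
    defer_free(malloc(64));
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}

The GRKERNSEC_KSTACKOVERFLOW variant in the same hunks applies the identical scheme to whole task stacks, carrying two pointers (stack and lowmem_stack) per node instead of one.
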
101206diff --git a/mm/vmstat.c b/mm/vmstat.c
101207index 4f5cd97..9fb715a 100644
101208--- a/mm/vmstat.c
101209+++ b/mm/vmstat.c
101210@@ -27,6 +27,7 @@
101211 #include <linux/mm_inline.h>
101212 #include <linux/page_ext.h>
101213 #include <linux/page_owner.h>
101214+#include <linux/grsecurity.h>
101215
101216 #include "internal.h"
101217
101218@@ -86,7 +87,7 @@ void vm_events_fold_cpu(int cpu)
101219 *
101220 * vm_stat contains the global counters
101221 */
101222-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101223+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101224 EXPORT_SYMBOL(vm_stat);
101225
101226 #ifdef CONFIG_SMP
101227@@ -438,7 +439,7 @@ static int fold_diff(int *diff)
101228
101229 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101230 if (diff[i]) {
101231- atomic_long_add(diff[i], &vm_stat[i]);
101232+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
101233 changes++;
101234 }
101235 return changes;
101236@@ -476,7 +477,7 @@ static int refresh_cpu_vm_stats(void)
101237 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
101238 if (v) {
101239
101240- atomic_long_add(v, &zone->vm_stat[i]);
101241+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101242 global_diff[i] += v;
101243 #ifdef CONFIG_NUMA
101244 /* 3 seconds idle till flush */
101245@@ -540,7 +541,7 @@ void cpu_vm_stats_fold(int cpu)
101246
101247 v = p->vm_stat_diff[i];
101248 p->vm_stat_diff[i] = 0;
101249- atomic_long_add(v, &zone->vm_stat[i]);
101250+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101251 global_diff[i] += v;
101252 }
101253 }
101254@@ -560,8 +561,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
101255 if (pset->vm_stat_diff[i]) {
101256 int v = pset->vm_stat_diff[i];
101257 pset->vm_stat_diff[i] = 0;
101258- atomic_long_add(v, &zone->vm_stat[i]);
101259- atomic_long_add(v, &vm_stat[i]);
101260+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101261+ atomic_long_add_unchecked(v, &vm_stat[i]);
101262 }
101263 }
101264 #endif
101265@@ -1293,10 +1294,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
101266 stat_items_size += sizeof(struct vm_event_state);
101267 #endif
101268
101269- v = kmalloc(stat_items_size, GFP_KERNEL);
101270+ v = kzalloc(stat_items_size, GFP_KERNEL);
101271 m->private = v;
101272 if (!v)
101273 return ERR_PTR(-ENOMEM);
101274+
101275+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101276+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
101277+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
101278+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
101279+ && !in_group_p(grsec_proc_gid)
101280+#endif
101281+ )
101282+ return (unsigned long *)m->private + *pos;
101283+#endif
101284+#endif
101285+
101286 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101287 v[i] = global_page_state(i);
101288 v += NR_VM_ZONE_STAT_ITEMS;
101289@@ -1528,10 +1541,16 @@ static int __init setup_vmstat(void)
101290 cpu_notifier_register_done();
101291 #endif
101292 #ifdef CONFIG_PROC_FS
101293- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
101294- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
101295- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101296- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
101297+ {
101298+ mode_t gr_mode = S_IRUGO;
101299+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101300+ gr_mode = S_IRUSR;
101301+#endif
101302+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
101303+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
101304+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101305+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
101306+ }
101307 #endif
101308 return 0;
101309 }
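
The setup_vmstat() hunk narrows most /proc memory files to root-only under GRKERNSEC_PROC_ADD while deliberately leaving vmstat world-readable. A tiny sketch of that mode-selection idiom; the macro below stands in for the Kconfig symbol.

/* Pick world-readable by default, tighten under the hardening
 * option, keep deliberately-public files at S_IRUGO. */
#include <stdio.h>
#include <sys/stat.h>

#define HARDENED_PROC 1   /* stands in for CONFIG_GRKERNSEC_PROC_ADD */

int main(void)
{
    mode_t gr_mode = S_IRUSR | S_IRGRP | S_IROTH;  /* S_IRUGO */
#if HARDENED_PROC
    gr_mode = S_IRUSR;       /* root-only, as for buddyinfo/zoneinfo */
#endif
    printf("restricted mode: %04o, public mode: %04o\n",
           (unsigned)gr_mode, (unsigned)(S_IRUSR | S_IRGRP | S_IROTH));
    return 0;
}
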
101310diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
101311index 64c6bed..b79a5de 100644
101312--- a/net/8021q/vlan.c
101313+++ b/net/8021q/vlan.c
101314@@ -481,7 +481,7 @@ out:
101315 return NOTIFY_DONE;
101316 }
101317
101318-static struct notifier_block vlan_notifier_block __read_mostly = {
101319+static struct notifier_block vlan_notifier_block = {
101320 .notifier_call = vlan_device_event,
101321 };
101322
101323@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
101324 err = -EPERM;
101325 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
101326 break;
101327- if ((args.u.name_type >= 0) &&
101328- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
101329+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
101330 struct vlan_net *vn;
101331
101332 vn = net_generic(net, vlan_net_id);
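
The vlan_ioctl_handler() hunk drops the `args.u.name_type >= 0` half of the test because the field is unsigned, making the comparison tautologically true. A two-line demonstration (gcc's -Wtype-limits flags exactly this):

/* For an unsigned operand, `>= 0` can never be false. */
#include <stdio.h>

int main(void)
{
    unsigned int name_type = 0u - 1u;   /* wraps to UINT_MAX */
    if (name_type >= 0)                 /* always true for unsigned */
        printf("always reached, name_type=%u\n", name_type);
    return 0;
}
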
101333diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
101334index c92b52f..006c052 100644
101335--- a/net/8021q/vlan_netlink.c
101336+++ b/net/8021q/vlan_netlink.c
101337@@ -245,7 +245,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
101338 return dev_net(real_dev);
101339 }
101340
101341-struct rtnl_link_ops vlan_link_ops __read_mostly = {
101342+struct rtnl_link_ops vlan_link_ops = {
101343 .kind = "vlan",
101344 .maxtype = IFLA_VLAN_MAX,
101345 .policy = vlan_policy,
101346diff --git a/net/9p/client.c b/net/9p/client.c
101347index e86a9bea..e91f70e 100644
101348--- a/net/9p/client.c
101349+++ b/net/9p/client.c
101350@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
101351 len - inline_len);
101352 } else {
101353 err = copy_from_user(ename + inline_len,
101354- uidata, len - inline_len);
101355+ (char __force_user *)uidata, len - inline_len);
101356 if (err) {
101357 err = -EFAULT;
101358 goto out_err;
101359@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
101360 kernel_buf = 1;
101361 indata = data;
101362 } else
101363- indata = (__force char *)udata;
101364+ indata = (__force_kernel char *)udata;
101365 /*
101366 * response header len is 11
101367 * PDU Header(7) + IO Size (4)
101368@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
101369 kernel_buf = 1;
101370 odata = data;
101371 } else
101372- odata = (char *)udata;
101373+ odata = (char __force_kernel *)udata;
101374 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
101375 P9_ZC_HDR_SZ, kernel_buf, "dqd",
101376 fid->fid, offset, rsize);
101377diff --git a/net/9p/mod.c b/net/9p/mod.c
101378index 6ab36ae..6f1841b 100644
101379--- a/net/9p/mod.c
101380+++ b/net/9p/mod.c
101381@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
101382 void v9fs_register_trans(struct p9_trans_module *m)
101383 {
101384 spin_lock(&v9fs_trans_lock);
101385- list_add_tail(&m->list, &v9fs_trans_list);
101386+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
101387 spin_unlock(&v9fs_trans_lock);
101388 }
101389 EXPORT_SYMBOL(v9fs_register_trans);
101390@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
101391 void v9fs_unregister_trans(struct p9_trans_module *m)
101392 {
101393 spin_lock(&v9fs_trans_lock);
101394- list_del_init(&m->list);
101395+ pax_list_del_init((struct list_head *)&m->list);
101396 spin_unlock(&v9fs_trans_lock);
101397 }
101398 EXPORT_SYMBOL(v9fs_unregister_trans);
101399diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
101400index 80d08f6..de63fd1 100644
101401--- a/net/9p/trans_fd.c
101402+++ b/net/9p/trans_fd.c
101403@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
101404 oldfs = get_fs();
101405 set_fs(get_ds());
101406 /* The cast to a user pointer is valid due to the set_fs() */
101407- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
101408+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
101409 set_fs(oldfs);
101410
101411 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
101412diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
101413index af46bc4..f9adfcd 100644
101414--- a/net/appletalk/atalk_proc.c
101415+++ b/net/appletalk/atalk_proc.c
101416@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
101417 struct proc_dir_entry *p;
101418 int rc = -ENOMEM;
101419
101420- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
101421+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
101422 if (!atalk_proc_dir)
101423 goto out;
101424
101425diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
101426index 876fbe8..8bbea9f 100644
101427--- a/net/atm/atm_misc.c
101428+++ b/net/atm/atm_misc.c
101429@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
101430 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
101431 return 1;
101432 atm_return(vcc, truesize);
101433- atomic_inc(&vcc->stats->rx_drop);
101434+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101435 return 0;
101436 }
101437 EXPORT_SYMBOL(atm_charge);
101438@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
101439 }
101440 }
101441 atm_return(vcc, guess);
101442- atomic_inc(&vcc->stats->rx_drop);
101443+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101444 return NULL;
101445 }
101446 EXPORT_SYMBOL(atm_alloc_charge);
101447@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
101448
101449 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101450 {
101451-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101452+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101453 __SONET_ITEMS
101454 #undef __HANDLE_ITEM
101455 }
101456@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
101457
101458 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101459 {
101460-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101461+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
101462 __SONET_ITEMS
101463 #undef __HANDLE_ITEM
101464 }
101465diff --git a/net/atm/lec.c b/net/atm/lec.c
101466index 4b98f89..5a2f6cb 100644
101467--- a/net/atm/lec.c
101468+++ b/net/atm/lec.c
101469@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
101470 }
101471
101472 static struct lane2_ops lane2_ops = {
101473- lane2_resolve, /* resolve, spec 3.1.3 */
101474- lane2_associate_req, /* associate_req, spec 3.1.4 */
101475- NULL /* associate indicator, spec 3.1.5 */
101476+ .resolve = lane2_resolve,
101477+ .associate_req = lane2_associate_req,
101478+ .associate_indicator = NULL
101479 };
101480
101481 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
101482diff --git a/net/atm/lec.h b/net/atm/lec.h
101483index 4149db1..f2ab682 100644
101484--- a/net/atm/lec.h
101485+++ b/net/atm/lec.h
101486@@ -48,7 +48,7 @@ struct lane2_ops {
101487 const u8 *tlvs, u32 sizeoftlvs);
101488 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
101489 const u8 *tlvs, u32 sizeoftlvs);
101490-};
101491+} __no_const;
101492
101493 /*
101494 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
101495diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
101496index d1b2d9a..d549f7f 100644
101497--- a/net/atm/mpoa_caches.c
101498+++ b/net/atm/mpoa_caches.c
101499@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
101500
101501
101502 static struct in_cache_ops ingress_ops = {
101503- in_cache_add_entry, /* add_entry */
101504- in_cache_get, /* get */
101505- in_cache_get_with_mask, /* get_with_mask */
101506- in_cache_get_by_vcc, /* get_by_vcc */
101507- in_cache_put, /* put */
101508- in_cache_remove_entry, /* remove_entry */
101509- cache_hit, /* cache_hit */
101510- clear_count_and_expired, /* clear_count */
101511- check_resolving_entries, /* check_resolving */
101512- refresh_entries, /* refresh */
101513- in_destroy_cache /* destroy_cache */
101514+ .add_entry = in_cache_add_entry,
101515+ .get = in_cache_get,
101516+ .get_with_mask = in_cache_get_with_mask,
101517+ .get_by_vcc = in_cache_get_by_vcc,
101518+ .put = in_cache_put,
101519+ .remove_entry = in_cache_remove_entry,
101520+ .cache_hit = cache_hit,
101521+ .clear_count = clear_count_and_expired,
101522+ .check_resolving = check_resolving_entries,
101523+ .refresh = refresh_entries,
101524+ .destroy_cache = in_destroy_cache
101525 };
101526
101527 static struct eg_cache_ops egress_ops = {
101528- eg_cache_add_entry, /* add_entry */
101529- eg_cache_get_by_cache_id, /* get_by_cache_id */
101530- eg_cache_get_by_tag, /* get_by_tag */
101531- eg_cache_get_by_vcc, /* get_by_vcc */
101532- eg_cache_get_by_src_ip, /* get_by_src_ip */
101533- eg_cache_put, /* put */
101534- eg_cache_remove_entry, /* remove_entry */
101535- update_eg_cache_entry, /* update */
101536- clear_expired, /* clear_expired */
101537- eg_destroy_cache /* destroy_cache */
101538+ .add_entry = eg_cache_add_entry,
101539+ .get_by_cache_id = eg_cache_get_by_cache_id,
101540+ .get_by_tag = eg_cache_get_by_tag,
101541+ .get_by_vcc = eg_cache_get_by_vcc,
101542+ .get_by_src_ip = eg_cache_get_by_src_ip,
101543+ .put = eg_cache_put,
101544+ .remove_entry = eg_cache_remove_entry,
101545+ .update = update_eg_cache_entry,
101546+ .clear_expired = clear_expired,
101547+ .destroy_cache = eg_destroy_cache
101548 };
101549
101550
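
The ingress_ops/egress_ops rewrite above replaces positional struct initializers with C99 designated initializers. The designated form stays correct if the struct's members are ever reordered, which is what grsecurity's structure-layout randomization can do; a standalone sketch with illustrative types:

/* Each member is named explicitly, so the initializer is
 * order-independent and survives struct reordering. */
#include <stdio.h>

struct cache_ops {
    int  (*add_entry)(int key);
    int  (*get)(int key);
    void (*destroy_cache)(void);
};

static int my_add(int key)   { return key + 1; }
static int my_get(int key)   { return key * 2; }
static void my_destroy(void) { puts("destroyed"); }

static struct cache_ops ingress_ops = {
    .add_entry     = my_add,
    .get           = my_get,
    .destroy_cache = my_destroy,
};

int main(void)
{
    printf("%d %d\n", ingress_ops.add_entry(1), ingress_ops.get(2));
    ingress_ops.destroy_cache();
    return 0;
}
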
101551diff --git a/net/atm/proc.c b/net/atm/proc.c
101552index bbb6461..cf04016 100644
101553--- a/net/atm/proc.c
101554+++ b/net/atm/proc.c
101555@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
101556 const struct k_atm_aal_stats *stats)
101557 {
101558 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
101559- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
101560- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
101561- atomic_read(&stats->rx_drop));
101562+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
101563+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
101564+ atomic_read_unchecked(&stats->rx_drop));
101565 }
101566
101567 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
101568diff --git a/net/atm/resources.c b/net/atm/resources.c
101569index 0447d5d..3cf4728 100644
101570--- a/net/atm/resources.c
101571+++ b/net/atm/resources.c
101572@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
101573 static void copy_aal_stats(struct k_atm_aal_stats *from,
101574 struct atm_aal_stats *to)
101575 {
101576-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101577+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101578 __AAL_STAT_ITEMS
101579 #undef __HANDLE_ITEM
101580 }
101581@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
101582 static void subtract_aal_stats(struct k_atm_aal_stats *from,
101583 struct atm_aal_stats *to)
101584 {
101585-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101586+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
101587 __AAL_STAT_ITEMS
101588 #undef __HANDLE_ITEM
101589 }
101590diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
101591index 919a5ce..cc6b444 100644
101592--- a/net/ax25/sysctl_net_ax25.c
101593+++ b/net/ax25/sysctl_net_ax25.c
101594@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
101595 {
101596 char path[sizeof("net/ax25/") + IFNAMSIZ];
101597 int k;
101598- struct ctl_table *table;
101599+ ctl_table_no_const *table;
101600
101601 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
101602 if (!table)
101603diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
101604index 00e00e0..710fcd2 100644
101605--- a/net/batman-adv/bat_iv_ogm.c
101606+++ b/net/batman-adv/bat_iv_ogm.c
101607@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
101608
101609 /* randomize initial seqno to avoid collision */
101610 get_random_bytes(&random_seqno, sizeof(random_seqno));
101611- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101612+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101613
101614 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
101615 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
101616@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
101617 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
101618
101619 /* change sequence number to network order */
101620- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
101621+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
101622 batadv_ogm_packet->seqno = htonl(seqno);
101623- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
101624+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
101625
101626 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
101627
101628@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
101629 return;
101630
101631 /* could be changed by schedule_own_packet() */
101632- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
101633+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
101634
101635 if (ogm_packet->flags & BATADV_DIRECTLINK)
101636 has_directlink_flag = true;
101637diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
101638index 3d1dcaa..4699f4e 100644
101639--- a/net/batman-adv/fragmentation.c
101640+++ b/net/batman-adv/fragmentation.c
101641@@ -449,7 +449,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
101642 frag_header.packet_type = BATADV_UNICAST_FRAG;
101643 frag_header.version = BATADV_COMPAT_VERSION;
101644 frag_header.ttl = BATADV_TTL;
101645- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
101646+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
101647 frag_header.reserved = 0;
101648 frag_header.no = 0;
101649 frag_header.total_size = htons(skb->len);
101650diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
101651index 5ec31d7..e371631 100644
101652--- a/net/batman-adv/soft-interface.c
101653+++ b/net/batman-adv/soft-interface.c
101654@@ -295,7 +295,7 @@ send:
101655 primary_if->net_dev->dev_addr);
101656
101657 /* set broadcast sequence number */
101658- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
101659+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
101660 bcast_packet->seqno = htonl(seqno);
101661
101662 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
101663@@ -760,7 +760,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101664 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
101665
101666 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
101667- atomic_set(&bat_priv->bcast_seqno, 1);
101668+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
101669 atomic_set(&bat_priv->tt.vn, 0);
101670 atomic_set(&bat_priv->tt.local_changes, 0);
101671 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
101672@@ -774,7 +774,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101673
101674 /* randomize initial seqno to avoid collision */
101675 get_random_bytes(&random_seqno, sizeof(random_seqno));
101676- atomic_set(&bat_priv->frag_seqno, random_seqno);
101677+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
101678
101679 bat_priv->primary_if = NULL;
101680 bat_priv->num_ifaces = 0;
101681@@ -982,7 +982,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
101682 return 0;
101683 }
101684
101685-struct rtnl_link_ops batadv_link_ops __read_mostly = {
101686+struct rtnl_link_ops batadv_link_ops = {
101687 .kind = "batadv",
101688 .priv_size = sizeof(struct batadv_priv),
101689 .setup = batadv_softif_init_early,
101690diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
101691index 9398c3f..0e79657 100644
101692--- a/net/batman-adv/types.h
101693+++ b/net/batman-adv/types.h
101694@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
101695 struct batadv_hard_iface_bat_iv {
101696 unsigned char *ogm_buff;
101697 int ogm_buff_len;
101698- atomic_t ogm_seqno;
101699+ atomic_unchecked_t ogm_seqno;
101700 };
101701
101702 /**
101703@@ -766,7 +766,7 @@ struct batadv_priv {
101704 atomic_t bonding;
101705 atomic_t fragmentation;
101706 atomic_t packet_size_max;
101707- atomic_t frag_seqno;
101708+ atomic_unchecked_t frag_seqno;
101709 #ifdef CONFIG_BATMAN_ADV_BLA
101710 atomic_t bridge_loop_avoidance;
101711 #endif
101712@@ -785,7 +785,7 @@ struct batadv_priv {
101713 #endif
101714 uint32_t isolation_mark;
101715 uint32_t isolation_mark_mask;
101716- atomic_t bcast_seqno;
101717+ atomic_unchecked_t bcast_seqno;
101718 atomic_t bcast_queue_left;
101719 atomic_t batman_queue_left;
101720 char num_ifaces;
101721diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101722index 1d65c5b..43e55fd 100644
101723--- a/net/bluetooth/hci_sock.c
101724+++ b/net/bluetooth/hci_sock.c
101725@@ -1042,7 +1042,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101726 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101727 }
101728
101729- len = min_t(unsigned int, len, sizeof(uf));
101730+ len = min((size_t)len, sizeof(uf));
101731 if (copy_from_user(&uf, optval, len)) {
101732 err = -EFAULT;
101733 break;
101734diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101735index 6ba33f9..4afc26f 100644
101736--- a/net/bluetooth/l2cap_core.c
101737+++ b/net/bluetooth/l2cap_core.c
101738@@ -3534,8 +3534,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101739 break;
101740
101741 case L2CAP_CONF_RFC:
101742- if (olen == sizeof(rfc))
101743- memcpy(&rfc, (void *)val, olen);
101744+ if (olen != sizeof(rfc))
101745+ break;
101746+
101747+ memcpy(&rfc, (void *)val, olen);
101748
101749 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101750 rfc.mode != chan->mode)
101751diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101752index 60694f0..32623ed 100644
101753--- a/net/bluetooth/l2cap_sock.c
101754+++ b/net/bluetooth/l2cap_sock.c
101755@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101756 struct sock *sk = sock->sk;
101757 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101758 struct l2cap_options opts;
101759- int len, err = 0;
101760+ int err = 0;
101761+ size_t len = optlen;
101762 u32 opt;
101763
101764 BT_DBG("sk %p", sk);
101765@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101766 opts.max_tx = chan->max_tx;
101767 opts.txwin_size = chan->tx_win;
101768
101769- len = min_t(unsigned int, sizeof(opts), optlen);
101770+ len = min(sizeof(opts), len);
101771 if (copy_from_user((char *) &opts, optval, len)) {
101772 err = -EFAULT;
101773 break;
101774@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101775 struct bt_security sec;
101776 struct bt_power pwr;
101777 struct l2cap_conn *conn;
101778- int len, err = 0;
101779+ int err = 0;
101780+ size_t len = optlen;
101781 u32 opt;
101782
101783 BT_DBG("sk %p", sk);
101784@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101785
101786 sec.level = BT_SECURITY_LOW;
101787
101788- len = min_t(unsigned int, sizeof(sec), optlen);
101789+ len = min(sizeof(sec), len);
101790 if (copy_from_user((char *) &sec, optval, len)) {
101791 err = -EFAULT;
101792 break;
101793@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101794
101795 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101796
101797- len = min_t(unsigned int, sizeof(pwr), optlen);
101798+ len = min(sizeof(pwr), len);
101799 if (copy_from_user((char *) &pwr, optval, len)) {
101800 err = -EFAULT;
101801 break;
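
The setsockopt hunks above change the option length from `int len` clamped via `min_t(unsigned int, ...)` to `size_t len = optlen` clamped via plain `min()`, keeping the length in unsigned space throughout. The sketch below illustrates the signed-length hazard that idiom avoids; function names are illustrative, not kernel API.

/* A negative length surviving in a signed variable becomes a huge
 * size_t at the copy; clamping in unsigned space prevents that. */
#include <stdio.h>
#include <string.h>

static void copy_opt_bad(char *dst, size_t dstsz, const char *src, int optlen)
{
    int len = optlen;
    if (len > (int)dstsz)
        len = dstsz;
    /* With optlen == -1, len is still -1 here; memcpy would receive
     * (size_t)-1. Printed instead of executed, deliberately. */
    (void)dst; (void)src;
    printf("bad: would copy %zu bytes\n", (size_t)len);
}

static void copy_opt_good(char *dst, size_t dstsz, const char *src,
                          size_t optlen)
{
    size_t len = optlen < dstsz ? optlen : dstsz;  /* min(), unsigned */
    memcpy(dst, src, len);
    printf("good: copied %zu bytes\n", len);
}

int main(void)
{
    char dst[8], src[16] = "0123456789abcde";
    copy_opt_bad(dst, sizeof(dst), src, -1);
    copy_opt_good(dst, sizeof(dst), src, (size_t)-1); /* clamped to 8 */
    return 0;
}
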
101802diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101803index 3c6d2c8..6afc970 100644
101804--- a/net/bluetooth/rfcomm/sock.c
101805+++ b/net/bluetooth/rfcomm/sock.c
101806@@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101807 struct sock *sk = sock->sk;
101808 struct bt_security sec;
101809 int err = 0;
101810- size_t len;
101811+ size_t len = optlen;
101812 u32 opt;
101813
101814 BT_DBG("sk %p", sk);
101815@@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101816
101817 sec.level = BT_SECURITY_LOW;
101818
101819- len = min_t(unsigned int, sizeof(sec), optlen);
101820+ len = min(sizeof(sec), len);
101821 if (copy_from_user((char *) &sec, optval, len)) {
101822 err = -EFAULT;
101823 break;
101824diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101825index 8e385a0..a5bdd8e 100644
101826--- a/net/bluetooth/rfcomm/tty.c
101827+++ b/net/bluetooth/rfcomm/tty.c
101828@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101829 BT_DBG("tty %p id %d", tty, tty->index);
101830
101831 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101832- dev->channel, dev->port.count);
101833+ dev->channel, atomic_read(&dev->port.count));
101834
101835 err = tty_port_open(&dev->port, tty, filp);
101836 if (err)
101837@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101838 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101839
101840 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101841- dev->port.count);
101842+ atomic_read(&dev->port.count));
101843
101844 tty_port_close(&dev->port, tty, filp);
101845 }
101846diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101847index 4fbcea0..69a6786 100644
101848--- a/net/bridge/br_netlink.c
101849+++ b/net/bridge/br_netlink.c
101850@@ -726,7 +726,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
101851 .get_link_af_size = br_get_link_af_size,
101852 };
101853
101854-struct rtnl_link_ops br_link_ops __read_mostly = {
101855+struct rtnl_link_ops br_link_ops = {
101856 .kind = "bridge",
101857 .priv_size = sizeof(struct net_bridge),
101858 .setup = br_dev_setup,
101859diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101860index 91180a7..1301daa 100644
101861--- a/net/bridge/netfilter/ebtables.c
101862+++ b/net/bridge/netfilter/ebtables.c
101863@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101864 tmp.valid_hooks = t->table->valid_hooks;
101865 }
101866 mutex_unlock(&ebt_mutex);
101867- if (copy_to_user(user, &tmp, *len) != 0) {
101868+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101869 BUGPRINT("c2u Didn't work\n");
101870 ret = -EFAULT;
101871 break;
101872@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101873 goto out;
101874 tmp.valid_hooks = t->valid_hooks;
101875
101876- if (copy_to_user(user, &tmp, *len) != 0) {
101877+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101878 ret = -EFAULT;
101879 break;
101880 }
101881@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101882 tmp.entries_size = t->table->entries_size;
101883 tmp.valid_hooks = t->table->valid_hooks;
101884
101885- if (copy_to_user(user, &tmp, *len) != 0) {
101886+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101887 ret = -EFAULT;
101888 break;
101889 }
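
The ebtables hunks add a `*len > sizeof(tmp)` check before each copy_to_user(), so a user-supplied length larger than the structure can no longer copy out adjacent kernel-stack bytes. A userspace model of that bounds check, with stand-ins for copy_to_user():

/* Never let a caller-supplied length exceed the object being
 * copied out, or bytes beyond it leak. */
#include <stdio.h>
#include <string.h>

struct info { char name[8]; unsigned valid_hooks; };

static int copy_info_out(void *user_buf, size_t user_len,
                         const struct info *tmp)
{
    if (user_len > sizeof(*tmp))      /* the added bounds check */
        return -1;                    /* -EFAULT in the patch */
    memcpy(user_buf, tmp, user_len);  /* copy_to_user() stand-in */
    return 0;
}

int main(void)
{
    struct info tmp = { "ebt", 0x7 };
    char buf[64];
    printf("exact: %d\n", copy_info_out(buf, sizeof(tmp), &tmp));
    printf("oversized: %d\n", copy_info_out(buf, sizeof(buf), &tmp));
    return 0;
}
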
101890diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101891index f5afda1..dcf770a 100644
101892--- a/net/caif/cfctrl.c
101893+++ b/net/caif/cfctrl.c
101894@@ -10,6 +10,7 @@
101895 #include <linux/spinlock.h>
101896 #include <linux/slab.h>
101897 #include <linux/pkt_sched.h>
101898+#include <linux/sched.h>
101899 #include <net/caif/caif_layer.h>
101900 #include <net/caif/cfpkt.h>
101901 #include <net/caif/cfctrl.h>
101902@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101903 memset(&dev_info, 0, sizeof(dev_info));
101904 dev_info.id = 0xff;
101905 cfsrvl_init(&this->serv, 0, &dev_info, false);
101906- atomic_set(&this->req_seq_no, 1);
101907- atomic_set(&this->rsp_seq_no, 1);
101908+ atomic_set_unchecked(&this->req_seq_no, 1);
101909+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101910 this->serv.layer.receive = cfctrl_recv;
101911 sprintf(this->serv.layer.name, "ctrl");
101912 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101913@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101914 struct cfctrl_request_info *req)
101915 {
101916 spin_lock_bh(&ctrl->info_list_lock);
101917- atomic_inc(&ctrl->req_seq_no);
101918- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101919+ atomic_inc_unchecked(&ctrl->req_seq_no);
101920+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101921 list_add_tail(&req->list, &ctrl->list);
101922 spin_unlock_bh(&ctrl->info_list_lock);
101923 }
101924@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101925 if (p != first)
101926 pr_warn("Requests are not received in order\n");
101927
101928- atomic_set(&ctrl->rsp_seq_no,
101929+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101930 p->sequence_no);
101931 list_del(&p->list);
101932 goto out;
101933diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101934index 67a4a36..8d28068 100644
101935--- a/net/caif/chnl_net.c
101936+++ b/net/caif/chnl_net.c
101937@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101938 };
101939
101940
101941-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101942+static struct rtnl_link_ops ipcaif_link_ops = {
101943 .kind = "caif",
101944 .priv_size = sizeof(struct chnl_net),
101945 .setup = ipcaif_net_setup,
101946diff --git a/net/can/af_can.c b/net/can/af_can.c
101947index 32d710e..93bcf05 100644
101948--- a/net/can/af_can.c
101949+++ b/net/can/af_can.c
101950@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101951 };
101952
101953 /* notifier block for netdevice event */
101954-static struct notifier_block can_netdev_notifier __read_mostly = {
101955+static struct notifier_block can_netdev_notifier = {
101956 .notifier_call = can_notifier,
101957 };
101958
101959diff --git a/net/can/bcm.c b/net/can/bcm.c
101960index ee9ffd9..dfdf3d4 100644
101961--- a/net/can/bcm.c
101962+++ b/net/can/bcm.c
101963@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101964 }
101965
101966 /* create /proc/net/can-bcm directory */
101967- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101968+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101969 return 0;
101970 }
101971
101972diff --git a/net/can/gw.c b/net/can/gw.c
101973index a6f448e..5902171 100644
101974--- a/net/can/gw.c
101975+++ b/net/can/gw.c
101976@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101977 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101978
101979 static HLIST_HEAD(cgw_list);
101980-static struct notifier_block notifier;
101981
101982 static struct kmem_cache *cgw_cache __read_mostly;
101983
101984@@ -948,6 +947,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101985 return err;
101986 }
101987
101988+static struct notifier_block notifier = {
101989+ .notifier_call = cgw_notifier
101990+};
101991+
101992 static __init int cgw_module_init(void)
101993 {
101994 /* sanitize given module parameter */
101995@@ -963,7 +966,6 @@ static __init int cgw_module_init(void)
101996 return -ENOMEM;
101997
101998 /* set notifier */
101999- notifier.notifier_call = cgw_notifier;
102000 register_netdevice_notifier(&notifier);
102001
102002 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
102003diff --git a/net/can/proc.c b/net/can/proc.c
102004index 1a19b98..df2b4ec 100644
102005--- a/net/can/proc.c
102006+++ b/net/can/proc.c
102007@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
102008 void can_init_proc(void)
102009 {
102010 /* create /proc/net/can directory */
102011- can_dir = proc_mkdir("can", init_net.proc_net);
102012+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
102013
102014 if (!can_dir) {
102015 printk(KERN_INFO "can: failed to create /proc/net/can . "
102016diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
102017index a9f4ae4..ee19b92 100644
102018--- a/net/ceph/messenger.c
102019+++ b/net/ceph/messenger.c
102020@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
102021 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
102022
102023 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
102024-static atomic_t addr_str_seq = ATOMIC_INIT(0);
102025+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
102026
102027 static struct page *zero_page; /* used in certain error cases */
102028
102029@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
102030 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
102031 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
102032
102033- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
102034+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
102035 s = addr_str[i];
102036
102037 switch (ss->ss_family) {
102038diff --git a/net/compat.c b/net/compat.c
102039index f7bd286..76ea56a 100644
102040--- a/net/compat.c
102041+++ b/net/compat.c
102042@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
102043
102044 #define CMSG_COMPAT_FIRSTHDR(msg) \
102045 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
102046- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
102047+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
102048 (struct compat_cmsghdr __user *)NULL)
102049
102050 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
102051 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
102052 (ucmlen) <= (unsigned long) \
102053 ((mhdr)->msg_controllen - \
102054- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
102055+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
102056
102057 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
102058 struct compat_cmsghdr __user *cmsg, int cmsg_len)
102059 {
102060 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
102061- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
102062+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
102063 msg->msg_controllen)
102064 return NULL;
102065 return (struct compat_cmsghdr __user *)ptr;
102066@@ -203,7 +203,7 @@ Efault:
102067
102068 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
102069 {
102070- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102071+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102072 struct compat_cmsghdr cmhdr;
102073 struct compat_timeval ctv;
102074 struct compat_timespec cts[3];
102075@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
102076
102077 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
102078 {
102079- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102080+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102081 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
102082 int fdnum = scm->fp->count;
102083 struct file **fp = scm->fp->fp;
102084@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
102085 return -EFAULT;
102086 old_fs = get_fs();
102087 set_fs(KERNEL_DS);
102088- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
102089+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
102090 set_fs(old_fs);
102091
102092 return err;
102093@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
102094 len = sizeof(ktime);
102095 old_fs = get_fs();
102096 set_fs(KERNEL_DS);
102097- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
102098+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
102099 set_fs(old_fs);
102100
102101 if (!err) {
102102@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102103 case MCAST_JOIN_GROUP:
102104 case MCAST_LEAVE_GROUP:
102105 {
102106- struct compat_group_req __user *gr32 = (void *)optval;
102107+ struct compat_group_req __user *gr32 = (void __user *)optval;
102108 struct group_req __user *kgr =
102109 compat_alloc_user_space(sizeof(struct group_req));
102110 u32 interface;
102111@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102112 case MCAST_BLOCK_SOURCE:
102113 case MCAST_UNBLOCK_SOURCE:
102114 {
102115- struct compat_group_source_req __user *gsr32 = (void *)optval;
102116+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
102117 struct group_source_req __user *kgsr = compat_alloc_user_space(
102118 sizeof(struct group_source_req));
102119 u32 interface;
102120@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102121 }
102122 case MCAST_MSFILTER:
102123 {
102124- struct compat_group_filter __user *gf32 = (void *)optval;
102125+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102126 struct group_filter __user *kgf;
102127 u32 interface, fmode, numsrc;
102128
102129@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
102130 char __user *optval, int __user *optlen,
102131 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
102132 {
102133- struct compat_group_filter __user *gf32 = (void *)optval;
102134+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102135 struct group_filter __user *kgf;
102136 int __user *koptlen;
102137 u32 interface, fmode, numsrc;
102138@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
102139
102140 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
102141 return -EINVAL;
102142- if (copy_from_user(a, args, nas[call]))
102143+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
102144 return -EFAULT;
102145 a0 = a[0];
102146 a1 = a[1];
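
The compat socketcall hunk adds `nas[call] > sizeof a` ahead of the copy_from_user(), bounding the copy by the destination array rather than trusting the per-call size table alone. A userspace model of that table-driven fetch; the table contents and names are illustrative.

/* Bound a table-driven copy by the destination buffer, so a stale
 * or corrupted table entry cannot overrun the stack array. */
#include <stdio.h>
#include <string.h>

static const unsigned char nas[3] = { 8, 16, 48 }; /* bytes per call */

static int fetch_args(unsigned long *a, size_t asz,
                      const void *uargs, unsigned call)
{
    if (call >= sizeof(nas))          /* existing range check */
        return -1;
    if (nas[call] > asz)              /* the added guard */
        return -1;                    /* -EFAULT in the patch */
    memcpy(a, uargs, nas[call]);      /* copy_from_user() stand-in */
    return 0;
}

int main(void)
{
    unsigned long a[6];
    unsigned long user[6] = { 1, 2, 3, 4, 5, 6 };
    printf("ok: %d\n", fetch_args(a, sizeof(a), user, 2));
    printf("bad call: %d\n", fetch_args(a, sizeof(a), user, 7));
    return 0;
}
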
102147diff --git a/net/core/datagram.c b/net/core/datagram.c
102148index df493d6..1145766 100644
102149--- a/net/core/datagram.c
102150+++ b/net/core/datagram.c
102151@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
102152 }
102153
102154 kfree_skb(skb);
102155- atomic_inc(&sk->sk_drops);
102156+ atomic_inc_unchecked(&sk->sk_drops);
102157 sk_mem_reclaim_partial(sk);
102158
102159 return err;
102160diff --git a/net/core/dev.c b/net/core/dev.c
102161index 45109b7..6b58f14a 100644
102162--- a/net/core/dev.c
102163+++ b/net/core/dev.c
102164@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
102165 {
102166 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
102167 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
102168- atomic_long_inc(&dev->rx_dropped);
102169+ atomic_long_inc_unchecked(&dev->rx_dropped);
102170 kfree_skb(skb);
102171 return NET_RX_DROP;
102172 }
102173 }
102174
102175 if (unlikely(!is_skb_forwardable(dev, skb))) {
102176- atomic_long_inc(&dev->rx_dropped);
102177+ atomic_long_inc_unchecked(&dev->rx_dropped);
102178 kfree_skb(skb);
102179 return NET_RX_DROP;
102180 }
102181@@ -2987,7 +2987,7 @@ recursion_alert:
102182 drop:
102183 rcu_read_unlock_bh();
102184
102185- atomic_long_inc(&dev->tx_dropped);
102186+ atomic_long_inc_unchecked(&dev->tx_dropped);
102187 kfree_skb_list(skb);
102188 return rc;
102189 out:
102190@@ -3336,7 +3336,7 @@ enqueue:
102191
102192 local_irq_restore(flags);
102193
102194- atomic_long_inc(&skb->dev->rx_dropped);
102195+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102196 kfree_skb(skb);
102197 return NET_RX_DROP;
102198 }
102199@@ -3413,7 +3413,7 @@ int netif_rx_ni(struct sk_buff *skb)
102200 }
102201 EXPORT_SYMBOL(netif_rx_ni);
102202
102203-static void net_tx_action(struct softirq_action *h)
102204+static __latent_entropy void net_tx_action(void)
102205 {
102206 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
102207
102208@@ -3751,7 +3751,7 @@ ncls:
102209 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
102210 } else {
102211 drop:
102212- atomic_long_inc(&skb->dev->rx_dropped);
102213+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102214 kfree_skb(skb);
102215 /* Jamal, now you will not able to escape explaining
102216 * me how you were going to use this. :-)
102217@@ -4640,7 +4640,7 @@ out_unlock:
102218 return work;
102219 }
102220
102221-static void net_rx_action(struct softirq_action *h)
102222+static __latent_entropy void net_rx_action(void)
102223 {
102224 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
102225 unsigned long time_limit = jiffies + 2;
102226@@ -6676,8 +6676,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
102227 } else {
102228 netdev_stats_to_stats64(storage, &dev->stats);
102229 }
102230- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
102231- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
102232+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
102233+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
102234 return storage;
102235 }
102236 EXPORT_SYMBOL(dev_get_stats);
102237diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
102238index b94b1d2..da3ed7c 100644
102239--- a/net/core/dev_ioctl.c
102240+++ b/net/core/dev_ioctl.c
102241@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
102242 no_module = !dev;
102243 if (no_module && capable(CAP_NET_ADMIN))
102244 no_module = request_module("netdev-%s", name);
102245- if (no_module && capable(CAP_SYS_MODULE))
102246+ if (no_module && capable(CAP_SYS_MODULE)) {
102247+#ifdef CONFIG_GRKERNSEC_MODHARDEN
102248+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
102249+#else
102250 request_module("%s", name);
102251+#endif
102252+ }
102253 }
102254 EXPORT_SYMBOL(dev_load);
102255
102256diff --git a/net/core/filter.c b/net/core/filter.c
102257index f6bdc2b..76eba8e 100644
102258--- a/net/core/filter.c
102259+++ b/net/core/filter.c
102260@@ -533,7 +533,11 @@ do_pass:
102261
102262 /* Unknown instruction. */
102263 default:
102264- goto err;
102265+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
102266+ fp->code, fp->jt, fp->jf, fp->k);
102267+ kfree(addrs);
102268+ BUG();
102269+ return -EINVAL;
102270 }
102271
102272 insn++;
102273@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
102274 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
102275 int pc, ret = 0;
102276
102277- BUILD_BUG_ON(BPF_MEMWORDS > 16);
102278+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
102279
102280 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
102281 if (!masks)
102282@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
102283 if (!fp)
102284 return -ENOMEM;
102285
102286- memcpy(fp->insns, fprog->filter, fsize);
102287+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
102288
102289 fp->len = fprog->len;
102290 /* Since unattached filters are not copied back to user
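
Tightening BUILD_BUG_ON(BPF_MEMWORDS > 16) to != 16 presumably pins the exact assumption behind check_load_and_stores(): its validity mask is a u16 with one bit per scratch cell, so the assertion now documents that the mask width and the cell count must match exactly, not merely that the mask is wide enough. A sketch of the same compile-time pinning in C11 (MEMWORDS and mark_valid are invented names):

    #include <assert.h>    /* C11 static_assert */
    #include <stdint.h>

    #define MEMWORDS 16    /* stand-in for BPF_MEMWORDS */

    /* One bit of a u16 mask per scratch cell: assert exact equality so
     * the mask width and the cell count can never drift apart silently. */
    static_assert(MEMWORDS == 16, "u16 validity mask expects exactly 16 cells");

    static uint16_t mark_valid(uint16_t mask, unsigned int cell)
    {
        return (uint16_t)(mask | (1u << cell));   /* cell < MEMWORDS assumed */
    }
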
102291diff --git a/net/core/flow.c b/net/core/flow.c
102292index 1033725..340f65d 100644
102293--- a/net/core/flow.c
102294+++ b/net/core/flow.c
102295@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
102296 static int flow_entry_valid(struct flow_cache_entry *fle,
102297 struct netns_xfrm *xfrm)
102298 {
102299- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
102300+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
102301 return 0;
102302 if (fle->object && !fle->object->ops->check(fle->object))
102303 return 0;
102304@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
102305 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
102306 fcp->hash_count++;
102307 }
102308- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
102309+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
102310 flo = fle->object;
102311 if (!flo)
102312 goto ret_object;
102313@@ -263,7 +263,7 @@ nocache:
102314 }
102315 flo = resolver(net, key, family, dir, flo, ctx);
102316 if (fle) {
102317- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
102318+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
102319 if (!IS_ERR(flo))
102320 fle->object = flo;
102321 else
102322diff --git a/net/core/neighbour.c b/net/core/neighbour.c
102323index 70fe9e1..926784c 100644
102324--- a/net/core/neighbour.c
102325+++ b/net/core/neighbour.c
102326@@ -2806,7 +2806,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
102327 void __user *buffer, size_t *lenp, loff_t *ppos)
102328 {
102329 int size, ret;
102330- struct ctl_table tmp = *ctl;
102331+ ctl_table_no_const tmp = *ctl;
102332
102333 tmp.extra1 = &zero;
102334 tmp.extra2 = &unres_qlen_max;
102335@@ -2868,7 +2868,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
102336 void __user *buffer,
102337 size_t *lenp, loff_t *ppos)
102338 {
102339- struct ctl_table tmp = *ctl;
102340+ ctl_table_no_const tmp = *ctl;
102341 int ret;
102342
102343 tmp.extra1 = &zero;
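
The proc handlers above take a stack copy of the incoming ctl_table before overriding extra1/extra2. With grsecurity's constification plugin, ctl_table instances default to read-only, so mutable temporaries need the parallel ctl_table_no_const spelling; the pattern repeats in sysctl_net_core.c and the ipv4 files below. A simplified model, assuming plain typedefs where the real kernel uses a GCC plugin:

    struct ctl_table_raw {
        const char *procname;
        void       *data;
        int         maxlen;
    };

    /* Under constification the ordinary name denotes a read-only table... */
    typedef const struct ctl_table_raw ctl_table;
    /* ...and a separate alias stays writable for stack-local scratch copies. */
    typedef struct ctl_table_raw ctl_table_no_const;

    static int clamp_handler(ctl_table *ctl, int newmax)
    {
        ctl_table_no_const tmp = *ctl;  /* mutable copy of the const original */
        tmp.maxlen = newmax;            /* adjust only the copy */
        return tmp.maxlen;
    }
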
102344diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
102345index 2bf8329..2eb1423 100644
102346--- a/net/core/net-procfs.c
102347+++ b/net/core/net-procfs.c
102348@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
102349 struct rtnl_link_stats64 temp;
102350 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
102351
102352- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102353+ if (gr_proc_is_restricted())
102354+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102355+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102356+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
102357+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
102358+ else
102359+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102360 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102361 dev->name, stats->rx_bytes, stats->rx_packets,
102362 stats->rx_errors,
102363@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
102364 return 0;
102365 }
102366
102367-static const struct seq_operations dev_seq_ops = {
102368+const struct seq_operations dev_seq_ops = {
102369 .start = dev_seq_start,
102370 .next = dev_seq_next,
102371 .stop = dev_seq_stop,
102372@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
102373
102374 static int softnet_seq_open(struct inode *inode, struct file *file)
102375 {
102376- return seq_open(file, &softnet_seq_ops);
102377+ return seq_open_restrict(file, &softnet_seq_ops);
102378 }
102379
102380 static const struct file_operations softnet_seq_fops = {
102381@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
102382 else
102383 seq_printf(seq, "%04x", ntohs(pt->type));
102384
102385+#ifdef CONFIG_GRKERNSEC_HIDESYM
102386+ seq_printf(seq, " %-8s %pf\n",
102387+ pt->dev ? pt->dev->name : "", NULL);
102388+#else
102389 seq_printf(seq, " %-8s %pf\n",
102390 pt->dev ? pt->dev->name : "", pt->func);
102391+#endif
102392 }
102393
102394 return 0;
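
For restricted viewers, the /proc/net/dev hunk keeps the row format intact but substitutes zeros for every counter, so parsers of the file keep working while real traffic volumes stay hidden; the %pf hunk likewise prints NULL instead of a handler address under HIDESYM. A small sketch of the zeroed-row idea (struct and function names invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_stats { unsigned long long rx_bytes, tx_bytes; };

    /* Restricted readers get a correctly formatted row whose counters
     * are all zero; unrestricted readers see the real numbers. */
    static void print_stats_row(const char *name, const struct dev_stats *s,
                                bool restricted)
    {
        static const struct dev_stats zeros;    /* all fields zero */
        const struct dev_stats *shown = restricted ? &zeros : s;

        printf("%6s: %7llu %7llu\n", name, shown->rx_bytes, shown->tx_bytes);
    }

    int main(void)
    {
        struct dev_stats eth0 = { 123456789ULL, 987654321ULL };
        print_stats_row("eth0", &eth0, false);
        print_stats_row("eth0", &eth0, true);
        return 0;
    }
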
102395diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
102396index f2aa73b..0d1a1ea 100644
102397--- a/net/core/net-sysfs.c
102398+++ b/net/core/net-sysfs.c
102399@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
102400 {
102401 struct net_device *netdev = to_net_dev(dev);
102402 return sprintf(buf, fmt_dec,
102403- atomic_read(&netdev->carrier_changes));
102404+ atomic_read_unchecked(&netdev->carrier_changes));
102405 }
102406 static DEVICE_ATTR_RO(carrier_changes);
102407
102408diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
102409index 70d3450..eb7c528 100644
102410--- a/net/core/net_namespace.c
102411+++ b/net/core/net_namespace.c
102412@@ -663,7 +663,7 @@ static int __register_pernet_operations(struct list_head *list,
102413 int error;
102414 LIST_HEAD(net_exit_list);
102415
102416- list_add_tail(&ops->list, list);
102417+ pax_list_add_tail((struct list_head *)&ops->list, list);
102418 if (ops->init || (ops->id && ops->size)) {
102419 for_each_net(net) {
102420 error = ops_init(ops, net);
102421@@ -676,7 +676,7 @@ static int __register_pernet_operations(struct list_head *list,
102422
102423 out_undo:
102424 /* If I have an error cleanup all namespaces I initialized */
102425- list_del(&ops->list);
102426+ pax_list_del((struct list_head *)&ops->list);
102427 ops_exit_list(ops, &net_exit_list);
102428 ops_free_list(ops, &net_exit_list);
102429 return error;
102430@@ -687,7 +687,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
102431 struct net *net;
102432 LIST_HEAD(net_exit_list);
102433
102434- list_del(&ops->list);
102435+ pax_list_del((struct list_head *)&ops->list);
102436 for_each_net(net)
102437 list_add_tail(&net->exit_list, &net_exit_list);
102438 ops_exit_list(ops, &net_exit_list);
102439@@ -821,7 +821,7 @@ int register_pernet_device(struct pernet_operations *ops)
102440 mutex_lock(&net_mutex);
102441 error = register_pernet_operations(&pernet_list, ops);
102442 if (!error && (first_device == &pernet_list))
102443- first_device = &ops->list;
102444+ first_device = (struct list_head *)&ops->list;
102445 mutex_unlock(&net_mutex);
102446 return error;
102447 }
102448diff --git a/net/core/netpoll.c b/net/core/netpoll.c
102449index c126a87..10ad89d 100644
102450--- a/net/core/netpoll.c
102451+++ b/net/core/netpoll.c
102452@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102453 struct udphdr *udph;
102454 struct iphdr *iph;
102455 struct ethhdr *eth;
102456- static atomic_t ip_ident;
102457+ static atomic_unchecked_t ip_ident;
102458 struct ipv6hdr *ip6h;
102459
102460 udp_len = len + sizeof(*udph);
102461@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102462 put_unaligned(0x45, (unsigned char *)iph);
102463 iph->tos = 0;
102464 put_unaligned(htons(ip_len), &(iph->tot_len));
102465- iph->id = htons(atomic_inc_return(&ip_ident));
102466+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
102467 iph->frag_off = 0;
102468 iph->ttl = 64;
102469 iph->protocol = IPPROTO_UDP;
102470diff --git a/net/core/pktgen.c b/net/core/pktgen.c
102471index 508155b..fad080f 100644
102472--- a/net/core/pktgen.c
102473+++ b/net/core/pktgen.c
102474@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
102475 pn->net = net;
102476 INIT_LIST_HEAD(&pn->pktgen_threads);
102477 pn->pktgen_exiting = false;
102478- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
102479+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
102480 if (!pn->proc_dir) {
102481 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
102482 return -ENODEV;
102483diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
102484index 7ebed55..378bf34 100644
102485--- a/net/core/rtnetlink.c
102486+++ b/net/core/rtnetlink.c
102487@@ -61,7 +61,7 @@ struct rtnl_link {
102488 rtnl_doit_func doit;
102489 rtnl_dumpit_func dumpit;
102490 rtnl_calcit_func calcit;
102491-};
102492+} __no_const;
102493
102494 static DEFINE_MUTEX(rtnl_mutex);
102495
102496@@ -307,10 +307,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
102497 * to use the ops for creating device. So do not
102498 * fill up dellink as well. That disables rtnl_dellink.
102499 */
102500- if (ops->setup && !ops->dellink)
102501- ops->dellink = unregister_netdevice_queue;
102502+ if (ops->setup && !ops->dellink) {
102503+ pax_open_kernel();
102504+ *(void **)&ops->dellink = unregister_netdevice_queue;
102505+ pax_close_kernel();
102506+ }
102507
102508- list_add_tail(&ops->list, &link_ops);
102509+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
102510 return 0;
102511 }
102512 EXPORT_SYMBOL_GPL(__rtnl_link_register);
102513@@ -357,7 +360,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
102514 for_each_net(net) {
102515 __rtnl_kill_links(net, ops);
102516 }
102517- list_del(&ops->list);
102518+ pax_list_del((struct list_head *)&ops->list);
102519 }
102520 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
102521
102522@@ -1047,7 +1050,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
102523 (dev->ifalias &&
102524 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
102525 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
102526- atomic_read(&dev->carrier_changes)))
102527+ atomic_read_unchecked(&dev->carrier_changes)))
102528 goto nla_put_failure;
102529
102530 if (1) {
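
rtnl_link_ops lose __read_mostly in several drivers below, and the assignment to ops->dellink here is wrapped in pax_open_kernel()/pax_close_kernel(): under constification the ops structures live in read-only memory, and the kernel lifts write protection only for the one sanctioned store. A userspace analogue of that open/write/close rhythm using mprotect(), with error handling elided for brevity:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Analogue of pax_open_kernel()/pax_close_kernel(): data that is
     * normally read-only is made writable just long enough to patch it. */
    static void set_writable(void *page, size_t len, int writable)
    {
        mprotect(page, len, writable ? (PROT_READ | PROT_WRITE) : PROT_READ);
    }

    int main(void)
    {
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        char *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        strcpy(ops, "dellink=default");
        set_writable(ops, pg, 0);          /* "constify": now read-only */

        set_writable(ops, pg, 1);          /* pax_open_kernel()  */
        strcpy(ops, "dellink=patched");    /* the one sanctioned write */
        set_writable(ops, pg, 0);          /* pax_close_kernel() */

        puts(ops);
        return 0;
    }
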
102531diff --git a/net/core/scm.c b/net/core/scm.c
102532index 3b6899b..cf36238 100644
102533--- a/net/core/scm.c
102534+++ b/net/core/scm.c
102535@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
102536 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102537 {
102538 struct cmsghdr __user *cm
102539- = (__force struct cmsghdr __user *)msg->msg_control;
102540+ = (struct cmsghdr __force_user *)msg->msg_control;
102541 struct cmsghdr cmhdr;
102542 int cmlen = CMSG_LEN(len);
102543 int err;
102544@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102545 err = -EFAULT;
102546 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
102547 goto out;
102548- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
102549+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
102550 goto out;
102551 cmlen = CMSG_SPACE(len);
102552 if (msg->msg_controllen < cmlen)
102553@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
102554 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102555 {
102556 struct cmsghdr __user *cm
102557- = (__force struct cmsghdr __user*)msg->msg_control;
102558+ = (struct cmsghdr __force_user *)msg->msg_control;
102559
102560 int fdmax = 0;
102561 int fdnum = scm->fp->count;
102562@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102563 if (fdnum < fdmax)
102564 fdmax = fdnum;
102565
102566- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
102567+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
102568 i++, cmfptr++)
102569 {
102570 struct socket *sock;
102571diff --git a/net/core/skbuff.c b/net/core/skbuff.c
102572index 98d45fe..4f9608f 100644
102573--- a/net/core/skbuff.c
102574+++ b/net/core/skbuff.c
102575@@ -2121,7 +2121,7 @@ EXPORT_SYMBOL(__skb_checksum);
102576 __wsum skb_checksum(const struct sk_buff *skb, int offset,
102577 int len, __wsum csum)
102578 {
102579- const struct skb_checksum_ops ops = {
102580+ static const struct skb_checksum_ops ops = {
102581 .update = csum_partial_ext,
102582 .combine = csum_block_add_ext,
102583 };
102584@@ -3361,12 +3361,14 @@ void __init skb_init(void)
102585 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
102586 sizeof(struct sk_buff),
102587 0,
102588- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102589+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102590+ SLAB_NO_SANITIZE,
102591 NULL);
102592 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
102593 sizeof(struct sk_buff_fclones),
102594 0,
102595- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102596+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102597+ SLAB_NO_SANITIZE,
102598 NULL);
102599 }
102600
102601diff --git a/net/core/sock.c b/net/core/sock.c
102602index 71e3e5f..ab90920 100644
102603--- a/net/core/sock.c
102604+++ b/net/core/sock.c
102605@@ -443,7 +443,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102606 struct sk_buff_head *list = &sk->sk_receive_queue;
102607
102608 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
102609- atomic_inc(&sk->sk_drops);
102610+ atomic_inc_unchecked(&sk->sk_drops);
102611 trace_sock_rcvqueue_full(sk, skb);
102612 return -ENOMEM;
102613 }
102614@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102615 return err;
102616
102617 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
102618- atomic_inc(&sk->sk_drops);
102619+ atomic_inc_unchecked(&sk->sk_drops);
102620 return -ENOBUFS;
102621 }
102622
102623@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102624 skb_dst_force(skb);
102625
102626 spin_lock_irqsave(&list->lock, flags);
102627- skb->dropcount = atomic_read(&sk->sk_drops);
102628+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
102629 __skb_queue_tail(list, skb);
102630 spin_unlock_irqrestore(&list->lock, flags);
102631
102632@@ -486,7 +486,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102633 skb->dev = NULL;
102634
102635 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
102636- atomic_inc(&sk->sk_drops);
102637+ atomic_inc_unchecked(&sk->sk_drops);
102638 goto discard_and_relse;
102639 }
102640 if (nested)
102641@@ -504,7 +504,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102642 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
102643 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
102644 bh_unlock_sock(sk);
102645- atomic_inc(&sk->sk_drops);
102646+ atomic_inc_unchecked(&sk->sk_drops);
102647 goto discard_and_relse;
102648 }
102649
102650@@ -910,6 +910,7 @@ set_rcvbuf:
102651 }
102652 break;
102653
102654+#ifndef GRKERNSEC_BPF_HARDEN
102655 case SO_ATTACH_BPF:
102656 ret = -EINVAL;
102657 if (optlen == sizeof(u32)) {
102658@@ -922,7 +923,7 @@ set_rcvbuf:
102659 ret = sk_attach_bpf(ufd, sk);
102660 }
102661 break;
102662-
102663+#endif
102664 case SO_DETACH_FILTER:
102665 ret = sk_detach_filter(sk);
102666 break;
102667@@ -1026,12 +1027,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102668 struct timeval tm;
102669 } v;
102670
102671- int lv = sizeof(int);
102672- int len;
102673+ unsigned int lv = sizeof(int);
102674+ unsigned int len;
102675
102676 if (get_user(len, optlen))
102677 return -EFAULT;
102678- if (len < 0)
102679+ if (len > INT_MAX)
102680 return -EINVAL;
102681
102682 memset(&v, 0, sizeof(v));
102683@@ -1169,11 +1170,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102684
102685 case SO_PEERNAME:
102686 {
102687- char address[128];
102688+ char address[_K_SS_MAXSIZE];
102689
102690 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102691 return -ENOTCONN;
102692- if (lv < len)
102693+ if (lv < len || sizeof address < len)
102694 return -EINVAL;
102695 if (copy_to_user(optval, address, len))
102696 return -EFAULT;
102697@@ -1258,7 +1259,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102698
102699 if (len > lv)
102700 len = lv;
102701- if (copy_to_user(optval, &v, len))
102702+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102703 return -EFAULT;
102704 lenout:
102705 if (put_user(len, optlen))
102706@@ -2375,7 +2376,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102707 */
102708 smp_wmb();
102709 atomic_set(&sk->sk_refcnt, 1);
102710- atomic_set(&sk->sk_drops, 0);
102711+ atomic_set_unchecked(&sk->sk_drops, 0);
102712 }
102713 EXPORT_SYMBOL(sock_init_data);
102714
102715@@ -2503,6 +2504,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102716 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102717 int level, int type)
102718 {
102719+ struct sock_extended_err ee;
102720 struct sock_exterr_skb *serr;
102721 struct sk_buff *skb;
102722 int copied, err;
102723@@ -2524,7 +2526,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102724 sock_recv_timestamp(msg, sk, skb);
102725
102726 serr = SKB_EXT_ERR(skb);
102727- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102728+ ee = serr->ee;
102729+ put_cmsg(msg, level, type, sizeof ee, &ee);
102730
102731 msg->msg_flags |= MSG_ERRQUEUE;
102732 err = copied;
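
Two of the sock.c hunks share one idea: data bound for userspace is first copied into an exactly-sized stack temporary (`address`, `ee`), and the user-supplied length is checked against that temporary, so copy_to_user()/put_cmsg() can never read past the intended object into adjacent kernel memory. A compact sketch of the pattern (the struct is an invented stand-in):

    #include <errno.h>
    #include <string.h>

    struct extended_err { int err; int origin; };

    /* Copy at most one extended_err out of a larger in-kernel record:
     * the stack temporary isolates exactly the bytes that may be
     * exposed, and the length check refuses anything larger. */
    static int copy_err_bounded(void *dst, size_t dst_len,
                                const struct extended_err *src)
    {
        struct extended_err tmp = *src;
        if (dst_len > sizeof(tmp))
            return -EINVAL;
        memcpy(dst, &tmp, dst_len);
        return 0;
    }
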
102733diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102734index ad704c7..ca48aff 100644
102735--- a/net/core/sock_diag.c
102736+++ b/net/core/sock_diag.c
102737@@ -9,26 +9,33 @@
102738 #include <linux/inet_diag.h>
102739 #include <linux/sock_diag.h>
102740
102741-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102742+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102743 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102744 static DEFINE_MUTEX(sock_diag_table_mutex);
102745
102746 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102747 {
102748+#ifndef CONFIG_GRKERNSEC_HIDESYM
102749 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102750 cookie[1] != INET_DIAG_NOCOOKIE) &&
102751 ((u32)(unsigned long)sk != cookie[0] ||
102752 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102753 return -ESTALE;
102754 else
102755+#endif
102756 return 0;
102757 }
102758 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102759
102760 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102761 {
102762+#ifdef CONFIG_GRKERNSEC_HIDESYM
102763+ cookie[0] = 0;
102764+ cookie[1] = 0;
102765+#else
102766 cookie[0] = (u32)(unsigned long)sk;
102767 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102768+#endif
102769 }
102770 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102771
102772@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102773 mutex_lock(&sock_diag_table_mutex);
102774 if (sock_diag_handlers[hndl->family])
102775 err = -EBUSY;
102776- else
102777+ else {
102778+ pax_open_kernel();
102779 sock_diag_handlers[hndl->family] = hndl;
102780+ pax_close_kernel();
102781+ }
102782 mutex_unlock(&sock_diag_table_mutex);
102783
102784 return err;
102785@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102786
102787 mutex_lock(&sock_diag_table_mutex);
102788 BUG_ON(sock_diag_handlers[family] != hnld);
102789+ pax_open_kernel();
102790 sock_diag_handlers[family] = NULL;
102791+ pax_close_kernel();
102792 mutex_unlock(&sock_diag_table_mutex);
102793 }
102794 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102795diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102796index 8ce351f..2c388f7 100644
102797--- a/net/core/sysctl_net_core.c
102798+++ b/net/core/sysctl_net_core.c
102799@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102800 {
102801 unsigned int orig_size, size;
102802 int ret, i;
102803- struct ctl_table tmp = {
102804+ ctl_table_no_const tmp = {
102805 .data = &size,
102806 .maxlen = sizeof(size),
102807 .mode = table->mode
102808@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102809 void __user *buffer, size_t *lenp, loff_t *ppos)
102810 {
102811 char id[IFNAMSIZ];
102812- struct ctl_table tbl = {
102813+ ctl_table_no_const tbl = {
102814 .data = id,
102815 .maxlen = IFNAMSIZ,
102816 };
102817@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102818 static int proc_do_rss_key(struct ctl_table *table, int write,
102819 void __user *buffer, size_t *lenp, loff_t *ppos)
102820 {
102821- struct ctl_table fake_table;
102822+ ctl_table_no_const fake_table;
102823 char buf[NETDEV_RSS_KEY_LEN * 3];
102824
102825 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102826@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
102827 .mode = 0444,
102828 .proc_handler = proc_do_rss_key,
102829 },
102830-#ifdef CONFIG_BPF_JIT
102831+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102832 {
102833 .procname = "bpf_jit_enable",
102834 .data = &bpf_jit_enable,
102835@@ -411,13 +411,12 @@ static struct ctl_table netns_core_table[] = {
102836
102837 static __net_init int sysctl_core_net_init(struct net *net)
102838 {
102839- struct ctl_table *tbl;
102840+ ctl_table_no_const *tbl = NULL;
102841
102842 net->core.sysctl_somaxconn = SOMAXCONN;
102843
102844- tbl = netns_core_table;
102845 if (!net_eq(net, &init_net)) {
102846- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102847+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102848 if (tbl == NULL)
102849 goto err_dup;
102850
102851@@ -427,17 +426,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102852 if (net->user_ns != &init_user_ns) {
102853 tbl[0].procname = NULL;
102854 }
102855- }
102856-
102857- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102858+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102859+ } else
102860+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102861 if (net->core.sysctl_hdr == NULL)
102862 goto err_reg;
102863
102864 return 0;
102865
102866 err_reg:
102867- if (tbl != netns_core_table)
102868- kfree(tbl);
102869+ kfree(tbl);
102870 err_dup:
102871 return -ENOMEM;
102872 }
102873@@ -452,7 +450,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102874 kfree(tbl);
102875 }
102876
102877-static __net_initdata struct pernet_operations sysctl_core_ops = {
102878+static __net_initconst struct pernet_operations sysctl_core_ops = {
102879 .init = sysctl_core_net_init,
102880 .exit = sysctl_core_net_exit,
102881 };
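
This sysctl registration rewrite recurs in the 6lowpan reassembly code, devinet.c, ip_fragment.c and route.c below: only non-initial namespaces get a kmemdup'd, writable copy of the table, the initial namespace registers the static table directly, and `tbl` starts out NULL so the error path can kfree() unconditionally without the old "tbl != static table" guard. A userspace model of the control flow (all names invented):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct table { int somaxconn; };
    static struct table template_table = { 128 };
    static struct table *registered;

    static int register_for_ns(int is_init_ns)
    {
        struct table *tbl = NULL;   /* stays NULL in the initial namespace */

        if (!is_init_ns) {
            tbl = malloc(sizeof(*tbl));
            if (!tbl)
                goto err;
            memcpy(tbl, &template_table, sizeof(*tbl));
            registered = tbl;       /* namespace-private, mutable copy */
        } else {
            registered = &template_table;  /* init ns: static table as-is */
        }
        return 0;
    err:
        free(tbl);   /* free(NULL) is a no-op, so this is safe on every
                      * path, mirroring the unconditional kfree(tbl) */
        return -ENOMEM;
    }
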
102882diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102883index 8102286..a0c2755 100644
102884--- a/net/decnet/af_decnet.c
102885+++ b/net/decnet/af_decnet.c
102886@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102887 .sysctl_rmem = sysctl_decnet_rmem,
102888 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102889 .obj_size = sizeof(struct dn_sock),
102890+ .slab_flags = SLAB_USERCOPY,
102891 };
102892
102893 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102894diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102895index b2c26b0..41f803e 100644
102896--- a/net/decnet/dn_dev.c
102897+++ b/net/decnet/dn_dev.c
102898@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102899 .extra1 = &min_t3,
102900 .extra2 = &max_t3
102901 },
102902- {0}
102903+ { }
102904 },
102905 };
102906
102907diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102908index 5325b54..a0d4d69 100644
102909--- a/net/decnet/sysctl_net_decnet.c
102910+++ b/net/decnet/sysctl_net_decnet.c
102911@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102912
102913 if (len > *lenp) len = *lenp;
102914
102915- if (copy_to_user(buffer, addr, len))
102916+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102917 return -EFAULT;
102918
102919 *lenp = len;
102920@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102921
102922 if (len > *lenp) len = *lenp;
102923
102924- if (copy_to_user(buffer, devname, len))
102925+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102926 return -EFAULT;
102927
102928 *lenp = len;
102929diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102930index a2c7e4c..3dc9f67 100644
102931--- a/net/hsr/hsr_netlink.c
102932+++ b/net/hsr/hsr_netlink.c
102933@@ -102,7 +102,7 @@ nla_put_failure:
102934 return -EMSGSIZE;
102935 }
102936
102937-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102938+static struct rtnl_link_ops hsr_link_ops = {
102939 .kind = "hsr",
102940 .maxtype = IFLA_HSR_MAX,
102941 .policy = hsr_policy,
102942diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
102943index 055fbb7..c0dbe60 100644
102944--- a/net/ieee802154/6lowpan/core.c
102945+++ b/net/ieee802154/6lowpan/core.c
102946@@ -217,7 +217,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102947 dev_put(real_dev);
102948 }
102949
102950-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102951+static struct rtnl_link_ops lowpan_link_ops = {
102952 .kind = "lowpan",
102953 .priv_size = sizeof(struct lowpan_dev_info),
102954 .setup = lowpan_setup,
102955diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
102956index f46e4d1..30231f1 100644
102957--- a/net/ieee802154/6lowpan/reassembly.c
102958+++ b/net/ieee802154/6lowpan/reassembly.c
102959@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102960
102961 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102962 {
102963- struct ctl_table *table;
102964+ ctl_table_no_const *table = NULL;
102965 struct ctl_table_header *hdr;
102966 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102967 net_ieee802154_lowpan(net);
102968
102969- table = lowpan_frags_ns_ctl_table;
102970 if (!net_eq(net, &init_net)) {
102971- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102972+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102973 GFP_KERNEL);
102974 if (table == NULL)
102975 goto err_alloc;
102976@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102977 /* Don't export sysctls to unprivileged users */
102978 if (net->user_ns != &init_user_ns)
102979 table[0].procname = NULL;
102980- }
102981-
102982- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102983+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102984+ } else
102985+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102986 if (hdr == NULL)
102987 goto err_reg;
102988
102989@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102990 return 0;
102991
102992 err_reg:
102993- if (!net_eq(net, &init_net))
102994- kfree(table);
102995+ kfree(table);
102996 err_alloc:
102997 return -ENOMEM;
102998 }
102999diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
103000index d2e49ba..f78e8aa 100644
103001--- a/net/ipv4/af_inet.c
103002+++ b/net/ipv4/af_inet.c
103003@@ -1390,7 +1390,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
103004 return ip_recv_error(sk, msg, len, addr_len);
103005 #if IS_ENABLED(CONFIG_IPV6)
103006 if (sk->sk_family == AF_INET6)
103007- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
103008+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
103009 #endif
103010 return -EINVAL;
103011 }
103012diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
103013index 3a8985c..9d2a870 100644
103014--- a/net/ipv4/devinet.c
103015+++ b/net/ipv4/devinet.c
103016@@ -69,7 +69,8 @@
103017
103018 static struct ipv4_devconf ipv4_devconf = {
103019 .data = {
103020- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
103021+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
103022+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
103023 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
103024 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
103025 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
103026@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
103027
103028 static struct ipv4_devconf ipv4_devconf_dflt = {
103029 .data = {
103030- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
103031+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
103032+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
103033 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
103034 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
103035 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
103036@@ -1549,7 +1551,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
103037 idx = 0;
103038 head = &net->dev_index_head[h];
103039 rcu_read_lock();
103040- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103041+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103042 net->dev_base_seq;
103043 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103044 if (idx < s_idx)
103045@@ -1868,7 +1870,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
103046 idx = 0;
103047 head = &net->dev_index_head[h];
103048 rcu_read_lock();
103049- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103050+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103051 net->dev_base_seq;
103052 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103053 if (idx < s_idx)
103054@@ -2103,7 +2105,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
103055 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
103056 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
103057
103058-static struct devinet_sysctl_table {
103059+static const struct devinet_sysctl_table {
103060 struct ctl_table_header *sysctl_header;
103061 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
103062 } devinet_sysctl = {
103063@@ -2235,7 +2237,7 @@ static __net_init int devinet_init_net(struct net *net)
103064 int err;
103065 struct ipv4_devconf *all, *dflt;
103066 #ifdef CONFIG_SYSCTL
103067- struct ctl_table *tbl = ctl_forward_entry;
103068+ ctl_table_no_const *tbl = NULL;
103069 struct ctl_table_header *forw_hdr;
103070 #endif
103071
103072@@ -2253,7 +2255,7 @@ static __net_init int devinet_init_net(struct net *net)
103073 goto err_alloc_dflt;
103074
103075 #ifdef CONFIG_SYSCTL
103076- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
103077+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
103078 if (tbl == NULL)
103079 goto err_alloc_ctl;
103080
103081@@ -2273,7 +2275,10 @@ static __net_init int devinet_init_net(struct net *net)
103082 goto err_reg_dflt;
103083
103084 err = -ENOMEM;
103085- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103086+ if (!net_eq(net, &init_net))
103087+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103088+ else
103089+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
103090 if (forw_hdr == NULL)
103091 goto err_reg_ctl;
103092 net->ipv4.forw_hdr = forw_hdr;
103093@@ -2289,8 +2294,7 @@ err_reg_ctl:
103094 err_reg_dflt:
103095 __devinet_sysctl_unregister(all);
103096 err_reg_all:
103097- if (tbl != ctl_forward_entry)
103098- kfree(tbl);
103099+ kfree(tbl);
103100 err_alloc_ctl:
103101 #endif
103102 if (dflt != &ipv4_devconf_dflt)
103103diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
103104index 23b9b3e..60cf0c4 100644
103105--- a/net/ipv4/fib_frontend.c
103106+++ b/net/ipv4/fib_frontend.c
103107@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
103108 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103109 fib_sync_up(dev);
103110 #endif
103111- atomic_inc(&net->ipv4.dev_addr_genid);
103112+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103113 rt_cache_flush(dev_net(dev));
103114 break;
103115 case NETDEV_DOWN:
103116 fib_del_ifaddr(ifa, NULL);
103117- atomic_inc(&net->ipv4.dev_addr_genid);
103118+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103119 if (ifa->ifa_dev->ifa_list == NULL) {
103120 /* Last address was deleted from this interface.
103121 * Disable IP.
103122@@ -1063,7 +1063,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
103123 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103124 fib_sync_up(dev);
103125 #endif
103126- atomic_inc(&net->ipv4.dev_addr_genid);
103127+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103128 rt_cache_flush(net);
103129 break;
103130 case NETDEV_DOWN:
103131diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
103132index 1e2090e..351a724 100644
103133--- a/net/ipv4/fib_semantics.c
103134+++ b/net/ipv4/fib_semantics.c
103135@@ -753,7 +753,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
103136 nh->nh_saddr = inet_select_addr(nh->nh_dev,
103137 nh->nh_gw,
103138 nh->nh_parent->fib_scope);
103139- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
103140+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
103141
103142 return nh->nh_saddr;
103143 }
103144diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
103145index ff069f6..335e752 100644
103146--- a/net/ipv4/fou.c
103147+++ b/net/ipv4/fou.c
103148@@ -771,12 +771,12 @@ EXPORT_SYMBOL(gue_build_header);
103149
103150 #ifdef CONFIG_NET_FOU_IP_TUNNELS
103151
103152-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
103153+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
103154 .encap_hlen = fou_encap_hlen,
103155 .build_header = fou_build_header,
103156 };
103157
103158-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
103159+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
103160 .encap_hlen = gue_encap_hlen,
103161 .build_header = gue_build_header,
103162 };
103163diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
103164index 9111a4e..3576905 100644
103165--- a/net/ipv4/inet_hashtables.c
103166+++ b/net/ipv4/inet_hashtables.c
103167@@ -18,6 +18,7 @@
103168 #include <linux/sched.h>
103169 #include <linux/slab.h>
103170 #include <linux/wait.h>
103171+#include <linux/security.h>
103172
103173 #include <net/inet_connection_sock.h>
103174 #include <net/inet_hashtables.h>
103175@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
103176 return inet_ehashfn(net, laddr, lport, faddr, fport);
103177 }
103178
103179+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
103180+
103181 /*
103182 * Allocate and initialize a new local port bind bucket.
103183 * The bindhash mutex for snum's hash chain must be held here.
103184@@ -554,6 +557,8 @@ ok:
103185 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
103186 spin_unlock(&head->lock);
103187
103188+ gr_update_task_in_ip_table(inet_sk(sk));
103189+
103190 if (tw) {
103191 inet_twsk_deschedule(tw, death_row);
103192 while (twrefcnt) {
103193diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
103194index 241afd7..31b95d5 100644
103195--- a/net/ipv4/inetpeer.c
103196+++ b/net/ipv4/inetpeer.c
103197@@ -461,7 +461,7 @@ relookup:
103198 if (p) {
103199 p->daddr = *daddr;
103200 atomic_set(&p->refcnt, 1);
103201- atomic_set(&p->rid, 0);
103202+ atomic_set_unchecked(&p->rid, 0);
103203 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
103204 p->rate_tokens = 0;
103205 /* 60*HZ is arbitrary, but chosen enough high so that the first
103206diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
103207index 145a50c..5dd8cc5 100644
103208--- a/net/ipv4/ip_fragment.c
103209+++ b/net/ipv4/ip_fragment.c
103210@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
103211 return 0;
103212
103213 start = qp->rid;
103214- end = atomic_inc_return(&peer->rid);
103215+ end = atomic_inc_return_unchecked(&peer->rid);
103216 qp->rid = end;
103217
103218 rc = qp->q.fragments && (end - start) > max;
103219@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
103220
103221 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103222 {
103223- struct ctl_table *table;
103224+ ctl_table_no_const *table = NULL;
103225 struct ctl_table_header *hdr;
103226
103227- table = ip4_frags_ns_ctl_table;
103228 if (!net_eq(net, &init_net)) {
103229- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103230+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103231 if (table == NULL)
103232 goto err_alloc;
103233
103234@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103235 /* Don't export sysctls to unprivileged users */
103236 if (net->user_ns != &init_user_ns)
103237 table[0].procname = NULL;
103238- }
103239+ hdr = register_net_sysctl(net, "net/ipv4", table);
103240+ } else
103241+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
103242
103243- hdr = register_net_sysctl(net, "net/ipv4", table);
103244 if (hdr == NULL)
103245 goto err_reg;
103246
103247@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103248 return 0;
103249
103250 err_reg:
103251- if (!net_eq(net, &init_net))
103252- kfree(table);
103253+ kfree(table);
103254 err_alloc:
103255 return -ENOMEM;
103256 }
103257diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
103258index 6207275f..00323a2 100644
103259--- a/net/ipv4/ip_gre.c
103260+++ b/net/ipv4/ip_gre.c
103261@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
103262 module_param(log_ecn_error, bool, 0644);
103263 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103264
103265-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
103266+static struct rtnl_link_ops ipgre_link_ops;
103267 static int ipgre_tunnel_init(struct net_device *dev);
103268
103269 static int ipgre_net_id __read_mostly;
103270@@ -817,7 +817,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
103271 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
103272 };
103273
103274-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103275+static struct rtnl_link_ops ipgre_link_ops = {
103276 .kind = "gre",
103277 .maxtype = IFLA_GRE_MAX,
103278 .policy = ipgre_policy,
103279@@ -832,7 +832,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103280 .get_link_net = ip_tunnel_get_link_net,
103281 };
103282
103283-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
103284+static struct rtnl_link_ops ipgre_tap_ops = {
103285 .kind = "gretap",
103286 .maxtype = IFLA_GRE_MAX,
103287 .policy = ipgre_policy,
103288diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
103289index 3d4da2c..40f9c29 100644
103290--- a/net/ipv4/ip_input.c
103291+++ b/net/ipv4/ip_input.c
103292@@ -147,6 +147,10 @@
103293 #include <linux/mroute.h>
103294 #include <linux/netlink.h>
103295
103296+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103297+extern int grsec_enable_blackhole;
103298+#endif
103299+
103300 /*
103301 * Process Router Attention IP option (RFC 2113)
103302 */
103303@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
103304 if (!raw) {
103305 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
103306 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
103307+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103308+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103309+#endif
103310 icmp_send(skb, ICMP_DEST_UNREACH,
103311 ICMP_PROT_UNREACH, 0);
103312 }
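
GRKERNSEC_BLACKHOLE, used here and in the TCP/UDP paths elsewhere in the patch, suppresses the ICMP protocol-unreachable reply for unsolicited traffic unless blackholing is disabled or the packet arrived on loopback, plausibly denying a port scanner its usual negative feedback. The gate itself is tiny; a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static int blackhole_enabled = 1;   /* models grsec_enable_blackhole */

    /* Loopback always gets an answer (local tooling depends on it); for
     * everything else, stay silent while blackholing is enabled. */
    static bool should_send_icmp_error(bool from_loopback)
    {
        return !blackhole_enabled || from_loopback;
    }

    int main(void)
    {
        printf("external probe answered: %d\n", should_send_icmp_error(false));
        printf("loopback probe answered: %d\n", should_send_icmp_error(true));
        return 0;
    }
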
103313diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
103314index 5cd9927..8610b9f 100644
103315--- a/net/ipv4/ip_sockglue.c
103316+++ b/net/ipv4/ip_sockglue.c
103317@@ -1254,7 +1254,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103318 len = min_t(unsigned int, len, opt->optlen);
103319 if (put_user(len, optlen))
103320 return -EFAULT;
103321- if (copy_to_user(optval, opt->__data, len))
103322+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
103323+ copy_to_user(optval, opt->__data, len))
103324 return -EFAULT;
103325 return 0;
103326 }
103327@@ -1388,7 +1389,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103328 if (sk->sk_type != SOCK_STREAM)
103329 return -ENOPROTOOPT;
103330
103331- msg.msg_control = (__force void *) optval;
103332+ msg.msg_control = (__force_kernel void *) optval;
103333 msg.msg_controllen = len;
103334 msg.msg_flags = flags;
103335
103336diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
103337index 94efe14..1453fcc 100644
103338--- a/net/ipv4/ip_vti.c
103339+++ b/net/ipv4/ip_vti.c
103340@@ -45,7 +45,7 @@
103341 #include <net/net_namespace.h>
103342 #include <net/netns/generic.h>
103343
103344-static struct rtnl_link_ops vti_link_ops __read_mostly;
103345+static struct rtnl_link_ops vti_link_ops;
103346
103347 static int vti_net_id __read_mostly;
103348 static int vti_tunnel_init(struct net_device *dev);
103349@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
103350 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
103351 };
103352
103353-static struct rtnl_link_ops vti_link_ops __read_mostly = {
103354+static struct rtnl_link_ops vti_link_ops = {
103355 .kind = "vti",
103356 .maxtype = IFLA_VTI_MAX,
103357 .policy = vti_policy,
103358diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
103359index b26376e..fc3d733 100644
103360--- a/net/ipv4/ipconfig.c
103361+++ b/net/ipv4/ipconfig.c
103362@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
103363
103364 mm_segment_t oldfs = get_fs();
103365 set_fs(get_ds());
103366- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103367+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103368 set_fs(oldfs);
103369 return res;
103370 }
103371@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
103372
103373 mm_segment_t oldfs = get_fs();
103374 set_fs(get_ds());
103375- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103376+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103377 set_fs(oldfs);
103378 return res;
103379 }
103380@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
103381
103382 mm_segment_t oldfs = get_fs();
103383 set_fs(get_ds());
103384- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
103385+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
103386 set_fs(oldfs);
103387 return res;
103388 }
103389diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
103390index 915d215..48d1db7 100644
103391--- a/net/ipv4/ipip.c
103392+++ b/net/ipv4/ipip.c
103393@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103394 static int ipip_net_id __read_mostly;
103395
103396 static int ipip_tunnel_init(struct net_device *dev);
103397-static struct rtnl_link_ops ipip_link_ops __read_mostly;
103398+static struct rtnl_link_ops ipip_link_ops;
103399
103400 static int ipip_err(struct sk_buff *skb, u32 info)
103401 {
103402@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
103403 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
103404 };
103405
103406-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
103407+static struct rtnl_link_ops ipip_link_ops = {
103408 .kind = "ipip",
103409 .maxtype = IFLA_IPTUN_MAX,
103410 .policy = ipip_policy,
103411diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
103412index f95b6f9..2ee2097 100644
103413--- a/net/ipv4/netfilter/arp_tables.c
103414+++ b/net/ipv4/netfilter/arp_tables.c
103415@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
103416 #endif
103417
103418 static int get_info(struct net *net, void __user *user,
103419- const int *len, int compat)
103420+ int len, int compat)
103421 {
103422 char name[XT_TABLE_MAXNAMELEN];
103423 struct xt_table *t;
103424 int ret;
103425
103426- if (*len != sizeof(struct arpt_getinfo)) {
103427- duprintf("length %u != %Zu\n", *len,
103428+ if (len != sizeof(struct arpt_getinfo)) {
103429+ duprintf("length %u != %Zu\n", len,
103430 sizeof(struct arpt_getinfo));
103431 return -EINVAL;
103432 }
103433@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
103434 info.size = private->size;
103435 strcpy(info.name, name);
103436
103437- if (copy_to_user(user, &info, *len) != 0)
103438+ if (copy_to_user(user, &info, len) != 0)
103439 ret = -EFAULT;
103440 else
103441 ret = 0;
103442@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
103443
103444 switch (cmd) {
103445 case ARPT_SO_GET_INFO:
103446- ret = get_info(sock_net(sk), user, len, 1);
103447+ ret = get_info(sock_net(sk), user, *len, 1);
103448 break;
103449 case ARPT_SO_GET_ENTRIES:
103450 ret = compat_get_entries(sock_net(sk), user, len);
103451@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
103452
103453 switch (cmd) {
103454 case ARPT_SO_GET_INFO:
103455- ret = get_info(sock_net(sk), user, len, 0);
103456+ ret = get_info(sock_net(sk), user, *len, 0);
103457 break;
103458
103459 case ARPT_SO_GET_ENTRIES:
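
get_info() now receives the length by value instead of through `const int *len`, in arp_tables above and ip_tables below. The motivation isn't spelled out in the patch; a reasonable reading is check/use hygiene: once the length is a by-value parameter, the value that passed the `len != sizeof(...)` test is necessarily the value later handed to copy_to_user(), with no pointer anyone could redirect or rewrite in between. A sketch of the frozen-length idea (the struct is an invented stand-in):

    #include <errno.h>
    #include <string.h>

    struct info { char name[32]; unsigned int size; };

    /* Taking `len` by value freezes it: the length that passes the
     * check is, by construction, the same length handed to memcpy(),
     * leaving no window between check and use. */
    static int get_info(void *dst, const struct info *src, int len)
    {
        if (len != (int)sizeof(struct info))
            return -EINVAL;
        memcpy(dst, src, (size_t)len);
        return 0;
    }
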
103460diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
103461index cf5e82f..75a20f5 100644
103462--- a/net/ipv4/netfilter/ip_tables.c
103463+++ b/net/ipv4/netfilter/ip_tables.c
103464@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
103465 #endif
103466
103467 static int get_info(struct net *net, void __user *user,
103468- const int *len, int compat)
103469+ int len, int compat)
103470 {
103471 char name[XT_TABLE_MAXNAMELEN];
103472 struct xt_table *t;
103473 int ret;
103474
103475- if (*len != sizeof(struct ipt_getinfo)) {
103476- duprintf("length %u != %zu\n", *len,
103477+ if (len != sizeof(struct ipt_getinfo)) {
103478+ duprintf("length %u != %zu\n", len,
103479 sizeof(struct ipt_getinfo));
103480 return -EINVAL;
103481 }
103482@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
103483 info.size = private->size;
103484 strcpy(info.name, name);
103485
103486- if (copy_to_user(user, &info, *len) != 0)
103487+ if (copy_to_user(user, &info, len) != 0)
103488 ret = -EFAULT;
103489 else
103490 ret = 0;
103491@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103492
103493 switch (cmd) {
103494 case IPT_SO_GET_INFO:
103495- ret = get_info(sock_net(sk), user, len, 1);
103496+ ret = get_info(sock_net(sk), user, *len, 1);
103497 break;
103498 case IPT_SO_GET_ENTRIES:
103499 ret = compat_get_entries(sock_net(sk), user, len);
103500@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103501
103502 switch (cmd) {
103503 case IPT_SO_GET_INFO:
103504- ret = get_info(sock_net(sk), user, len, 0);
103505+ ret = get_info(sock_net(sk), user, *len, 0);
103506 break;
103507
103508 case IPT_SO_GET_ENTRIES:
103509diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103510index e90f83a..3e6acca 100644
103511--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
103512+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103513@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
103514 spin_lock_init(&cn->lock);
103515
103516 #ifdef CONFIG_PROC_FS
103517- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
103518+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
103519 if (!cn->procdir) {
103520 pr_err("Unable to proc dir entry\n");
103521 return -ENOMEM;
103522diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
103523index 208d543..ab6c0ba 100644
103524--- a/net/ipv4/ping.c
103525+++ b/net/ipv4/ping.c
103526@@ -59,7 +59,7 @@ struct ping_table {
103527 };
103528
103529 static struct ping_table ping_table;
103530-struct pingv6_ops pingv6_ops;
103531+struct pingv6_ops *pingv6_ops;
103532 EXPORT_SYMBOL_GPL(pingv6_ops);
103533
103534 static u16 ping_port_rover;
103535@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
103536 if (sk_hashed(sk)) {
103537 write_lock_bh(&ping_table.lock);
103538 hlist_nulls_del(&sk->sk_nulls_node);
103539+ sk_nulls_node_init(&sk->sk_nulls_node);
103540 sock_put(sk);
103541 isk->inet_num = 0;
103542 isk->inet_sport = 0;
103543@@ -358,7 +359,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
103544 return -ENODEV;
103545 }
103546 }
103547- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
103548+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
103549 scoped);
103550 rcu_read_unlock();
103551
103552@@ -566,7 +567,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103553 }
103554 #if IS_ENABLED(CONFIG_IPV6)
103555 } else if (skb->protocol == htons(ETH_P_IPV6)) {
103556- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
103557+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
103558 #endif
103559 }
103560
103561@@ -584,7 +585,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103562 info, (u8 *)icmph);
103563 #if IS_ENABLED(CONFIG_IPV6)
103564 } else if (family == AF_INET6) {
103565- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
103566+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
103567 info, (u8 *)icmph);
103568 #endif
103569 }
103570@@ -918,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103571 }
103572
103573 if (inet6_sk(sk)->rxopt.all)
103574- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
103575+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
103576 if (skb->protocol == htons(ETH_P_IPV6) &&
103577 inet6_sk(sk)->rxopt.all)
103578- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
103579+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
103580 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
103581 ip_cmsg_recv(msg, skb);
103582 #endif
103583@@ -1116,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
103584 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103585 0, sock_i_ino(sp),
103586 atomic_read(&sp->sk_refcnt), sp,
103587- atomic_read(&sp->sk_drops));
103588+ atomic_read_unchecked(&sp->sk_drops));
103589 }
103590
103591 static int ping_v4_seq_show(struct seq_file *seq, void *v)
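
The one-line sk_nulls_node_init() added to ping_unhash() re-initializes the node after hlist_nulls_del(), so a second unhash dereferences NULL rather than a stale pprev into freed memory; this appears to correspond to the upstream fix for the ping-socket use-after-free tracked as CVE-2015-3636. A minimal model of why re-initializing after delete matters:

    #include <stddef.h>

    struct node { struct node *next; struct node **pprev; };

    static void node_del(struct node *n)
    {
        *n->pprev = n->next;             /* unlink from the predecessor */
        if (n->next)
            n->next->pprev = n->pprev;
    }

    /* Re-initializing after deletion turns an accidental second delete
     * into an immediate NULL dereference -- loud and diagnosable --
     * instead of a write through a dangling pprev into freed memory. */
    static void node_init(struct node *n)
    {
        n->next = NULL;
        n->pprev = NULL;
    }
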
103592diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
103593index f027a70..2e64edc 100644
103594--- a/net/ipv4/raw.c
103595+++ b/net/ipv4/raw.c
103596@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
103597 int raw_rcv(struct sock *sk, struct sk_buff *skb)
103598 {
103599 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
103600- atomic_inc(&sk->sk_drops);
103601+ atomic_inc_unchecked(&sk->sk_drops);
103602 kfree_skb(skb);
103603 return NET_RX_DROP;
103604 }
103605@@ -773,16 +773,20 @@ static int raw_init(struct sock *sk)
103606
103607 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
103608 {
103609+ struct icmp_filter filter;
103610+
103611 if (optlen > sizeof(struct icmp_filter))
103612 optlen = sizeof(struct icmp_filter);
103613- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
103614+ if (copy_from_user(&filter, optval, optlen))
103615 return -EFAULT;
103616+ raw_sk(sk)->filter = filter;
103617 return 0;
103618 }
103619
103620 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
103621 {
103622 int len, ret = -EFAULT;
103623+ struct icmp_filter filter;
103624
103625 if (get_user(len, optlen))
103626 goto out;
103627@@ -792,8 +796,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
103628 if (len > sizeof(struct icmp_filter))
103629 len = sizeof(struct icmp_filter);
103630 ret = -EFAULT;
103631- if (put_user(len, optlen) ||
103632- copy_to_user(optval, &raw_sk(sk)->filter, len))
103633+ filter = raw_sk(sk)->filter;
103634+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
103635 goto out;
103636 ret = 0;
103637 out: return ret;
103638@@ -1022,7 +1026,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
103639 0, 0L, 0,
103640 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
103641 0, sock_i_ino(sp),
103642- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
103643+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
103644 }
103645
103646 static int raw_seq_show(struct seq_file *seq, void *v)
103647diff --git a/net/ipv4/route.c b/net/ipv4/route.c
103648index ad50643..53b7b44 100644
103649--- a/net/ipv4/route.c
103650+++ b/net/ipv4/route.c
103651@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
103652
103653 static int rt_cache_seq_open(struct inode *inode, struct file *file)
103654 {
103655- return seq_open(file, &rt_cache_seq_ops);
103656+ return seq_open_restrict(file, &rt_cache_seq_ops);
103657 }
103658
103659 static const struct file_operations rt_cache_seq_fops = {
103660@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
103661
103662 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
103663 {
103664- return seq_open(file, &rt_cpu_seq_ops);
103665+ return seq_open_restrict(file, &rt_cpu_seq_ops);
103666 }
103667
103668 static const struct file_operations rt_cpu_seq_fops = {
103669@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
103670
103671 static int rt_acct_proc_open(struct inode *inode, struct file *file)
103672 {
103673- return single_open(file, rt_acct_proc_show, NULL);
103674+ return single_open_restrict(file, rt_acct_proc_show, NULL);
103675 }
103676
103677 static const struct file_operations rt_acct_proc_fops = {
103678@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
103679
103680 #define IP_IDENTS_SZ 2048u
103681 struct ip_ident_bucket {
103682- atomic_t id;
103683+ atomic_unchecked_t id;
103684 u32 stamp32;
103685 };
103686
103687-static struct ip_ident_bucket *ip_idents __read_mostly;
103688+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103689
103690 /* In order to protect privacy, we add a perturbation to identifiers
103691 * if one generator is seldom used. This makes it hard for an attacker
103692@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103693 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103694 delta = prandom_u32_max(now - old);
103695
103696- return atomic_add_return(segs + delta, &bucket->id) - segs;
103697+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103698 }
103699 EXPORT_SYMBOL(ip_idents_reserve);
103700
103701@@ -2642,34 +2642,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103702 .maxlen = sizeof(int),
103703 .mode = 0200,
103704 .proc_handler = ipv4_sysctl_rtcache_flush,
103705+ .extra1 = &init_net,
103706 },
103707 { },
103708 };
103709
103710 static __net_init int sysctl_route_net_init(struct net *net)
103711 {
103712- struct ctl_table *tbl;
103713+ ctl_table_no_const *tbl = NULL;
103714
103715- tbl = ipv4_route_flush_table;
103716 if (!net_eq(net, &init_net)) {
103717- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103718+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103719 if (tbl == NULL)
103720 goto err_dup;
103721
103722 /* Don't export sysctls to unprivileged users */
103723 if (net->user_ns != &init_user_ns)
103724 tbl[0].procname = NULL;
103725- }
103726- tbl[0].extra1 = net;
103727+ tbl[0].extra1 = net;
103728+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103729+ } else
103730+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103731
103732- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103733 if (net->ipv4.route_hdr == NULL)
103734 goto err_reg;
103735 return 0;
103736
103737 err_reg:
103738- if (tbl != ipv4_route_flush_table)
103739- kfree(tbl);
103740+ kfree(tbl);
103741 err_dup:
103742 return -ENOMEM;
103743 }
103744@@ -2692,8 +2692,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103745
103746 static __net_init int rt_genid_init(struct net *net)
103747 {
103748- atomic_set(&net->ipv4.rt_genid, 0);
103749- atomic_set(&net->fnhe_genid, 0);
103750+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103751+ atomic_set_unchecked(&net->fnhe_genid, 0);
103752 get_random_bytes(&net->ipv4.dev_addr_genid,
103753 sizeof(net->ipv4.dev_addr_genid));
103754 return 0;
103755@@ -2737,11 +2737,7 @@ int __init ip_rt_init(void)
103756 int rc = 0;
103757 int cpu;
103758
103759- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103760- if (!ip_idents)
103761- panic("IP: failed to allocate ip_idents\n");
103762-
103763- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103764+ prandom_bytes(ip_idents, sizeof(ip_idents));
103765
103766 for_each_possible_cpu(cpu) {
103767 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
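
/*
 * Two themes in the route.c hunks above: seq_open_restrict() and
 * single_open_restrict() are grsecurity-provided wrappers that apply its
 * /proc visibility policy (GRKERNSEC_PROC) to the ordinary seq_file
 * interface, and ip_idents trades a boot-time kmalloc() plus panic() for
 * a fixed static array, so sizeof describes the whole object, the pointer
 * indirection disappears, and there is no failure path left. Sketch of
 * the allocation change with illustrative names:
 */
#define BUCKETS_SZ 2048u
static struct ip_ident_bucket buckets[BUCKETS_SZ] __read_mostly;

static void __init buckets_init(void)
{
	/* sizeof(buckets) is the full array size; with the old pointer,
	 * sizeof would have yielded only the size of the pointer */
	prandom_bytes(buckets, sizeof(buckets));
}
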
103768diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103769index d151539..5f5e247 100644
103770--- a/net/ipv4/sysctl_net_ipv4.c
103771+++ b/net/ipv4/sysctl_net_ipv4.c
103772@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103773 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103774 int ret;
103775 int range[2];
103776- struct ctl_table tmp = {
103777+ ctl_table_no_const tmp = {
103778 .data = &range,
103779 .maxlen = sizeof(range),
103780 .mode = table->mode,
103781@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103782 int ret;
103783 gid_t urange[2];
103784 kgid_t low, high;
103785- struct ctl_table tmp = {
103786+ ctl_table_no_const tmp = {
103787 .data = &urange,
103788 .maxlen = sizeof(urange),
103789 .mode = table->mode,
103790@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103791 void __user *buffer, size_t *lenp, loff_t *ppos)
103792 {
103793 char val[TCP_CA_NAME_MAX];
103794- struct ctl_table tbl = {
103795+ ctl_table_no_const tbl = {
103796 .data = val,
103797 .maxlen = TCP_CA_NAME_MAX,
103798 };
103799@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103800 void __user *buffer, size_t *lenp,
103801 loff_t *ppos)
103802 {
103803- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103804+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103805 int ret;
103806
103807 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103808@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103809 void __user *buffer, size_t *lenp,
103810 loff_t *ppos)
103811 {
103812- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103813+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103814 int ret;
103815
103816 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103817@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103818 void __user *buffer, size_t *lenp,
103819 loff_t *ppos)
103820 {
103821- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103822+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103823 struct tcp_fastopen_context *ctxt;
103824 int ret;
103825 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103826@@ -888,13 +888,12 @@ static struct ctl_table ipv4_net_table[] = {
103827
103828 static __net_init int ipv4_sysctl_init_net(struct net *net)
103829 {
103830- struct ctl_table *table;
103831+ ctl_table_no_const *table = NULL;
103832
103833- table = ipv4_net_table;
103834 if (!net_eq(net, &init_net)) {
103835 int i;
103836
103837- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103838+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103839 if (table == NULL)
103840 goto err_alloc;
103841
103842@@ -903,7 +902,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103843 table[i].data += (void *)net - (void *)&init_net;
103844 }
103845
103846- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103847+ if (!net_eq(net, &init_net))
103848+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103849+ else
103850+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103851 if (net->ipv4.ipv4_hdr == NULL)
103852 goto err_reg;
103853
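
/*
 * Throughout sysctl_net_ipv4.c the on-stack table temporaries become
 * ctl_table_no_const, the PaX/grsecurity typedef for a ctl_table that is
 * legitimately written at runtime; plain struct ctl_table instances can
 * then be treated as read-only by the constify plugin. Hedged sketch of
 * a handler using the pattern (example_handler is illustrative):
 */
static int example_handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int range[2] = { 0, 0 };
	ctl_table_no_const tmp = {		/* writable stack copy */
		.data	= &range,
		.maxlen	= sizeof(range),
		.mode	= table->mode,
	};

	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}
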
103854diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
103855index 995a225..e1e9183 100644
103856--- a/net/ipv4/tcp.c
103857+++ b/net/ipv4/tcp.c
103858@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
103859
103860 /* Race breaker. If space is freed after
103861 * wspace test but before the flags are set,
103862- * IO signal will be lost.
103863+ * IO signal will be lost. Memory barrier
103864+ * pairs with the input side.
103865 */
103866+ smp_mb__after_atomic();
103867 if (sk_stream_is_writeable(sk))
103868 mask |= POLLOUT | POLLWRNORM;
103869 }
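
/*
 * The smp_mb__after_atomic() added above pairs with an identical barrier
 * inserted into tcp_check_space() in the next file: the poll side sets
 * SOCK_NOSPACE and then re-tests free space, the input side frees space
 * and then re-tests the flag, and the barrier pair guarantees at least
 * one side observes the other's write, so the wakeup cannot be lost.
 * Condensed sketch of the two halves (not verbatim from either function):
 */
/* poll side */
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
smp_mb__after_atomic();		/* publish flag before re-testing space */
if (sk_stream_is_writeable(sk))
	mask |= POLLOUT | POLLWRNORM;

/* input side, after the queue shrank */
sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
smp_mb__after_atomic();		/* publish shrink before testing flag */
if (test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
	tcp_new_space(sk);
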
103870diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103871index f501ac04..0c5a1b2 100644
103872--- a/net/ipv4/tcp_input.c
103873+++ b/net/ipv4/tcp_input.c
103874@@ -767,7 +767,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103875 * without any lock. We want to make sure the compiler won't store
103876 * intermediate values in this location.
103877 */
103878- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103879+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103880 sk->sk_max_pacing_rate);
103881 }
103882
103883@@ -4541,7 +4541,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103884 * simplifies code)
103885 */
103886 static void
103887-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103888+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103889 struct sk_buff *head, struct sk_buff *tail,
103890 u32 start, u32 end)
103891 {
103892@@ -4799,6 +4799,8 @@ static void tcp_check_space(struct sock *sk)
103893 {
103894 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
103895 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
103896+ /* pairs with tcp_poll() */
103897+ smp_mb__after_atomic();
103898 if (sk->sk_socket &&
103899 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
103900 tcp_new_space(sk);
103901@@ -5525,6 +5527,7 @@ discard:
103902 tcp_paws_reject(&tp->rx_opt, 0))
103903 goto discard_and_undo;
103904
103905+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103906 if (th->syn) {
103907 /* We see SYN without ACK. It is an attempt at
103908 * simultaneous connect with crossed SYNs.
103909@@ -5575,6 +5578,7 @@ discard:
103910 goto discard;
103911 #endif
103912 }
103913+#endif
103914 /* "fifth, if neither of the SYN or RST bits is set then
103915 * drop the segment and return."
103916 */
103917@@ -5621,7 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103918 goto discard;
103919
103920 if (th->syn) {
103921- if (th->fin)
103922+ if (th->fin || th->urg || th->psh)
103923 goto discard;
103924 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103925 return 1;
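
/*
 * Two hardenings in the tcp_input.c hunks above: with
 * CONFIG_GRKERNSEC_NO_SIMULT_CONNECT the simultaneous-open (crossed SYN)
 * handling in SYN_SENT is compiled out entirely, and in LISTEN a SYN
 * that also carries FIN, URG or PSH is discarded before conn_request()
 * ever runs. The flag test as a standalone predicate (illustrative
 * helper, not in the patch):
 */
static inline bool tcp_syn_flags_bogus(const struct tcphdr *th)
{
	/* a legitimate connection-opening SYN carries none of these */
	return th->syn && (th->fin || th->urg || th->psh);
}
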
103926diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103927index f1756ee..8908cb0 100644
103928--- a/net/ipv4/tcp_ipv4.c
103929+++ b/net/ipv4/tcp_ipv4.c
103930@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103931 int sysctl_tcp_low_latency __read_mostly;
103932 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103933
103934+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103935+extern int grsec_enable_blackhole;
103936+#endif
103937+
103938 #ifdef CONFIG_TCP_MD5SIG
103939 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103940 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103941@@ -1475,6 +1479,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103942 return 0;
103943
103944 reset:
103945+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103946+ if (!grsec_enable_blackhole)
103947+#endif
103948 tcp_v4_send_reset(rsk, skb);
103949 discard:
103950 kfree_skb(skb);
103951@@ -1639,12 +1646,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103952 TCP_SKB_CB(skb)->sacked = 0;
103953
103954 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103955- if (!sk)
103956+ if (!sk) {
103957+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103958+ ret = 1;
103959+#endif
103960 goto no_tcp_socket;
103961-
103962+ }
103963 process:
103964- if (sk->sk_state == TCP_TIME_WAIT)
103965+ if (sk->sk_state == TCP_TIME_WAIT) {
103966+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103967+ ret = 2;
103968+#endif
103969 goto do_time_wait;
103970+ }
103971
103972 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103973 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103974@@ -1700,6 +1714,10 @@ csum_error:
103975 bad_packet:
103976 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103977 } else {
103978+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103979+ if (!grsec_enable_blackhole || (ret == 1 &&
103980+ (skb->dev->flags & IFF_LOOPBACK)))
103981+#endif
103982 tcp_v4_send_reset(NULL, skb);
103983 }
103984
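
/*
 * The GRKERNSEC_BLACKHOLE hunks above all gate the outgoing RST on the
 * same condition: grsec_enable_blackhole (a grsecurity sysctl) suppresses
 * the reset that would advertise a closed port, while ret records how we
 * got here (1: no socket found, 2: TIME_WAIT), so a failed lookup on a
 * loopback device can still be reset for local usability. The gate,
 * factored into an illustrative helper:
 */
static bool may_send_reset(int ret, const struct sk_buff *skb)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	return !grsec_enable_blackhole ||
	       (ret == 1 && (skb->dev->flags & IFF_LOOPBACK));
#else
	return true;
#endif
}
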
103985diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103986index dd11ac7..c0872da 100644
103987--- a/net/ipv4/tcp_minisocks.c
103988+++ b/net/ipv4/tcp_minisocks.c
103989@@ -27,6 +27,10 @@
103990 #include <net/inet_common.h>
103991 #include <net/xfrm.h>
103992
103993+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103994+extern int grsec_enable_blackhole;
103995+#endif
103996+
103997 int sysctl_tcp_syncookies __read_mostly = 1;
103998 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103999
104000@@ -785,7 +789,10 @@ embryonic_reset:
104001 * avoid becoming vulnerable to outside attack aiming at
104002 * resetting legit local connections.
104003 */
104004- req->rsk_ops->send_reset(sk, skb);
104005+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104006+ if (!grsec_enable_blackhole)
104007+#endif
104008+ req->rsk_ops->send_reset(sk, skb);
104009 } else if (fastopen) { /* received a valid RST pkt */
104010 reqsk_fastopen_remove(sk, req, true);
104011 tcp_reset(sk);
104012diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
104013index ebf5ff5..4d1ff32 100644
104014--- a/net/ipv4/tcp_probe.c
104015+++ b/net/ipv4/tcp_probe.c
104016@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
104017 if (cnt + width >= len)
104018 break;
104019
104020- if (copy_to_user(buf + cnt, tbuf, width))
104021+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
104022 return -EFAULT;
104023 cnt += width;
104024 }
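
/*
 * The tcpprobe_read() hunk adds a defensive bound: the record width is
 * rejected if it exceeds the kernel-side scratch buffer, regardless of
 * what the formatting code reported, so copy_to_user() can never read
 * past tbuf. The same shape as an illustrative helper:
 */
static ssize_t copy_one_record(char __user *dst, const char *tbuf,
			       size_t tbuf_size, int width)
{
	if (width < 0 || width > tbuf_size)
		return -EFAULT;		/* refuse over-long records */
	if (copy_to_user(dst, tbuf, width))
		return -EFAULT;
	return width;
}
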
104025diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
104026index 0732b78..a82bdc6 100644
104027--- a/net/ipv4/tcp_timer.c
104028+++ b/net/ipv4/tcp_timer.c
104029@@ -22,6 +22,10 @@
104030 #include <linux/gfp.h>
104031 #include <net/tcp.h>
104032
104033+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104034+extern int grsec_lastack_retries;
104035+#endif
104036+
104037 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
104038 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
104039 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
104040@@ -194,6 +198,13 @@ static int tcp_write_timeout(struct sock *sk)
104041 }
104042 }
104043
104044+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104045+ if ((sk->sk_state == TCP_LAST_ACK) &&
104046+ (grsec_lastack_retries > 0) &&
104047+ (grsec_lastack_retries < retry_until))
104048+ retry_until = grsec_lastack_retries;
104049+#endif
104050+
104051 if (retransmits_timed_out(sk, retry_until,
104052 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
104053 /* Has it gone just too far? */
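
/*
 * With GRKERNSEC_BLACKHOLE, grsec_lastack_retries (a grsecurity sysctl)
 * caps retransmissions for sockets stuck in LAST_ACK, so peers that
 * silently drop the closing handshake cannot pin local sockets for the
 * full default retry budget. The clamp as an illustrative helper:
 */
static inline int clamp_lastack(int sk_state, int retry_until,
				int lastack_retries)
{
	/* only ever tighten the budget, and only in LAST_ACK */
	if (sk_state == TCP_LAST_ACK && lastack_retries > 0 &&
	    lastack_retries < retry_until)
		return lastack_retries;
	return retry_until;
}
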
104054diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
104055index 97ef1f8b..e446c33 100644
104056--- a/net/ipv4/udp.c
104057+++ b/net/ipv4/udp.c
104058@@ -87,6 +87,7 @@
104059 #include <linux/types.h>
104060 #include <linux/fcntl.h>
104061 #include <linux/module.h>
104062+#include <linux/security.h>
104063 #include <linux/socket.h>
104064 #include <linux/sockios.h>
104065 #include <linux/igmp.h>
104066@@ -114,6 +115,10 @@
104067 #include <net/busy_poll.h>
104068 #include "udp_impl.h"
104069
104070+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104071+extern int grsec_enable_blackhole;
104072+#endif
104073+
104074 struct udp_table udp_table __read_mostly;
104075 EXPORT_SYMBOL(udp_table);
104076
104077@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
104078 return true;
104079 }
104080
104081+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
104082+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
104083+
104084 /*
104085 * This routine is called by the ICMP module when it gets some
104086 * sort of error condition. If err < 0 then the socket should
104087@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
104088 dport = usin->sin_port;
104089 if (dport == 0)
104090 return -EINVAL;
104091+
104092+ err = gr_search_udp_sendmsg(sk, usin);
104093+ if (err)
104094+ return err;
104095 } else {
104096 if (sk->sk_state != TCP_ESTABLISHED)
104097 return -EDESTADDRREQ;
104098+
104099+ err = gr_search_udp_sendmsg(sk, NULL);
104100+ if (err)
104101+ return err;
104102+
104103 daddr = inet->inet_daddr;
104104 dport = inet->inet_dport;
104105 /* Open fast path for connected socket.
104106@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
104107 IS_UDPLITE(sk));
104108 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104109 IS_UDPLITE(sk));
104110- atomic_inc(&sk->sk_drops);
104111+ atomic_inc_unchecked(&sk->sk_drops);
104112 __skb_unlink(skb, rcvq);
104113 __skb_queue_tail(&list_kill, skb);
104114 }
104115@@ -1275,6 +1292,10 @@ try_again:
104116 if (!skb)
104117 goto out;
104118
104119+ err = gr_search_udp_recvmsg(sk, skb);
104120+ if (err)
104121+ goto out_free;
104122+
104123 ulen = skb->len - sizeof(struct udphdr);
104124 copied = len;
104125 if (copied > ulen)
104126@@ -1307,7 +1328,7 @@ try_again:
104127 if (unlikely(err)) {
104128 trace_kfree_skb(skb, udp_recvmsg);
104129 if (!peeked) {
104130- atomic_inc(&sk->sk_drops);
104131+ atomic_inc_unchecked(&sk->sk_drops);
104132 UDP_INC_STATS_USER(sock_net(sk),
104133 UDP_MIB_INERRORS, is_udplite);
104134 }
104135@@ -1605,7 +1626,7 @@ csum_error:
104136 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104137 drop:
104138 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104139- atomic_inc(&sk->sk_drops);
104140+ atomic_inc_unchecked(&sk->sk_drops);
104141 kfree_skb(skb);
104142 return -1;
104143 }
104144@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104145 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104146
104147 if (!skb1) {
104148- atomic_inc(&sk->sk_drops);
104149+ atomic_inc_unchecked(&sk->sk_drops);
104150 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104151 IS_UDPLITE(sk));
104152 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104153@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104154 goto csum_error;
104155
104156 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104157+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104158+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104159+#endif
104160 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
104161
104162 /*
104163@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
104164 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
104165 0, sock_i_ino(sp),
104166 atomic_read(&sp->sk_refcnt), sp,
104167- atomic_read(&sp->sk_drops));
104168+ atomic_read_unchecked(&sp->sk_drops));
104169 }
104170
104171 int udp4_seq_show(struct seq_file *seq, void *v)
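
/*
 * The udp.c hunks wire in two grsecurity RBAC hooks, declared exactly as
 * above: gr_search_udp_sendmsg() vets the destination (NULL meaning the
 * connected peer) before any routing work, and gr_search_udp_recvmsg()
 * vets each dequeued skb before it is copied to userspace. Sketch of the
 * send-side placement (udp_send_allowed is an illustrative wrapper):
 */
extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);

static int udp_send_allowed(struct sock *sk, struct sockaddr_in *usin)
{
	/* returns 0 to allow, -errno to deny before the route lookup */
	return gr_search_udp_sendmsg(sk, usin);
}
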
104172diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
104173index 6156f68..d6ab46d 100644
104174--- a/net/ipv4/xfrm4_policy.c
104175+++ b/net/ipv4/xfrm4_policy.c
104176@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104177 fl4->flowi4_tos = iph->tos;
104178 }
104179
104180-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
104181+static int xfrm4_garbage_collect(struct dst_ops *ops)
104182 {
104183 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
104184
104185- xfrm4_policy_afinfo.garbage_collect(net);
104186+ xfrm_garbage_collect_deferred(net);
104187 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
104188 }
104189
104190@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
104191
104192 static int __net_init xfrm4_net_init(struct net *net)
104193 {
104194- struct ctl_table *table;
104195+ ctl_table_no_const *table = NULL;
104196 struct ctl_table_header *hdr;
104197
104198- table = xfrm4_policy_table;
104199 if (!net_eq(net, &init_net)) {
104200- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104201+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104202 if (!table)
104203 goto err_alloc;
104204
104205 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
104206- }
104207-
104208- hdr = register_net_sysctl(net, "net/ipv4", table);
104209+ hdr = register_net_sysctl(net, "net/ipv4", table);
104210+ } else
104211+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
104212 if (!hdr)
104213 goto err_reg;
104214
104215@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
104216 return 0;
104217
104218 err_reg:
104219- if (!net_eq(net, &init_net))
104220- kfree(table);
104221+ kfree(table);
104222 err_alloc:
104223 return -ENOMEM;
104224 }
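
/*
 * xfrm4_net_init() shows the full per-netns sysctl pattern used in many
 * hunks here: the template table stays read-only (constifiable), only
 * non-init namespaces get a writable kmemdup() copy, init_net registers
 * the template directly, and the single kfree(tbl) in the error path is
 * safe because tbl is still NULL for init_net. The companion switch from
 * xfrm4_policy_afinfo.garbage_collect(net) to
 * xfrm_garbage_collect_deferred(net) avoids calling through a writable
 * function-pointer field so the afinfo structure can be constified too.
 * Condensed sketch with illustrative names (example_table and
 * net->example_gc_thresh are not from the patch):
 */
static int __net_init example_net_init(struct net *net)
{
	ctl_table_no_const *tbl = NULL;
	struct ctl_table_header *hdr;

	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(example_table, sizeof(example_table),
			      GFP_KERNEL);
		if (!tbl)
			return -ENOMEM;
		tbl[0].data = &net->example_gc_thresh;	/* per-netns fixup */
		hdr = register_net_sysctl(net, "net/example", tbl);
	} else
		hdr = register_net_sysctl(net, "net/example", example_table);

	if (!hdr) {
		kfree(tbl);	/* NULL for init_net: kfree(NULL) is a no-op */
		return -ENOMEM;
	}
	return 0;
}
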
104225diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
104226index b603002..0de5c88 100644
104227--- a/net/ipv6/addrconf.c
104228+++ b/net/ipv6/addrconf.c
104229@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
104230 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
104231 .mtu6 = IPV6_MIN_MTU,
104232 .accept_ra = 1,
104233- .accept_redirects = 1,
104234+ .accept_redirects = 0,
104235 .autoconf = 1,
104236 .force_mld_version = 0,
104237 .mldv1_unsolicited_report_interval = 10 * HZ,
104238@@ -209,7 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
104239 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
104240 .mtu6 = IPV6_MIN_MTU,
104241 .accept_ra = 1,
104242- .accept_redirects = 1,
104243+ .accept_redirects = 0,
104244 .autoconf = 1,
104245 .force_mld_version = 0,
104246 .mldv1_unsolicited_report_interval = 10 * HZ,
104247@@ -607,7 +607,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
104248 idx = 0;
104249 head = &net->dev_index_head[h];
104250 rcu_read_lock();
104251- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
104252+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
104253 net->dev_base_seq;
104254 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104255 if (idx < s_idx)
104256@@ -2438,7 +2438,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
104257 p.iph.ihl = 5;
104258 p.iph.protocol = IPPROTO_IPV6;
104259 p.iph.ttl = 64;
104260- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
104261+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
104262
104263 if (ops->ndo_do_ioctl) {
104264 mm_segment_t oldfs = get_fs();
104265@@ -3587,16 +3587,23 @@ static const struct file_operations if6_fops = {
104266 .release = seq_release_net,
104267 };
104268
104269+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
104270+extern void unregister_ipv6_seq_ops_addr(void);
104271+
104272 static int __net_init if6_proc_net_init(struct net *net)
104273 {
104274- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
104275+ register_ipv6_seq_ops_addr(&if6_seq_ops);
104276+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
104277+ unregister_ipv6_seq_ops_addr();
104278 return -ENOMEM;
104279+ }
104280 return 0;
104281 }
104282
104283 static void __net_exit if6_proc_net_exit(struct net *net)
104284 {
104285 remove_proc_entry("if_inet6", net->proc_net);
104286+ unregister_ipv6_seq_ops_addr();
104287 }
104288
104289 static struct pernet_operations if6_proc_net_ops = {
104290@@ -4215,7 +4222,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
104291 s_ip_idx = ip_idx = cb->args[2];
104292
104293 rcu_read_lock();
104294- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104295+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104296 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
104297 idx = 0;
104298 head = &net->dev_index_head[h];
104299@@ -4864,7 +4871,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104300 rt_genid_bump_ipv6(net);
104301 break;
104302 }
104303- atomic_inc(&net->ipv6.dev_addr_genid);
104304+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
104305 }
104306
104307 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104308@@ -4884,7 +4891,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
104309 int *valp = ctl->data;
104310 int val = *valp;
104311 loff_t pos = *ppos;
104312- struct ctl_table lctl;
104313+ ctl_table_no_const lctl;
104314 int ret;
104315
104316 /*
104317@@ -4909,7 +4916,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
104318 {
104319 struct inet6_dev *idev = ctl->extra1;
104320 int min_mtu = IPV6_MIN_MTU;
104321- struct ctl_table lctl;
104322+ ctl_table_no_const lctl;
104323
104324 lctl = *ctl;
104325 lctl.extra1 = &min_mtu;
104326@@ -4984,7 +4991,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
104327 int *valp = ctl->data;
104328 int val = *valp;
104329 loff_t pos = *ppos;
104330- struct ctl_table lctl;
104331+ ctl_table_no_const lctl;
104332 int ret;
104333
104334 /*
104335diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
104336index e8c4400..a4cd5da 100644
104337--- a/net/ipv6/af_inet6.c
104338+++ b/net/ipv6/af_inet6.c
104339@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
104340 net->ipv6.sysctl.icmpv6_time = 1*HZ;
104341 net->ipv6.sysctl.flowlabel_consistency = 1;
104342 net->ipv6.sysctl.auto_flowlabels = 0;
104343- atomic_set(&net->ipv6.fib6_sernum, 1);
104344+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
104345
104346 err = ipv6_init_mibs(net);
104347 if (err)
104348diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
104349index ace8dac..bd6942d 100644
104350--- a/net/ipv6/datagram.c
104351+++ b/net/ipv6/datagram.c
104352@@ -957,5 +957,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
104353 0,
104354 sock_i_ino(sp),
104355 atomic_read(&sp->sk_refcnt), sp,
104356- atomic_read(&sp->sk_drops));
104357+ atomic_read_unchecked(&sp->sk_drops));
104358 }
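
/*
 * The sk_drops conversions in this and the surrounding files follow one
 * rule: under PaX REFCOUNT, plain atomic_t increments are
 * overflow-checked to catch reference-count bugs, so pure statistics
 * counters that may legitimately wrap are moved to the *_unchecked
 * family instead. Side-by-side sketch of the two flavours:
 */
static atomic_t refs = ATOMIC_INIT(0);			/* overflow traps */
static atomic_unchecked_t drops = ATOMIC_INIT(0);	/* overflow wraps */

static void bump_both(void)
{
	atomic_inc(&refs);		/* guarded: a real refcount */
	atomic_inc_unchecked(&drops);	/* plain: a statistic only */
}
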
104359diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
104360index a5e9519..16b7412 100644
104361--- a/net/ipv6/icmp.c
104362+++ b/net/ipv6/icmp.c
104363@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
104364
104365 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
104366 {
104367- struct ctl_table *table;
104368+ ctl_table_no_const *table;
104369
104370 table = kmemdup(ipv6_icmp_table_template,
104371 sizeof(ipv6_icmp_table_template),
104372diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
104373index 263ef41..88c7be8 100644
104374--- a/net/ipv6/ip6_fib.c
104375+++ b/net/ipv6/ip6_fib.c
104376@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
104377 int new, old;
104378
104379 do {
104380- old = atomic_read(&net->ipv6.fib6_sernum);
104381+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
104382 new = old < INT_MAX ? old + 1 : 1;
104383- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
104384+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
104385 old, new) != old);
104386 return new;
104387 }
104388diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
104389index bc28b7d..a08feea 100644
104390--- a/net/ipv6/ip6_gre.c
104391+++ b/net/ipv6/ip6_gre.c
104392@@ -71,8 +71,8 @@ struct ip6gre_net {
104393 struct net_device *fb_tunnel_dev;
104394 };
104395
104396-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
104397-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
104398+static struct rtnl_link_ops ip6gre_link_ops;
104399+static struct rtnl_link_ops ip6gre_tap_ops;
104400 static int ip6gre_tunnel_init(struct net_device *dev);
104401 static void ip6gre_tunnel_setup(struct net_device *dev);
104402 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
104403@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
104404 }
104405
104406
104407-static struct inet6_protocol ip6gre_protocol __read_mostly = {
104408+static struct inet6_protocol ip6gre_protocol = {
104409 .handler = ip6gre_rcv,
104410 .err_handler = ip6gre_err,
104411 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
104412@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
104413 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
104414 };
104415
104416-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104417+static struct rtnl_link_ops ip6gre_link_ops = {
104418 .kind = "ip6gre",
104419 .maxtype = IFLA_GRE_MAX,
104420 .policy = ip6gre_policy,
104421@@ -1665,7 +1665,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104422 .get_link_net = ip6_tnl_get_link_net,
104423 };
104424
104425-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
104426+static struct rtnl_link_ops ip6gre_tap_ops = {
104427 .kind = "ip6gretap",
104428 .maxtype = IFLA_GRE_MAX,
104429 .policy = ip6gre_policy,
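
/*
 * The __read_mostly removals on these rtnl_link_ops (and on the inet6
 * protocol structures nearby) are constification aids: __read_mostly
 * places the object in a dedicated, permanently writable section, which
 * prevents the PaX constify plugin from moving ops structures into
 * read-only memory after init. Dropping the attribute lets the structs
 * be write-protected. Generic sketch:
 */
struct example_ops {
	const char *kind;
	int (*setup)(void);
};

/* no __read_mostly: eligible for constification / post-init read-only */
static struct example_ops example_link_ops = {
	.kind = "example",
};
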
104430diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
104431index ddd94ec..b7cfefb 100644
104432--- a/net/ipv6/ip6_tunnel.c
104433+++ b/net/ipv6/ip6_tunnel.c
104434@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104435
104436 static int ip6_tnl_dev_init(struct net_device *dev);
104437 static void ip6_tnl_dev_setup(struct net_device *dev);
104438-static struct rtnl_link_ops ip6_link_ops __read_mostly;
104439+static struct rtnl_link_ops ip6_link_ops;
104440
104441 static int ip6_tnl_net_id __read_mostly;
104442 struct ip6_tnl_net {
104443@@ -1780,7 +1780,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
104444 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
104445 };
104446
104447-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
104448+static struct rtnl_link_ops ip6_link_ops = {
104449 .kind = "ip6tnl",
104450 .maxtype = IFLA_IPTUN_MAX,
104451 .policy = ip6_tnl_policy,
104452diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
104453index 5fb9e21..92bf04b 100644
104454--- a/net/ipv6/ip6_vti.c
104455+++ b/net/ipv6/ip6_vti.c
104456@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104457
104458 static int vti6_dev_init(struct net_device *dev);
104459 static void vti6_dev_setup(struct net_device *dev);
104460-static struct rtnl_link_ops vti6_link_ops __read_mostly;
104461+static struct rtnl_link_ops vti6_link_ops;
104462
104463 static int vti6_net_id __read_mostly;
104464 struct vti6_net {
104465@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
104466 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
104467 };
104468
104469-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
104470+static struct rtnl_link_ops vti6_link_ops = {
104471 .kind = "vti6",
104472 .maxtype = IFLA_VTI_MAX,
104473 .policy = vti6_policy,
104474diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
104475index 8d766d9..dcdfea7 100644
104476--- a/net/ipv6/ipv6_sockglue.c
104477+++ b/net/ipv6/ipv6_sockglue.c
104478@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
104479 if (sk->sk_type != SOCK_STREAM)
104480 return -ENOPROTOOPT;
104481
104482- msg.msg_control = optval;
104483+ msg.msg_control = (void __force_kernel *)optval;
104484 msg.msg_controllen = len;
104485 msg.msg_flags = flags;
104486
104487diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
104488index bb00c6f..16c90d7 100644
104489--- a/net/ipv6/netfilter/ip6_tables.c
104490+++ b/net/ipv6/netfilter/ip6_tables.c
104491@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
104492 #endif
104493
104494 static int get_info(struct net *net, void __user *user,
104495- const int *len, int compat)
104496+ int len, int compat)
104497 {
104498 char name[XT_TABLE_MAXNAMELEN];
104499 struct xt_table *t;
104500 int ret;
104501
104502- if (*len != sizeof(struct ip6t_getinfo)) {
104503- duprintf("length %u != %zu\n", *len,
104504+ if (len != sizeof(struct ip6t_getinfo)) {
104505+ duprintf("length %u != %zu\n", len,
104506 sizeof(struct ip6t_getinfo));
104507 return -EINVAL;
104508 }
104509@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
104510 info.size = private->size;
104511 strcpy(info.name, name);
104512
104513- if (copy_to_user(user, &info, *len) != 0)
104514+ if (copy_to_user(user, &info, len) != 0)
104515 ret = -EFAULT;
104516 else
104517 ret = 0;
104518@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104519
104520 switch (cmd) {
104521 case IP6T_SO_GET_INFO:
104522- ret = get_info(sock_net(sk), user, len, 1);
104523+ ret = get_info(sock_net(sk), user, *len, 1);
104524 break;
104525 case IP6T_SO_GET_ENTRIES:
104526 ret = compat_get_entries(sock_net(sk), user, len);
104527@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104528
104529 switch (cmd) {
104530 case IP6T_SO_GET_INFO:
104531- ret = get_info(sock_net(sk), user, len, 0);
104532+ ret = get_info(sock_net(sk), user, *len, 0);
104533 break;
104534
104535 case IP6T_SO_GET_ENTRIES:
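
/*
 * get_info() now takes len by value: the callers dereference *len once
 * and pass the snapshot down, so the length that is validated and the
 * length later handed to copy_to_user() are guaranteed to be the same
 * value, closing a userspace double-fetch window. Sketch of the shape
 * (table fields elided):
 */
static int get_info_snapshot(void __user *user, int len)
{
	struct ip6t_getinfo info;

	if (len != sizeof(info))
		return -EINVAL;		/* validated on the snapshot */
	memset(&info, 0, sizeof(info));
	/* ... fill in name/size/valid_hooks here ... */
	if (copy_to_user(user, &info, len))	/* same snapshot reused */
		return -EFAULT;
	return 0;
}
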
104536diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
104537index 6f187c8..34b367f 100644
104538--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
104539+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
104540@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
104541
104542 static int nf_ct_frag6_sysctl_register(struct net *net)
104543 {
104544- struct ctl_table *table;
104545+ ctl_table_no_const *table = NULL;
104546 struct ctl_table_header *hdr;
104547
104548- table = nf_ct_frag6_sysctl_table;
104549 if (!net_eq(net, &init_net)) {
104550- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
104551+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
104552 GFP_KERNEL);
104553 if (table == NULL)
104554 goto err_alloc;
104555@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104556 table[2].data = &net->nf_frag.frags.high_thresh;
104557 table[2].extra1 = &net->nf_frag.frags.low_thresh;
104558 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
104559- }
104560-
104561- hdr = register_net_sysctl(net, "net/netfilter", table);
104562+ hdr = register_net_sysctl(net, "net/netfilter", table);
104563+ } else
104564+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
104565 if (hdr == NULL)
104566 goto err_reg;
104567
104568@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104569 return 0;
104570
104571 err_reg:
104572- if (!net_eq(net, &init_net))
104573- kfree(table);
104574+ kfree(table);
104575 err_alloc:
104576 return -ENOMEM;
104577 }
104578diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
104579index a2dfff6..1e52e6d 100644
104580--- a/net/ipv6/ping.c
104581+++ b/net/ipv6/ping.c
104582@@ -241,6 +241,24 @@ static struct pernet_operations ping_v6_net_ops = {
104583 };
104584 #endif
104585
104586+static struct pingv6_ops real_pingv6_ops = {
104587+ .ipv6_recv_error = ipv6_recv_error,
104588+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
104589+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
104590+ .icmpv6_err_convert = icmpv6_err_convert,
104591+ .ipv6_icmp_error = ipv6_icmp_error,
104592+ .ipv6_chk_addr = ipv6_chk_addr,
104593+};
104594+
104595+static struct pingv6_ops dummy_pingv6_ops = {
104596+ .ipv6_recv_error = dummy_ipv6_recv_error,
104597+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
104598+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
104599+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
104600+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
104601+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
104602+};
104603+
104604 int __init pingv6_init(void)
104605 {
104606 #ifdef CONFIG_PROC_FS
104607@@ -248,13 +266,7 @@ int __init pingv6_init(void)
104608 if (ret)
104609 return ret;
104610 #endif
104611- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
104612- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
104613- pingv6_ops.ip6_datagram_recv_specific_ctl =
104614- ip6_datagram_recv_specific_ctl;
104615- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
104616- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
104617- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
104618+ pingv6_ops = &real_pingv6_ops;
104619 return inet6_register_protosw(&pingv6_protosw);
104620 }
104621
104622@@ -263,14 +275,9 @@ int __init pingv6_init(void)
104623 */
104624 void pingv6_exit(void)
104625 {
104626- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
104627- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
104628- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
104629- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
104630- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
104631- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
104632 #ifdef CONFIG_PROC_FS
104633 unregister_pernet_subsys(&ping_v6_net_ops);
104634 #endif
104635+ pingv6_ops = &dummy_pingv6_ops;
104636 inet6_unregister_protosw(&pingv6_protosw);
104637 }
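
/*
 * pingv6_init()/pingv6_exit() above stop poking six function pointers
 * into a writable global one at a time; instead, two complete ops tables
 * exist for the module's whole lifetime and a single pointer is swapped.
 * Besides switching implementations in one store, this lets the tables
 * themselves be constified. The pattern in miniature (all names here are
 * illustrative):
 */
struct example_ops {
	int (*recv_error)(void);
};

static int real_recv_error(void)  { return 0; }
static int dummy_recv_error(void) { return -EAFNOSUPPORT; }

static const struct example_ops real_ops  = { .recv_error = real_recv_error };
static const struct example_ops dummy_ops = { .recv_error = dummy_recv_error };

static const struct example_ops *active_ops = &dummy_ops;

static void example_init(void) { active_ops = &real_ops;  }
static void example_exit(void) { active_ops = &dummy_ops; }
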
104638diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
104639index 679253d0..70b653c 100644
104640--- a/net/ipv6/proc.c
104641+++ b/net/ipv6/proc.c
104642@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
104643 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
104644 goto proc_snmp6_fail;
104645
104646- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
104647+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
104648 if (!net->mib.proc_net_devsnmp6)
104649 goto proc_dev_snmp6_fail;
104650 return 0;
104651diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
104652index dae7f1a..783b20d 100644
104653--- a/net/ipv6/raw.c
104654+++ b/net/ipv6/raw.c
104655@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
104656 {
104657 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
104658 skb_checksum_complete(skb)) {
104659- atomic_inc(&sk->sk_drops);
104660+ atomic_inc_unchecked(&sk->sk_drops);
104661 kfree_skb(skb);
104662 return NET_RX_DROP;
104663 }
104664@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104665 struct raw6_sock *rp = raw6_sk(sk);
104666
104667 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
104668- atomic_inc(&sk->sk_drops);
104669+ atomic_inc_unchecked(&sk->sk_drops);
104670 kfree_skb(skb);
104671 return NET_RX_DROP;
104672 }
104673@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104674
104675 if (inet->hdrincl) {
104676 if (skb_checksum_complete(skb)) {
104677- atomic_inc(&sk->sk_drops);
104678+ atomic_inc_unchecked(&sk->sk_drops);
104679 kfree_skb(skb);
104680 return NET_RX_DROP;
104681 }
104682@@ -609,7 +609,7 @@ out:
104683 return err;
104684 }
104685
104686-static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
104687+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, unsigned int length,
104688 struct flowi6 *fl6, struct dst_entry **dstp,
104689 unsigned int flags)
104690 {
104691@@ -915,12 +915,15 @@ do_confirm:
104692 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
104693 char __user *optval, int optlen)
104694 {
104695+ struct icmp6_filter filter;
104696+
104697 switch (optname) {
104698 case ICMPV6_FILTER:
104699 if (optlen > sizeof(struct icmp6_filter))
104700 optlen = sizeof(struct icmp6_filter);
104701- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
104702+ if (copy_from_user(&filter, optval, optlen))
104703 return -EFAULT;
104704+ raw6_sk(sk)->filter = filter;
104705 return 0;
104706 default:
104707 return -ENOPROTOOPT;
104708@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104709 char __user *optval, int __user *optlen)
104710 {
104711 int len;
104712+ struct icmp6_filter filter;
104713
104714 switch (optname) {
104715 case ICMPV6_FILTER:
104716@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104717 len = sizeof(struct icmp6_filter);
104718 if (put_user(len, optlen))
104719 return -EFAULT;
104720- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104721+ filter = raw6_sk(sk)->filter;
104722+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104723 return -EFAULT;
104724 return 0;
104725 default:
104726diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104727index d7d70e6..bd5e9fc 100644
104728--- a/net/ipv6/reassembly.c
104729+++ b/net/ipv6/reassembly.c
104730@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104731
104732 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104733 {
104734- struct ctl_table *table;
104735+ ctl_table_no_const *table = NULL;
104736 struct ctl_table_header *hdr;
104737
104738- table = ip6_frags_ns_ctl_table;
104739 if (!net_eq(net, &init_net)) {
104740- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104741+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104742 if (table == NULL)
104743 goto err_alloc;
104744
104745@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104746 /* Don't export sysctls to unprivileged users */
104747 if (net->user_ns != &init_user_ns)
104748 table[0].procname = NULL;
104749- }
104750+ hdr = register_net_sysctl(net, "net/ipv6", table);
104751+ } else
104752+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104753
104754- hdr = register_net_sysctl(net, "net/ipv6", table);
104755 if (hdr == NULL)
104756 goto err_reg;
104757
104758@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104759 return 0;
104760
104761 err_reg:
104762- if (!net_eq(net, &init_net))
104763- kfree(table);
104764+ kfree(table);
104765 err_alloc:
104766 return -ENOMEM;
104767 }
104768diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104769index 4688bd4..584453d 100644
104770--- a/net/ipv6/route.c
104771+++ b/net/ipv6/route.c
104772@@ -3029,7 +3029,7 @@ struct ctl_table ipv6_route_table_template[] = {
104773
104774 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104775 {
104776- struct ctl_table *table;
104777+ ctl_table_no_const *table;
104778
104779 table = kmemdup(ipv6_route_table_template,
104780 sizeof(ipv6_route_table_template),
104781diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104782index e4cbd57..02b1aaa 100644
104783--- a/net/ipv6/sit.c
104784+++ b/net/ipv6/sit.c
104785@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104786 static void ipip6_dev_free(struct net_device *dev);
104787 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104788 __be32 *v4dst);
104789-static struct rtnl_link_ops sit_link_ops __read_mostly;
104790+static struct rtnl_link_ops sit_link_ops;
104791
104792 static int sit_net_id __read_mostly;
104793 struct sit_net {
104794@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104795 unregister_netdevice_queue(dev, head);
104796 }
104797
104798-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104799+static struct rtnl_link_ops sit_link_ops = {
104800 .kind = "sit",
104801 .maxtype = IFLA_IPTUN_MAX,
104802 .policy = ipip6_policy,
104803diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104804index c5c10fa..2577d51 100644
104805--- a/net/ipv6/sysctl_net_ipv6.c
104806+++ b/net/ipv6/sysctl_net_ipv6.c
104807@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
104808
104809 static int __net_init ipv6_sysctl_net_init(struct net *net)
104810 {
104811- struct ctl_table *ipv6_table;
104812+ ctl_table_no_const *ipv6_table;
104813 struct ctl_table *ipv6_route_table;
104814 struct ctl_table *ipv6_icmp_table;
104815 int err;
104816diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104817index 1f5e622..8387d90 100644
104818--- a/net/ipv6/tcp_ipv6.c
104819+++ b/net/ipv6/tcp_ipv6.c
104820@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104821 }
104822 }
104823
104824+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104825+extern int grsec_enable_blackhole;
104826+#endif
104827+
104828 static void tcp_v6_hash(struct sock *sk)
104829 {
104830 if (sk->sk_state != TCP_CLOSE) {
104831@@ -1345,6 +1349,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104832 return 0;
104833
104834 reset:
104835+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104836+ if (!grsec_enable_blackhole)
104837+#endif
104838 tcp_v6_send_reset(sk, skb);
104839 discard:
104840 if (opt_skb)
104841@@ -1454,12 +1461,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104842
104843 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104844 inet6_iif(skb));
104845- if (!sk)
104846+ if (!sk) {
104847+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104848+ ret = 1;
104849+#endif
104850 goto no_tcp_socket;
104851+ }
104852
104853 process:
104854- if (sk->sk_state == TCP_TIME_WAIT)
104855+ if (sk->sk_state == TCP_TIME_WAIT) {
104856+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104857+ ret = 2;
104858+#endif
104859 goto do_time_wait;
104860+ }
104861
104862 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104863 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104864@@ -1510,6 +1525,10 @@ csum_error:
104865 bad_packet:
104866 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104867 } else {
104868+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104869+ if (!grsec_enable_blackhole || (ret == 1 &&
104870+ (skb->dev->flags & IFF_LOOPBACK)))
104871+#endif
104872 tcp_v6_send_reset(NULL, skb);
104873 }
104874
104875diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104876index d048d46..bf141c3 100644
104877--- a/net/ipv6/udp.c
104878+++ b/net/ipv6/udp.c
104879@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104880 udp_ipv6_hash_secret + net_hash_mix(net));
104881 }
104882
104883+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104884+extern int grsec_enable_blackhole;
104885+#endif
104886+
104887 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104888 {
104889 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104890@@ -448,7 +452,7 @@ try_again:
104891 if (unlikely(err)) {
104892 trace_kfree_skb(skb, udpv6_recvmsg);
104893 if (!peeked) {
104894- atomic_inc(&sk->sk_drops);
104895+ atomic_inc_unchecked(&sk->sk_drops);
104896 if (is_udp4)
104897 UDP_INC_STATS_USER(sock_net(sk),
104898 UDP_MIB_INERRORS,
104899@@ -714,7 +718,7 @@ csum_error:
104900 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104901 drop:
104902 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104903- atomic_inc(&sk->sk_drops);
104904+ atomic_inc_unchecked(&sk->sk_drops);
104905 kfree_skb(skb);
104906 return -1;
104907 }
104908@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104909 if (likely(skb1 == NULL))
104910 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104911 if (!skb1) {
104912- atomic_inc(&sk->sk_drops);
104913+ atomic_inc_unchecked(&sk->sk_drops);
104914 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104915 IS_UDPLITE(sk));
104916 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104917@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104918 goto csum_error;
104919
104920 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104921+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104922+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104923+#endif
104924 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104925
104926 kfree_skb(skb);
104927diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104928index 8d2d01b4..313511e 100644
104929--- a/net/ipv6/xfrm6_policy.c
104930+++ b/net/ipv6/xfrm6_policy.c
104931@@ -224,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104932 }
104933 }
104934
104935-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104936+static int xfrm6_garbage_collect(struct dst_ops *ops)
104937 {
104938 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104939
104940- xfrm6_policy_afinfo.garbage_collect(net);
104941+ xfrm_garbage_collect_deferred(net);
104942 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104943 }
104944
104945@@ -341,19 +341,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104946
104947 static int __net_init xfrm6_net_init(struct net *net)
104948 {
104949- struct ctl_table *table;
104950+ ctl_table_no_const *table = NULL;
104951 struct ctl_table_header *hdr;
104952
104953- table = xfrm6_policy_table;
104954 if (!net_eq(net, &init_net)) {
104955- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104956+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104957 if (!table)
104958 goto err_alloc;
104959
104960 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104961- }
104962+ hdr = register_net_sysctl(net, "net/ipv6", table);
104963+ } else
104964+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104965
104966- hdr = register_net_sysctl(net, "net/ipv6", table);
104967 if (!hdr)
104968 goto err_reg;
104969
104970@@ -361,8 +361,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104971 return 0;
104972
104973 err_reg:
104974- if (!net_eq(net, &init_net))
104975- kfree(table);
104976+ kfree(table);
104977 err_alloc:
104978 return -ENOMEM;
104979 }
104980diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104981index c1d247e..9e5949d 100644
104982--- a/net/ipx/ipx_proc.c
104983+++ b/net/ipx/ipx_proc.c
104984@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104985 struct proc_dir_entry *p;
104986 int rc = -ENOMEM;
104987
104988- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104989+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104990
104991 if (!ipx_proc_dir)
104992 goto out;
104993diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104994index 683346d..cb0e12d 100644
104995--- a/net/irda/ircomm/ircomm_tty.c
104996+++ b/net/irda/ircomm/ircomm_tty.c
104997@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104998 add_wait_queue(&port->open_wait, &wait);
104999
105000 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
105001- __FILE__, __LINE__, tty->driver->name, port->count);
105002+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105003
105004 spin_lock_irqsave(&port->lock, flags);
105005- port->count--;
105006+ atomic_dec(&port->count);
105007 port->blocked_open++;
105008 spin_unlock_irqrestore(&port->lock, flags);
105009
105010@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105011 }
105012
105013 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
105014- __FILE__, __LINE__, tty->driver->name, port->count);
105015+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105016
105017 schedule();
105018 }
105019@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105020
105021 spin_lock_irqsave(&port->lock, flags);
105022 if (!tty_hung_up_p(filp))
105023- port->count++;
105024+ atomic_inc(&port->count);
105025 port->blocked_open--;
105026 spin_unlock_irqrestore(&port->lock, flags);
105027
105028 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
105029- __FILE__, __LINE__, tty->driver->name, port->count);
105030+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105031
105032 if (!retval)
105033 port->flags |= ASYNC_NORMAL_ACTIVE;
105034@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
105035
105036 /* ++ is not atomic, so this should be protected - Jean II */
105037 spin_lock_irqsave(&self->port.lock, flags);
105038- self->port.count++;
105039+ atomic_inc(&self->port.count);
105040 spin_unlock_irqrestore(&self->port.lock, flags);
105041 tty_port_tty_set(&self->port, tty);
105042
105043 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
105044- self->line, self->port.count);
105045+ self->line, atomic_read(&self->port.count));
105046
105047 /* Not really used by us, but lets do it anyway */
105048 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
105049@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
105050 tty_kref_put(port->tty);
105051 }
105052 port->tty = NULL;
105053- port->count = 0;
105054+ atomic_set(&port->count, 0);
105055 spin_unlock_irqrestore(&port->lock, flags);
105056
105057 wake_up_interruptible(&port->open_wait);
105058@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
105059 seq_putc(m, '\n');
105060
105061 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
105062- seq_printf(m, "Open count: %d\n", self->port.count);
105063+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
105064 seq_printf(m, "Max data size: %d\n", self->max_data_size);
105065 seq_printf(m, "Max header size: %d\n", self->max_header_size);
105066
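
/*
 * ircomm_tty's port->count becomes an atomic_t in this tree: the counter
 * is modified from the open, hangup and block-until-ready paths, and the
 * atomic accessors keep it coherent without widening any lock coverage
 * just for the counter (the pr_debug() reads then need no lock at all).
 * The accessor mapping, condensed into an illustrative helper:
 */
static void port_count_ops(atomic_t *count)
{
	atomic_inc(count);		/* was: (*count)++ */
	atomic_dec(count);		/* was: (*count)-- */
	atomic_set(count, 0);		/* was: *count = 0  */
	pr_debug("count=%d\n", atomic_read(count));
}
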
105067diff --git a/net/irda/irproc.c b/net/irda/irproc.c
105068index b9ac598..f88cc56 100644
105069--- a/net/irda/irproc.c
105070+++ b/net/irda/irproc.c
105071@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
105072 {
105073 int i;
105074
105075- proc_irda = proc_mkdir("irda", init_net.proc_net);
105076+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
105077 if (proc_irda == NULL)
105078 return;
105079
105080diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
105081index 53d9311..cbaf99f 100644
105082--- a/net/iucv/af_iucv.c
105083+++ b/net/iucv/af_iucv.c
105084@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
105085 {
105086 char name[12];
105087
105088- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
105089+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105090 while (__iucv_get_sock_by_name(name)) {
105091 sprintf(name, "%08x",
105092- atomic_inc_return(&iucv_sk_list.autobind_name));
105093+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105094 }
105095 memcpy(iucv->src_name, name, 8);
105096 }
105097diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
105098index 2a6a1fd..6c112b0 100644
105099--- a/net/iucv/iucv.c
105100+++ b/net/iucv/iucv.c
105101@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
105102 return NOTIFY_OK;
105103 }
105104
105105-static struct notifier_block __refdata iucv_cpu_notifier = {
105106+static struct notifier_block iucv_cpu_notifier = {
105107 .notifier_call = iucv_cpu_notify,
105108 };
105109
105110diff --git a/net/key/af_key.c b/net/key/af_key.c
105111index f8ac939..1e189bf 100644
105112--- a/net/key/af_key.c
105113+++ b/net/key/af_key.c
105114@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
105115 static u32 get_acqseq(void)
105116 {
105117 u32 res;
105118- static atomic_t acqseq;
105119+ static atomic_unchecked_t acqseq;
105120
105121 do {
105122- res = atomic_inc_return(&acqseq);
105123+ res = atomic_inc_return_unchecked(&acqseq);
105124 } while (!res);
105125 return res;
105126 }
105127diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
105128index 781b3a2..73a7434 100644
105129--- a/net/l2tp/l2tp_eth.c
105130+++ b/net/l2tp/l2tp_eth.c
105131@@ -42,12 +42,12 @@ struct l2tp_eth {
105132 struct sock *tunnel_sock;
105133 struct l2tp_session *session;
105134 struct list_head list;
105135- atomic_long_t tx_bytes;
105136- atomic_long_t tx_packets;
105137- atomic_long_t tx_dropped;
105138- atomic_long_t rx_bytes;
105139- atomic_long_t rx_packets;
105140- atomic_long_t rx_errors;
105141+ atomic_long_unchecked_t tx_bytes;
105142+ atomic_long_unchecked_t tx_packets;
105143+ atomic_long_unchecked_t tx_dropped;
105144+ atomic_long_unchecked_t rx_bytes;
105145+ atomic_long_unchecked_t rx_packets;
105146+ atomic_long_unchecked_t rx_errors;
105147 };
105148
105149 /* via l2tp_session_priv() */
105150@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
105151 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
105152
105153 if (likely(ret == NET_XMIT_SUCCESS)) {
105154- atomic_long_add(len, &priv->tx_bytes);
105155- atomic_long_inc(&priv->tx_packets);
105156+ atomic_long_add_unchecked(len, &priv->tx_bytes);
105157+ atomic_long_inc_unchecked(&priv->tx_packets);
105158 } else {
105159- atomic_long_inc(&priv->tx_dropped);
105160+ atomic_long_inc_unchecked(&priv->tx_dropped);
105161 }
105162 return NETDEV_TX_OK;
105163 }
105164@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
105165 {
105166 struct l2tp_eth *priv = netdev_priv(dev);
105167
105168- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
105169- stats->tx_packets = atomic_long_read(&priv->tx_packets);
105170- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
105171- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
105172- stats->rx_packets = atomic_long_read(&priv->rx_packets);
105173- stats->rx_errors = atomic_long_read(&priv->rx_errors);
105174+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
105175+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
105176+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
105177+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
105178+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
105179+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
105180 return stats;
105181 }
105182
105183@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
105184 nf_reset(skb);
105185
105186 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
105187- atomic_long_inc(&priv->rx_packets);
105188- atomic_long_add(data_len, &priv->rx_bytes);
105189+ atomic_long_inc_unchecked(&priv->rx_packets);
105190+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
105191 } else {
105192- atomic_long_inc(&priv->rx_errors);
105193+ atomic_long_inc_unchecked(&priv->rx_errors);
105194 }
105195 return;
105196
105197 error:
105198- atomic_long_inc(&priv->rx_errors);
105199+ atomic_long_inc_unchecked(&priv->rx_errors);
105200 kfree_skb(skb);
105201 }
105202
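The same opt-out runs through this whole section -- these l2tp counters, IPVS's in_pkts and conn_flags below, nfnetlink_log's global_seq -- because wraparound in traffic statistics is harmless and trapping on it would only produce false positives. A runnable userspace model of the lock-free stats pattern the driver uses, with C11 atomics standing in for the kernel's atomic_long ops:

#include <stdatomic.h>
#include <stdio.h>

struct eth_stats {
	atomic_ulong tx_bytes;
	atomic_ulong tx_packets;
};

static void record_xmit(struct eth_stats *s, unsigned long len)
{
	/* relaxed ordering: these are statistics, not synchronization */
	atomic_fetch_add_explicit(&s->tx_bytes, len, memory_order_relaxed);
	atomic_fetch_add_explicit(&s->tx_packets, 1, memory_order_relaxed);
}

int main(void)
{
	struct eth_stats s = { 0 };

	record_xmit(&s, 1500);
	printf("%lu bytes, %lu packets\n",
	       (unsigned long)atomic_load(&s.tx_bytes),
	       (unsigned long)atomic_load(&s.tx_packets));
	return 0;
}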
105203diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
105204index 05dfc8aa..df6cfd7 100644
105205--- a/net/l2tp/l2tp_ip.c
105206+++ b/net/l2tp/l2tp_ip.c
105207@@ -608,7 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
105208 .ops = &l2tp_ip_ops,
105209 };
105210
105211-static struct net_protocol l2tp_ip_protocol __read_mostly = {
105212+static const struct net_protocol l2tp_ip_protocol = {
105213 .handler = l2tp_ip_recv,
105214 .netns_ok = 1,
105215 };
105216diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
105217index 8611f1b..bc60a2d 100644
105218--- a/net/l2tp/l2tp_ip6.c
105219+++ b/net/l2tp/l2tp_ip6.c
105220@@ -757,7 +757,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
105221 .ops = &l2tp_ip6_ops,
105222 };
105223
105224-static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
105225+static const struct inet6_protocol l2tp_ip6_protocol = {
105226 .handler = l2tp_ip6_recv,
105227 };
105228
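Both l2tp protocol registrations drop __read_mostly in favour of const, so the ops tables land in .rodata (and grsecurity's constification enforces that they are never written after init); a kernel write primitive can then no longer retarget .handler. The shape of the change reduced to a standalone sketch, with illustrative names:

struct pkt_protocol {
	int (*handler)(void *pkt);	/* fixed at build time */
};

static int demo_recv(void *pkt)
{
	return 0;
}

/* const: the function pointer lives in read-only memory */
static const struct pkt_protocol demo_protocol = {
	.handler = demo_recv,
};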
105229diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
105230index 1a3c7e0..80f8b0c 100644
105231--- a/net/llc/llc_proc.c
105232+++ b/net/llc/llc_proc.c
105233@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
105234 int rc = -ENOMEM;
105235 struct proc_dir_entry *p;
105236
105237- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
105238+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
105239 if (!llc_proc_dir)
105240 goto out;
105241
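proc_mkdir_restrict() is not an upstream API; it is introduced elsewhere in this patch. Judging from this call site, it is a drop-in replacement for proc_mkdir() that applies grsecurity's restricted /proc permissions (the GRKERNSEC_PROC family of options) to the new directory, hiding /proc/net/llc from unprivileged users. Assumed prototype, mirroring proc_mkdir():

#include <linux/proc_fs.h>

/* assumption: declared by this patch; same signature as proc_mkdir(),
 * but the new directory gets restricted mode/group ownership */
struct proc_dir_entry *proc_mkdir_restrict(const char *name,
					   struct proc_dir_entry *parent);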
105242diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
105243index dd4ff36..3462997 100644
105244--- a/net/mac80211/cfg.c
105245+++ b/net/mac80211/cfg.c
105246@@ -581,7 +581,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
105247 ret = ieee80211_vif_use_channel(sdata, chandef,
105248 IEEE80211_CHANCTX_EXCLUSIVE);
105249 }
105250- } else if (local->open_count == local->monitors) {
105251+ } else if (local_read(&local->open_count) == local->monitors) {
105252 local->_oper_chandef = *chandef;
105253 ieee80211_hw_config(local, 0);
105254 }
105255@@ -3468,7 +3468,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
105256 else
105257 local->probe_req_reg--;
105258
105259- if (!local->open_count)
105260+ if (!local_read(&local->open_count))
105261 break;
105262
105263 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
105264@@ -3603,8 +3603,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
105265 if (chanctx_conf) {
105266 *chandef = sdata->vif.bss_conf.chandef;
105267 ret = 0;
105268- } else if (local->open_count > 0 &&
105269- local->open_count == local->monitors &&
105270+ } else if (local_read(&local->open_count) > 0 &&
105271+ local_read(&local->open_count) == local->monitors &&
105272 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
105273 if (local->use_chanctx)
105274 *chandef = local->monitor_chandef;
105275diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
105276index 8d53d65..a4ac794 100644
105277--- a/net/mac80211/ieee80211_i.h
105278+++ b/net/mac80211/ieee80211_i.h
105279@@ -29,6 +29,7 @@
105280 #include <net/ieee80211_radiotap.h>
105281 #include <net/cfg80211.h>
105282 #include <net/mac80211.h>
105283+#include <asm/local.h>
105284 #include "key.h"
105285 #include "sta_info.h"
105286 #include "debug.h"
105287@@ -1126,7 +1127,7 @@ struct ieee80211_local {
105288 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
105289 spinlock_t queue_stop_reason_lock;
105290
105291- int open_count;
105292+ local_t open_count;
105293 int monitors, cooked_mntrs;
105294 /* number of interfaces with corresponding FIF_ flags */
105295 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
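From here on, every access to open_count goes through local_read()/local_inc()/local_dec(). local_t (asm/local.h) wraps a long whose updates use arch-atomic read-modify-write helpers; the patch does not state its rationale, but the practical effect is that the counter can no longer be changed by a bare ++/-- and its accesses become visible to grsecurity's counter instrumentation. Minimal usage model of the API the following hunks adopt:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static void iface_open(void)  { local_inc(&open_count); }  /* was open_count++ */
static void iface_close(void) { local_dec(&open_count); }  /* was open_count-- */
static long iface_opens(void) { return local_read(&open_count); }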
105296diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
105297index 81a2751..c06a026 100644
105298--- a/net/mac80211/iface.c
105299+++ b/net/mac80211/iface.c
105300@@ -544,7 +544,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105301 break;
105302 }
105303
105304- if (local->open_count == 0) {
105305+ if (local_read(&local->open_count) == 0) {
105306 res = drv_start(local);
105307 if (res)
105308 goto err_del_bss;
105309@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105310 res = drv_add_interface(local, sdata);
105311 if (res)
105312 goto err_stop;
105313- } else if (local->monitors == 0 && local->open_count == 0) {
105314+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
105315 res = ieee80211_add_virtual_monitor(local);
105316 if (res)
105317 goto err_stop;
105318@@ -701,7 +701,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105319 atomic_inc(&local->iff_promiscs);
105320
105321 if (coming_up)
105322- local->open_count++;
105323+ local_inc(&local->open_count);
105324
105325 if (hw_reconf_flags)
105326 ieee80211_hw_config(local, hw_reconf_flags);
105327@@ -739,7 +739,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105328 err_del_interface:
105329 drv_remove_interface(local, sdata);
105330 err_stop:
105331- if (!local->open_count)
105332+ if (!local_read(&local->open_count))
105333 drv_stop(local);
105334 err_del_bss:
105335 sdata->bss = NULL;
105336@@ -907,7 +907,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105337 }
105338
105339 if (going_down)
105340- local->open_count--;
105341+ local_dec(&local->open_count);
105342
105343 switch (sdata->vif.type) {
105344 case NL80211_IFTYPE_AP_VLAN:
105345@@ -969,7 +969,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105346 }
105347 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
105348
105349- if (local->open_count == 0)
105350+ if (local_read(&local->open_count) == 0)
105351 ieee80211_clear_tx_pending(local);
105352
105353 /*
105354@@ -1012,7 +1012,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105355 if (cancel_scan)
105356 flush_delayed_work(&local->scan_work);
105357
105358- if (local->open_count == 0) {
105359+ if (local_read(&local->open_count) == 0) {
105360 ieee80211_stop_device(local);
105361
105362 /* no reconfiguring after stop! */
105363@@ -1023,7 +1023,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105364 ieee80211_configure_filter(local);
105365 ieee80211_hw_config(local, hw_reconf_flags);
105366
105367- if (local->monitors == local->open_count)
105368+ if (local->monitors == local_read(&local->open_count))
105369 ieee80211_add_virtual_monitor(local);
105370 }
105371
105372diff --git a/net/mac80211/main.c b/net/mac80211/main.c
105373index 5e09d35..e2fdbe2 100644
105374--- a/net/mac80211/main.c
105375+++ b/net/mac80211/main.c
105376@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
105377 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
105378 IEEE80211_CONF_CHANGE_POWER);
105379
105380- if (changed && local->open_count) {
105381+ if (changed && local_read(&local->open_count)) {
105382 ret = drv_config(local, changed);
105383 /*
105384 * Goal:
105385diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
105386index ca405b6..6cc8bee 100644
105387--- a/net/mac80211/pm.c
105388+++ b/net/mac80211/pm.c
105389@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105390 struct ieee80211_sub_if_data *sdata;
105391 struct sta_info *sta;
105392
105393- if (!local->open_count)
105394+ if (!local_read(&local->open_count))
105395 goto suspend;
105396
105397 ieee80211_scan_cancel(local);
105398@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105399 cancel_work_sync(&local->dynamic_ps_enable_work);
105400 del_timer_sync(&local->dynamic_ps_timer);
105401
105402- local->wowlan = wowlan && local->open_count;
105403+ local->wowlan = wowlan && local_read(&local->open_count);
105404 if (local->wowlan) {
105405 int err = drv_suspend(local, wowlan);
105406 if (err < 0) {
105407@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105408 WARN_ON(!list_empty(&local->chanctx_list));
105409
105410 /* stop hardware - this must stop RX */
105411- if (local->open_count)
105412+ if (local_read(&local->open_count))
105413 ieee80211_stop_device(local);
105414
105415 suspend:
105416diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
105417index d53355b..21f583a 100644
105418--- a/net/mac80211/rate.c
105419+++ b/net/mac80211/rate.c
105420@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
105421
105422 ASSERT_RTNL();
105423
105424- if (local->open_count)
105425+ if (local_read(&local->open_count))
105426 return -EBUSY;
105427
105428 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
105429diff --git a/net/mac80211/util.c b/net/mac80211/util.c
105430index 747bdcf..eb2b981 100644
105431--- a/net/mac80211/util.c
105432+++ b/net/mac80211/util.c
105433@@ -1741,7 +1741,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105434 bool sched_scan_stopped = false;
105435
105436 /* nothing to do if HW shouldn't run */
105437- if (!local->open_count)
105438+ if (!local_read(&local->open_count))
105439 goto wake_up;
105440
105441 #ifdef CONFIG_PM
105442@@ -1993,7 +1993,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105443 local->in_reconfig = false;
105444 barrier();
105445
105446- if (local->monitors == local->open_count && local->monitors > 0)
105447+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
105448 ieee80211_add_virtual_monitor(local);
105449
105450 /*
105451@@ -2048,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105452 * If this is for hw restart things are still running.
105453 * We may want to change that later, however.
105454 */
105455- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
105456+ if (local_read(&local->open_count) && (!local->suspended || reconfig_due_to_wowlan))
105457 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
105458
105459 if (!local->suspended)
105460@@ -2072,7 +2072,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105461 flush_delayed_work(&local->scan_work);
105462 }
105463
105464- if (local->open_count && !reconfig_due_to_wowlan)
105465+ if (local_read(&local->open_count) && !reconfig_due_to_wowlan)
105466 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
105467
105468 list_for_each_entry(sdata, &local->interfaces, list) {
105469diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
105470index b02660f..c0f791c 100644
105471--- a/net/netfilter/Kconfig
105472+++ b/net/netfilter/Kconfig
105473@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
105474
105475 To compile it as a module, choose M here. If unsure, say N.
105476
105477+config NETFILTER_XT_MATCH_GRADM
105478+ tristate '"gradm" match support'
105479+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
105480+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
105481+ ---help---
105482+ The gradm match allows matching on whether the grsecurity RBAC
105483+ system is enabled. It is useful when iptables rules are applied
105484+ early during boot to block connections to the machine (except
105485+ from a trusted host) while the RBAC system is still disabled.
105486+
105487 config NETFILTER_XT_MATCH_HASHLIMIT
105488 tristate '"hashlimit" match support'
105489 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
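The option above gates the new net/netfilter/xt_gradm.c whose beginning appears at the end of this section (the scrape truncates it). A hedged sketch of the match logic such a module needs -- gr_acl_is_enabled() and the xt_gradm_mtinfo layout are assumptions, inferred from the Kconfig text and the RBAC naming used elsewhere in the patch:

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

struct xt_gradm_mtinfo {
	__u16 flags;
	__u16 invflags;
};

extern int gr_acl_is_enabled(void);	/* assumed: provided by the RBAC core */

static bool
gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_gradm_mtinfo *info = par->matchinfo;
	bool retval = false;

	if (gr_acl_is_enabled())
		retval = true;

	/* invflags lets a rule match on "RBAC disabled" instead */
	return retval ^ info->invflags;
}

On the userspace side this would pair with rules along the lines of "iptables -A INPUT ! -s <trusted host> -m gradm --disabled -j DROP" (option names assumed), matching the early-boot scenario the help text describes.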
105490diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
105491index 89f73a9..e4e5bd9 100644
105492--- a/net/netfilter/Makefile
105493+++ b/net/netfilter/Makefile
105494@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
105495 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
105496 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
105497 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
105498+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
105499 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
105500 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
105501 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
105502diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
105503index d259da3..6a32b2c 100644
105504--- a/net/netfilter/ipset/ip_set_core.c
105505+++ b/net/netfilter/ipset/ip_set_core.c
105506@@ -1952,7 +1952,7 @@ done:
105507 return ret;
105508 }
105509
105510-static struct nf_sockopt_ops so_set __read_mostly = {
105511+static struct nf_sockopt_ops so_set = {
105512 .pf = PF_INET,
105513 .get_optmin = SO_IP_SET,
105514 .get_optmax = SO_IP_SET + 1,
105515diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
105516index b0f7b62..0541842 100644
105517--- a/net/netfilter/ipvs/ip_vs_conn.c
105518+++ b/net/netfilter/ipvs/ip_vs_conn.c
105519@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
105520 /* Increase the refcnt counter of the dest */
105521 ip_vs_dest_hold(dest);
105522
105523- conn_flags = atomic_read(&dest->conn_flags);
105524+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
105525 if (cp->protocol != IPPROTO_UDP)
105526 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
105527 flags = cp->flags;
105528@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
105529
105530 cp->control = NULL;
105531 atomic_set(&cp->n_control, 0);
105532- atomic_set(&cp->in_pkts, 0);
105533+ atomic_set_unchecked(&cp->in_pkts, 0);
105534
105535 cp->packet_xmit = NULL;
105536 cp->app = NULL;
105537@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
105538
105539 /* Don't drop the entry if its number of incoming packets is not
105540 located in [0, 8] */
105541- i = atomic_read(&cp->in_pkts);
105542+ i = atomic_read_unchecked(&cp->in_pkts);
105543 if (i > 8 || i < 0) return 0;
105544
105545 if (!todrop_rate[i]) return 0;
105546diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
105547index b87ca32..76c7799 100644
105548--- a/net/netfilter/ipvs/ip_vs_core.c
105549+++ b/net/netfilter/ipvs/ip_vs_core.c
105550@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
105551 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
105552 /* do not touch skb anymore */
105553
105554- atomic_inc(&cp->in_pkts);
105555+ atomic_inc_unchecked(&cp->in_pkts);
105556 ip_vs_conn_put(cp);
105557 return ret;
105558 }
105559@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
105560 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
105561 pkts = sysctl_sync_threshold(ipvs);
105562 else
105563- pkts = atomic_add_return(1, &cp->in_pkts);
105564+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105565
105566 if (ipvs->sync_state & IP_VS_STATE_MASTER)
105567 ip_vs_sync_conn(net, cp, pkts);
105568diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
105569index ed99448..3ba6cad 100644
105570--- a/net/netfilter/ipvs/ip_vs_ctl.c
105571+++ b/net/netfilter/ipvs/ip_vs_ctl.c
105572@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
105573 */
105574 ip_vs_rs_hash(ipvs, dest);
105575 }
105576- atomic_set(&dest->conn_flags, conn_flags);
105577+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
105578
105579 /* bind the service */
105580 old_svc = rcu_dereference_protected(dest->svc, 1);
105581@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
105582 * align with netns init in ip_vs_control_net_init()
105583 */
105584
105585-static struct ctl_table vs_vars[] = {
105586+static ctl_table_no_const vs_vars[] __read_only = {
105587 {
105588 .procname = "amemthresh",
105589 .maxlen = sizeof(int),
105590@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105591 " %-7s %-6d %-10d %-10d\n",
105592 &dest->addr.in6,
105593 ntohs(dest->port),
105594- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105595+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105596 atomic_read(&dest->weight),
105597 atomic_read(&dest->activeconns),
105598 atomic_read(&dest->inactconns));
105599@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105600 "%-7s %-6d %-10d %-10d\n",
105601 ntohl(dest->addr.ip),
105602 ntohs(dest->port),
105603- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105604+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105605 atomic_read(&dest->weight),
105606 atomic_read(&dest->activeconns),
105607 atomic_read(&dest->inactconns));
105608@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
105609
105610 entry.addr = dest->addr.ip;
105611 entry.port = dest->port;
105612- entry.conn_flags = atomic_read(&dest->conn_flags);
105613+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
105614 entry.weight = atomic_read(&dest->weight);
105615 entry.u_threshold = dest->u_threshold;
105616 entry.l_threshold = dest->l_threshold;
105617@@ -3040,7 +3040,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
105618 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
105619 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
105620 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
105621- (atomic_read(&dest->conn_flags) &
105622+ (atomic_read_unchecked(&dest->conn_flags) &
105623 IP_VS_CONN_F_FWD_MASK)) ||
105624 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
105625 atomic_read(&dest->weight)) ||
105626@@ -3675,7 +3675,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
105627 {
105628 int idx;
105629 struct netns_ipvs *ipvs = net_ipvs(net);
105630- struct ctl_table *tbl;
105631+ ctl_table_no_const *tbl;
105632
105633 atomic_set(&ipvs->dropentry, 0);
105634 spin_lock_init(&ipvs->dropentry_lock);
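ctl_table_no_const, used above and in many hunks below, is a typedef added elsewhere in this patch (roughly "typedef struct ctl_table __no_const ctl_table_no_const;"). grsecurity's constify gcc plugin makes ctl_table instances const by default, so code that must edit a table at runtime first duplicates it and works on the non-const view, exactly as the per-netns sysctl setup does here. The pattern in isolation, with illustrative names:

#include <linux/slab.h>
#include <linux/sysctl.h>

static struct ctl_table template[] = {	/* the shared template stays read-only */
	{ .procname = "amemthresh", .maxlen = sizeof(int) },
	{ }
};

static ctl_table_no_const *dup_table(void *data)
{
	ctl_table_no_const *tbl = kmemdup(template, sizeof(template), GFP_KERNEL);

	if (tbl)
		tbl[0].data = data;	/* patch the writable copy only */
	return tbl;
}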
105635diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
105636index 127f140..553d652 100644
105637--- a/net/netfilter/ipvs/ip_vs_lblc.c
105638+++ b/net/netfilter/ipvs/ip_vs_lblc.c
105639@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
105640 * IPVS LBLC sysctl table
105641 */
105642 #ifdef CONFIG_SYSCTL
105643-static struct ctl_table vs_vars_table[] = {
105644+static ctl_table_no_const vs_vars_table[] __read_only = {
105645 {
105646 .procname = "lblc_expiration",
105647 .data = NULL,
105648diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
105649index 2229d2d..b32b785 100644
105650--- a/net/netfilter/ipvs/ip_vs_lblcr.c
105651+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
105652@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
105653 * IPVS LBLCR sysctl table
105654 */
105655
105656-static struct ctl_table vs_vars_table[] = {
105657+static ctl_table_no_const vs_vars_table[] __read_only = {
105658 {
105659 .procname = "lblcr_expiration",
105660 .data = NULL,
105661diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
105662index d93ceeb..4556144 100644
105663--- a/net/netfilter/ipvs/ip_vs_sync.c
105664+++ b/net/netfilter/ipvs/ip_vs_sync.c
105665@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
105666 cp = cp->control;
105667 if (cp) {
105668 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105669- pkts = atomic_add_return(1, &cp->in_pkts);
105670+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105671 else
105672 pkts = sysctl_sync_threshold(ipvs);
105673 ip_vs_sync_conn(net, cp->control, pkts);
105674@@ -771,7 +771,7 @@ control:
105675 if (!cp)
105676 return;
105677 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105678- pkts = atomic_add_return(1, &cp->in_pkts);
105679+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105680 else
105681 pkts = sysctl_sync_threshold(ipvs);
105682 goto sloop;
105683@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
105684
105685 if (opt)
105686 memcpy(&cp->in_seq, opt, sizeof(*opt));
105687- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105688+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105689 cp->state = state;
105690 cp->old_state = cp->state;
105691 /*
105692diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
105693index 3aedbda..6a63567 100644
105694--- a/net/netfilter/ipvs/ip_vs_xmit.c
105695+++ b/net/netfilter/ipvs/ip_vs_xmit.c
105696@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
105697 else
105698 rc = NF_ACCEPT;
105699 /* do not touch skb anymore */
105700- atomic_inc(&cp->in_pkts);
105701+ atomic_inc_unchecked(&cp->in_pkts);
105702 goto out;
105703 }
105704
105705@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105706 else
105707 rc = NF_ACCEPT;
105708 /* do not touch skb anymore */
105709- atomic_inc(&cp->in_pkts);
105710+ atomic_inc_unchecked(&cp->in_pkts);
105711 goto out;
105712 }
105713
105714diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105715index a4b5e2a..13b1de3 100644
105716--- a/net/netfilter/nf_conntrack_acct.c
105717+++ b/net/netfilter/nf_conntrack_acct.c
105718@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105719 #ifdef CONFIG_SYSCTL
105720 static int nf_conntrack_acct_init_sysctl(struct net *net)
105721 {
105722- struct ctl_table *table;
105723+ ctl_table_no_const *table;
105724
105725 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105726 GFP_KERNEL);
105727diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105728index 13fad86..18c984c 100644
105729--- a/net/netfilter/nf_conntrack_core.c
105730+++ b/net/netfilter/nf_conntrack_core.c
105731@@ -1733,6 +1733,10 @@ void nf_conntrack_init_end(void)
105732 #define DYING_NULLS_VAL ((1<<30)+1)
105733 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105734
105735+#ifdef CONFIG_GRKERNSEC_HIDESYM
105736+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105737+#endif
105738+
105739 int nf_conntrack_init_net(struct net *net)
105740 {
105741 int ret = -ENOMEM;
105742@@ -1758,7 +1762,11 @@ int nf_conntrack_init_net(struct net *net)
105743 if (!net->ct.stat)
105744 goto err_pcpu_lists;
105745
105746+#ifdef CONFIG_GRKERNSEC_HIDESYM
105747+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105748+#else
105749 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105750+#endif
105751 if (!net->ct.slabname)
105752 goto err_slabname;
105753
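The slab-cache name is user-visible (via /proc/slabinfo and sysfs), and "nf_conntrack_%p" embeds the kernel address of the struct net in it. Under GRKERNSEC_HIDESYM, which hides kernel pointers from userland, the patch switches to a monotonically increasing id that carries no address information. A trivial userspace illustration of the difference:

#include <stdio.h>

int main(void)
{
	int obj;

	printf("nf_conntrack_%p\n", (void *)&obj);	/* leaks an address */
	printf("nf_conntrack_%08x\n", 1);		/* reveals nothing  */
	return 0;
}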
105754diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105755index 4e78c57..ec8fb74 100644
105756--- a/net/netfilter/nf_conntrack_ecache.c
105757+++ b/net/netfilter/nf_conntrack_ecache.c
105758@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105759 #ifdef CONFIG_SYSCTL
105760 static int nf_conntrack_event_init_sysctl(struct net *net)
105761 {
105762- struct ctl_table *table;
105763+ ctl_table_no_const *table;
105764
105765 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105766 GFP_KERNEL);
105767diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105768index bd9d315..989947e 100644
105769--- a/net/netfilter/nf_conntrack_helper.c
105770+++ b/net/netfilter/nf_conntrack_helper.c
105771@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105772
105773 static int nf_conntrack_helper_init_sysctl(struct net *net)
105774 {
105775- struct ctl_table *table;
105776+ ctl_table_no_const *table;
105777
105778 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105779 GFP_KERNEL);
105780diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105781index b65d586..beec902 100644
105782--- a/net/netfilter/nf_conntrack_proto.c
105783+++ b/net/netfilter/nf_conntrack_proto.c
105784@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105785
105786 static void
105787 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105788- struct ctl_table **table,
105789+ ctl_table_no_const **table,
105790 unsigned int users)
105791 {
105792 if (users > 0)
105793diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105794index fc823fa..8311af3 100644
105795--- a/net/netfilter/nf_conntrack_standalone.c
105796+++ b/net/netfilter/nf_conntrack_standalone.c
105797@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105798
105799 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105800 {
105801- struct ctl_table *table;
105802+ ctl_table_no_const *table;
105803
105804 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105805 GFP_KERNEL);
105806diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105807index 7a394df..bd91a8a 100644
105808--- a/net/netfilter/nf_conntrack_timestamp.c
105809+++ b/net/netfilter/nf_conntrack_timestamp.c
105810@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105811 #ifdef CONFIG_SYSCTL
105812 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105813 {
105814- struct ctl_table *table;
105815+ ctl_table_no_const *table;
105816
105817 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105818 GFP_KERNEL);
105819diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105820index 675d12c..b36e825 100644
105821--- a/net/netfilter/nf_log.c
105822+++ b/net/netfilter/nf_log.c
105823@@ -386,7 +386,7 @@ static const struct file_operations nflog_file_ops = {
105824
105825 #ifdef CONFIG_SYSCTL
105826 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105827-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105828+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105829
105830 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105831 void __user *buffer, size_t *lenp, loff_t *ppos)
105832@@ -417,13 +417,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105833 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105834 mutex_unlock(&nf_log_mutex);
105835 } else {
105836+ ctl_table_no_const nf_log_table = *table;
105837+
105838 mutex_lock(&nf_log_mutex);
105839 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
105840 if (!logger)
105841- table->data = "NONE";
105842+ nf_log_table.data = "NONE";
105843 else
105844- table->data = logger->name;
105845- r = proc_dostring(table, write, buffer, lenp, ppos);
105846+ nf_log_table.data = logger->name;
105847+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105848 mutex_unlock(&nf_log_mutex);
105849 }
105850
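With nf_log_sysctl_table now __read_only, the read path can no longer write the current logger's name into table->data, so the hunk points proc_dostring() at a stack copy instead. The pattern in isolation (helper name illustrative):

static int read_via_copy(struct ctl_table *ro_table, int write,
			 void __user *buf, size_t *lenp, loff_t *ppos,
			 const char *name)
{
	ctl_table_no_const tmp = *ro_table;	/* writable stack copy */

	tmp.data = (char *)name;		/* retarget only the copy */
	return proc_dostring(&tmp, write, buf, lenp, ppos);
}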
105851diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105852index c68c1e5..8b5d670 100644
105853--- a/net/netfilter/nf_sockopt.c
105854+++ b/net/netfilter/nf_sockopt.c
105855@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105856 }
105857 }
105858
105859- list_add(&reg->list, &nf_sockopts);
105860+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105861 out:
105862 mutex_unlock(&nf_sockopt_mutex);
105863 return ret;
105864@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105865 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105866 {
105867 mutex_lock(&nf_sockopt_mutex);
105868- list_del(&reg->list);
105869+ pax_list_del((struct list_head *)&reg->list);
105870 mutex_unlock(&nf_sockopt_mutex);
105871 }
105872 EXPORT_SYMBOL(nf_unregister_sockopt);
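list_add()/list_del() would write to reg->list, but under constification a nf_sockopt_ops instance may live in read-only memory -- hence the explicit cast discarding const. The pax_list_* helpers are introduced elsewhere in this patch; by their use here they behave like the plain list ops wrapped in a pax_open_kernel()/pax_close_kernel() write window. Assumed prototypes:

#include <linux/list.h>

/* assumption: added by this patch; list_add()/list_del() semantics, but
 * legal on nodes embedded in write-protected objects */
void pax_list_add(struct list_head *new, struct list_head *head);
void pax_list_del(struct list_head *entry);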
105873diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105874index 11d85b3..7fcc420 100644
105875--- a/net/netfilter/nfnetlink_log.c
105876+++ b/net/netfilter/nfnetlink_log.c
105877@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105878 struct nfnl_log_net {
105879 spinlock_t instances_lock;
105880 struct hlist_head instance_table[INSTANCE_BUCKETS];
105881- atomic_t global_seq;
105882+ atomic_unchecked_t global_seq;
105883 };
105884
105885 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105886@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105887 /* global sequence number */
105888 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105889 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105890- htonl(atomic_inc_return(&log->global_seq))))
105891+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105892 goto nla_put_failure;
105893
105894 if (data_len) {
105895diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
105896index 65f3e2b..2e9d6a0 100644
105897--- a/net/netfilter/nft_compat.c
105898+++ b/net/netfilter/nft_compat.c
105899@@ -317,14 +317,7 @@ static void nft_match_eval(const struct nft_expr *expr,
105900 return;
105901 }
105902
105903- switch(ret) {
105904- case true:
105905- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
105906- break;
105907- case false:
105908- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
105909- break;
105910- }
105911+ data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
105912 }
105913
105914 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
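The deleted switch dispatched on a boolean against the stdbool constants true and false, an unusual construction that only covers the values 0 and 1; the ternary expresses the same verdict selection directly and stays correct for any nonzero value. The equivalence as a runnable check, with stand-in verdict codes:

#include <assert.h>
#include <stdbool.h>

enum { DEMO_BREAK, DEMO_CONTINUE };	/* stand-ins for NFT_BREAK/NFT_CONTINUE */

static int verdict(bool matched)
{
	return matched ? DEMO_CONTINUE : DEMO_BREAK;
}

int main(void)
{
	assert(verdict(true)  == DEMO_CONTINUE);
	assert(verdict(false) == DEMO_BREAK);
	return 0;
}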
105915diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105916new file mode 100644
105917index 0000000..c566332
105918--- /dev/null
105919+++ b/net/netfilter/xt_gradm.c
105920@@ -0,0 +1,51 @@
105921+/*
105922+ * gradm match for netfilter
105923